source stringlengths 3 86 | python stringlengths 75 1.04M |
|---|---|
DispatchDialogue.py | ##########################################################################
#
# Copyright (c) 2018, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import functools
import sys
import threading
import traceback
import IECore
import Gaffer
import GafferDispatch
import GafferUI
## A dialogue which can be used to dispatch tasks via a choice of
# GafferDispatch.Dispatcher. It shows a settings tab per node, a dispatcher
# tab, and a context-variables tab, then a progress/message view while
# dispatching runs on a background thread.
class DispatchDialogue( GafferUI.Dialogue ) :

	## Defines what happens when the tasks have been successfully dispatched :
	#
	# Close : The dialogue is closed immediately.
	#
	# Confirm : The dialogue remains open confirming success, with a button for returning to the editing state.
	PostDispatchBehaviour = IECore.Enum.create( "Close", "Confirm" )

	# Lazily created, class-wide IECore.MenuDefinition shared by all
	# DispatchDialogues. See `menuDefinition()`.
	__dispatchDialogueMenuDefinition = None
def __init__( self, tasks, dispatchers, nodesToShow, postDispatchBehaviour=PostDispatchBehaviour.Confirm, title="Dispatch Tasks", sizeMode=GafferUI.Window.SizeMode.Manual, **kw ) :

	## tasks : TaskNodes to dispatch. All must belong to the same ScriptNode
	#  (only `tasks[0].scriptNode()` is consulted).
	## dispatchers : candidate Dispatcher nodes; the first is selected initially.
	## nodesToShow : nodes that each get a settings tab in the dialogue.
	## postDispatchBehaviour : see PostDispatchBehaviour above.
	GafferUI.Dialogue.__init__( self, title, sizeMode=sizeMode, **kw )

	self._getWidget().setBorderStyle( GafferUI.Frame.BorderStyle.None )

	self.__dispatchers = dispatchers
	self.__tasks = tasks
	self.__nodesToShow = nodesToShow
	self.__script = tasks[0].scriptNode()
	# hold a reference to the script window so plugs which launch child windows work properly.
	# this is necessary for PlugValueWidgets like color swatches and ramps. Ideally those widgets
	# wouldn't rely on the existence of a ScriptWindow and we could drop this acquisition.
	self.__scriptWindow = GafferUI.ScriptWindow.acquire( self.__script )
	self.__postDispatchBehaviour = postDispatchBehaviour

	# build tabs for all the node, dispatcher, and context settings
	with GafferUI.ListContainer() as self.__settings :

		# invisible menu bar : exists only so menuDefinition() keyboard
		# shortcuts are registered with the window.
		mainMenu = GafferUI.MenuBar( self.menuDefinition() )
		mainMenu.setVisible( False )

		with GafferUI.TabbedContainer() as self.__tabs :

			# one tab per node the caller wants editable before dispatch
			for node in self.__nodesToShow :
				nodeFrame = GafferUI.Frame( borderStyle=GafferUI.Frame.BorderStyle.None, borderWidth=0 )
				nodeFrame.addChild( self.__nodeEditor( node ) )
				# remove the per-node execute button
				Gaffer.Metadata.registerValue( node, "layout:customWidget:dispatchButton:widgetType", "", persistent = False )
				self.__tabs.setLabel( nodeFrame, node.relativeName( self.__script ) )

			with GafferUI.ListContainer() as dispatcherTab :
				with GafferUI.ListContainer( GafferUI.ListContainer.Orientation.Horizontal, spacing=2, borderWidth=4 ) as dispatcherMenuColumn :
					GafferUI.Label( "<h4>Dispatcher</h4>" )
					self.__dispatchersMenu = GafferUI.MultiSelectionMenu( allowMultipleSelection = False, allowEmptySelection = False )
					self.__dispatchersMenu.append( [ x.getName() for x in self.__dispatchers ] )
					self.__dispatchersMenu.setSelection( [ self.__dispatchers[0].getName() ] )
					self.__dispatchersMenuChanged = self.__dispatchersMenu.selectionChangedSignal().connect( Gaffer.WeakMethod( self.__dispatcherChanged ) )
				# the dispatcher chooser is pointless with a single dispatcher
				dispatcherMenuColumn.setVisible( len(self.__dispatchers) > 1 )
				self.__dispatcherFrame = GafferUI.Frame( borderStyle=GafferUI.Frame.BorderStyle.None, borderWidth=0 )
			self.__tabs.setLabel( dispatcherTab, "Dispatcher" )

			with GafferUI.Frame( borderStyle=GafferUI.Frame.BorderStyle.None, borderWidth=4 ) as contextTab :
				GafferUI.PlugValueWidget.create( self.__script["variables"] )
			self.__tabs.setLabel( contextTab, "Context Variables" )

	# build a ui element for progress feedback and messages
	with GafferUI.ListContainer( spacing = 4 ) as self.__progressUI :

		with GafferUI.ListContainer( parenting = { "horizontalAlignment" : GafferUI.HorizontalAlignment.Center, "verticalAlignment" : GafferUI.VerticalAlignment.Center } ) :
			self.__progressIconFrame = GafferUI.Frame( borderStyle = GafferUI.Frame.BorderStyle.None, parenting = { "horizontalAlignment" : GafferUI.HorizontalAlignment.Center } )
			self.__progressLabel = GafferUI.Label( parenting = { "horizontalAlignment" : GafferUI.HorizontalAlignment.Center } )

		with GafferUI.Collapsible( "Details", collapsed = True, parenting = { "expand" : True } ) as self.__messageCollapsible :
			self.__messageWidget = GafferUI.MessageWidget()
			# connect to the collapsible state change so we can increase the window
			# size when the details pane is first shown.
			self.__messageCollapsibleConneciton = self.__messageCollapsible.stateChangedSignal().connect( Gaffer.WeakMethod( self.__messageCollapsibleChanged ) )

	self.__backButton = self._addButton( "Back" )
	self.__backButtonConnection = self.__backButton.clickedSignal().connect( 0, Gaffer.WeakMethod( self.__initiateSettings ) )
	self.__primaryButton = self._addButton( "Dispatch" )

	self.__setDispatcher( dispatchers[0] )
	# start in the editing state
	self.__initiateSettings( self.__primaryButton )
@staticmethod
def createWithDefaultDispatchers( tasks, nodesToShow, defaultDispatcherType=None, postDispatchBehaviour=PostDispatchBehaviour.Confirm, title="Dispatch Tasks", sizeMode=GafferUI.Window.SizeMode.Manual, **kw ) :

	## Convenience constructor : builds one instance of every registered
	#  Dispatcher type (with user defaults applied), placing the preferred
	#  type first so it becomes the initial selection.
	preferredType = defaultDispatcherType if defaultDispatcherType else GafferDispatch.Dispatcher.getDefaultDispatcherType()

	orderedTypes = list( GafferDispatch.Dispatcher.registeredDispatchers() )
	if preferredType and preferredType in orderedTypes :
		# move the preferred type to the front of the list
		orderedTypes.remove( preferredType )
		orderedTypes.insert( 0, preferredType )

	dispatchers = []
	for typeName in orderedTypes :
		candidate = GafferDispatch.Dispatcher.create( typeName )
		Gaffer.NodeAlgo.applyUserDefaults( candidate )
		dispatchers.append( candidate )

	return DispatchDialogue( tasks, dispatchers, nodesToShow, postDispatchBehaviour=postDispatchBehaviour, title = title, sizeMode = sizeMode, **kw )
def scriptNode( self ) :

	## Returns the ScriptNode the dispatched tasks belong to.
	return self.__script
def setVisible( self, visible ) :

	if visible :
		# See comment in `GafferUI.NodeSetEditor.acquire()`
		self._qtWidget().resize( 400, 400 )

	# deliberately skip Dialogue.setVisible() and call the Window base class
	GafferUI.Window.setVisible( self, visible )
## Returns an IECore.MenuDefinition which is used to define the keyboard shortcuts for all DispatchDialogues.
# This can be edited at any time to modify subsequently created DispatchDialogues.
# Typically editing would be done as part of gaffer startup. Note that this menu is never shown to users,
# but we need it in order to register keyboard shortcuts.
@classmethod
def menuDefinition( cls ) :

	# create the shared definition lazily, on first request
	if cls.__dispatchDialogueMenuDefinition is not None :
		return cls.__dispatchDialogueMenuDefinition

	cls.__dispatchDialogueMenuDefinition = IECore.MenuDefinition()
	return cls.__dispatchDialogueMenuDefinition
def __nodeEditor( self, node ) :

	## Returns a NodeEditor pinned to `node`, with its name field hidden
	#  (the tab label already shows the node name).
	editor = GafferUI.NodeEditor( self.__script )
	editor.setNodeSet( Gaffer.StandardSet( [ node ] ) )
	## \todo: Expose public API for the NodeEditor's NameWidget visibility
	editor._NodeEditor__nameWidget.setVisible( False )
	editor._NodeEditor__nameWidget.parent()[0].setVisible( False )

	return editor
def __setDispatcher( self, dispatcher ) :

	## Makes `dispatcher` current and shows its settings in the dispatcher tab.
	self.__currentDispatcher = dispatcher
	self.__dispatcherFrame.setChild( self.__nodeEditor( self.__currentDispatcher ) )
def __dispatcherChanged( self, menu ) :

	## Selection-change callback for the dispatcher chooser : activates the
	#  dispatcher whose name matches the (single) selected entry.
	selectedName = menu.getSelection()[0]
	chosen = next( ( d for d in self.__dispatchers if d.getName() == selectedName ), None )
	if chosen is not None :
		self.__setDispatcher( chosen )
def __initiateSettings( self, button ) :

	## Puts the dialogue into its editing state : settings tabs visible,
	#  primary button reads "Dispatch", back button hidden.
	self.__backButton.setEnabled( False )
	self.__backButton.setVisible( False )

	self.__primaryButton.setText( "Dispatch" )
	self.__primaryButton.setEnabled( True )
	self.__primaryButton.setVisible( True )
	# rebind the primary button, replacing any previous connection
	self.__primaryButtonConnection = self.__primaryButton.clickedSignal().connect( 0, Gaffer.WeakMethod( self.__initiateDispatch ) )

	self.__tabs.setCurrent( self.__tabs[0] )
	self._getWidget().setChild( self.__settings )
def __initiateDispatch( self, button ) :

	## Switches to the progress view and kicks off dispatching on a
	#  background thread, so the UI stays responsive.
	self.__progressIconFrame.setChild( GafferUI.BusyWidget() )
	self.__progressLabel.setText( "<h3>Dispatching...</h3>" )
	self.__backButton.setVisible( False )
	self.__backButton.setEnabled( False )
	self.__primaryButton.setVisible( False )
	self.__primaryButton.setEnabled( False )
	self.__messageWidget.clear()
	self.__messageCollapsible.setCollapsed( True )
	self._getWidget().setChild( self.__progressUI )

	# __dispatch() reports back via GafferUI.EventLoop.executeOnUIThread()
	threading.Thread( target = self.__dispatch ).start()
def __dispatch( self ) :

	## Background-thread worker : dispatches the tasks in the script's
	#  context, routing messages to the message widget. On success the
	#  result is 0, on failure it is the `sys.exc_info()` tuple; either way
	#  `__finish()` is scheduled back on the UI thread.
	try :
		with self.__messageWidget.messageHandler() :
			with self.__script.context() :
				self.__currentDispatcher.dispatch( self.__tasks )
		result = 0
	# previously `except Exception as e :` bound a variable that was never
	# used - the full exception state is captured via sys.exc_info() instead.
	except Exception :
		result = sys.exc_info()

	GafferUI.EventLoop.executeOnUIThread( functools.partial( self.__finish, result ) )
def __finish( self, result ) :

	## UI-thread continuation of `__dispatch()`. `result` is 0 on success
	#  or a `sys.exc_info()` tuple on failure.
	if result != 0 :
		self.__initiateErrorDisplay( result )
	else :
		self.__initiateResultDisplay()
def __initiateErrorDisplay( self, exceptionInfo ) :

	## Switches the progress view into its failure state.
	#  exceptionInfo : the `sys.exc_info()` tuple captured by `__dispatch()`.
	self.__progressIconFrame.setChild( GafferUI.Image( "failure.png" ) )
	self.__progressLabel.setText( "<h3>Failed</h3>" )

	self.__messageCollapsible.setCollapsed( False )

	# full traceback goes to the details pane at Debug level
	self.__messageWidget.messageHandler().handle(
		IECore.Msg.Level.Debug,
		"Python Traceback",
		"".join( traceback.format_exception( *exceptionInfo ) )
	)

	excType, excValue, excTrace = exceptionInfo
	# `str()` is defined for all exception types, unlike the deprecated
	# `.message` attribute the original code relied on.
	detail = str( excValue ).strip( "\n" ) if excValue is not None else ""
	if detail :
		# the last line of a multi-line message is usually the most relevant
		userFriendlyException = detail.split( "\n" )[-1]
	else :
		userFriendlyException = str( excType.__name__ )
	userFriendlyException += "\nSee DEBUG messages for more information."
	self.__messageWidget.messageHandler().handle(
		IECore.Msg.Level.Error,
		"Problem Dispatching {nodes}".format( nodes = str( [ task.relativeName( self.__script ) for task in self.__tasks ] ) ),
		userFriendlyException,
	)

	self.__backButton.setEnabled( True )
	self.__backButton.setVisible( True )
	self.__backButton._qtWidget().setFocus()

	self.__primaryButton.setText( "Quit" )
	self.__primaryButton.setEnabled( True )
	self.__primaryButton.setVisible( True )
	self.__primaryButtonConnection = self.__primaryButton.clickedSignal().connect( Gaffer.WeakMethod( self.__close ) )
def __initiateResultDisplay( self ) :

	# Although we computed a result successfully, there may still be minor problems
	# indicated by messages emitted - check for those.
	problems = []
	for level in ( IECore.Msg.Level.Error, IECore.Msg.Level.Warning ) :
		count = self.__messageWidget.messageCount( level )
		if count :
			problems.append( "%d %s%s" % ( count, IECore.Msg.levelAsString( level ).capitalize(), "s" if count > 1 else "" ) )

	# a totally clean run may close the dialogue immediately, depending on
	# the behaviour requested at construction
	if not problems and self.__postDispatchBehaviour == self.PostDispatchBehaviour.Close :
		self.close()
		return

	self.__progressIconFrame.setChild(
		GafferUI.Image( "successWarning.png" if problems else "success.png" )
	)

	completionMessage = "Completed"
	if problems :
		completionMessage += " with " + " and ".join( problems )
		# auto-expand the details pane so the problems are visible
		self.__messageCollapsible.setCollapsed( False )

	self.__progressLabel.setText( "<h3>" + completionMessage + "</h3>" )

	# hide the details section entirely when nothing at all was logged
	# (setVisible() receives an int count here, treated as truthy/falsy)
	self.__messageCollapsible.setVisible( self.__messageWidget.messageCount() )

	self.__backButton.setEnabled( True )
	self.__backButton.setVisible( True )

	self.__primaryButton.setText( "Ok" )
	self.__primaryButton.setEnabled( True )
	self.__primaryButton.setVisible( True )
	self.__primaryButtonConnection = self.__primaryButton.clickedSignal().connect( Gaffer.WeakMethod( self.__close ) )
	self.__primaryButton._qtWidget().setFocus()
def __close( self, *unused ) :

	## Signal-friendly wrapper around `close()`; ignores signal arguments.
	self.close()
def __messageCollapsibleChanged( self, collapsible ) :

	if not collapsible.getCollapsed() :
		# make the window bigger to better fit the messages, but don't make
		# it any smaller than it currently is.
		self.resizeToFitChild( shrink = False )
		# remove our connection - we only want to resize the first time we
		# show the messages. after this we assume that if the window is smaller
		# it is because the user has made it so, and wishes it to remain so.
		# NOTE(review): attribute name carries a pre-existing typo
		# ("Conneciton") - it is also assigned in __init__, so renaming it
		# must be done in both places at once.
		self.__messageCollapsibleConneciton = None
|
client.py | #!/usr/bin/env python3
import argparse
import collections
import itertools
import json
import logging
import socket
import sys
import threading
import time
import pyglet
import yaml
import messages_pb2 as m
import netchannel as nc
import world as w
# command-line interface: -d/--debug raises log verbosity, --no-gui runs the
# client headless (network/ack loop only, no pyglet window).
parser = argparse.ArgumentParser()
parser.add_argument('-d', '--debug', action='store_const', dest='loglevel', const=logging.DEBUG)
parser.add_argument('--no-gui', action='store_true')
args = parser.parse_args()

use_gui = not args.no_gui

logger = logging.getLogger(__name__)
logging.basicConfig(stream=sys.stdout, level=args.loglevel or logging.INFO)
def process(message):
    """Decode one ServerMessage received from the netchannel and apply it.

    NOOP messages are ignored; SNAPSHOT payloads are forwarded to the world
    state; anything else is an unsupported opcode.
    """
    sv_message = m.ServerMessage()
    sv_message.ParseFromString(message.data)
    logger.info(f'process seq={message.seq} op={m.ServerMessage.Type.Name(sv_message.op)}')
    op = sv_message.op
    if op == m.ServerMessage.NOOP:
        return
    if op == m.ServerMessage.SNAPSHOT:
        world.update(sv_message.data)
        return
    raise NotImplementedError
def ack():
    """Send unreliable 'ack' keep-alive messages forever, at cl_cmdrate Hz.

    Runs on its own thread (see the Thread started below).
    """
    while True:
        # re-read the rate each tick, as the original did
        time.sleep(1. / config['cl_cmdrate'])
        keepalive = m.ClientMessage(data=b'ack')
        netchan.transmit(m.Message(reliable=False, data=keepalive.SerializeToString()))
# load the configuration and the world to simulate
with open('../data/config.yml') as f:
    # config is plain data, so the safe loader suffices
    config = yaml.safe_load(f)
logger.info(json.dumps(config, indent=4))

with open('../data/world.yml') as f:
    # world.yml constructs a world object (world.update/tick/draw are called
    # later), so it needs the full Loader - only ever load trusted files this
    # way. Note: `yaml.load(f)` with no Loader argument is unsafe and raises
    # TypeError on PyYAML >= 6.
    world = yaml.load(f, Loader=yaml.Loader)

# set up a netchannel between the fixed client/server UDP endpoints
sv_addr = ('0.0.0.0', 31337)
cl_addr = ('0.0.0.0', 31338)
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.bind(cl_addr)
recv_rate = send_rate = 20
netchan = nc.NetChannel(sock, sv_addr, process, recv_rate, send_rate)

# daemon iff the GUI runs: then pyglet's main loop keeps the process alive,
# and the ack thread must not block shutdown.
threading.Thread(target=ack, daemon=use_gui).start()
if use_gui:
    window = pyglet.window.Window()

    @window.event
    def on_key_press(symbol, modifiers):
        # keyboard -> simulation commands, sent reliably to the server
        cl_message = None
        if symbol == pyglet.window.key.D:
            cl_message = m.ClientMessage(
                commands=[m.ClientMessage.Command(id=m.ClientMessage.Command.SIM_DELETE_RANDOM_ENTITY)])
        elif symbol == pyglet.window.key.P:
            # toggle the local world immediately; the server is told too
            world.toggle_pause()
            cl_message = m.ClientMessage(
                commands=[m.ClientMessage.Command(id=m.ClientMessage.Command.SIM_TOGGLE_PAUSE)])
        elif symbol == pyglet.window.key.S:
            cl_message = m.ClientMessage(
                commands=[m.ClientMessage.Command(id=m.ClientMessage.Command.SIM_SPAWN_RANDOM_ENTITY)])
        if cl_message:
            netchan.transmit(m.Message(reliable=True, data=cl_message.SerializeToString()))

    @window.event
    def on_key_release(symbol, modifiers):
        pass

    @window.event
    def on_resize(width, height):
        # NOTE(review): viewport is set to twice the window size - presumably
        # to cover HiDPI/Retina framebuffers; confirm on standard displays.
        pyglet.gl.glViewport(0, 0, 2 * width, 2 * height)
        pyglet.gl.glMatrixMode(pyglet.gl.GL_PROJECTION)
        pyglet.gl.glLoadIdentity()
        pyglet.gl.gluPerspective(45, width / height, 0.1, 1000)
        pyglet.gl.gluLookAt(2, 3, -6, 0, 0, 0, 0, 1, 0)
        # suppress pyglet's default on_resize handler
        return pyglet.event.EVENT_HANDLED

    @window.event
    def on_draw():
        pyglet.gl.glClear(pyglet.gl.GL_COLOR_BUFFER_BIT | pyglet.gl.GL_DEPTH_BUFFER_BIT)
        pyglet.gl.glMatrixMode(pyglet.gl.GL_MODELVIEW)
        pyglet.gl.glLoadIdentity()
        world.draw()

    def update(dt):
        # advance the local simulation between server snapshots
        world.tick(dt)

    pyglet.clock.schedule_interval(update, 1. / config['cl_refreshrate'])

    # blocks until the window closes
    pyglet.app.run()
|
concurrent_schema_changes_test.py | import glob
import os
import pprint
import re
import time
import pytest
import logging
from random import randrange
from threading import Thread
from cassandra.concurrent import execute_concurrent
from ccmlib.node import Node
from dtest import Tester, create_ks
since = pytest.mark.since
logger = logging.getLogger(__name__)
def wait(delay=2):
    """Pause test execution for *delay* seconds - the single tuning point
    for all of this module's pacing sleeps."""
    time.sleep(delay)
@pytest.mark.skip(reason='awaiting CASSANDRA-10699')
class TestConcurrentSchemaChanges(Tester):
    """Exercises concurrent schema changes (tables, alters, indexes, MVs)
    across nodes and verifies schema convergence."""

    # schema churn legitimately produces error-level log lines while nodes
    # converge, so don't fail the test on them
    allow_log_errors = True
def prepare_for_changes(self, session, namespace='ns1'):
    """
    prepares for schema changes by creating a keyspace and column family.

    Creates keyspace ks_<namespace> containing cf_<namespace> (one row,
    indexed on col2) plus cf2_<namespace> and keyspace ks2_<namespace>,
    which exist solely for make_schema_changes() to drop later.
    """
    logger.debug("prepare_for_changes() " + str(namespace))
    # create a keyspace that will be used
    create_ks(session, "ks_%s" % namespace, 2)
    session.execute('USE ks_%s' % namespace)

    # create a column family with an index and a row of data
    query = """
        CREATE TABLE cf_%s (
            col1 text PRIMARY KEY,
            col2 text,
            col3 text
        );
    """ % namespace
    session.execute(query)
    wait(1)
    session.execute("INSERT INTO cf_%s (col1, col2, col3) VALUES ('a', 'b', 'c');"
                    % namespace)

    # create an index
    session.execute("CREATE INDEX index_%s ON cf_%s(col2)" % (namespace, namespace))

    # create a column family that can be deleted later.
    query = """
        CREATE TABLE cf2_%s (
            col1 uuid PRIMARY KEY,
            col2 text,
            col3 text
        );
    """ % namespace
    session.execute(query)

    # make a keyspace that can be deleted
    create_ks(session, "ks2_%s" % namespace, 2)
def make_schema_changes(self, session, namespace='ns1'):
    """
    makes a heap of changes.

    Assumes prepare_for_changes() ran first with the same namespace.
    create keyspace
    drop keyspace
    create column family
    drop column family
    update column family
    drop index
    create index (modify column family and add a key)
    rebuild index (via jmx)
    set default_validation_class
    """
    logger.debug("make_schema_changes() " + str(namespace))
    session.execute('USE ks_%s' % namespace)
    # drop keyspace
    session.execute('DROP KEYSPACE ks2_%s' % namespace)
    wait(2)

    # create keyspace
    create_ks(session, "ks3_%s" % namespace, 2)
    # create_ks switches the session keyspace, so switch back
    session.execute('USE ks_%s' % namespace)

    wait(2)
    # drop column family
    session.execute("DROP COLUMNFAMILY cf2_%s" % namespace)

    # create column family
    query = """
        CREATE TABLE cf3_%s (
            col1 uuid PRIMARY KEY,
            col2 text,
            col3 text,
            col4 text
        );
    """ % (namespace)
    session.execute(query)

    # alter column family
    query = """
        ALTER COLUMNFAMILY cf_{}
        ADD col4 text;
    """.format(namespace)
    session.execute(query)

    # add index
    session.execute("CREATE INDEX index2_{} ON cf_{}(col3)".format(namespace, namespace))

    # remove an index
    session.execute("DROP INDEX index_{}".format(namespace))
def validate_schema_consistent(self, node):
    """Asserts that `nodetool describecluster` on *node* reports exactly
    one schema version across the cluster."""
    logger.debug("validate_schema_consistent() " + node.name)

    response = node.nodetool('describecluster').stdout
    schemas = response.split('Schema versions:')[1].strip()
    # each schema version is reported as a "[uuid: [ip, ...]]" entry
    num_schemas = len(re.findall(r'\[.*?\]', schemas))
    # bug fix: this used to read `assert num_schemas, 1 == "..."`, which only
    # asserted that num_schemas was truthy and never compared it against 1
    assert num_schemas == 1, "There were multiple schema versions: {}".format(pprint.pformat(schemas))
def test_create_lots_of_tables_concurrently(self):
    """
    create tables across multiple threads concurrently
    """
    cluster = self.cluster
    cluster.populate(3).start()
    node1, node2, node3 = cluster.nodelist()
    session = self.cql_connection(node1)
    session.execute("create keyspace lots_o_tables WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1};")
    session.execute("use lots_o_tables")
    wait(5)

    # fire 250 CREATE TABLEs through the driver's concurrent executor
    cmds = [("create table t_{0} (id uuid primary key, c1 text, c2 text, c3 text, c4 text)".format(n), ()) for n in range(250)]
    results = execute_concurrent(session, cmds, raise_on_first_error=True, concurrency=200)

    for (success, result) in results:
        assert success, "didn't get success on table create: {}".format(result)

    # give the schema time to propagate before inspecting metadata
    wait(10)
    session.cluster.refresh_schema_metadata()

    table_meta = session.cluster.metadata.keyspaces["lots_o_tables"].tables
    assert 250 == len(table_meta)

    # all three nodes must agree on a single schema version
    self.validate_schema_consistent(node1)
    self.validate_schema_consistent(node2)
    self.validate_schema_consistent(node3)
def test_create_lots_of_alters_concurrently(self):
    """
    create alters across multiple threads concurrently
    """
    cluster = self.cluster
    cluster.populate(3).start()
    node1, node2, node3 = cluster.nodelist()
    session = self.cql_connection(node1)
    session.execute("create keyspace lots_o_alters WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1};")
    session.execute("use lots_o_alters")

    # 10 base tables; 500 concurrent ALTERs add columns to random ones
    for n in range(10):
        session.execute("create table base_{0} (id uuid primary key)".format(n))
    wait(5)

    cmds = [("alter table base_{0} add c_{1} int".format(randrange(0, 10), n), ()) for n in range(500)]

    logger.debug("executing 500 alters")
    results = execute_concurrent(session, cmds, raise_on_first_error=True, concurrency=150)

    for (success, result) in results:
        assert success, "didn't get success on table create: {}".format(result)

    logger.debug("waiting for alters to propagate")
    wait(30)
    session.cluster.refresh_schema_metadata()

    table_meta = session.cluster.metadata.keyspaces["lots_o_alters"].tables
    column_ct = sum([len(table.columns) for table in list(table_meta.values())])

    # primary key + alters
    # (10 tables x 1 pk column) + 500 added columns = 510
    assert 510 == column_ct

    self.validate_schema_consistent(node1)
    self.validate_schema_consistent(node2)
    self.validate_schema_consistent(node3)
def test_create_lots_of_indexes_concurrently(self):
    """
    create indexes across multiple threads concurrently
    """
    cluster = self.cluster
    cluster.populate(2).start()
    node1, node2 = cluster.nodelist()
    session = self.cql_connection(node1)
    session.execute("create keyspace lots_o_indexes WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1};")
    session.execute("use lots_o_indexes")

    # 5 base tables with 1000 rows each, so index builds have real work
    for n in range(5):
        session.execute("create table base_{0} (id uuid primary key, c1 int, c2 int)".format(n))
        for ins in range(1000):
            session.execute("insert into base_{0} (id, c1, c2) values (uuid(), {1}, {2})".format(n, ins, ins))

    wait(5)
    logger.debug("creating indexes")
    # two secondary indexes per table, created concurrently
    cmds = []
    for n in range(5):
        cmds.append(("create index ix_base_{0}_c1 on base_{0} (c1)".format(n), ()))
        cmds.append(("create index ix_base_{0}_c2 on base_{0} (c2)".format(n), ()))

    results = execute_concurrent(session, cmds, raise_on_first_error=True)

    for (success, result) in results:
        assert success, "didn't get success on table create: {}".format(result)

    wait(5)
    logger.debug("validating schema and index list")
    session.cluster.control_connection.wait_for_schema_agreement()
    session.cluster.refresh_schema_metadata()
    index_meta = session.cluster.metadata.keyspaces["lots_o_indexes"].indexes
    self.validate_schema_consistent(node1)
    self.validate_schema_consistent(node2)
    assert 10 == len(index_meta)
    for n in range(5):
        assert "ix_base_{0}_c1".format(n) in index_meta
        assert "ix_base_{0}_c2".format(n) in index_meta

    logger.debug("waiting for indexes to fill in")
    wait(45)
    logger.debug("querying all values by secondary index")
    for n in range(5):
        for ins in range(1000):
            assert 1 == len(list(session.execute("select * from base_{0} where c1 = {1}".format(n, ins))))
            # bug fix: the c2 query previously called `.format(n, )`, leaving
            # the {1} placeholder unfilled and raising IndexError at runtime
            assert 1 == len(list(session.execute("select * from base_{0} where c2 = {1}".format(n, ins))))
@since('3.0')
def test_create_lots_of_mv_concurrently(self):
    """
    create materialized views across multiple threads concurrently
    """
    cluster = self.cluster
    cluster.populate(3).start()
    node1, node2, node3 = cluster.nodelist()
    session = self.cql_connection(node1)
    session.execute("create keyspace lots_o_views WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1};")
    session.execute("use lots_o_views")
    wait(10)

    # one source table with 10 int columns; each gets its own MV below
    session.execute("create table source_data (id uuid primary key, c1 int, c2 int, c3 int, c4 int, c5 int, c6 int, c7 int, c8 int, c9 int, c10 int);")
    insert_stmt = session.prepare("insert into source_data (id, c1, c2, c3, c4, c5, c6, c7, c8, c9, c10) values (uuid(), ?, ?, ?, ?, ?, ?, ?, ?, ?, ?);")
    wait(10)
    for n in range(4000):
        session.execute(insert_stmt, [n] * 10)

    wait(10)
    for n in range(1, 11):
        session.execute(("CREATE MATERIALIZED VIEW src_by_c{0} AS SELECT * FROM source_data "
                         "WHERE c{0} IS NOT NULL AND id IS NOT NULL PRIMARY KEY (c{0}, id)".format(n)))

    session.cluster.control_connection.wait_for_schema_agreement()
    logger.debug("waiting for indexes to fill in")
    wait(60)

    result = list(session.execute(("SELECT * FROM system_schema.views "
                                   "WHERE keyspace_name='lots_o_views' AND base_table_name='source_data' ALLOW FILTERING")))
    # bug fix: was `assert 10, len(result) == "..."`, which asserted the
    # constant 10 and never compared the view count
    assert 10 == len(result), "missing some mv from source_data table"

    # every view should have been fully built with all 4000 rows
    for n in range(1, 11):
        result = list(session.execute("select * from src_by_c{0}".format(n)))
        assert 4000 == len(result)
def _do_lots_of_schema_actions(self, session):
    """Creates 40 base tables, then concurrently fires table creates,
    column drops/adds and index creates against them. Paired with
    _verify_lots_of_schema_actions()."""
    for n in range(20):
        session.execute("create table alter_me_{0} (id uuid primary key, s1 int, s2 int, s3 int, s4 int, s5 int, s6 int, s7 int);".format(n))
        session.execute("create table index_me_{0} (id uuid primary key, c1 int, c2 int, c3 int, c4 int, c5 int, c6 int, c7 int);".format(n))
    wait(10)

    # per n: 1 new table + 7 x (drop s column, add c column, create index)
    cmds = []
    for n in range(20):
        cmds.append(("create table new_table_{0} (id uuid primary key, c1 int, c2 int, c3 int, c4 int);".format(n), ()))
        for a in range(1, 8):
            cmds.append(("alter table alter_me_{0} drop s{1};".format(n, a), ()))
            cmds.append(("alter table alter_me_{0} add c{1} int;".format(n, a), ()))
            cmds.append(("create index ix_index_me_{0}_c{1} on index_me_{0} (c{1});".format(n, a), ()))

    results = execute_concurrent(session, cmds, concurrency=100, raise_on_first_error=True)

    for (success, result) in results:
        assert success, "didn't get success: {}".format(result)
def _verify_lots_of_schema_actions(self, session):
    """Verifies the schema state produced by _do_lots_of_schema_actions():
    schema agreement, new tables present, 7 indexes per index_me table, and
    alter_me tables holding only id + c1..c7."""
    session.cluster.control_connection.wait_for_schema_agreement()

    # the above should guarantee this -- but to be sure
    node1, node2, node3 = self.cluster.nodelist()
    self.validate_schema_consistent(node1)
    self.validate_schema_consistent(node2)
    self.validate_schema_consistent(node3)

    session.cluster.refresh_schema_metadata()
    table_meta = session.cluster.metadata.keyspaces["lots_o_churn"].tables
    # collect all discrepancies before failing, so the assertion message
    # reports everything at once
    errors = []
    for n in range(20):
        assert "new_table_{0}".format(n) in table_meta
        if 7 != len(table_meta["index_me_{0}".format(n)].indexes):
            errors.append("index_me_{0} expected indexes ix_index_me_c0->7, got: {1}".format(n, sorted(list(table_meta["index_me_{0}".format(n)].indexes))))

        altered = table_meta["alter_me_{0}".format(n)]
        for col in altered.columns:
            if not col.startswith("c") and col != "id":
                errors.append("alter_me_{0} column[{1}] does not start with c and should have been dropped: {2}".format(n, col, sorted(list(altered.columns))))
        if 8 != len(altered.columns):
            errors.append("alter_me_{0} expected c1 -> c7, id, got: {1}".format(n, sorted(list(altered.columns))))

    assert 0 == len(errors), "\n".join(errors)
def test_create_lots_of_schema_churn(self):
    """
    create tables, indexes, alters across multiple threads concurrently
    """
    cluster = self.cluster
    cluster.populate(3).start()
    node1, node2, node3 = cluster.nodelist()
    session = self.cql_connection(node1)

    session.execute("create keyspace lots_o_churn WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1};")
    session.execute("use lots_o_churn")

    self._do_lots_of_schema_actions(session)

    logger.debug("waiting for things to settle and sync")
    wait(60)
    self._verify_lots_of_schema_actions(session)
def test_create_lots_of_schema_churn_with_node_down(self):
    """
    create tables, indexes, alters across multiple threads concurrently with a node down
    """
    cluster = self.cluster
    cluster.populate(3).start()
    node1, node2, node3 = cluster.nodelist()
    session = self.cql_connection(node1)

    session.execute("create keyspace lots_o_churn WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1};")
    session.execute("use lots_o_churn")

    # churn the schema while node2 is down, then bring it back and make sure
    # it catches up to the agreed schema
    node2.stop()
    self._do_lots_of_schema_actions(session)
    wait(15)
    node2.start(wait_other_notice=True)

    logger.debug("waiting for things to settle and sync")
    wait(120)
    self._verify_lots_of_schema_actions(session)
def test_basic(self):
    """
    make several schema changes on the same node.
    """
    logger.debug("basic_test()")
    self.cluster.populate(2).start()
    first_node = self.cluster.nodelist()[0]
    wait(2)
    session = self.cql_connection(first_node)

    self.prepare_for_changes(session, namespace='ns1')
    self.make_schema_changes(session, namespace='ns1')
def test_changes_to_different_nodes(self):
    """Makes schema changes through two different coordinator nodes and
    checks the cluster converges to a single schema each time."""
    logger.debug("changes_to_different_nodes_test()")

    cluster = self.cluster
    cluster.populate(2).start()
    node1, node2 = cluster.nodelist()
    wait(2)
    session = self.cql_connection(node1)
    self.prepare_for_changes(session, namespace='ns1')
    self.make_schema_changes(session, namespace='ns1')
    wait(3)
    self.validate_schema_consistent(node1)

    # wait for changes to get to the first node
    wait(20)

    # repeat the same workflow coordinated by the second node
    session = self.cql_connection(node2)
    self.prepare_for_changes(session, namespace='ns2')
    self.make_schema_changes(session, namespace='ns2')
    wait(3)
    self.validate_schema_consistent(node1)
    # check both, just because we can
    self.validate_schema_consistent(node2)
def test_changes_while_node_down(self):
    """
    makes schema changes while a node is down.
    Make schema changes to node 1 while node 2 is down.
    Then bring up 2 and make sure it gets the changes.

    (Note: changes are actually issued through node2's session while node1
    is stopped; node1 must then catch up on restart.)
    """
    logger.debug("changes_while_node_down_test()")
    cluster = self.cluster
    cluster.populate(2).start()
    node1, node2 = cluster.nodelist()
    wait(2)
    session = self.patient_cql_connection(node2)

    self.prepare_for_changes(session, namespace='ns2')
    node1.stop()
    wait(2)
    self.make_schema_changes(session, namespace='ns2')
    wait(2)
    node2.stop()
    wait(2)
    node1.start()
    node2.start()
    # give node1 time to pull the schema it missed
    wait(20)
    self.validate_schema_consistent(node1)
def test_changes_while_node_toggle(self):
    """
    makes schema changes while a node is down.
    Bring down 1 and change 2.
    Bring down 2, bring up 1, and finally bring up 2.
    1 should get the changes.
    """
    logger.debug("changes_while_node_toggle_test()")
    cluster = self.cluster
    cluster.populate(2).start()
    node1, node2 = cluster.nodelist()
    wait(2)
    session = self.patient_cql_connection(node2)

    self.prepare_for_changes(session, namespace='ns2')
    node1.stop()
    wait(2)
    self.make_schema_changes(session, namespace='ns2')
    wait(2)
    node2.stop()
    wait(2)
    node1.start()
    node2.start()
    # node1 was down for all changes; it must converge after the toggle
    wait(20)
    self.validate_schema_consistent(node1)
def test_decommission_node(self):
    """Checks schema consistency while nodes are added, decommissioned and
    re-added to the cluster."""
    logger.debug("decommission_node_test()")

    cluster = self.cluster

    cluster.populate(1)
    # create and add a new node, it must not be a seed, otherwise
    # we get schema disagreement issues for awhile after decommissioning it.
    node2 = Node('node2',
                 cluster,
                 True,
                 ('127.0.0.2', 9160),
                 ('127.0.0.2', 7000),
                 '7200',
                 '0',
                 None,
                 binary_interface=('127.0.0.2', 9042))
    cluster.add(node2, False)

    node1, node2 = cluster.nodelist()
    node1.start(wait_for_binary_proto=True)
    node2.start(wait_for_binary_proto=True)
    wait(2)
    session = self.patient_cql_connection(node1)
    self.prepare_for_changes(session)

    node2.decommission()
    wait(30)

    self.validate_schema_consistent(node1)
    self.make_schema_changes(session, namespace='ns1')

    # create and add a new node
    node3 = Node('node3',
                 cluster,
                 True,
                 ('127.0.0.3', 9160),
                 ('127.0.0.3', 7000),
                 '7300',
                 '0',
                 None,
                 binary_interface=('127.0.0.3', 9042))

    cluster.add(node3, True)
    node3.start(wait_for_binary_proto=True)

    # new node must bootstrap the current schema
    wait(30)

    self.validate_schema_consistent(node1)
def test_snapshot(self):
    """Takes snapshots, makes schema changes afterwards, restores the
    snapshot data files, and verifies the schema is still consistent."""
    logger.debug("snapshot_test()")

    cluster = self.cluster
    cluster.populate(2).start()
    node1, node2 = cluster.nodelist()
    wait(2)
    session = self.cql_connection(node1)
    self.prepare_for_changes(session, namespace='ns2')

    wait(2)
    # flush memtables so the snapshot captures all data on disk
    cluster.flush()

    wait(2)
    node1.nodetool('snapshot -t testsnapshot')
    node2.nodetool('snapshot -t testsnapshot')

    wait(2)
    self.make_schema_changes(session, namespace='ns2')

    wait(2)

    cluster.stop()

    # restore the snapshots
    # clear the commitlogs and data
    dirs = ('%s/commitlogs' % node1.get_path(),
            '%s/commitlogs' % node2.get_path(),
            '%s/data/ks_ns2/cf_*/*' % node1.get_path(),
            '%s/data/ks_ns2/cf_*/*' % node2.get_path(),
            )
    for dirr in dirs:
        for f in glob.glob(os.path.join(dirr)):
            if os.path.isfile(f):
                os.unlink(f)

    # copy the snapshot. TODO: This could be replaced with the creation of hard links.
    os.system('cp -p %s/data/ks_ns2/cf_*/snapshots/testsnapshot/* %s/data/ks_ns2/cf_*/' % (node1.get_path(), node1.get_path()))
    os.system('cp -p %s/data/ks_ns2/cf_*/snapshots/testsnapshot/* %s/data/ks_ns2/cf_*/' % (node2.get_path(), node2.get_path()))

    # restart the cluster
    cluster.start()

    wait(2)
    self.validate_schema_consistent(node1)
def test_load(self):
    """
    apply schema changes while the cluster is under load.
    """
    logger.debug("load_test()")
    cluster = self.cluster
    cluster.populate(1).start()
    node1 = cluster.nodelist()[0]
    wait(2)
    session = self.cql_connection(node1)

    def stress(args=[]):
        # Run cassandra-stress on node1 with the given argument list.
        # NOTE(review): mutable default argument; harmless here since the
        # list is never mutated, but worth confirming if this is extended.
        logger.debug("Stressing")
        node1.stress(args)
        logger.debug("Done Stressing")

    def compact():
        # Trigger a blocking major compaction on node1.
        logger.debug("Compacting...")
        node1.nodetool('compact')
        logger.debug("Done Compacting.")

    # put some data into the cluster
    stress(['write', 'n=30000', 'no-warmup', '-rate', 'threads=8'])
    # now start stressing and compacting at the same time
    # NOTE(review): only the compaction runs in a background thread here;
    # no concurrent stress is launched despite the comment - confirm intent.
    tcompact = Thread(target=compact)
    tcompact.start()
    wait(1)
    # now the cluster is under a lot of load. Make some schema changes.
    session.execute('USE keyspace1')
    wait(1)
    session.execute('DROP TABLE standard1')
    wait(3)
    session.execute('CREATE TABLE standard1 (KEY text PRIMARY KEY)')
    tcompact.join()
|
tweeter.py | # -*- coding: utf-8 -*-
from time import sleep
import tkinter
from tkinter import *
from tkinter import ttk
from tkinter import font
import emoji, sys
import tweepy
import threading
# ~ NOTE ~
#- Initial window size (overwritten with the real screen size below) -#
window_width = 500 # x
window_height = 200 # y
#- List holding the fetched tweets -#
tweets = []
# Twitter API keys (placeholders - substitute real credentials)
consumer_key='CONSUMER_KEY'
consumer_secret='CONSUMER_SECRET'
access_key='ACCESS_KEY'
access_secret='ACCESS_SECRET'
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_key, access_secret)
api = tweepy.API(auth_handler=auth)
#- Search query -# (the string below is the keyword; retweets are excluded)
trend_name = "最初の言語 exclude:retweets"
textnum = 0  # index of the tweet currently displayed
#-- Create the main window --#
root = Tk()
root.title("Tweeter1.2")
window_width = root.winfo_screenwidth() # x
window_height = root.winfo_screenheight() # y
x_place = window_width/2  # horizontal centre of the screen
root.configure(width=window_width, height=window_height, bg='#101922') # window geometry and dark background
root.attributes("-fullscreen", True) # full screen
textfont = font.Font(size=23, weight="bold", family=("Arial", "ヒラギノ角ゴ Pro W3", "Hiragino Kaku Gothic Pro", "Osaka", "メイリオ", "Meiryo", "MS Pゴシック", "MS PGothic", "sans-serif"))
textfont2 = font.Font(size=18, weight="bold", family=("Arial", "ヒラギノ角ゴ Pro W3", "Hiragino Kaku Gothic Pro", "Osaka", "メイリオ", "Meiryo", "MS Pゴシック", "MS PGothic", "sans-serif"))
text = ttk.Label(root, text="Tweeter", font=textfont, wraplength=window_width/1.5, foreground='#FFFFFF' ,background='#101922') # main tweet label
#-- Exit button --#
def DeleteEntryValue(event=None):
    """Handle a click on the EXIT button: log and terminate the process.

    :param event: The tkinter Event delivered by the "<Button-1>" binding
                  (unused). BUG FIX: tkinter passes an Event object to bound
                  handlers, so the original zero-argument signature raised
                  TypeError on every click; a default keeps the function
                  callable without arguments as well.
    """
    print("EXIT")
    sys.exit()

Button = tkinter.Button(text='EXIT', width=50) # #ffffff00
Button.bind("<Button-1>",DeleteEntryValue)
#-- Strip emoji characters from a tweet --#
def remove_emoji(src_str):
    """Return *src_str* with every character listed in emoji.UNICODE_EMOJI removed."""
    kept_chars = [ch for ch in src_str if ch not in emoji.UNICODE_EMOJI]
    return ''.join(kept_chars)
#-- Fetch recent tweets for the configured search word --#
def trend_tweet():
    """Append recent short Japanese tweets matching ``trend_name`` to the
    global ``tweets`` list and return it."""
    global trend_name
    trend = api.trends_place(23424856)[0] # WOEID for Japan
    trends = trend['trends']
    # NOTE(review): the trend data above is fetched but unused unless the next
    # line is re-enabled; presumably kept for switching to the #1 trend.
    #trend_name = trends[0]["name"] # trend name
    for status in api.search(q=trend_name, lang='ja', result_type='recent',count=100):
        textmain = remove_emoji(status.text)
        textmain = textmain.split('https')[0]  # drop the trailing shortened link
        #textmain = textmain.encode('shift_jis', errors='ignore')
        if len(textmain) < 40:  # keep only tweets short enough for the label
            tweets.append(textmain) # tweet list
    return tweets
# Initial tweet fetch before the UI loop starts.
tweets = trend_tweet()
trend_text = ttk.Label(root, text="Search "+trend_name.replace("exclude:retweets",""), font=textfont2, foreground='#FFFFFF' ,background='#101922') # header label showing the search word
trend_text.place(x=10, y=20, anchor='w') # anchor the label's west edge at (10, 20)
# Number of tweets fetched
print("GET LENGTH: "+str(len(tweets)))
#-- Worker thread: fade tweets in and out on the label, forever --#
def thread():
    """Background loop driving the display.

    Repeatedly fades the label in, shows the current tweet for 2 seconds,
    fades it out, then swaps in the next tweet. When the end of ``tweets``
    is reached, the list is refreshed via ``trend_tweet()``. Never returns.
    """
    global x_place, textnum, tweets

    # Fade palettes, darkest -> brightest; the reverse is used to fade out.
    # (Replaces two duplicated five-branch if/elif chains.)
    fade_in = ('#1f2b36', '#3c4e61', '#8594a1', '#a5b9c9', '#FFFFFF')
    fade_out = ('#FFFFFF', '#a5b9c9', '#8594a1', '#3c4e61', '#1f2b36')

    def wisper_s():
        # Fade the label in, one palette step per 0.1 s.
        for color in fade_in:
            text["foreground"] = color
            # - - slow down - - #
            sleep(0.1)

    def wisper_e():
        # Fade the label out, one palette step per 0.1 s.
        for color in fade_out:
            text["foreground"] = color
            # - - slow down - - #
            sleep(0.1)

    while True:
        text.place(x=x_place, y=window_height/2, anchor=CENTER) # anchor at the screen centre
        wisper_s()
        sleep(2)
        wisper_e()
        try:
            text["text"] = str(tweets[textnum])
        except Exception:
            # BUG FIX: was a bare ``except:`` which also swallowed
            # SystemExit/KeyboardInterrupt; e.g. tkinter may reject some
            # characters, or the tweet list may be empty.
            text["text"] = "Exception handling message..."
            print("Exception handling message...")
        if textnum!=len(tweets)-1:
            textnum+=1
        else:
            # Wrapped around: refetch a fresh batch of tweets.
            textnum=0
            tweets = []
            tweets = trend_tweet()
            # Number of tweets fetched
            print("GET LENGTH: "+str(len(tweets)))
#-- Entry point --#
if __name__=="__main__":
    # BUG FIX: run the worker as a daemon thread; otherwise its infinite
    # loop keeps the process alive after the Tk main loop exits.
    thread_1 = threading.Thread(target=thread, daemon=True)
    thread_1.start()
    #-- Run the UI until the window is closed --#
    root.mainloop()
locators.py | # -*- coding: utf-8 -*-
#
# Copyright (C) 2012-2015 Vinay Sajip.
# Licensed to the Python Software Foundation under a contributor agreement.
# See LICENSE.txt and CONTRIBUTORS.txt.
#
import gzip
import json
import logging
import os
import posixpath
import re
from io import BytesIO
try:
import threading
except ImportError: # pragma: no cover
import dummy_threading as threading
import zlib
from . import DistlibException
from .compat import (urljoin, urlparse, urlunparse, url2pathname, pathname2url,
queue, quote, unescape, build_opener,
HTTPRedirectHandler as BaseRedirectHandler, text_type,
Request, HTTPError, URLError)
from .database import Distribution, DistributionPath, make_dist
from .metadata import Metadata, MetadataInvalidError
from .util import (cached_property, ensure_slash,
split_filename, get_project_data, parse_requirement,
parse_name_and_version, ServerProxy, normalize_name)
from .version import get_scheme, UnsupportedVersionError
from .wheel import Wheel, is_compatible
logger = logging.getLogger(__name__)

# Matches "algo=hexdigest" URL fragments, e.g. "md5=abcd...".
HASHER_HASH = re.compile(r'^(\w+)=([a-f0-9]+)')
# Extracts the charset parameter from a Content-Type header value.
CHARSET = re.compile(r';\s*charset\s*=\s*(.*)\s*$', re.I)
# Content types treated as scrapeable HTML/XHTML pages.
HTML_CONTENT_TYPE = re.compile('text/html|application/x(ht)?ml')
# Index used when no URL is supplied to get_all_distribution_names().
DEFAULT_INDEX = 'https://pypi.python.org/pypi'
def get_all_distribution_names(url=None):
    """
    Return all distribution names known by an index.
    :param url: The URL of the index.
    :return: A list of all known distribution names.
    """
    index_url = DEFAULT_INDEX if url is None else url
    client = ServerProxy(index_url, timeout=3.0)
    try:
        return client.list_packages()
    finally:
        # Calling the proxy with 'close' retrieves the close method of the
        # underlying transport, which is then invoked.
        client('close')()
class RedirectHandler(BaseRedirectHandler):
    """
    A class to work around a bug in some Python 3.2.x releases.
    """
    # Some 3.2.x releases (e.g. 3.2.2 on Ubuntu Oneiric) reject a
    # scheme-less Location such as "/abc" instead of resolving it against
    # the request's URL. See Python issue #13696.
    def http_error_302(self, req, fp, code, msg, headers):
        # Some servers (incorrectly) send several Location/URI headers;
        # only the first recognised one is honoured.
        key = newurl = None
        for candidate in ('location', 'uri'):
            if candidate in headers:
                key = candidate
                newurl = headers[candidate]
                break
        if newurl is None:  # pragma: no cover
            return
        parts = urlparse(newurl)
        if not parts.scheme:
            # Resolve the relative redirect against the request URL and
            # write the absolute form back into the headers before
            # delegating to the base implementation.
            newurl = urljoin(req.get_full_url(), newurl)
            if hasattr(headers, 'replace_header'):
                headers.replace_header(key, newurl)
            else:
                headers[key] = newurl
        return BaseRedirectHandler.http_error_302(self, req, fp, code, msg,
                                                  headers)

    http_error_301 = http_error_303 = http_error_307 = http_error_302
class Locator(object):
    """
    A base class for locators - things that locate distributions.
    """
    source_extensions = ('.tar.gz', '.tar.bz2', '.tar', '.zip', '.tgz', '.tbz')
    binary_extensions = ('.egg', '.exe', '.whl')
    excluded_extensions = ('.pdf',)

    # A list of tags indicating which wheels you want to match. The default
    # value of None matches against the tags compatible with the running
    # Python. If you want to match other values, set wheel_tags on a locator
    # instance to a list of tuples (pyver, abi, arch) which you want to match.
    wheel_tags = None

    downloadable_extensions = source_extensions + ('.whl',)

    def __init__(self, scheme='default'):
        """
        Initialise an instance.
        :param scheme: Because locators look for most recent versions, they
                       need to know the version scheme to use. This specifies
                       the current PEP-recommended scheme - use ``'legacy'``
                       if you need to support existing distributions on PyPI.
        """
        self._cache = {}
        self.scheme = scheme
        # Because of bugs in some of the handlers on some of the platforms,
        # we use our own opener rather than just using urlopen.
        self.opener = build_opener(RedirectHandler())
        # If get_project() is called from locate(), the matcher instance
        # is set from the requirement passed to locate(). See issue #18 for
        # why this can be useful to know.
        self.matcher = None
        self.errors = queue.Queue()

    def get_errors(self):
        """
        Return any errors which have occurred.
        """
        result = []
        while not self.errors.empty():  # pragma: no cover
            try:
                e = self.errors.get(False)
                result.append(e)
            except queue.Empty:
                # BUG FIX: was ``except self.errors.Empty`` - Queue
                # instances have no ``Empty`` attribute (it is defined at
                # module level in the queue module), so losing the race
                # here raised AttributeError instead of being handled.
                continue
            self.errors.task_done()
        return result

    def clear_errors(self):
        """
        Clear any errors which may have been logged.
        """
        # Just get the errors and throw them away
        self.get_errors()

    def clear_cache(self):
        # Discard all cached per-project results.
        self._cache.clear()

    def _get_scheme(self):
        return self._scheme

    def _set_scheme(self, value):
        self._scheme = value

    scheme = property(_get_scheme, _set_scheme)

    def _get_project(self, name):
        """
        For a given project, get a dictionary mapping available versions to Distribution
        instances.

        This should be implemented in subclasses.

        If called from a locate() request, self.matcher will be set to a
        matcher for the requirement to satisfy, otherwise it will be None.
        """
        raise NotImplementedError('Please implement in the subclass')

    def get_distribution_names(self):
        """
        Return all the distribution names known to this locator.
        """
        raise NotImplementedError('Please implement in the subclass')

    def get_project(self, name):
        """
        For a given project, get a dictionary mapping available versions to Distribution
        instances.

        This calls _get_project to do all the work, and just implements a caching layer on top.
        """
        if self._cache is None:  # pragma: no cover
            result = self._get_project(name)
        elif name in self._cache:
            result = self._cache[name]
        else:
            self.clear_errors()
            result = self._get_project(name)
            self._cache[name] = result
        return result

    def score_url(self, url):
        """
        Give an url a score which can be used to choose preferred URLs
        for a given project release.
        """
        t = urlparse(url)
        basename = posixpath.basename(t.path)
        compatible = True
        is_wheel = basename.endswith('.whl')
        is_downloadable = basename.endswith(self.downloadable_extensions)
        if is_wheel:
            compatible = is_compatible(Wheel(basename), self.wheel_tags)
        # Tuples compare left to right: https wins first, then the PyPI
        # host, then the downloadable/wheel/compatibility flags, finally
        # the basename as a tie-breaker.
        return (t.scheme == 'https', 'pypi.python.org' in t.netloc,
                is_downloadable, is_wheel, compatible, basename)

    def prefer_url(self, url1, url2):
        """
        Choose one of two URLs where both are candidates for distribution
        archives for the same version of a distribution (for example,
        .tar.gz vs. zip).

        The current implementation favours https:// URLs over http://, archives
        from PyPI over those from other locations, wheel compatibility (if a
        wheel) and then the archive name.
        """
        result = url2
        if url1:
            s1 = self.score_url(url1)
            s2 = self.score_url(url2)
            if s1 > s2:
                result = url1
            if result != url2:
                logger.debug('Not replacing %r with %r', url1, url2)
            else:
                logger.debug('Replacing %r with %r', url1, url2)
        return result

    def split_filename(self, filename, project_name):
        """
        Attempt to split a filename in project name, version and Python version.
        """
        return split_filename(filename, project_name)

    def convert_url_to_download_info(self, url, project_name):
        """
        See if a URL is a candidate for a download URL for a project (the URL
        has typically been scraped from an HTML page).

        If it is, a dictionary is returned with keys "name", "version",
        "filename" and "url"; otherwise, None is returned.
        """
        def same_project(name1, name2):
            return normalize_name(name1) == normalize_name(name2)

        result = None
        scheme, netloc, path, params, query, frag = urlparse(url)
        if frag.lower().startswith('egg='):  # pragma: no cover
            logger.debug('%s: version hint in fragment: %r',
                         project_name, frag)
        # The fragment may carry an "algo=hexdigest" hash for the archive.
        m = HASHER_HASH.match(frag)
        if m:
            algo, digest = m.groups()
        else:
            algo, digest = None, None
        origpath = path
        if path and path[-1] == '/':  # pragma: no cover
            path = path[:-1]
        if path.endswith('.whl'):
            try:
                wheel = Wheel(path)
                if not is_compatible(wheel, self.wheel_tags):
                    logger.debug('Wheel not compatible: %s', path)
                else:
                    if project_name is None:
                        include = True
                    else:
                        include = same_project(wheel.name, project_name)
                    if include:
                        result = {
                            'name': wheel.name,
                            'version': wheel.version,
                            'filename': wheel.filename,
                            'url': urlunparse((scheme, netloc, origpath,
                                               params, query, '')),
                            'python-version': ', '.join(
                                ['.'.join(list(v[2:])) for v in wheel.pyver]),
                        }
            except Exception as e:  # pragma: no cover
                logger.warning('invalid path for wheel: %s', path)
        elif not path.endswith(self.downloadable_extensions):  # pragma: no cover
            logger.debug('Not downloadable: %s', path)
        else:  # downloadable extension
            path = filename = posixpath.basename(path)
            for ext in self.downloadable_extensions:
                if path.endswith(ext):
                    path = path[:-len(ext)]
                    t = self.split_filename(path, project_name)
                    if not t:  # pragma: no cover
                        logger.debug('No match for project/version: %s', path)
                    else:
                        name, version, pyver = t
                        if not project_name or same_project(project_name, name):
                            result = {
                                'name': name,
                                'version': version,
                                'filename': filename,
                                'url': urlunparse((scheme, netloc, origpath,
                                                   params, query, '')),
                                #'packagetype': 'sdist',
                            }
                            if pyver:  # pragma: no cover
                                result['python-version'] = pyver
                    break
        if result and algo:
            result['%s_digest' % algo] = digest
        return result

    def _get_digest(self, info):
        """
        Get a digest from a dictionary by looking at keys of the form
        'algo_digest'.

        Returns a 2-tuple (algo, digest) if found, else None. Currently
        looks only for SHA256, then MD5.
        """
        result = None
        for algo in ('sha256', 'md5'):
            key = '%s_digest' % algo
            if key in info:
                result = (algo, info[key])
                break
        return result

    def _update_version_data(self, result, info):
        """
        Update a result dictionary (the final result from _get_project) with a
        dictionary for a specific version, which typically holds information
        gleaned from a filename or URL for an archive for the distribution.
        """
        name = info.pop('name')
        version = info.pop('version')
        if version in result:
            dist = result[version]
            md = dist.metadata
        else:
            dist = make_dist(name, version, scheme=self.scheme)
            md = dist.metadata
        dist.digest = digest = self._get_digest(info)
        url = info['url']
        result['digests'][url] = digest
        if md.source_url != info['url']:
            md.source_url = self.prefer_url(md.source_url, url)
        result['urls'].setdefault(version, set()).add(url)
        dist.locator = self
        result[version] = dist

    def locate(self, requirement, prereleases=False):
        """
        Find the most recent distribution which matches the given
        requirement.

        :param requirement: A requirement of the form 'foo (1.0)' or perhaps
                            'foo (>= 1.0, < 2.0, != 1.3)'
        :param prereleases: If ``True``, allow pre-release versions
                            to be located. Otherwise, pre-release versions
                            are not returned.
        :return: A :class:`Distribution` instance, or ``None`` if no such
                 distribution could be located.
        """
        result = None
        r = parse_requirement(requirement)
        if r is None:  # pragma: no cover
            raise DistlibException('Not a valid requirement: %r' % requirement)
        scheme = get_scheme(self.scheme)
        self.matcher = matcher = scheme.matcher(r.requirement)
        logger.debug('matcher: %s (%s)', matcher, type(matcher).__name__)
        versions = self.get_project(r.name)
        if len(versions) > 2:  # urls and digests keys are present
            # sometimes, versions are invalid
            slist = []
            vcls = matcher.version_class
            for k in versions:
                if k in ('urls', 'digests'):
                    continue
                try:
                    if not matcher.match(k):
                        logger.debug('%s did not match %r', matcher, k)
                    else:
                        if prereleases or not vcls(k).is_prerelease:
                            slist.append(k)
                        else:
                            logger.debug('skipping pre-release '
                                         'version %s of %s', k, matcher.name)
                except Exception:  # pragma: no cover
                    logger.warning('error matching %s with %r', matcher, k)
                    pass  # slist.append(k)
            if len(slist) > 1:
                slist = sorted(slist, key=scheme.key)
            if slist:
                logger.debug('sorted list: %s', slist)
                version = slist[-1]
                result = versions[version]
        if result:
            if r.extras:
                result.extras = r.extras
            result.download_urls = versions.get('urls', {}).get(version, set())
            d = {}
            sd = versions.get('digests', {})
            for url in result.download_urls:
                if url in sd:  # pragma: no cover
                    d[url] = sd[url]
            result.digests = d
        # Reset so a subsequent direct get_project() call sees no matcher.
        self.matcher = None
        return result
class PyPIRPCLocator(Locator):
    """
    This locator uses XML-RPC to locate distributions. It therefore
    cannot be used with simple mirrors (that only mirror file content).
    """
    def __init__(self, url, **kwargs):
        """
        Initialise an instance.

        :param url: The URL to use for XML-RPC.
        :param kwargs: Passed to the superclass constructor.
        """
        super(PyPIRPCLocator, self).__init__(**kwargs)
        self.base_url = url
        self.client = ServerProxy(url, timeout=3.0)

    def get_distribution_names(self):
        """
        Return all the distribution names known to this locator.
        """
        return set(self.client.list_packages())

    def _get_project(self, name):
        # Result maps version -> Distribution, plus the 'urls'/'digests'
        # bookkeeping keys (see Locator.get_project).
        result = {'urls': {}, 'digests': {}}
        versions = self.client.package_releases(name, True)
        for v in versions:
            urls = self.client.release_urls(name, v)
            data = self.client.release_data(name, v)
            metadata = Metadata(scheme=self.scheme)
            metadata.name = data['name']
            metadata.version = data['version']
            metadata.license = data.get('license')
            metadata.keywords = data.get('keywords', [])
            metadata.summary = data.get('summary')
            dist = Distribution(metadata)
            if urls:
                # The first URL is treated as the canonical source URL.
                info = urls[0]
                metadata.source_url = info['url']
                dist.digest = self._get_digest(info)
            dist.locator = self
            result[v] = dist
            for info in urls:
                url = info['url']
                digest = self._get_digest(info)
                result['urls'].setdefault(v, set()).add(url)
                result['digests'][url] = digest
        return result
class PyPIJSONLocator(Locator):
    """
    This locator uses PyPI's JSON interface. It's very limited in functionality
    and probably not worth using.
    """
    def __init__(self, url, **kwargs):
        # ``url`` is the base URL of the JSON API; a trailing slash is
        # guaranteed so urljoin() behaves predictably.
        super(PyPIJSONLocator, self).__init__(**kwargs)
        self.base_url = ensure_slash(url)

    def get_distribution_names(self):
        """
        Return all the distribution names known to this locator.
        """
        raise NotImplementedError('Not available from this locator')

    def _get_project(self, name):
        # See Locator.get_project for the result layout. Any failure is
        # recorded on the error queue and an empty/partial result returned.
        result = {'urls': {}, 'digests': {}}
        url = urljoin(self.base_url, '%s/json' % quote(name))
        try:
            resp = self.opener.open(url)
            data = resp.read().decode() # for now
            d = json.loads(data)
            md = Metadata(scheme=self.scheme)
            data = d['info']
            md.name = data['name']
            md.version = data['version']
            md.license = data.get('license')
            md.keywords = data.get('keywords', [])
            md.summary = data.get('summary')
            dist = Distribution(md)
            dist.locator = self
            urls = d['urls']
            result[md.version] = dist
            for info in d['urls']:
                url = info['url']
                dist.download_urls.add(url)
                dist.digests[url] = self._get_digest(info)
                result['urls'].setdefault(md.version, set()).add(url)
                result['digests'][url] = self._get_digest(info)
            # Now get other releases
            for version, infos in d['releases'].items():
                if version == md.version:
                    continue    # already done
                omd = Metadata(scheme=self.scheme)
                omd.name = md.name
                omd.version = version
                odist = Distribution(omd)
                odist.locator = self
                result[version] = odist
                for info in infos:
                    url = info['url']
                    odist.download_urls.add(url)
                    odist.digests[url] = self._get_digest(info)
                    result['urls'].setdefault(version, set()).add(url)
                    result['digests'][url] = self._get_digest(info)
#            for info in urls:
#                md.source_url = info['url']
#                dist.digest = self._get_digest(info)
#                dist.locator = self
#            for info in urls:
#                url = info['url']
#                result['urls'].setdefault(md.version, set()).add(url)
#                result['digests'][url] = self._get_digest(info)
        except Exception as e:
            self.errors.put(text_type(e))
            logger.exception('JSON fetch failed: %s', e)
        return result
class Page(object):
    """
    This class represents a scraped HTML page.
    """
    # The following slightly hairy-looking regex just looks for the contents of
    # an anchor link, which has an attribute "href" either immediately preceded
    # or immediately followed by a "rel" attribute. The attribute values can be
    # declared with double quotes, single quotes or no quotes - which leads to
    # the length of the expression.
    _href = re.compile("""
(rel\\s*=\\s*(?:"(?P<rel1>[^"]*)"|'(?P<rel2>[^']*)'|(?P<rel3>[^>\\s\n]*))\\s+)?
href\\s*=\\s*(?:"(?P<url1>[^"]*)"|'(?P<url2>[^']*)'|(?P<url3>[^>\\s\n]*))
(\\s+rel\\s*=\\s*(?:"(?P<rel4>[^"]*)"|'(?P<rel5>[^']*)'|(?P<rel6>[^>\\s\n]*)))?
""", re.I | re.S | re.X)
    _base = re.compile(r"""<base\s+href\s*=\s*['"]?([^'">]+)""", re.I | re.S)

    def __init__(self, data, url):
        """
        Initialise an instance with the Unicode page contents and the URL they
        came from.
        """
        self.data = data
        self.base_url = self.url = url
        # A <base href="..."> tag, if present, overrides the page URL for
        # resolving relative links.
        m = self._base.search(self.data)
        if m:
            self.base_url = m.group(1)

    _clean_re = re.compile(r'[^a-z0-9$&+,/:;=?@.#%_\\|-]', re.I)

    @cached_property
    def links(self):
        """
        Return the URLs of all the links on a page together with information
        about their "rel" attribute, for determining which ones to treat as
        downloads and which ones to queue for further scraping.
        """
        def clean(url):
            "Tidy up an URL."
            # NOTE(review): defined but currently unused below.
            scheme, netloc, path, params, query, frag = urlparse(url)
            return urlunparse((scheme, netloc, quote(path),
                               params, query, frag))

        result = set()
        for match in self._href.finditer(self.data):
            d = match.groupdict('')
            rel = (d['rel1'] or d['rel2'] or d['rel3'] or
                   d['rel4'] or d['rel5'] or d['rel6'])
            url = d['url1'] or d['url2'] or d['url3']
            url = urljoin(self.base_url, url)
            url = unescape(url)
            # Percent-encode any disallowed character. BUG FIX: use a
            # zero-padded width ('%02x'); the previous '%2x' space-padded
            # ordinals below 0x10, producing malformed escapes like '% a'.
            url = self._clean_re.sub(lambda m: '%%%02x' % ord(m.group(0)), url)
            result.add((url, rel))
        # We sort the result, hoping to bring the most recent versions
        # to the front
        result = sorted(result, key=lambda t: t[0], reverse=True)
        return result
class SimpleScrapingLocator(Locator):
    """
    A locator which scrapes HTML pages to locate downloads for a distribution.
    This runs multiple threads to do the I/O; performance is at least as good
    as pip's PackageFinder, which works in an analogous fashion.
    """

    # These are used to deal with various Content-Encoding schemes.
    decoders = {
        'deflate': zlib.decompress,
        # BUG FIX: the lambda previously referenced the undefined name 'd'
        # instead of its parameter 'b', raising NameError on any gzipped
        # response.
        'gzip': lambda b: gzip.GzipFile(fileobj=BytesIO(b)).read(),
        'none': lambda b: b,
    }

    def __init__(self, url, timeout=None, num_workers=10, **kwargs):
        """
        Initialise an instance.
        :param url: The root URL to use for scraping.
        :param timeout: The timeout, in seconds, to be applied to requests.
                        This defaults to ``None`` (no timeout specified).
        :param num_workers: The number of worker threads you want to do I/O,
                            This defaults to 10.
        :param kwargs: Passed to the superclass.
        """
        super(SimpleScrapingLocator, self).__init__(**kwargs)
        self.base_url = ensure_slash(url)
        self.timeout = timeout
        self._page_cache = {}
        self._seen = set()
        self._to_fetch = queue.Queue()
        self._bad_hosts = set()
        self.skip_externals = False
        self.num_workers = num_workers
        self._lock = threading.RLock()
        # See issue #45: we need to be resilient when the locator is used
        # in a thread, e.g. with concurrent.futures. We can't use self._lock
        # as it is for coordinating our internal threads - the ones created
        # in _prepare_threads.
        self._gplock = threading.RLock()
        self.platform_check = False  # See issue #112

    def _prepare_threads(self):
        """
        Threads are created only when get_project is called, and terminate
        before it returns. They are there primarily to parallelise I/O (i.e.
        fetching web pages).
        """
        self._threads = []
        for i in range(self.num_workers):
            t = threading.Thread(target=self._fetch)
            t.setDaemon(True)
            t.start()
            self._threads.append(t)

    def _wait_threads(self):
        """
        Tell all the threads to terminate (by sending a sentinel value) and
        wait for them to do so.
        """
        # Note that you need two loops, since you can't say which
        # thread will get each sentinel
        for t in self._threads:
            self._to_fetch.put(None)    # sentinel
        for t in self._threads:
            t.join()
        self._threads = []

    def _get_project(self, name):
        # See Locator.get_project for the result layout. The worker threads
        # share self.result/self.project_name, hence the _gplock guard.
        result = {'urls': {}, 'digests': {}}
        with self._gplock:
            self.result = result
            self.project_name = name
            url = urljoin(self.base_url, '%s/' % quote(name))
            self._seen.clear()
            self._page_cache.clear()
            self._prepare_threads()
            try:
                logger.debug('Queueing %s', url)
                self._to_fetch.put(url)
                self._to_fetch.join()
            finally:
                self._wait_threads()
            del self.result
        return result

    platform_dependent = re.compile(r'\b(linux_(i\d86|x86_64|arm\w+)|'
                                    r'win(32|_amd64)|macosx_?\d+)\b', re.I)

    def _is_platform_dependent(self, url):
        """
        Does an URL refer to a platform-specific download?
        """
        return self.platform_dependent.search(url)

    def _process_download(self, url):
        """
        See if an URL is a suitable download for a project.

        If it is, register information in the result dictionary (for
        _get_project) about the specific version it's for.

        Note that the return value isn't actually used other than as a boolean
        value.
        """
        if self.platform_check and self._is_platform_dependent(url):
            info = None
        else:
            info = self.convert_url_to_download_info(url, self.project_name)
        logger.debug('process_download: %s -> %s', url, info)
        if info:
            with self._lock:    # needed because self.result is shared
                self._update_version_data(self.result, info)
        return info

    def _should_queue(self, link, referrer, rel):
        """
        Determine whether a link URL from a referring page and with a
        particular "rel" attribute should be queued for scraping.
        """
        scheme, netloc, path, _, _, _ = urlparse(link)
        if path.endswith(self.source_extensions + self.binary_extensions +
                         self.excluded_extensions):
            result = False
        elif self.skip_externals and not link.startswith(self.base_url):
            result = False
        elif not referrer.startswith(self.base_url):
            result = False
        elif rel not in ('homepage', 'download'):
            result = False
        elif scheme not in ('http', 'https', 'ftp'):
            result = False
        elif self._is_platform_dependent(link):
            result = False
        else:
            host = netloc.split(':', 1)[0]
            if host.lower() == 'localhost':
                result = False
            else:
                result = True
        logger.debug('should_queue: %s (%s) from %s -> %s', link, rel,
                     referrer, result)
        return result

    def _fetch(self):
        """
        Get a URL to fetch from the work queue, get the HTML page, examine its
        links for download candidates and candidates for further scraping.

        This is a handy method to run in a thread.
        """
        while True:
            url = self._to_fetch.get()
            try:
                if url:
                    page = self.get_page(url)
                    if page is None:    # e.g. after an error
                        continue
                    for link, rel in page.links:
                        if link not in self._seen:
                            try:
                                self._seen.add(link)
                                if (not self._process_download(link) and
                                        self._should_queue(link, url, rel)):
                                    logger.debug('Queueing %s from %s', link, url)
                                    self._to_fetch.put(link)
                            except MetadataInvalidError:  # e.g. invalid versions
                                pass
            except Exception as e:  # pragma: no cover
                self.errors.put(text_type(e))
            finally:
                # always do this, to avoid hangs :-)
                self._to_fetch.task_done()
            if not url:
                #logger.debug('Sentinel seen, quitting.')
                break

    def get_page(self, url):
        """
        Get the HTML for an URL, possibly from an in-memory cache.

        XXX TODO Note: this cache is never actually cleared. It's assumed that
        the data won't get stale over the lifetime of a locator instance (not
        necessarily true for the default_locator).
        """
        # http://peak.telecommunity.com/DevCenter/EasyInstall#package-index-api
        scheme, netloc, path, _, _, _ = urlparse(url)
        if scheme == 'file' and os.path.isdir(url2pathname(path)):
            url = urljoin(ensure_slash(url), 'index.html')
        if url in self._page_cache:
            result = self._page_cache[url]
            logger.debug('Returning %s from cache: %s', url, result)
        else:
            host = netloc.split(':', 1)[0]
            result = None
            if host in self._bad_hosts:
                logger.debug('Skipping %s due to bad host %s', url, host)
            else:
                req = Request(url, headers={'Accept-encoding': 'identity'})
                try:
                    logger.debug('Fetching %s', url)
                    resp = self.opener.open(req, timeout=self.timeout)
                    logger.debug('Fetched %s', url)
                    headers = resp.info()
                    content_type = headers.get('Content-Type', '')
                    if HTML_CONTENT_TYPE.match(content_type):
                        final_url = resp.geturl()
                        data = resp.read()
                        encoding = headers.get('Content-Encoding')
                        if encoding:
                            decoder = self.decoders[encoding]   # fail if not found
                            data = decoder(data)
                        encoding = 'utf-8'
                        m = CHARSET.search(content_type)
                        if m:
                            encoding = m.group(1)
                        try:
                            data = data.decode(encoding)
                        except UnicodeError:  # pragma: no cover
                            data = data.decode('latin-1')    # fallback
                        result = Page(data, final_url)
                        self._page_cache[final_url] = result
                except HTTPError as e:
                    if e.code != 404:
                        logger.exception('Fetch failed: %s: %s', url, e)
                except URLError as e:  # pragma: no cover
                    logger.exception('Fetch failed: %s: %s', url, e)
                    with self._lock:
                        self._bad_hosts.add(host)
                except Exception as e:  # pragma: no cover
                    logger.exception('Fetch failed: %s: %s', url, e)
                finally:
                    self._page_cache[url] = result   # even if None (failure)
        return result

    _distname_re = re.compile('<a href=[^>]*>([^<]+)<')

    def get_distribution_names(self):
        """
        Return all the distribution names known to this locator.
        """
        result = set()
        page = self.get_page(self.base_url)
        if not page:
            raise DistlibException('Unable to get %s' % self.base_url)
        for match in self._distname_re.finditer(page.data):
            result.add(match.group(1))
        return result
class DirectoryLocator(Locator):
    """
    This class locates distributions in a directory tree.
    """
    def __init__(self, path, **kwargs):
        """
        Initialise an instance.
        :param path: The root of the directory tree to search.
        :param kwargs: Passed to the superclass constructor,
                       except for:
                       * recursive - if True (the default), subdirectories are
                         recursed into. If False, only the top-level directory
                         is searched,
        """
        self.recursive = kwargs.pop('recursive', True)
        super(DirectoryLocator, self).__init__(**kwargs)
        path = os.path.abspath(path)
        if not os.path.isdir(path):  # pragma: no cover
            raise DistlibException('Not a directory: %r' % path)
        self.base_dir = path

    def should_include(self, filename, parent):
        """
        Should a filename be considered as a candidate for a distribution
        archive? As well as the filename, the directory which contains it
        is provided, though not used by the current implementation.
        """
        return filename.endswith(self.downloadable_extensions)

    def _get_project(self, name):
        # See Locator.get_project for the result layout.
        result = {'urls': {}, 'digests': {}}
        for root, dirs, files in os.walk(self.base_dir):
            for fn in files:
                if self.should_include(fn, root):
                    fn = os.path.join(root, fn)
                    # Convert the filesystem path to a file:// URL so the
                    # generic URL-based machinery can process it.
                    url = urlunparse(('file', '',
                                      pathname2url(os.path.abspath(fn)),
                                      '', '', ''))
                    info = self.convert_url_to_download_info(url, name)
                    if info:
                        self._update_version_data(result, info)
            if not self.recursive:
                break
        return result

    def get_distribution_names(self):
        """
        Return all the distribution names known to this locator.
        """
        result = set()
        for root, dirs, files in os.walk(self.base_dir):
            for fn in files:
                if self.should_include(fn, root):
                    fn = os.path.join(root, fn)
                    url = urlunparse(('file', '',
                                      pathname2url(os.path.abspath(fn)),
                                      '', '', ''))
                    # No project name: any parseable archive contributes.
                    info = self.convert_url_to_download_info(url, None)
                    if info:
                        result.add(info['name'])
            if not self.recursive:
                break
        return result
class JSONLocator(Locator):
    """
    This locator uses special extended metadata (not available on PyPI) and is
    the basis of performant dependency resolution in distlib. Other locators
    require archive downloads before dependencies can be determined! As you
    might imagine, that can be slow.
    """
    def get_distribution_names(self):
        """
        Return all the distribution names known to this locator.
        """
        raise NotImplementedError('Not available from this locator')

    def _get_project(self, name):
        # See Locator.get_project for the result layout.
        result = {'urls': {}, 'digests': {}}
        data = get_project_data(name)
        if data:
            for info in data.get('files', []):
                # Only source distributions are considered here.
                if info['ptype'] != 'sdist' or info['pyversion'] != 'source':
                    continue
                # We don't store summary in project metadata as it makes
                # the data bigger for no benefit during dependency
                # resolution
                dist = make_dist(data['name'], info['version'],
                                 summary=data.get('summary',
                                                  'Placeholder for summary'),
                                 scheme=self.scheme)
                md = dist.metadata
                md.source_url = info['url']
                # TODO SHA256 digest
                if 'digest' in info and info['digest']:
                    dist.digest = ('md5', info['digest'])
                md.dependencies = info.get('requirements', {})
                dist.exports = info.get('exports', {})
                result[dist.version] = dist
                result['urls'].setdefault(dist.version, set()).add(info['url'])
        return result
class DistPathLocator(Locator):
    """
    This locator finds installed distributions in a path. It can be useful for
    adding to an :class:`AggregatingLocator`.
    """
    def __init__(self, distpath, **kwargs):
        """
        Initialise an instance.

        :param distpath: A :class:`DistributionPath` instance to search.
        """
        super(DistPathLocator, self).__init__(**kwargs)
        assert isinstance(distpath, DistributionPath)
        self.distpath = distpath

    def _get_project(self, name):
        # An installed distribution is either present on the path or not;
        # at most one version can ever be reported.
        dist = self.distpath.get_distribution(name)
        if dist is None:
            return {'urls': {}, 'digests': {}}
        return {
            dist.version: dist,
            'urls': {dist.version: set([dist.source_url])},
            'digests': {dist.version: set([None])},
        }
class AggregatingLocator(Locator):
    """
    This class allows you to chain and/or merge a list of locators.
    """
    def __init__(self, *locators, **kwargs):
        """
        Initialise an instance.

        :param locators: The list of locators to search.
        :param kwargs: Passed to the superclass constructor,
                       except for:
                       * merge - if False (the default), the first successful
                         search from any of the locators is returned. If True,
                         the results from all locators are merged (this can be
                         slow).
        """
        self.merge = kwargs.pop('merge', False)
        self.locators = locators
        super(AggregatingLocator, self).__init__(**kwargs)

    def clear_cache(self):
        # Clear our own cache and every child locator's cache.
        super(AggregatingLocator, self).clear_cache()
        for locator in self.locators:
            locator.clear_cache()

    def _set_scheme(self, value):
        # Propagate the version scheme to all child locators so they all
        # interpret version specifiers consistently.
        self._scheme = value
        for locator in self.locators:
            locator.scheme = value

    # Reuse the getter from Locator but install our propagating setter.
    scheme = property(Locator.scheme.fget, _set_scheme)

    def _get_project(self, name):
        result = {}
        for locator in self.locators:
            d = locator.get_project(name)
            if d:
                if self.merge:
                    # Save the accumulated per-version URL sets and digests
                    # before update() potentially replaces them wholesale.
                    files = result.get('urls', {})
                    digests = result.get('digests', {})
                    # next line could overwrite result['urls'], result['digests']
                    result.update(d)
                    df = result.get('urls')
                    if files and df:
                        # Re-merge the per-version URL sets clobbered above.
                        for k, v in files.items():
                            if k in df:
                                df[k] |= v
                            else:
                                df[k] = v
                    dd = result.get('digests')
                    if digests and dd:
                        # NOTE(review): unlike the URL merge above, this puts
                        # the *earlier* digests back over the newer ones for
                        # overlapping versions rather than combining them —
                        # presumably acceptable because digests for the same
                        # version should agree, but verify.
                        dd.update(digests)
                else:
                    # See issue #18. If any dists are found and we're looking
                    # for specific constraints, we only return something if
                    # a match is found. For example, if a DirectoryLocator
                    # returns just foo (1.0) while we're looking for
                    # foo (>= 2.0), we'll pretend there was nothing there so
                    # that subsequent locators can be queried. Otherwise we
                    # would just return foo (1.0) which would then lead to a
                    # failure to find foo (>= 2.0), because other locators
                    # weren't searched. Note that this only matters when
                    # merge=False.
                    if self.matcher is None:
                        found = True
                    else:
                        found = False
                        for k in d:
                            if self.matcher.match(k):
                                found = True
                                break
                    if found:
                        result = d
                        break
        return result

    def get_distribution_names(self):
        """
        Return all the distribution names known to this locator.
        """
        result = set()
        for locator in self.locators:
            try:
                result |= locator.get_distribution_names()
            except NotImplementedError:
                # Some locators (e.g. JSONLocator) cannot enumerate names;
                # just skip their contribution.
                pass
        return result
# We use a legacy scheme simply because most of the dists on PyPI use legacy
# versions which don't conform to PEP 426 / PEP 440.
# Module-level default: try the fast JSON metadata first, then fall back to
# scraping the PyPI simple index.
default_locator = AggregatingLocator(
                    JSONLocator(),
                    SimpleScrapingLocator('https://pypi.python.org/simple/',
                                          timeout=3.0),
                    scheme='legacy')

# Convenience alias: locate(requirement) searches the default locator chain.
locate = default_locator.locate

# Splits a requirement such as "name (== 1.0)" or "name (1.0)" into its
# name and version components.
NAME_VERSION_RE = re.compile(r'(?P<name>[\w-]+)\s*'
                             r'\(\s*(==\s*)?(?P<ver>[^)]+)\)$')
class DependencyFinder(object):
    """
    Locate dependencies for distributions.
    """

    def __init__(self, locator=None):
        """
        Initialise an instance, using the specified locator
        to locate distributions.
        """
        self.locator = locator or default_locator
        self.scheme = get_scheme(self.locator.scheme)

    def add_distribution(self, dist):
        """
        Add a distribution to the finder. This will update internal information
        about who provides what.
        :param dist: The distribution to add.
        """
        logger.debug('adding distribution %s', dist)
        name = dist.key
        self.dists_by_name[name] = dist
        self.dists[(name, dist.version)] = dist
        # Index every "provides" entry so find_providers() can match
        # requirements against provided names, not just the dist's own name.
        for p in dist.provides:
            name, version = parse_name_and_version(p)
            logger.debug('Add to provided: %s, %s, %s', name, version, dist)
            self.provided.setdefault(name, set()).add((version, dist))

    def remove_distribution(self, dist):
        """
        Remove a distribution from the finder. This will update internal
        information about who provides what.
        :param dist: The distribution to remove.
        """
        logger.debug('removing distribution %s', dist)
        name = dist.key
        del self.dists_by_name[name]
        del self.dists[(name, dist.version)]
        for p in dist.provides:
            name, version = parse_name_and_version(p)
            logger.debug('Remove from provided: %s, %s, %s', name, version, dist)
            s = self.provided[name]
            s.remove((version, dist))
            # Drop the key entirely once nothing provides this name.
            if not s:
                del self.provided[name]

    def get_matcher(self, reqt):
        """
        Get a version matcher for a requirement.
        :param reqt: The requirement
        :type reqt: str
        :return: A version matcher (an instance of
                 :class:`distlib.version.Matcher`).
        """
        try:
            matcher = self.scheme.matcher(reqt)
        except UnsupportedVersionError: # pragma: no cover
            # XXX compat-mode if cannot read the version
            name = reqt.split()[0]
            matcher = self.scheme.matcher(name)
        return matcher

    def find_providers(self, reqt):
        """
        Find the distributions which can fulfill a requirement.

        :param reqt: The requirement.
        :type reqt: str
        :return: A set of distribution which can fulfill the requirement.
        """
        matcher = self.get_matcher(reqt)
        name = matcher.key   # case-insensitive
        result = set()
        provided = self.provided
        if name in provided:
            for version, provider in provided[name]:
                try:
                    match = matcher.match(version)
                except UnsupportedVersionError:
                    # Unparseable provided version: treat as non-matching.
                    match = False

                if match:
                    # Only the first matching provider is returned.
                    result.add(provider)
                    break
        return result

    def try_to_replace(self, provider, other, problems):
        """
        Attempt to replace one provider with another. This is typically used
        when resolving dependencies from multiple sources, e.g. A requires
        (B >= 1.0) while C requires (B >= 1.1).

        For successful replacement, ``provider`` must meet all the requirements
        which ``other`` fulfills.

        :param provider: The provider we are trying to replace with.
        :param other: The provider we're trying to replace.
        :param problems: If False is returned, this will contain what
                         problems prevented replacement. This is currently
                         a tuple of the literal string 'cantreplace',
                         ``provider``, ``other`` and the set of requirements
                         that ``provider`` couldn't fulfill.
        :return: True if we can replace ``other`` with ``provider``, else
                 False.
        """
        rlist = self.reqts[other]
        unmatched = set()
        # Collect requirements on `other` that `provider` cannot satisfy.
        for s in rlist:
            matcher = self.get_matcher(s)
            if not matcher.match(provider.version):
                unmatched.add(s)
        if unmatched:
            # can't replace other with provider
            problems.add(('cantreplace', provider, other,
                          frozenset(unmatched)))
            result = False
        else:
            # can replace other with provider
            self.remove_distribution(other)
            del self.reqts[other]
            # Transfer the requirements previously attached to `other`.
            for s in rlist:
                self.reqts.setdefault(provider, set()).add(s)
            self.add_distribution(provider)
            result = True
        return result

    def find(self, requirement, meta_extras=None, prereleases=False):
        """
        Find a distribution and all distributions it depends on.

        :param requirement: The requirement specifying the distribution to
                            find, or a Distribution instance.
        :param meta_extras: A list of meta extras such as :test:, :build: and
                            so on.
        :param prereleases: If ``True``, allow pre-release versions to be
                            returned - otherwise, don't return prereleases
                            unless they're all that's available.

        Return a set of :class:`Distribution` instances and a set of
        problems.

        The distributions returned should be such that they have the
        :attr:`required` attribute set to ``True`` if they were
        from the ``requirement`` passed to ``find()``, and they have the
        :attr:`build_time_dependency` attribute set to ``True`` unless they
        are post-installation dependencies of the ``requirement``.

        The problems should be a tuple consisting of the string
        ``'unsatisfied'`` and the requirement which couldn't be satisfied
        by any distribution known to the locator.
        """
        # Reset per-call resolution state.
        self.provided = {}
        self.dists = {}
        self.dists_by_name = {}
        self.reqts = {}

        meta_extras = set(meta_extras or [])
        if ':*:' in meta_extras:
            meta_extras.remove(':*:')
            # :meta: and :run: are implicitly included
            meta_extras |= set([':test:', ':build:', ':dev:'])

        if isinstance(requirement, Distribution):
            dist = odist = requirement
            logger.debug('passed %s as requirement', odist)
        else:
            dist = odist = self.locator.locate(requirement,
                                               prereleases=prereleases)
            if dist is None:
                raise DistlibException('Unable to locate %r' % requirement)
            logger.debug('located %s', odist)
        dist.requested = True
        problems = set()
        # Work-list of distributions whose requirements still need expanding.
        todo = set([dist])
        install_dists = set([odist])
        while todo:
            dist = todo.pop()
            name = dist.key     # case-insensitive
            if name not in self.dists_by_name:
                self.add_distribution(dist)
            else:
                #import pdb; pdb.set_trace()
                other = self.dists_by_name[name]
                if other != dist:
                    self.try_to_replace(dist, other, problems)

            # Install-time vs build-time requirements are tracked separately;
            # extras requested via meta_extras only apply to install dists.
            ireqts = dist.run_requires | dist.meta_requires
            sreqts = dist.build_requires
            ereqts = set()
            if meta_extras and dist in install_dists:
                for key in ('test', 'build', 'dev'):
                    e = ':%s:' % key
                    if e in meta_extras:
                        ereqts |= getattr(dist, '%s_requires' % key)
            all_reqts = ireqts | sreqts | ereqts
            for r in all_reqts:
                providers = self.find_providers(r)
                if not providers:
                    logger.debug('No providers found for %r', r)
                    provider = self.locator.locate(r, prereleases=prereleases)
                    # If no provider is found and we didn't consider
                    # prereleases, consider them now.
                    if provider is None and not prereleases:
                        provider = self.locator.locate(r, prereleases=True)
                    if provider is None:
                        logger.debug('Cannot satisfy %r', r)
                        problems.add(('unsatisfied', r))
                    else:
                        n, v = provider.key, provider.version
                        if (n, v) not in self.dists:
                            todo.add(provider)
                        providers.add(provider)
                        if r in ireqts and dist in install_dists:
                            install_dists.add(provider)
                            logger.debug('Adding %s to install_dists',
                                         provider.name_and_version)
                for p in providers:
                    name = p.key
                    if name not in self.dists_by_name:
                        self.reqts.setdefault(p, set()).add(r)
                    else:
                        other = self.dists_by_name[name]
                        if other != p:
                            # see if other can be replaced by p
                            self.try_to_replace(p, other, problems)

        dists = set(self.dists.values())
        # Anything not reachable through install requirements is build-only.
        for dist in dists:
            dist.build_time_dependency = dist not in install_dists
            if dist.build_time_dependency:
                logger.debug('%s is a build-time dependency only.',
                             dist.name_and_version)
        logger.debug('find done for %s', odist)
        return dists, problems
|
main.py | from tkinter import Tk, PhotoImage, Label, Entry, Button, StringVar
from tkinter.filedialog import askdirectory
from tkinter.messagebox import showinfo
from functools import partial
from threading import Thread
from pytube import YouTube
def dldfunc(url1):
    """Download the 720p progressive stream of the video named by *url1*.

    Prompts for a destination folder, downloads via pytube, shows a
    confirmation dialog, and clears the URL entry afterwards. Also stores
    the video's thumbnail URL in the module-level ``thumburl``.

    Args:
        url1 (StringVar): variable bound to the URL Entry widget.
    """
    global thumburl
    url2 = url1.get()
    path = askdirectory()
    yt = YouTube(url2)
    thumburl = yt.thumbnail_url
    yt.streams.filter(progressive=True, res='720p').first().download(path)
    showinfo("Downloaded", 'Download Successfull!')
    # BUG FIX: the original called Entry.delete(0, 'end') on the Entry
    # *class* (no widget instance), which raises a TypeError. Clearing the
    # bound StringVar empties the entry widget it backs.
    url1.set('')
def altthread(url):
    """Run dldfunc(url) on a worker thread so the GUI stays responsive.

    Args:
        url (StringVar): variable bound to the URL Entry widget.
    """
    # BUG FIX: the original used Thread(target=dldfunc(url)), which CALLS
    # dldfunc immediately on the GUI thread (blocking it) and passes its
    # return value (None) as the thread target. Pass the callable and its
    # arguments separately so the download really runs in the background.
    dlthread = Thread(target=dldfunc, args=(url,))
    dlthread.start()
# gui
# Build the single-window Tk UI: banner image, URL entry bound to a
# StringVar, and a Download button that hands off to the worker thread.
root = Tk()
root.geometry('400x500')
# NOTE(review): 'file' shadows the builtin of the same name; keeping a
# module-level reference also prevents the PhotoImage from being
# garbage-collected while displayed. Path is Windows-style — confirm it
# resolves relative to the working directory this script is launched from.
file = PhotoImage(file='youtube-video-downloader\\images\\head_icon.png')
Label(root, image=file).pack(side='top')
root.title('PytubeDL-Unofficial')
url = StringVar()
# \U00002193 is the downwards-arrow glyph.
Label(root, text="Input URL \U00002193").pack()
Entry(root, textvariable=url).pack(side='top', fill='x', padx=5)
# partial() pre-binds the StringVar so the Button command takes no args.
dldfunc1 = partial(altthread, url)
btn = Button(root, text="Download", command=dldfunc1)
btn.pack(side='top')
root.mainloop()
|
gcsio.py | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Google Cloud Storage client.
This library evolved from the Google App Engine GCS client available at
https://github.com/GoogleCloudPlatform/appengine-gcs-client.
"""
import cStringIO
import errno
import fnmatch
import logging
import multiprocessing
import os
import Queue
import re
import threading
import time
import traceback
import httplib2
from apache_beam.utils import retry
__all__ = ['GcsIO']
# Issue a friendlier error message if the storage library is not available.
# TODO(silviuc): Remove this guard when storage is available everywhere.
try:
# pylint: disable=wrong-import-order, wrong-import-position
# pylint: disable=ungrouped-imports
import apitools.base.py.transfer as transfer
from apitools.base.py.batch import BatchApiRequest
from apitools.base.py.exceptions import HttpError
from apache_beam.internal.gcp import auth
from apache_beam.io.gcp.internal.clients import storage
except ImportError:
raise ImportError(
'Google Cloud Storage I/O not supported for this execution environment '
'(could not import storage API client).')
# This is the size of each partial-file read operation from GCS. This
# parameter was chosen to give good throughput while keeping memory usage at
# a reasonable level; the following table shows throughput reached when
# reading files of a given size with a chosen buffer size and informed the
# choice of the value, as of 11/2016:
#
# +---------------+------------+-------------+-------------+-------------+
# | | 50 MB file | 100 MB file | 200 MB file | 400 MB file |
# +---------------+------------+-------------+-------------+-------------+
# | 8 MB buffer | 17.12 MB/s | 22.67 MB/s | 23.81 MB/s | 26.05 MB/s |
# | 16 MB buffer | 24.21 MB/s | 42.70 MB/s | 42.89 MB/s | 46.92 MB/s |
# | 32 MB buffer | 28.53 MB/s | 48.08 MB/s | 54.30 MB/s | 54.65 MB/s |
# | 400 MB buffer | 34.72 MB/s | 71.13 MB/s | 79.13 MB/s | 85.39 MB/s |
# +---------------+------------+-------------+-------------+-------------+
DEFAULT_READ_BUFFER_SIZE = 16 * 1024 * 1024
# This is the number of seconds the library will wait for GCS operations to
# complete.
DEFAULT_HTTP_TIMEOUT_SECONDS = 60
# This is the number of seconds the library will wait for a partial-file read
# operation from GCS to complete before retrying.
DEFAULT_READ_SEGMENT_TIMEOUT_SECONDS = 60
# This is the size of chunks used when writing to GCS.
WRITE_CHUNK_SIZE = 8 * 1024 * 1024
# Maximum number of operations permitted in GcsIO.copy_batch() and
# GcsIO.delete_batch().
MAX_BATCH_OPERATION_SIZE = 100
def parse_gcs_path(gcs_path):
  """Split a gs://<bucket>/<object> path into its bucket and object names.

  Args:
    gcs_path: path of the form gs://<bucket>/<object>.

  Returns:
    Tuple of (bucket, object) name strings.

  Raises:
    ValueError: if the path is not of the expected form.
  """
  parsed = re.match('^gs://([^/]+)/(.+)$', gcs_path)
  if not parsed:
    raise ValueError('GCS path must be in the form gs://<bucket>/<object>.')
  bucket_name, object_name = parsed.groups()
  return bucket_name, object_name
class GcsIOError(IOError, retry.PermanentException):
  """GCS IO error that should not be retried.

  Deriving from retry.PermanentException tells this module's retry
  decorators not to retry the failed operation, while IOError keeps it
  catchable by callers expecting standard I/O failures.
  """
  pass
class GcsIO(object):
  """Google Cloud Storage I/O client."""

  def __new__(cls, storage_client=None):
    # When an explicit client is passed (tests), build a plain instance.
    if storage_client:
      # This path is only used for testing.
      return super(GcsIO, cls).__new__(cls)
    else:
      # Create a single storage client for each thread. We would like to avoid
      # creating more than one storage client for each thread, since each
      # initialization requires the relatively expensive step of initializing
      # credentials.
      # NOTE(review): local_state is a *fresh* threading.local() on every
      # call, so the getattr() below is always None and the intended
      # per-thread cache never hits — a new client is created per call.
      # The local would need module/class scope to actually cache; verify.
      local_state = threading.local()
      if getattr(local_state, 'gcsio_instance', None) is None:
        credentials = auth.get_service_credentials()
        storage_client = storage.StorageV1(
            credentials=credentials,
            get_credentials=False,
            http=httplib2.Http(timeout=DEFAULT_HTTP_TIMEOUT_SECONDS))
        # NOTE(review): passing an extra argument to object.__new__ relies
        # on Python 2 semantics; Python 3 raises TypeError here.
        local_state.gcsio_instance = (
            super(GcsIO, cls).__new__(cls, storage_client))
        local_state.gcsio_instance.client = storage_client
      return local_state.gcsio_instance

  def __init__(self, storage_client=None):
    # We must do this check on storage_client because the client attribute may
    # have already been set in __new__ for the singleton case when
    # storage_client is None.
    if storage_client is not None:
      self.client = storage_client

  def open(self,
           filename,
           mode='r',
           read_buffer_size=DEFAULT_READ_BUFFER_SIZE,
           mime_type='application/octet-stream'):
    """Open a GCS file path for reading or writing.

    Args:
      filename (str): GCS file path in the form ``gs://<bucket>/<object>``.
      mode (str): ``'r'`` for reading or ``'w'`` for writing.
      read_buffer_size (int): Buffer size to use during read operations.
      mime_type (str): Mime type to set for write operations.

    Returns:
      GCS file object.

    Raises:
      ~exceptions.ValueError: Invalid open file mode.
    """
    if mode == 'r' or mode == 'rb':
      return GcsBufferedReader(self.client, filename, mode=mode,
                               buffer_size=read_buffer_size)
    elif mode == 'w' or mode == 'wb':
      return GcsBufferedWriter(self.client, filename, mode=mode,
                               mime_type=mime_type)
    else:
      raise ValueError('Invalid file open mode: %s.' % mode)

  @retry.with_exponential_backoff(
      retry_filter=retry.retry_on_server_errors_and_timeout_filter)
  def glob(self, pattern, limit=None):
    """Return the GCS path names matching a given path name pattern.

    Path name patterns are those recognized by fnmatch.fnmatch(). The path
    can contain glob characters (*, ?, and [...] sets).

    Args:
      pattern: GCS file path pattern in the form gs://<bucket>/<name_pattern>.
      limit: Maximal number of path names to return.
        All matching paths are returned if set to None.

    Returns:
      list of GCS file paths matching the given pattern.
    """
    bucket, name_pattern = parse_gcs_path(pattern)
    # Get the prefix with which we can list objects in the given bucket.
    prefix = re.match('^[^[*?]*', name_pattern).group(0)
    request = storage.StorageObjectsListRequest(bucket=bucket, prefix=prefix)
    object_paths = []
    # Page through the listing, fnmatch-filtering each page locally.
    while True:
      response = self.client.objects.List(request)
      for item in response.items:
        if fnmatch.fnmatch(item.name, name_pattern):
          object_paths.append('gs://%s/%s' % (item.bucket, item.name))
      if response.nextPageToken:
        request.pageToken = response.nextPageToken
        if limit is not None and len(object_paths) >= limit:
          break
      else:
        break
    # A full page may overshoot the limit; trim to at most `limit` entries.
    return object_paths[:limit]

  @retry.with_exponential_backoff(
      retry_filter=retry.retry_on_server_errors_and_timeout_filter)
  def delete(self, path):
    """Deletes the object at the given GCS path.

    Args:
      path: GCS file path pattern in the form gs://<bucket>/<name>.
    """
    bucket, object_path = parse_gcs_path(path)
    request = storage.StorageObjectsDeleteRequest(
        bucket=bucket, object=object_path)
    try:
      self.client.objects.Delete(request)
    except HttpError as http_error:
      if http_error.status_code == 404:
        # Return success when the file doesn't exist anymore for idempotency.
        return
      raise

  # We intentionally do not decorate this method with a retry, as retrying is
  # handled in BatchApiRequest.Execute().
  def delete_batch(self, paths):
    """Deletes the objects at the given GCS paths.

    Args:
      paths: List of GCS file path patterns in the form gs://<bucket>/<name>,
             not to exceed MAX_BATCH_OPERATION_SIZE in length.

    Returns: List of tuples of (path, exception) in the same order as the paths
             argument, where exception is None if the operation succeeded or
             the relevant exception if the operation failed.
    """
    if not paths:
      return []
    batch_request = BatchApiRequest(
        retryable_codes=retry.SERVER_ERROR_OR_TIMEOUT_CODES)
    for path in paths:
      bucket, object_path = parse_gcs_path(path)
      request = storage.StorageObjectsDeleteRequest(
          bucket=bucket, object=object_path)
      batch_request.Add(self.client.objects, 'Delete', request)
    api_calls = batch_request.Execute(self.client._http) # pylint: disable=protected-access
    # Results come back in request order; pair each with its input path.
    result_statuses = []
    for i, api_call in enumerate(api_calls):
      path = paths[i]
      exception = None
      if api_call.is_error:
        exception = api_call.exception
        # Return success when the file doesn't exist anymore for idempotency.
        if isinstance(exception, HttpError) and exception.status_code == 404:
          exception = None
      result_statuses.append((path, exception))
    return result_statuses

  @retry.with_exponential_backoff(
      retry_filter=retry.retry_on_server_errors_and_timeout_filter)
  def copy(self, src, dest):
    """Copies the given GCS object from src to dest.

    Args:
      src: GCS file path pattern in the form gs://<bucket>/<name>.
      dest: GCS file path pattern in the form gs://<bucket>/<name>.
    """
    src_bucket, src_path = parse_gcs_path(src)
    dest_bucket, dest_path = parse_gcs_path(dest)
    request = storage.StorageObjectsCopyRequest(
        sourceBucket=src_bucket,
        sourceObject=src_path,
        destinationBucket=dest_bucket,
        destinationObject=dest_path)
    try:
      self.client.objects.Copy(request)
    except HttpError as http_error:
      if http_error.status_code == 404:
        # This is a permanent error that should not be retried. Note that
        # FileBasedSink.finalize_write expects an IOError when the source
        # file does not exist.
        raise GcsIOError(errno.ENOENT, 'Source file not found: %s' % src)
      raise

  # We intentionally do not decorate this method with a retry, as retrying is
  # handled in BatchApiRequest.Execute().
  def copy_batch(self, src_dest_pairs):
    """Copies the given GCS object from src to dest.

    Args:
      src_dest_pairs: list of (src, dest) tuples of gs://<bucket>/<name> files
                      paths to copy from src to dest, not to exceed
                      MAX_BATCH_OPERATION_SIZE in length.

    Returns: List of tuples of (src, dest, exception) in the same order as the
             src_dest_pairs argument, where exception is None if the operation
             succeeded or the relevant exception if the operation failed.
    """
    if not src_dest_pairs:
      return []
    batch_request = BatchApiRequest(
        retryable_codes=retry.SERVER_ERROR_OR_TIMEOUT_CODES)
    for src, dest in src_dest_pairs:
      src_bucket, src_path = parse_gcs_path(src)
      dest_bucket, dest_path = parse_gcs_path(dest)
      request = storage.StorageObjectsCopyRequest(
          sourceBucket=src_bucket,
          sourceObject=src_path,
          destinationBucket=dest_bucket,
          destinationObject=dest_path)
      batch_request.Add(self.client.objects, 'Copy', request)
    api_calls = batch_request.Execute(self.client._http) # pylint: disable=protected-access
    result_statuses = []
    for i, api_call in enumerate(api_calls):
      src, dest = src_dest_pairs[i]
      exception = None
      if api_call.is_error:
        exception = api_call.exception
        # Translate 404 to the appropriate not found exception.
        if isinstance(exception, HttpError) and exception.status_code == 404:
          exception = (
              GcsIOError(errno.ENOENT, 'Source file not found: %s' % src))
      result_statuses.append((src, dest, exception))
    return result_statuses

  # We intentionally do not decorate this method with a retry, since the
  # underlying copy and delete operations are already idempotent operations
  # protected by retry decorators.
  def copytree(self, src, dest):
    """Renames the given GCS "directory" recursively from src to dest.

    Args:
      src: GCS file path pattern in the form gs://<bucket>/<name>/.
      dest: GCS file path pattern in the form gs://<bucket>/<name>/.
    """
    assert src.endswith('/')
    assert dest.endswith('/')
    # Copy every object under the src prefix to the same relative path
    # under the dest prefix.
    for entry in self.glob(src + '*'):
      rel_path = entry[len(src):]
      self.copy(entry, dest + rel_path)

  # We intentionally do not decorate this method with a retry, since the
  # underlying copy and delete operations are already idempotent operations
  # protected by retry decorators.
  def rename(self, src, dest):
    """Renames the given GCS object from src to dest.

    Args:
      src: GCS file path pattern in the form gs://<bucket>/<name>.
      dest: GCS file path pattern in the form gs://<bucket>/<name>.
    """
    # GCS has no atomic rename; emulate with copy-then-delete.
    self.copy(src, dest)
    self.delete(src)

  @retry.with_exponential_backoff(
      retry_filter=retry.retry_on_server_errors_and_timeout_filter)
  def exists(self, path):
    """Returns whether the given GCS object exists.

    Args:
      path: GCS file path pattern in the form gs://<bucket>/<name>.
    """
    bucket, object_path = parse_gcs_path(path)
    try:
      request = storage.StorageObjectsGetRequest(
          bucket=bucket, object=object_path)
      self.client.objects.Get(request)  # metadata
      return True
    except HttpError as http_error:
      if http_error.status_code == 404:
        # HTTP 404 indicates that the file did not exist
        return False
      else:
        # We re-raise all other exceptions
        raise

  @retry.with_exponential_backoff(
      retry_filter=retry.retry_on_server_errors_and_timeout_filter)
  def size(self, path):
    """Returns the size of a single GCS object.

    This method does not perform glob expansion. Hence the given path must be
    for a single GCS object.

    Returns: size of the GCS object in bytes.
    """
    bucket, object_path = parse_gcs_path(path)
    request = storage.StorageObjectsGetRequest(
        bucket=bucket, object=object_path)
    return self.client.objects.Get(request).size

  @retry.with_exponential_backoff(
      retry_filter=retry.retry_on_server_errors_and_timeout_filter)
  def size_of_files_in_glob(self, pattern, limit=None):
    """Returns the size of all the files in the glob as a dictionary

    Args:
      pattern: a file path pattern that reads the size of all the files
      limit: optional maximum number of files to include.
    """
    bucket, name_pattern = parse_gcs_path(pattern)
    # Get the prefix with which we can list objects in the given bucket.
    prefix = re.match('^[^[*?]*', name_pattern).group(0)
    request = storage.StorageObjectsListRequest(bucket=bucket, prefix=prefix)
    file_sizes = {}
    counter = 0
    start_time = time.time()
    logging.info("Starting the size estimation of the input")
    # Page through the listing, accumulating {path: size} for matches.
    while True:
      response = self.client.objects.List(request)
      for item in response.items:
        if fnmatch.fnmatch(item.name, name_pattern):
          file_name = 'gs://%s/%s' % (item.bucket, item.name)
          file_sizes[file_name] = item.size
          counter += 1
        if limit is not None and counter >= limit:
          break
        if counter % 10000 == 0:
          # Periodic progress logging for very large listings.
          logging.info("Finished computing size of: %s files", len(file_sizes))
      if response.nextPageToken:
        request.pageToken = response.nextPageToken
        if limit is not None and len(file_sizes) >= limit:
          break
      else:
        break
    logging.info(
        "Finished the size estimation of the input at %s files. " +\
        "Estimation took %s seconds", counter, time.time() - start_time)
    return file_sizes
# TODO: Consider using cStringIO instead of buffers and data_lists when reading.
class GcsBufferedReader(object):
"""A class for reading Google Cloud Storage files."""
def __init__(self,
client,
path,
mode='r',
buffer_size=DEFAULT_READ_BUFFER_SIZE,
segment_timeout=DEFAULT_READ_SEGMENT_TIMEOUT_SECONDS):
self.client = client
self.path = path
self.bucket, self.name = parse_gcs_path(path)
self.mode = mode
self.buffer_size = buffer_size
self.segment_timeout = segment_timeout
# Get object state.
self.get_request = (storage.StorageObjectsGetRequest(
bucket=self.bucket, object=self.name))
try:
metadata = self._get_object_metadata(self.get_request)
except HttpError as http_error:
if http_error.status_code == 404:
raise IOError(errno.ENOENT, 'Not found: %s' % self.path)
else:
logging.error('HTTP error while requesting file %s: %s', self.path,
http_error)
raise
self.size = metadata.size
# Ensure read is from file of the correct generation.
self.get_request.generation = metadata.generation
# Initialize read buffer state.
self.download_stream = cStringIO.StringIO()
self.downloader = transfer.Download(
self.download_stream, auto_transfer=False, chunksize=self.buffer_size)
self.client.objects.Get(self.get_request, download=self.downloader)
self.position = 0
self.buffer = ''
self.buffer_start_position = 0
self.closed = False
@retry.with_exponential_backoff(
retry_filter=retry.retry_on_server_errors_and_timeout_filter)
def _get_object_metadata(self, get_request):
return self.client.objects.Get(get_request)
def __iter__(self):
return self
def __next__(self):
"""Read one line delimited by '\\n' from the file.
"""
return next(self)
def next(self):
"""Read one line delimited by '\\n' from the file.
"""
line = self.readline()
if not line:
raise StopIteration
return line
def read(self, size=-1):
"""Read data from a GCS file.
Args:
size: Number of bytes to read. Actual number of bytes read is always
equal to size unless EOF is reached. If size is negative or
unspecified, read the entire file.
Returns:
data read as str.
Raises:
IOError: When this buffer is closed.
"""
return self._read_inner(size=size, readline=False)
def readline(self, size=-1):
"""Read one line delimited by '\\n' from the file.
Mimics behavior of the readline() method on standard file objects.
A trailing newline character is kept in the string. It may be absent when a
file ends with an incomplete line. If the size argument is non-negative,
it specifies the maximum string size (counting the newline) to return.
A negative size is the same as unspecified. Empty string is returned
only when EOF is encountered immediately.
Args:
size: Maximum number of bytes to read. If not specified, readline stops
only on '\\n' or EOF.
Returns:
The data read as a string.
Raises:
IOError: When this buffer is closed.
"""
return self._read_inner(size=size, readline=True)
def _read_inner(self, size=-1, readline=False):
"""Shared implementation of read() and readline()."""
self._check_open()
if not self._remaining():
return ''
# Prepare to read.
data_list = []
if size is None:
size = -1
to_read = min(size, self._remaining())
if to_read < 0:
to_read = self._remaining()
break_after = False
while to_read > 0:
# If we have exhausted the buffer, get the next segment.
# TODO(ccy): We should consider prefetching the next block in another
# thread.
self._fetch_next_if_buffer_exhausted()
# Determine number of bytes to read from buffer.
buffer_bytes_read = self.position - self.buffer_start_position
bytes_to_read_from_buffer = min(
len(self.buffer) - buffer_bytes_read, to_read)
# If readline is set, we only want to read up to and including the next
# newline character.
if readline:
next_newline_position = self.buffer.find('\n', buffer_bytes_read,
len(self.buffer))
if next_newline_position != -1:
bytes_to_read_from_buffer = (
1 + next_newline_position - buffer_bytes_read)
break_after = True
# Read bytes.
data_list.append(self.buffer[buffer_bytes_read:buffer_bytes_read +
bytes_to_read_from_buffer])
self.position += bytes_to_read_from_buffer
to_read -= bytes_to_read_from_buffer
if break_after:
break
return ''.join(data_list)
def _fetch_next_if_buffer_exhausted(self):
if not self.buffer or (
self.buffer_start_position + len(self.buffer) <= self.position):
bytes_to_request = min(self._remaining(), self.buffer_size)
self.buffer_start_position = self.position
retry_count = 0
while retry_count <= 10:
queue = Queue.Queue()
t = threading.Thread(target=self._fetch_to_queue,
args=(queue, self._get_segment,
(self.position, bytes_to_request)))
t.daemon = True
t.start()
try:
result, exn, tb = queue.get(timeout=self.segment_timeout)
except Queue.Empty:
logging.warning(
('Timed out fetching %d bytes from position %d of %s after %f '
'seconds; retrying...'), bytes_to_request, self.position,
self.path, self.segment_timeout)
retry_count += 1
# Reinitialize download objects.
self.download_stream = cStringIO.StringIO()
self.downloader = transfer.Download(
self.download_stream, auto_transfer=False,
chunksize=self.buffer_size)
self.client.objects.Get(self.get_request, download=self.downloader)
continue
if exn:
logging.error(
('Exception while fetching %d bytes from position %d of %s: '
'%s\n%s'),
bytes_to_request, self.position, self.path, exn, tb)
raise exn
self.buffer = result
return
raise GcsIOError(
'Reached retry limit for _fetch_next_if_buffer_exhausted.')
def _fetch_to_queue(self, queue, func, args):
try:
value = func(*args)
queue.put((value, None, None))
except Exception as e: # pylint: disable=broad-except
tb = traceback.format_exc()
queue.put((None, e, tb))
def _remaining(self):
return self.size - self.position
def close(self):
"""Close the current GCS file."""
self.closed = True
self.download_stream = None
self.downloader = None
self.buffer = None
def _get_segment(self, start, size):
"""Get the given segment of the current GCS file."""
if size == 0:
return ''
# The objects self.downloader and self.download_stream may be recreated if
# this call times out; we save them locally to avoid any threading issues.
downloader = self.downloader
download_stream = self.download_stream
end = start + size - 1
downloader.GetRange(start, end)
value = download_stream.getvalue()
# Clear the cStringIO object after we've read its contents.
download_stream.truncate(0)
assert len(value) == size
return value
def __enter__(self):
return self
def __exit__(self, exception_type, exception_value, traceback):
self.close()
def seek(self, offset, whence=os.SEEK_SET):
    """Set the file's current offset.

    An out-of-bound target offset is clamped to 0 or to EOF.

    Args:
      offset: seek offset as number.
      whence: seek mode. Supported modes are os.SEEK_SET (absolute seek),
        os.SEEK_CUR (seek relative to the current position), and os.SEEK_END
        (seek relative to the end, offset should be negative).

    Raises:
      IOError: When this buffer is closed.
      ValueError: When whence is invalid.
    """
    self._check_open()

    # Any buffered data is invalidated by repositioning.
    self.buffer = ''
    self.buffer_start_position = -1

    if whence == os.SEEK_SET:
        target = offset
    elif whence == os.SEEK_CUR:
        target = self.position + offset
    elif whence == os.SEEK_END:
        target = self.size + offset
    else:
        raise ValueError('Whence mode %r is invalid.' % whence)

    # Clamp the new offset into [0, size].
    self.position = max(0, min(target, self.size))
def tell(self):
    """Return the current read offset within the file.

    Raises:
      IOError: When this buffer is closed.
    """
    self._check_open()
    return self.position
def _check_open(self):
    """Raise IOError if this buffer has already been closed."""
    if not self.closed:
        return
    raise IOError('Buffer is closed.')
def seekable(self):
    # Random access is supported via seek().
    return True

def readable(self):
    # This object only reads from GCS.
    return True

def writable(self):
    # Writing is not supported; see GcsBufferedWriter below for writes.
    return False
# TODO: Consider using cStringIO instead of buffers and data_lists when reading
# and writing.
class GcsBufferedWriter(object):
    """A class for writing Google Cloud Storage files.

    Bytes passed to write() are buffered locally, pushed through a
    multiprocessing pipe, and streamed to GCS by a daemon thread running a
    resumable apitools upload, giving asynchronous I/O to GCS.
    """

    class PipeStream(object):
        """A class that presents a pipe connection as a readable stream."""

        def __init__(self, recv_pipe):
            # Receiving end of the pipe fed by GcsBufferedWriter.write().
            self.conn = recv_pipe
            self.closed = False
            self.position = 0
            # Bytes received from the pipe but not yet consumed by read().
            self.remaining = ''

        def read(self, size):
            """Read data from the wrapped pipe connection.

            Args:
              size: Number of bytes to read. Actual number of bytes read is
                always equal to size unless EOF is reached.

            Returns:
              data read as str.
            """
            data_list = []
            bytes_read = 0
            while bytes_read < size:
                bytes_from_remaining = min(size - bytes_read, len(self.remaining))
                data_list.append(self.remaining[0:bytes_from_remaining])
                self.remaining = self.remaining[bytes_from_remaining:]
                self.position += bytes_from_remaining
                bytes_read += bytes_from_remaining
                if not self.remaining:
                    # Refill from the pipe; EOFError means the sending end was
                    # closed, i.e. the writer has finished.
                    try:
                        self.remaining = self.conn.recv_bytes()
                    except EOFError:
                        break
            return ''.join(data_list)

        def tell(self):
            """Tell the file's current offset.

            Returns:
              current offset in reading this file.

            Raises:
              IOError: When this stream is closed.
            """
            self._check_open()
            return self.position

        def seek(self, offset, whence=os.SEEK_SET):
            # The apitools.base.py.transfer.Upload class insists on seeking to
            # the end of a stream to do a check before completing an upload, so
            # we must have this no-op method here in that case.
            if whence == os.SEEK_END and offset == 0:
                return
            elif whence == os.SEEK_SET and offset == self.position:
                return
            raise NotImplementedError

        def _check_open(self):
            # Guard used by tell(); the stream must not be used once closed.
            if self.closed:
                raise IOError('Stream is closed.')

    def __init__(self,
                 client,
                 path,
                 mode='w',
                 mime_type='application/octet-stream'):
        """Open `path` for writing and start the background upload thread.

        Args:
          client: GCS API client used to issue the object insert request.
          path: GCS path of the form gs://<bucket>/<object>.
          mode: open mode; kept for file-API compatibility.
          mime_type: content type recorded on the uploaded object.
        """
        self.client = client
        self.path = path
        self.mode = mode
        self.bucket, self.name = parse_gcs_path(path)
        self.closed = False
        self.position = 0

        # A small buffer to avoid CPU-heavy per-write pipe calls.
        self.write_buffer = bytearray()
        self.write_buffer_size = 128 * 1024

        # Set up communication with uploading thread.
        parent_conn, child_conn = multiprocessing.Pipe()
        self.child_conn = child_conn
        self.conn = parent_conn

        # Set up uploader.
        self.insert_request = (storage.StorageObjectsInsertRequest(
            bucket=self.bucket, name=self.name))
        self.upload = transfer.Upload(
            GcsBufferedWriter.PipeStream(child_conn),
            mime_type,
            chunksize=WRITE_CHUNK_SIZE)
        self.upload.strategy = transfer.RESUMABLE_UPLOAD

        # Start uploading thread.
        self.upload_thread = threading.Thread(target=self._start_upload)
        self.upload_thread.daemon = True
        # Holds the first exception raised by the upload thread, if any.
        self.upload_thread.last_error = None
        self.upload_thread.start()

    # TODO(silviuc): Refactor so that retry logic can be applied.
    # There is retry logic in the underlying transfer library but we should make
    # it more explicit so we can control the retry parameters.
    @retry.no_retries  # Using no_retries marks this as an integration point.
    def _start_upload(self):
        # This starts the uploader thread. We are forced to run the uploader in
        # another thread because the apitools uploader insists on taking a stream
        # as input. Happily, this also means we get asynchronous I/O to GCS.
        #
        # The uploader by default transfers data in chunks of 1024 * 1024 bytes at
        # a time, buffering writes until that size is reached.
        try:
            self.client.objects.Insert(self.insert_request, upload=self.upload)
        except Exception as e:  # pylint: disable=broad-except
            logging.error('Error in _start_upload while inserting file %s: %s',
                          self.path, traceback.format_exc())
            self.upload_thread.last_error = e
        finally:
            # Always close our end of the pipe so PipeStream.read() sees EOF.
            self.child_conn.close()

    def write(self, data):
        """Write data to a GCS file.

        Args:
          data: data to write as str.

        Raises:
          IOError: When this buffer is closed.
        """
        self._check_open()
        if not data:
            return
        self.write_buffer.extend(data)
        if len(self.write_buffer) > self.write_buffer_size:
            self._flush_write_buffer()
        self.position += len(data)

    def flush(self):
        """Flushes any internal buffer to the underlying GCS file."""
        self._check_open()
        self._flush_write_buffer()

    def tell(self):
        """Return the total number of bytes passed to write() so far."""
        return self.position

    def close(self):
        """Close the current GCS file.

        Flushes buffered data, waits for the upload thread to finish, and
        re-raises any error the upload thread recorded.
        """
        if self.closed:
            # logging.warn is a deprecated alias; use logging.warning.
            logging.warning('Channel for %s is not open.', self.path)
            return

        self._flush_write_buffer()
        self.closed = True
        # Closing our end of the pipe signals EOF to the uploader.
        self.conn.close()
        self.upload_thread.join()
        # Check for exception since the last _flush_write_buffer() call.
        if self.upload_thread.last_error:
            raise self.upload_thread.last_error  # pylint: disable=raising-bad-type

    def __enter__(self):
        return self

    def __exit__(self, exception_type, exception_value, traceback):
        self.close()

    def _check_open(self):
        # Writes and flushes are invalid once the writer is closed.
        if self.closed:
            raise IOError('Buffer is closed.')

    def seekable(self):
        return False

    def readable(self):
        return False

    def writable(self):
        return True

    def _flush_write_buffer(self):
        # Hand the buffered bytes to the upload thread via the pipe; buffer()
        # avoids copying the bytearray (Python 2 idiom).
        try:
            self.conn.send_bytes(buffer(self.write_buffer))
            self.write_buffer = bytearray()
        except IOError:
            # A broken pipe usually means the upload thread died; surface its
            # recorded error if there is one, otherwise re-raise as-is.
            if self.upload_thread.last_error:
                raise self.upload_thread.last_error  # pylint: disable=raising-bad-type
            else:
                raise
# ---------------------------------------------------------------------------
# tests.py
# -*- coding: utf-8 -*-
# Unit and doctests for specific database backends.
from __future__ import unicode_literals
import datetime
import re
import threading
import unittest
import warnings
from decimal import Decimal, Rounded
from django.core.exceptions import ImproperlyConfigured
from django.core.management.color import no_style
from django.db import (
DEFAULT_DB_ALIAS, DatabaseError, IntegrityError, connection, connections,
reset_queries, transaction,
)
from django.db.backends.base.base import BaseDatabaseWrapper
from django.db.backends.postgresql import version as pg_version
from django.db.backends.signals import connection_created
from django.db.backends.utils import CursorWrapper, format_number
from django.db.models import Avg, StdDev, Sum, Variance
from django.db.models.sql.constants import CURSOR
from django.db.utils import ConnectionHandler
from django.test import (
SimpleTestCase, TestCase, TransactionTestCase, mock, override_settings,
skipIfDBFeature, skipUnlessDBFeature,
)
from django.utils import six
from django.utils.six.moves import range
from . import models
class DummyBackendTest(SimpleTestCase):
    """Behaviour of the fallback dummy database backend."""

    def test_no_databases(self):
        """
        An empty DATABASES setting defaults to the dummy backend, which
        refuses to open connections.
        """
        conns = ConnectionHandler({})
        engine = conns[DEFAULT_DB_ALIAS].settings_dict['ENGINE']
        self.assertEqual(engine, 'django.db.backends.dummy')
        with self.assertRaises(ImproperlyConfigured):
            conns[DEFAULT_DB_ALIAS].ensure_connection()
@unittest.skipUnless(connection.vendor == 'oracle', "Test only for Oracle")
class OracleTests(unittest.TestCase):
    """Oracle-backend-specific regression tests."""

    def test_quote_name(self):
        """quote_name() must escape '%' so quoted names survive interpolation."""
        # Check that '%' chars are escaped for query execution.
        name = '"SOME%NAME"'
        quoted_name = connection.ops.quote_name(name)
        self.assertEqual(quoted_name % (), name)

    def test_dbms_session(self):
        """A standard stored procedure is callable through the cursor wrapper."""
        # If the backend is Oracle, test that we can call a standard
        # stored procedure through our cursor wrapper.
        from django.db.backends.oracle.base import convert_unicode

        with connection.cursor() as cursor:
            cursor.callproc(convert_unicode('DBMS_SESSION.SET_IDENTIFIER'),
                            [convert_unicode('_django_testing!')])

    def test_cursor_var(self):
        """Cursor variables can be passed as query parameters."""
        # If the backend is Oracle, test that we can pass cursor variables
        # as query parameters.
        from django.db.backends.oracle.base import Database

        with connection.cursor() as cursor:
            var = cursor.var(Database.STRING)
            cursor.execute("BEGIN %s := 'X'; END; ", [var])
            self.assertEqual(var.getvalue(), 'X')

    def test_long_string(self):
        """Text longer than 4000 chars round-trips through an NCLOB column."""
        # If the backend is Oracle, test that we can save a text longer
        # than 4000 chars and read it properly
        with connection.cursor() as cursor:
            cursor.execute('CREATE TABLE ltext ("TEXT" NCLOB)')
            long_str = ''.join(six.text_type(x) for x in range(4000))
            cursor.execute('INSERT INTO ltext VALUES (%s)', [long_str])
            cursor.execute('SELECT text FROM ltext')
            row = cursor.fetchone()
            self.assertEqual(long_str, row[0].read())
            cursor.execute('DROP TABLE ltext')

    def test_client_encoding(self):
        """The client encoding (and national encoding) must be UTF-8."""
        # If the backend is Oracle, test that the client encoding is set
        # correctly. This was broken under Cygwin prior to r14781.
        connection.ensure_connection()
        self.assertEqual(connection.connection.encoding, "UTF-8")
        self.assertEqual(connection.connection.nencoding, "UTF-8")

    def test_order_of_nls_parameters(self):
        """An 'almost right' datetime literal works with the configured NLS parameters."""
        # an 'almost right' datetime should work with configured
        # NLS parameters as per #18465.
        with connection.cursor() as cursor:
            query = "select 1 from dual where '1936-12-29 00:00' < sysdate"
            # Test that the query succeeds without errors - pre #18465 this
            # wasn't the case.
            cursor.execute(query)
            self.assertEqual(cursor.fetchone()[0], 1)
@unittest.skipUnless(connection.vendor == 'sqlite', "Test only for SQLite")
class SQLiteTests(TestCase):
    """SQLite-backend-specific regression tests."""

    # Show the standard assertion message in addition to any custom msg.
    longMessage = True

    def test_autoincrement(self):
        """
        Check that auto_increment fields are created with the AUTOINCREMENT
        keyword in order to be monotonically increasing. Refs #10164.
        """
        with connection.schema_editor(collect_sql=True) as editor:
            editor.create_model(models.Square)
            statements = editor.collected_sql
        match = re.search('"id" ([^,]+),', statements[0])
        self.assertIsNotNone(match)
        self.assertEqual('integer NOT NULL PRIMARY KEY AUTOINCREMENT',
                         match.group(1), "Wrong SQL used to create an auto-increment "
                         "column on SQLite")

    def test_aggregation(self):
        """
        #19360: Raise NotImplementedError when aggregating on date/time fields.
        """
        for aggregate in (Sum, Avg, Variance, StdDev):
            self.assertRaises(
                NotImplementedError,
                models.Item.objects.all().aggregate, aggregate('time'))
            self.assertRaises(
                NotImplementedError,
                models.Item.objects.all().aggregate, aggregate('date'))
            self.assertRaises(
                NotImplementedError,
                models.Item.objects.all().aggregate, aggregate('last_modified'))
            self.assertRaises(
                NotImplementedError,
                models.Item.objects.all().aggregate,
                **{'complex': aggregate('last_modified') + aggregate('last_modified')})

    def test_memory_db_test_name(self):
        """
        A named in-memory db should be allowed where supported.
        """
        from django.db.backends.sqlite3.base import DatabaseWrapper
        settings_dict = {
            'TEST': {
                'NAME': 'file:memorydb_test?mode=memory&cache=shared',
            }
        }
        wrapper = DatabaseWrapper(settings_dict)
        creation = wrapper.creation
        if creation.connection.features.can_share_in_memory_db:
            expected = creation.connection.settings_dict['TEST']['NAME']
            self.assertEqual(creation._get_test_db_name(), expected)
        else:
            msg = (
                "Using a shared memory database with `mode=memory` in the "
                "database name is not supported in your environment, "
                "use `:memory:` instead."
            )
            with self.assertRaisesMessage(ImproperlyConfigured, msg):
                creation._get_test_db_name()
@unittest.skipUnless(connection.vendor == 'postgresql', "Test only for PostgreSQL")
class PostgreSQLTests(TestCase):
    """PostgreSQL-backend-specific regression tests."""

    def assert_parses(self, version_string, version):
        # Helper: version_string must parse to the given integer version.
        self.assertEqual(pg_version._parse_version(version_string), version)

    def test_parsing(self):
        """Test PostgreSQL version parsing from `SELECT version()` output"""
        self.assert_parses("PostgreSQL 9.3 beta4", 90300)
        self.assert_parses("PostgreSQL 9.3", 90300)
        self.assert_parses("EnterpriseDB 9.3", 90300)
        self.assert_parses("PostgreSQL 9.3.6", 90306)
        self.assert_parses("PostgreSQL 9.4beta1", 90400)
        self.assert_parses(
            "PostgreSQL 9.3.1 on i386-apple-darwin9.2.2, compiled by GCC "
            "i686-apple-darwin9-gcc-4.0.1 (GCC) 4.0.1 (Apple Inc. build 5478)",
            90301
        )

    def test_nodb_connection(self):
        """
        Test that the _nodb_connection property fallbacks to the default connection
        database when access to the 'postgres' database is not granted.
        """
        def mocked_connect(self):
            # Simulate the 'postgres' database rejecting connections.
            if self.settings_dict['NAME'] is None:
                raise DatabaseError()
            return ''

        nodb_conn = connection._nodb_connection
        self.assertIsNone(nodb_conn.settings_dict['NAME'])

        # Now assume the 'postgres' db isn't available
        with warnings.catch_warnings(record=True) as w:
            with mock.patch('django.db.backends.base.base.BaseDatabaseWrapper.connect',
                            side_effect=mocked_connect, autospec=True):
                warnings.simplefilter('always', RuntimeWarning)
                nodb_conn = connection._nodb_connection
        self.assertIsNotNone(nodb_conn.settings_dict['NAME'])
        self.assertEqual(nodb_conn.settings_dict['NAME'], connection.settings_dict['NAME'])
        # Check a RuntimeWarning has been emitted
        self.assertEqual(len(w), 1)
        self.assertEqual(w[0].message.__class__, RuntimeWarning)

    def test_version_detection(self):
        """Test PostgreSQL version detection"""

        # Helper mocks
        class CursorMock(object):
            "Very simple mock of DB-API cursor"
            def execute(self, arg):
                pass

            def fetchone(self):
                return ["PostgreSQL 9.3"]

            def __enter__(self):
                return self

            def __exit__(self, type, value, traceback):
                pass

        class OlderConnectionMock(object):
            "Mock of psycopg2 (< 2.0.12) connection"
            def cursor(self):
                return CursorMock()

        # psycopg2 < 2.0.12 code path
        conn = OlderConnectionMock()
        self.assertEqual(pg_version.get_version(conn), 90300)

    def test_connect_and_rollback(self):
        """
        PostgreSQL shouldn't roll back SET TIME ZONE, even if the first
        transaction is rolled back (#17062).
        """
        new_connection = connection.copy()
        try:
            # Ensure the database default time zone is different than
            # the time zone in new_connection.settings_dict. We can
            # get the default time zone by reset & show.
            cursor = new_connection.cursor()
            cursor.execute("RESET TIMEZONE")
            cursor.execute("SHOW TIMEZONE")
            db_default_tz = cursor.fetchone()[0]
            new_tz = 'Europe/Paris' if db_default_tz == 'UTC' else 'UTC'
            new_connection.close()

            # Invalidate timezone name cache, because the setting_changed
            # handler cannot know about new_connection.
            del new_connection.timezone_name

            # Fetch a new connection with the new_tz as default
            # time zone, run a query and rollback.
            with self.settings(TIME_ZONE=new_tz):
                new_connection.set_autocommit(False)
                cursor = new_connection.cursor()
                new_connection.rollback()

                # Now let's see if the rollback rolled back the SET TIME ZONE.
                cursor.execute("SHOW TIMEZONE")
                tz = cursor.fetchone()[0]
                self.assertEqual(new_tz, tz)
        finally:
            new_connection.close()

    def test_connect_non_autocommit(self):
        """
        The connection wrapper shouldn't believe that autocommit is enabled
        after setting the time zone when AUTOCOMMIT is False (#21452).
        """
        new_connection = connection.copy()
        new_connection.settings_dict['AUTOCOMMIT'] = False
        try:
            # Open a database connection.
            new_connection.cursor()
            self.assertFalse(new_connection.get_autocommit())
        finally:
            new_connection.close()

    def test_connect_isolation_level(self):
        """
        Regression test for #18130 and #24318.
        """
        from psycopg2.extensions import (
            ISOLATION_LEVEL_READ_COMMITTED as read_committed,
            ISOLATION_LEVEL_SERIALIZABLE as serializable,
        )
        # Since this is a django.test.TestCase, a transaction is in progress
        # and the isolation level isn't reported as 0. This test assumes that
        # PostgreSQL is configured with the default isolation level.
        # Check the level on the psycopg2 connection, not the Django wrapper.
        self.assertEqual(connection.connection.isolation_level, read_committed)

        new_connection = connection.copy()
        new_connection.settings_dict['OPTIONS']['isolation_level'] = serializable
        try:
            # Start a transaction so the isolation level isn't reported as 0.
            new_connection.set_autocommit(False)
            # Check the level on the psycopg2 connection, not the Django wrapper.
            self.assertEqual(new_connection.connection.isolation_level, serializable)
        finally:
            new_connection.close()

    def _select(self, val):
        # Helper: round-trip a value through a parameterized SELECT.
        with connection.cursor() as cursor:
            cursor.execute("SELECT %s", (val,))
            return cursor.fetchone()[0]

    def test_select_ascii_array(self):
        """An ASCII string array survives a round trip unchanged."""
        a = ["awef"]
        b = self._select(a)
        self.assertEqual(a[0], b[0])

    def test_select_unicode_array(self):
        """A non-ASCII string array survives a round trip unchanged."""
        a = ["ᄲawef"]
        b = self._select(a)
        self.assertEqual(a[0], b[0])

    def test_lookup_cast(self):
        """Pattern lookups include an explicit ::text cast."""
        from django.db.backends.postgresql.operations import DatabaseOperations
        do = DatabaseOperations(connection=None)
        for lookup in ('iexact', 'contains', 'icontains', 'startswith',
                       'istartswith', 'endswith', 'iendswith', 'regex', 'iregex'):
            self.assertIn('::text', do.lookup_cast(lookup))

    def test_correct_extraction_psycopg2_version(self):
        """psycopg2 version strings parse to tuples of their numeric parts."""
        from django.db.backends.postgresql.base import psycopg2_version
        version_path = 'django.db.backends.postgresql.base.Database.__version__'
        with mock.patch(version_path, '2.6.9'):
            self.assertEqual(psycopg2_version(), (2, 6, 9))
        with mock.patch(version_path, '2.5.dev0'):
            self.assertEqual(psycopg2_version(), (2, 5))
class DateQuotingTest(TestCase):
    """Date functions must handle field names that clash with their arguments."""

    def test_django_date_trunc(self):
        """
        Test the custom ``django_date_trunc method``, in particular against
        fields which clash with strings passed to it (e.g. 'year') - see
        #12818__.

        __: http://code.djangoproject.com/ticket/12818
        """
        updated = datetime.datetime(2010, 2, 20)
        models.SchoolClass.objects.create(year=2009, last_updated=updated)
        years = models.SchoolClass.objects.dates('last_updated', 'year')
        self.assertEqual(list(years), [datetime.date(2010, 1, 1)])

    def test_django_date_extract(self):
        """
        Test the custom ``django_date_extract method``, in particular against fields
        which clash with strings passed to it (e.g. 'day') - see #12818__.

        __: http://code.djangoproject.com/ticket/12818
        """
        updated = datetime.datetime(2010, 2, 20)
        models.SchoolClass.objects.create(year=2009, last_updated=updated)
        classes = models.SchoolClass.objects.filter(last_updated__day=20)
        self.assertEqual(len(classes), 1)
@override_settings(DEBUG=True)
class LastExecutedQueryTest(TestCase):
    """Tests for DatabaseOperations.last_executed_query() and query logging."""

    def test_last_executed_query(self):
        """
        last_executed_query should not raise an exception even if no previous
        query has been run.
        """
        cursor = connection.cursor()
        connection.ops.last_executed_query(cursor, '', ())

    def test_debug_sql(self):
        # With DEBUG=True the executed SQL is recorded on connection.queries.
        list(models.Reporter.objects.filter(first_name="test"))
        sql = connection.queries[-1]['sql'].lower()
        self.assertIn("select", sql)
        self.assertIn(models.Reporter._meta.db_table, sql)

    def test_query_encoding(self):
        """
        Test that last_executed_query() returns an Unicode string
        """
        data = models.RawData.objects.filter(raw_data=b'\x00\x46 \xFE').extra(select={'föö': 1})
        sql, params = data.query.sql_with_params()
        cursor = data.query.get_compiler('default').execute_sql(CURSOR)
        last_sql = cursor.db.ops.last_executed_query(cursor, sql, params)
        self.assertIsInstance(last_sql, six.text_type)

    @unittest.skipUnless(connection.vendor == 'sqlite',
                         "This test is specific to SQLite.")
    def test_no_interpolation_on_sqlite(self):
        # Regression for #17158
        # This shouldn't raise an exception
        query = "SELECT strftime('%Y', 'now');"
        connection.cursor().execute(query)
        self.assertEqual(connection.queries[-1]['sql'], query)

    @unittest.skipUnless(connection.vendor == 'sqlite',
                         "This test is specific to SQLite.")
    def test_parameter_quoting_on_sqlite(self):
        # The implementation of last_executed_queries isn't optimal. It's
        # worth testing that parameters are quoted. See #14091.
        query = "SELECT %s"
        params = ["\"'\\"]
        connection.cursor().execute(query, params)
        # Note that the single quote is repeated
        substituted = "SELECT '\"''\\'"
        self.assertEqual(connection.queries[-1]['sql'], substituted)

    @unittest.skipUnless(connection.vendor == 'sqlite',
                         "This test is specific to SQLite.")
    def test_large_number_of_parameters_on_sqlite(self):
        # If SQLITE_MAX_VARIABLE_NUMBER (default = 999) has been changed to be
        # greater than SQLITE_MAX_COLUMN (default = 2000), last_executed_query
        # can hit the SQLITE_MAX_COLUMN limit. See #26063.
        cursor = connection.cursor()
        sql = "SELECT MAX(%s)" % ", ".join(["%s"] * 2001)
        params = list(range(2001))
        # This should not raise an exception.
        cursor.db.ops.last_executed_query(cursor.cursor, sql, params)
class ParameterHandlingTest(TestCase):
    """Parameter-count mismatches must surface as errors, not silent truncation."""

    def test_bad_parameter_count(self):
        "An executemany call with too many/not enough parameters will raise an exception (Refs #12612)"
        cursor = connection.cursor()
        tbl = connection.introspection.table_name_converter('backends_square')
        root_col = connection.ops.quote_name('root')
        square_col = connection.ops.quote_name('square')
        query = 'INSERT INTO %s (%s, %s) VALUES (%%s, %%s)' % (tbl, root_col, square_col)
        # One parameter too many per row.
        self.assertRaises(Exception, cursor.executemany, query, [(1, 2, 3)])
        # One parameter too few per row.
        self.assertRaises(Exception, cursor.executemany, query, [(1,)])
# Unfortunately, the following tests would be a good test to run on all
# backends, but it breaks MySQL hard. Until #13711 is fixed, it can't be run
# everywhere (although it would be an effective test of #13711).
class LongNameTest(TransactionTestCase):
    """Long primary keys and model names can result in a sequence name
    that exceeds the database limits, which will result in truncation
    on certain databases (e.g., Postgres). The backend needs to use
    the correct sequence name in last_insert_id and other places, so
    check it is. Refs #8901.
    """
    available_apps = ['backends']

    def test_sequence_name_length_limits_create(self):
        """Test creation of model with long name and long pk name doesn't error. Ref #8901"""
        models.VeryLongModelNameZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ.objects.create()

    def test_sequence_name_length_limits_m2m(self):
        """
        An m2m save of a model with a long name and a long m2m field name
        doesn't error (#8901).
        """
        obj = models.VeryLongModelNameZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ.objects.create()
        rel_obj = models.Person.objects.create(first_name='Django', last_name='Reinhardt')
        obj.m2m_also_quite_long_zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz.add(rel_obj)

    def test_sequence_name_length_limits_flush(self):
        """
        Sequence resetting as part of a flush with model with long name and
        long pk name doesn't error (#8901).
        """
        # A full flush is expensive to the full test, so we dig into the
        # internals to generate the likely offending SQL and run it manually
        # Some convenience aliases
        VLM = models.VeryLongModelNameZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ
        VLM_m2m = VLM.m2m_also_quite_long_zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz.through
        tables = [
            VLM._meta.db_table,
            VLM_m2m._meta.db_table,
        ]
        sequences = [
            {
                'column': VLM._meta.pk.column,
                'table': VLM._meta.db_table
            },
        ]
        cursor = connection.cursor()
        for statement in connection.ops.sql_flush(no_style(), tables, sequences):
            cursor.execute(statement)
class SequenceResetTest(TestCase):
    """Sequence reset SQL must target the correct sequence names."""

    def test_generic_relation(self):
        "Sequence names are correct when resetting generic relations (Ref #13941)"
        # Create an object with a manually specified PK.
        models.Post.objects.create(id=10, name='1st post', text='hello world')

        # Reset the sequences for the database.
        cursor = connection.cursor()
        reset_statements = connections[DEFAULT_DB_ALIAS].ops.sequence_reset_sql(
            no_style(), [models.Post])
        for statement in reset_statements:
            cursor.execute(statement)

        # A freshly created object must now receive a PK greater than the
        # manually specified one.
        obj = models.Post.objects.create(name='New post', text='goodbye world')
        self.assertGreater(obj.pk, 10)
# This test needs to run outside of a transaction, otherwise closing the
# connection would implicitly rollback and cause problems during teardown.
class ConnectionCreatedSignalTest(TransactionTestCase):
    """connection_created fires on reconnect and stops after disconnect()."""

    available_apps = []

    # Unfortunately with sqlite3 the in-memory test database cannot be closed,
    # and so it cannot be re-opened during testing.
    @skipUnlessDBFeature('test_db_allows_multiple_connections')
    def test_signal(self):
        data = {}

        def receiver(sender, connection, **kwargs):
            # Record which connection the signal reported.
            data["connection"] = connection

        connection_created.connect(receiver)
        connection.close()
        connection.cursor()
        self.assertIs(data["connection"].connection, connection.connection)

        connection_created.disconnect(receiver)
        data.clear()
        connection.cursor()
        # No receiver is connected any more, so nothing should be recorded.
        self.assertEqual(data, {})
class EscapingChecks(TestCase):
    """
    All tests in this test case are also run with settings.DEBUG=True in
    EscapingChecksDebug test case, to also test CursorDebugWrapper.
    """
    # Backend-provided suffix that makes a bare SELECT statement valid.
    bare_select_suffix = connection.features.bare_select_suffix

    def test_paramless_no_escaping(self):
        # A literal '%s' with no params must not be treated as a placeholder.
        cursor = connection.cursor()
        cursor.execute("SELECT '%s'" + self.bare_select_suffix)
        self.assertEqual(cursor.fetchall()[0][0], '%s')

    def test_parameter_escaping(self):
        # '%%' must collapse to '%' while real parameters pass through intact.
        cursor = connection.cursor()
        cursor.execute("SELECT '%%', %s" + self.bare_select_suffix, ('%d',))
        self.assertEqual(cursor.fetchall()[0], ('%', '%d'))

    @unittest.skipUnless(connection.vendor == 'sqlite',
                         "This is an sqlite-specific issue")
    def test_sqlite_parameter_escaping(self):
        # '%s' escaping support for sqlite3 #13648
        cursor = connection.cursor()
        cursor.execute("select strftime('%s', date('now'))")
        response = cursor.fetchall()[0][0]
        # response should be an non-zero integer
        self.assertTrue(int(response))
@override_settings(DEBUG=True)
class EscapingChecksDebug(EscapingChecks):
    # Re-runs every EscapingChecks test with DEBUG=True so that the
    # CursorDebugWrapper code path is exercised as well.
    pass
class BackendTestCase(TransactionTestCase):
available_apps = ['backends']
def create_squares_with_executemany(self, args):
    """Convenience wrapper: bulk-insert rows via executemany with %s placeholders."""
    self.create_squares(args, paramstyle='format', multiple=True)
def create_squares(self, args, paramstyle, multiple):
    """Insert rows into the Square table with raw SQL.

    Args:
      args: parameter sequence (or a single dict/tuple when multiple=False).
      paramstyle: 'format' for %s placeholders, 'pyformat' for %(name)s.
      multiple: use executemany() when True, execute() otherwise.
    """
    cursor = connection.cursor()
    opts = models.Square._meta
    tbl = connection.introspection.table_name_converter(opts.db_table)
    f1 = connection.ops.quote_name(opts.get_field('root').column)
    f2 = connection.ops.quote_name(opts.get_field('square').column)
    if paramstyle == 'format':
        query = 'INSERT INTO %s (%s, %s) VALUES (%%s, %%s)' % (tbl, f1, f2)
    elif paramstyle == 'pyformat':
        query = 'INSERT INTO %s (%s, %s) VALUES (%%(root)s, %%(square)s)' % (tbl, f1, f2)
    else:
        raise ValueError("unsupported paramstyle in test")
    if multiple:
        cursor.executemany(query, args)
    else:
        cursor.execute(query, args)
def test_cursor_executemany(self):
    """executemany() inserts every parameter tuple. Refs #4896."""
    # Test cursor.executemany #4896
    args = [(i, i ** 2) for i in range(-5, 6)]
    self.create_squares_with_executemany(args)
    self.assertEqual(models.Square.objects.count(), 11)
    for i in range(-5, 6):
        square = models.Square.objects.get(root=i)
        self.assertEqual(square.square, i ** 2)
def test_cursor_executemany_with_empty_params_list(self):
    """executemany() with an empty params list inserts nothing. Refs #4765."""
    # Test executemany with params=[] does nothing #4765
    args = []
    self.create_squares_with_executemany(args)
    self.assertEqual(models.Square.objects.count(), 0)
def test_cursor_executemany_with_iterator(self):
    """executemany() accepts iterators, with and without DEBUG. Refs #10320."""
    # Test executemany accepts iterators #10320
    args = iter((i, i ** 2) for i in range(-3, 2))
    self.create_squares_with_executemany(args)
    self.assertEqual(models.Square.objects.count(), 5)

    args = iter((i, i ** 2) for i in range(3, 7))
    with override_settings(DEBUG=True):
        # same test for DebugCursorWrapper
        self.create_squares_with_executemany(args)
    self.assertEqual(models.Square.objects.count(), 9)
@skipUnlessDBFeature('supports_paramstyle_pyformat')
def test_cursor_execute_with_pyformat(self):
    """execute() accepts pyformat (named) parameters. Refs #10070."""
    # Support pyformat style passing of parameters #10070
    args = {'root': 3, 'square': 9}
    self.create_squares(args, 'pyformat', multiple=False)
    self.assertEqual(models.Square.objects.count(), 1)
@skipUnlessDBFeature('supports_paramstyle_pyformat')
def test_cursor_executemany_with_pyformat(self):
    """executemany() accepts pyformat (named) parameters. Refs #10070."""
    # Support pyformat style passing of parameters #10070
    args = [{'root': i, 'square': i ** 2} for i in range(-5, 6)]
    self.create_squares(args, 'pyformat', multiple=True)
    self.assertEqual(models.Square.objects.count(), 11)
    for i in range(-5, 6):
        square = models.Square.objects.get(root=i)
        self.assertEqual(square.square, i ** 2)
@skipUnlessDBFeature('supports_paramstyle_pyformat')
def test_cursor_executemany_with_pyformat_iterator(self):
    """executemany() accepts an iterator of pyformat dicts, with and without DEBUG."""
    args = iter({'root': i, 'square': i ** 2} for i in range(-3, 2))
    self.create_squares(args, 'pyformat', multiple=True)
    self.assertEqual(models.Square.objects.count(), 5)

    args = iter({'root': i, 'square': i ** 2} for i in range(3, 7))
    with override_settings(DEBUG=True):
        # same test for DebugCursorWrapper
        self.create_squares(args, 'pyformat', multiple=True)
    self.assertEqual(models.Square.objects.count(), 9)
def test_unicode_fetches(self):
    """fetchone/fetchmany/fetchall return strings as unicode objects. Refs #6254."""
    # fetchone, fetchmany, fetchall return strings as unicode objects #6254
    qn = connection.ops.quote_name
    models.Person(first_name="John", last_name="Doe").save()
    models.Person(first_name="Jane", last_name="Doe").save()
    models.Person(first_name="Mary", last_name="Agnelline").save()
    models.Person(first_name="Peter", last_name="Parker").save()
    models.Person(first_name="Clark", last_name="Kent").save()
    opts2 = models.Person._meta
    f3, f4 = opts2.get_field('first_name'), opts2.get_field('last_name')
    query2 = ('SELECT %s, %s FROM %s ORDER BY %s'
              % (qn(f3.column), qn(f4.column), connection.introspection.table_name_converter(opts2.db_table),
                 qn(f3.column)))
    cursor = connection.cursor()
    cursor.execute(query2)
    # Rows come back ordered by first name.
    self.assertEqual(cursor.fetchone(), ('Clark', 'Kent'))
    self.assertEqual(list(cursor.fetchmany(2)), [('Jane', 'Doe'), ('John', 'Doe')])
    self.assertEqual(list(cursor.fetchall()), [('Mary', 'Agnelline'), ('Peter', 'Parker')])
def test_unicode_password(self):
    """A non-ASCII password must not crash the connect path itself."""
    old_password = connection.settings_dict['PASSWORD']
    connection.settings_dict['PASSWORD'] = "françois"
    try:
        connection.cursor()
    except DatabaseError:
        # As password is probably wrong, a database exception is expected
        pass
    except Exception as e:
        self.fail("Unexpected error raised with unicode password: %s" % e)
    finally:
        # Always restore the real password so later tests can connect.
        connection.settings_dict['PASSWORD'] = old_password
def test_database_operations_helper_class(self):
    """connection.ops exists and points back at its connection. Ticket #13630."""
    # Ticket #13630
    self.assertTrue(hasattr(connection, 'ops'))
    self.assertTrue(hasattr(connection.ops, 'connection'))
    self.assertEqual(connection, connection.ops.connection)
def test_database_operations_init(self):
    """
    Test that DatabaseOperations initialization doesn't query the database.
    See #17656.
    """
    with self.assertNumQueries(0):
        connection.ops.__class__(connection)
def test_cached_db_features(self):
    """Accessing cached feature flags yields plain booleans."""
    self.assertIn(connection.features.supports_transactions, (True, False))
    flags = (
        connection.features.supports_stddev,
        connection.features.can_introspect_foreign_keys,
    )
    for flag in flags:
        self.assertIn(flag, (True, False))
def test_duplicate_table_error(self):
    """ Test that creating an existing table returns a DatabaseError """
    cursor = connection.cursor()
    # The table already exists because Django created it from the model.
    query = 'CREATE TABLE %s (id INTEGER);' % models.Article._meta.db_table
    with self.assertRaises(DatabaseError):
        cursor.execute(query)
def test_cursor_contextmanager(self):
    """
    Test that cursors can be used as a context manager
    """
    with connection.cursor() as cursor:
        self.assertIsInstance(cursor, CursorWrapper)
    # Both InterfaceError and ProgrammingError seem to be used when
    # accessing closed cursor (psycopg2 has InterfaceError, rest seem
    # to use ProgrammingError).
    with self.assertRaises(connection.features.closed_cursor_error_class):
        # cursor should be closed, so no queries should be possible.
        cursor.execute("SELECT 1" + connection.features.bare_select_suffix)
@unittest.skipUnless(connection.vendor == 'postgresql',
                     "Psycopg2 specific cursor.closed attribute needed")
def test_cursor_contextmanager_closing(self):
    """Leaving the cursor context manager closes the underlying cursor."""
    # There isn't a generic way to test that cursors are closed, but
    # psycopg2 offers us a way to check that by closed attribute.
    # So, run only on psycopg2 for that reason.
    with connection.cursor() as cursor:
        self.assertIsInstance(cursor, CursorWrapper)
    self.assertTrue(cursor.closed)
# Unfortunately with sqlite3 the in-memory test database cannot be closed.
@skipUnlessDBFeature('test_db_allows_multiple_connections')
def test_is_usable_after_database_disconnects(self):
    """
    Test that is_usable() doesn't crash when the database disconnects.
    Regression for #21553.
    """
    # Open a connection to the database.
    with connection.cursor():
        pass
    # Emulate a connection close by the database.
    connection._close()
    # Even then is_usable() should not raise an exception.
    try:
        self.assertFalse(connection.is_usable())
    finally:
        # Clean up the mess created by connection._close(). Since the
        # connection is already closed, this crashes on some backends.
        try:
            connection.close()
        except Exception:
            pass
    @override_settings(DEBUG=True)
    def test_queries(self):
        """
        Test the documented API of connection.queries.
        """
        # DEBUG=True is required: query logging is disabled otherwise.
        with connection.cursor() as cursor:
            reset_queries()
            cursor.execute("SELECT 1" + connection.features.bare_select_suffix)
        self.assertEqual(1, len(connection.queries))
        # Each logged entry is a dict with exactly the documented keys.
        self.assertIsInstance(connection.queries, list)
        self.assertIsInstance(connection.queries[0], dict)
        six.assertCountEqual(self, connection.queries[0].keys(), ['sql', 'time'])
        reset_queries()
        self.assertEqual(0, len(connection.queries))
    # Unfortunately with sqlite3 the in-memory test database cannot be closed.
    @skipUnlessDBFeature('test_db_allows_multiple_connections')
    @override_settings(DEBUG=True)
    def test_queries_limit(self):
        """
        Test that the backend doesn't store an unlimited number of queries.
        Regression for #12581.
        """
        # Patch the class attribute so a small limit applies to the copy;
        # restored in the finally block below.
        old_queries_limit = BaseDatabaseWrapper.queries_limit
        BaseDatabaseWrapper.queries_limit = 3
        new_connection = connection.copy()
        # Initialize the connection and clear initialization statements.
        with new_connection.cursor():
            pass
        new_connection.queries_log.clear()
        try:
            with new_connection.cursor() as cursor:
                cursor.execute("SELECT 1" + new_connection.features.bare_select_suffix)
                cursor.execute("SELECT 2" + new_connection.features.bare_select_suffix)
            # Below the limit: reading queries emits no warning.
            with warnings.catch_warnings(record=True) as w:
                self.assertEqual(2, len(new_connection.queries))
                self.assertEqual(0, len(w))
            with new_connection.cursor() as cursor:
                cursor.execute("SELECT 3" + new_connection.features.bare_select_suffix)
                cursor.execute("SELECT 4" + new_connection.features.bare_select_suffix)
            # Over the limit: only the last `queries_limit` entries are kept
            # and a single warning is raised on access.
            with warnings.catch_warnings(record=True) as w:
                self.assertEqual(3, len(new_connection.queries))
                self.assertEqual(1, len(w))
                self.assertEqual(str(w[0].message), "Limit for query logging "
                    "exceeded, only the last 3 queries will be returned.")
        finally:
            BaseDatabaseWrapper.queries_limit = old_queries_limit
            new_connection.close()
# We don't make these tests conditional because that means we would need to
# check and differentiate between:
# * MySQL+InnoDB, MySQL+MYISAM (something we currently can't do).
# * if sqlite3 (if/once we get #14204 fixed) has referential integrity turned
# on or not, something that would be controlled by runtime support and user
# preference.
# We also verify that the raised exception's type is django.db.IntegrityError.
class FkConstraintsTests(TransactionTestCase):
    """Checks that FK constraint violations surface as IntegrityError."""

    available_apps = ['backends']

    def setUp(self):
        # Create a Reporter that valid articles can point at.
        self.r = models.Reporter.objects.create(first_name='John', last_name='Smith')

    def test_integrity_checks_on_creation(self):
        """
        Try to create a model instance that violates a FK constraint. If it
        fails it should fail with IntegrityError.
        """
        a1 = models.Article(headline="This is a test", pub_date=datetime.datetime(2005, 7, 27), reporter_id=30)
        try:
            a1.save()
        except IntegrityError:
            pass
        else:
            self.skipTest("This backend does not support integrity checks.")
        # Now that we know this backend supports integrity checks we make sure
        # constraints are also enforced for proxy models. Refs #17519
        a2 = models.Article(headline='This is another test', reporter=self.r,
                            pub_date=datetime.datetime(2012, 8, 3),
                            reporter_proxy_id=30)
        # Context-manager form pins the failure to the save() call itself
        # (the original used the callable form of assertRaises).
        with self.assertRaises(IntegrityError):
            a2.save()

    def test_integrity_checks_on_update(self):
        """
        Try to update a model instance introducing a FK constraint violation.
        If it fails it should fail with IntegrityError.
        """
        # Create an Article.
        models.Article.objects.create(headline="Test article", pub_date=datetime.datetime(2010, 9, 4), reporter=self.r)
        # Retrieve it from the DB
        a1 = models.Article.objects.get(headline="Test article")
        a1.reporter_id = 30
        try:
            a1.save()
        except IntegrityError:
            pass
        else:
            self.skipTest("This backend does not support integrity checks.")
        # Now that we know this backend supports integrity checks we make sure
        # constraints are also enforced for proxy models. Refs #17519
        # Create another article
        r_proxy = models.ReporterProxy.objects.get(pk=self.r.pk)
        models.Article.objects.create(headline='Another article',
                                      pub_date=datetime.datetime(1988, 5, 15),
                                      reporter=self.r, reporter_proxy=r_proxy)
        # Retrieve the second article from the DB
        a2 = models.Article.objects.get(headline='Another article')
        a2.reporter_proxy_id = 30
        with self.assertRaises(IntegrityError):
            a2.save()

    def test_disable_constraint_checks_manually(self):
        """
        When constraint checks are disabled, should be able to write bad data
        without IntegrityErrors.
        """
        with transaction.atomic():
            # Create an Article.
            models.Article.objects.create(
                headline="Test article",
                pub_date=datetime.datetime(2010, 9, 4),
                reporter=self.r,
            )
            # Retrieve it from the DB
            a = models.Article.objects.get(headline="Test article")
            a.reporter_id = 30
            try:
                # Explicit disable/enable pair (the context-manager variant is
                # exercised by the next test).
                connection.disable_constraint_checking()
                a.save()
                connection.enable_constraint_checking()
            except IntegrityError:
                self.fail("IntegrityError should not have occurred.")
            # Roll back so the bad row never persists past this test.
            transaction.set_rollback(True)

    def test_disable_constraint_checks_context_manager(self):
        """
        When constraint checks are disabled (using context manager), should be
        able to write bad data without IntegrityErrors.
        """
        with transaction.atomic():
            # Create an Article.
            models.Article.objects.create(
                headline="Test article",
                pub_date=datetime.datetime(2010, 9, 4),
                reporter=self.r,
            )
            # Retrieve it from the DB
            a = models.Article.objects.get(headline="Test article")
            a.reporter_id = 30
            try:
                with connection.constraint_checks_disabled():
                    a.save()
            except IntegrityError:
                self.fail("IntegrityError should not have occurred.")
            transaction.set_rollback(True)

    def test_check_constraints(self):
        """
        Constraint checks should raise an IntegrityError when bad data is in the DB.
        """
        with transaction.atomic():
            # Create an Article.
            models.Article.objects.create(
                headline="Test article",
                pub_date=datetime.datetime(2010, 9, 4),
                reporter=self.r,
            )
            # Retrieve it from the DB
            a = models.Article.objects.get(headline="Test article")
            a.reporter_id = 30
            # Sneak the bad row in with checks off, then verify the explicit
            # check_constraints() call detects it.
            with connection.constraint_checks_disabled():
                a.save()
                with self.assertRaises(IntegrityError):
                    connection.check_constraints()
            transaction.set_rollback(True)
class ThreadTests(TransactionTestCase):
    """Connection-per-thread semantics; all cases reference ticket #17258."""

    available_apps = ['backends']

    def test_default_connection_thread_local(self):
        """
        Ensure that the default connection (i.e. django.db.connection) is
        different for each thread.
        Refs #17258.
        """
        # Map connections by id because connections with identical aliases
        # have the same hash.
        connections_dict = {}
        connection.cursor()
        connections_dict[id(connection)] = connection

        def runner():
            # Passing django.db.connection between threads doesn't work while
            # connections[DEFAULT_DB_ALIAS] does.
            from django.db import connections
            connection = connections[DEFAULT_DB_ALIAS]
            # Allow thread sharing so the connection can be closed by the
            # main thread.
            connection.allow_thread_sharing = True
            connection.cursor()
            connections_dict[id(connection)] = connection

        for x in range(2):
            t = threading.Thread(target=runner)
            t.start()
            t.join()
        # Check that each created connection got different inner connection:
        # main thread + 2 runner threads = 3 distinct DBAPI connections.
        self.assertEqual(
            len(set(conn.connection for conn in connections_dict.values())),
            3)
        # Finish by closing the connections opened by the other threads (the
        # connection opened in the main thread will automatically be closed on
        # teardown).
        for conn in connections_dict.values():
            if conn is not connection:
                conn.close()

    def test_connections_thread_local(self):
        """
        Ensure that the connections are different for each thread.
        Refs #17258.
        """
        # Map connections by id because connections with identical aliases
        # have the same hash.
        connections_dict = {}
        for conn in connections.all():
            connections_dict[id(conn)] = conn

        def runner():
            from django.db import connections
            for conn in connections.all():
                # Allow thread sharing so the connection can be closed by the
                # main thread.
                conn.allow_thread_sharing = True
                connections_dict[id(conn)] = conn

        for x in range(2):
            t = threading.Thread(target=runner)
            t.start()
            t.join()
        # NOTE(review): 6 assumes connections.all() yields 2 aliases per
        # thread across 3 threads — depends on the test settings' DATABASES.
        self.assertEqual(len(connections_dict), 6)
        # Finish by closing the connections opened by the other threads (the
        # connection opened in the main thread will automatically be closed on
        # teardown).
        for conn in connections_dict.values():
            if conn is not connection:
                conn.close()

    def test_pass_connection_between_threads(self):
        """
        Ensure that a connection can be passed from one thread to the other.
        Refs #17258.
        """
        models.Person.objects.create(first_name="John", last_name="Doe")

        def do_thread():
            def runner(main_thread_connection):
                from django.db import connections
                connections['default'] = main_thread_connection
                try:
                    models.Person.objects.get(first_name="John", last_name="Doe")
                except Exception as e:
                    # Collected for assertion on the main thread.
                    exceptions.append(e)
            t = threading.Thread(target=runner, args=[connections['default']])
            t.start()
            t.join()

        # Without touching allow_thread_sharing, which should be False by default.
        exceptions = []
        do_thread()
        # Forbidden!
        self.assertIsInstance(exceptions[0], DatabaseError)

        # If explicitly setting allow_thread_sharing to False
        connections['default'].allow_thread_sharing = False
        exceptions = []
        do_thread()
        # Forbidden!
        self.assertIsInstance(exceptions[0], DatabaseError)

        # If explicitly setting allow_thread_sharing to True
        connections['default'].allow_thread_sharing = True
        exceptions = []
        do_thread()
        # All good
        self.assertEqual(exceptions, [])

    def test_closing_non_shared_connections(self):
        """
        Ensure that a connection that is not explicitly shareable cannot be
        closed by another thread.
        Refs #17258.
        """
        # First, without explicitly enabling the connection for sharing.
        exceptions = set()

        def runner1():
            def runner2(other_thread_connection):
                try:
                    other_thread_connection.close()
                except DatabaseError as e:
                    exceptions.add(e)
            t2 = threading.Thread(target=runner2, args=[connections['default']])
            t2.start()
            t2.join()

        t1 = threading.Thread(target=runner1)
        t1.start()
        t1.join()
        # The exception was raised
        self.assertEqual(len(exceptions), 1)

        # Then, with explicitly enabling the connection for sharing.
        exceptions = set()

        def runner1():
            def runner2(other_thread_connection):
                try:
                    other_thread_connection.close()
                except DatabaseError as e:
                    exceptions.add(e)
            # Enable thread sharing
            connections['default'].allow_thread_sharing = True
            t2 = threading.Thread(target=runner2, args=[connections['default']])
            t2.start()
            t2.join()

        t1 = threading.Thread(target=runner1)
        t1.start()
        t1.join()
        # No exception was raised
        self.assertEqual(len(exceptions), 0)
class MySQLPKZeroTests(TestCase):
    """
    Zero as id for AutoField should raise exception in MySQL, because MySQL
    does not allow zero for autoincrement primary key.
    """

    @skipIfDBFeature('allows_auto_pk_0')
    def test_zero_as_autoval(self):
        # Backends that cannot represent pk=0 must refuse it loudly instead
        # of silently reassigning the id.
        with self.assertRaises(ValueError):
            models.Square.objects.create(id=0, root=0, square=1)
class DBConstraintTestCase(TestCase):
    """FK fields declared without a database-level constraint (db_constraint=False
    on the model, presumably — confirm against the backends test models)."""

    def test_can_reference_existent(self):
        obj = models.Object.objects.create()
        ref = models.ObjectReference.objects.create(obj=obj)
        self.assertEqual(ref.obj, obj)
        # Round-trip through the database.
        ref = models.ObjectReference.objects.get(obj=obj)
        self.assertEqual(ref.obj, obj)

    def test_can_reference_non_existent(self):
        # Writing a dangling FK id must succeed (no DB constraint)...
        self.assertFalse(models.Object.objects.filter(id=12345).exists())
        ref = models.ObjectReference.objects.create(obj_id=12345)
        ref_new = models.ObjectReference.objects.get(obj_id=12345)
        self.assertEqual(ref, ref_new)
        # ...but dereferencing it raises DoesNotExist.
        with self.assertRaises(models.Object.DoesNotExist):
            ref.obj

    def test_many_to_many(self):
        obj = models.Object.objects.create()
        obj.related_objects.create()
        self.assertEqual(models.Object.objects.count(), 2)
        self.assertEqual(obj.related_objects.count(), 1)
        # Insert a dangling row directly into the through table; the related
        # manager must filter it out while the raw table still holds it.
        intermediary_model = models.Object._meta.get_field("related_objects").remote_field.through
        intermediary_model.objects.create(from_object_id=obj.id, to_object_id=12345)
        self.assertEqual(obj.related_objects.count(), 1)
        self.assertEqual(intermediary_model.objects.count(), 2)
class BackendUtilTests(SimpleTestCase):

    def test_format_number(self):
        """
        Exercise the format_number converter utility over representative
        (value, max_digits, decimal_places) combinations.
        """
        def check(value, max_digits, places, expected):
            self.assertEqual(format_number(Decimal(value), max_digits, places), expected)

        # (input, max_digits, decimal_places, expected output)
        cases = [
            ('0', 12, 3, '0.000'),
            ('0', 12, 8, '0.00000000'),
            ('1', 12, 9, '1.000000000'),
            ('0.00000000', 12, 8, '0.00000000'),
            ('0.000000004', 12, 8, '0.00000000'),
            ('0.000000008', 12, 8, '0.00000001'),
            ('0.000000000000000000999', 10, 8, '0.00000000'),
            ('0.1234567890', 12, 10, '0.1234567890'),
            ('0.1234567890', 12, 9, '0.123456789'),
            ('0.1234567890', 12, 8, '0.12345679'),
            ('0.1234567890', 12, 5, '0.12346'),
            ('0.1234567890', 12, 3, '0.123'),
            ('0.1234567890', 12, 1, '0.1'),
            ('0.1234567890', 12, 0, '0'),
            ('0.1234567890', None, 0, '0'),
            ('1234567890.1234567890', None, 0, '1234567890'),
            ('1234567890.1234567890', None, 2, '1234567890.12'),
            ('0.1234', 5, None, '0.1234'),
            ('123.12', 5, None, '123.12'),
        ]
        for value, max_digits, places, expected in cases:
            check(value, max_digits, places, expected)

        # Values that cannot fit in max_digits must raise Rounded rather
        # than being silently truncated.
        with self.assertRaises(Rounded):
            check('0.1234567890', 5, None, '0.12346')
        with self.assertRaises(Rounded):
            check('1234567890.1234', 5, None, '1234600000')
@unittest.skipUnless(connection.vendor == 'sqlite', 'SQLite specific test.')
@skipUnlessDBFeature('can_share_in_memory_db')
class TestSqliteThreadSharing(TransactionTestCase):
    """A shared in-memory SQLite database must be writable from two threads."""

    available_apps = ['backends']

    def test_database_sharing_in_threads(self):
        def create_object():
            models.Object.objects.create()

        # One insert from the main thread, one from a worker thread;
        # both must land in the same shared database.
        create_object()
        thread = threading.Thread(target=create_object)
        thread.start()
        thread.join()
        self.assertEqual(models.Object.objects.count(), 2)
|
decorators.py | import threading
import time
from functools import wraps
from mxdc.utils.log import get_module_logger
logger = get_module_logger(__name__)
def memoize(f):
    """Memoization decorator for functions taking one or more positional arguments."""

    class _Cache(object):
        """Callable wrapper caching results keyed by the positional-argument tuple."""

        def __init__(self, func):
            self.store = {}
            self.f = func

        def __call__(self, *args):
            # EAFP: a cache hit is the common case after warm-up.
            try:
                return self.store[args]
            except KeyError:
                result = self.f(*args)
                self.store[args] = result
                return result

    return _Cache(f)
def async_call(f):
    """
    Run the specified function asynchronously in a daemon thread.

    The wrapper returns immediately after starting the worker thread, so
    return values of the wrapped callable are not available.

    :param f: function or method
    """
    from mxdc.com.ca import threads_init

    def new_f(*args, **kwargs):
        threads_init()  # enable epics environment to be active within thread
        return f(*args, **kwargs)

    @wraps(f)
    def _f(*args, **kwargs):
        # daemon=True so a pending async call never blocks interpreter exit.
        # Thread name/daemon are passed to the constructor: setDaemon() and
        # setName() are deprecated since Python 3.10.
        worker = threading.Thread(
            target=new_f, args=args, kwargs=kwargs,
            daemon=True, name='Async Call: {}'.format(f.__name__),
        )
        worker.start()

    return _f
def ca_thread_enable(f):
    """
    Decorator ensuring an active EPICS CA context exists (joining one if
    necessary) before the wrapped callable runs.

    :param f: function or method
    """
    from mxdc.com.ca import threads_init

    @wraps(f)
    def wrapper(*args, **kwargs):
        threads_init()
        return f(*args, **kwargs)

    return wrapper
def timeit(method):
    """
    Decorator that prints the wall-clock duration of each call.

    :param method: callable to time
    :return: wrapped callable that forwards the original return value
    """
    @wraps(method)
    def timed(*args, **kw):
        # perf_counter() is monotonic and higher-resolution than time.time(),
        # so measured durations cannot go negative on system clock changes.
        ts = time.perf_counter()
        result = method(*args, **kw)
        te = time.perf_counter()
        print(('%r (%r, %r) %2.2f sec' % (method.__name__, args, kw, te - ts)))
        return result
    return timed
searchScaleTests.py | # searchScaleTests.py: test out the scalability of our hyperparameter searching
import os
import time
import random
import logging
from threading import Thread
from xtlib import utils
from xtlib.run import Run
from xtlib import cmd_core
from xtlib import constants
from xtlib import file_utils
from xtlib.helpers import xt_config
from xtlib.storage.store import Store
from xtlib.hparams.hparam_search import HParamSearch
class SearchScaleTester():
    """Stress-tests hyperparameter-search runs against storage and MongoDB."""

    def __init__(self, config, store):
        '''
        test the scalability of runs during hyperparameter searching - involves:
            - storage blobs writing
            - MongoDB update of runs/metrics
            - MongoDB smart retrieval of run histories
        '''
        self.config = config
        self.store = store
        self.assert_count = 0      # number of successful _assert() checks

    def _assert(self, value):
        # Minimal assertion helper that tallies how many checks passed.
        assert value
        self.assert_count += 1
        # NOTE(review): resetting self.threads inside the assertion helper
        # looks misplaced (test_scale also resets it); confirm original intent.
        self.threads = []

    def test_scale(self, delay, duration, concurrent, child_count, reports, search_type):
        # Launch `concurrent` runner threads, each simulating a controller
        # creating `child_count` child runs, then wait and report timing.
        self.threads = []
        started = time.time()
        job_id = self.store.create_job()
        # start threads
        for i in range(concurrent):
            run_worker = Thread(target=self.runner, args=(i, job_id, delay, duration, child_count, reports, search_type))
            run_worker.start()
            self.threads.append(run_worker)
        self.wait_for_all_threads()
        elapsed = time.time() - started
        print("{} runs, {} retryable MONGO errors, (elapsed: {:.2f} mins)".format(concurrent*child_count, self.store.mongo.retry_errors, elapsed/60))

    def wait_for_all_threads(self):
        # Block until every runner thread has finished.
        for thread in self.threads:
            thread.join()

    def runner(self, concurrent_index, job_id, delay, duration, child_count, reports, search_type):
        # Thread body: simulate one controller creating child runs that log
        # hyperparameters and fake metrics with randomized delays.
        ws_name = "quick-test"
        exper_name = "qtexper"
        fn = "code/miniSweeps.yaml"
        yd = file_utils.load_yaml(fn)
        hd = yd[constants.HPARAM_DIST]
        # simulate a controller for each concurrent runner
        hparam_search = HParamSearch()
        for index in range(child_count):
            # create a new RUN record
            run_name = self.store.start_run(ws_name, exper_name=exper_name, is_parent=False, job_id=job_id, node_index=0,
                                            search_type=search_type, search_style="dynamic")
            # Child code under test reads its identity from the environment.
            os.environ["XT_RUN_NAME"] = run_name
            os.environ["XT_WORKSPACE_NAME"] = ws_name
            os.environ["XT_EXPERIMENT_NAME"] = exper_name
            fake_context = cmd_core.build_mock_context(self.config, job_id, ws_name, exper_name, run_name)
            metric_name = fake_context.primary_metric
            xt_run = Run(self.config, self.store, supress_normal_output=True)
            xt_run.direct_run = True
            xt_run.context = fake_context
            #print("  starting: concurrent_index={}, child_index={}".format(concurrent_index, index))
            # delay start
            sleep_time = delay * random.random()
            time.sleep(sleep_time)
            hp_set = xt_run.get_next_hp_set_in_search(hd, search_type, hparam_search=hparam_search)
            # "channels1" is a hyperparameter expected in miniSweeps.yaml.
            self._assert( "channels1" in hp_set )
            # log HPARAMS
            xt_run.log_hparams(hp_set)
            for i in range(reports):
                run_time = (duration/reports) * random.random()
                time.sleep(run_time)
                # log METRICS
                fake_metric = random.random()
                md = {"epoch": 1+i, "acc": fake_metric}
                xt_run.log_metrics(md, step_name="epoch", stage="test")
            # mark the run as completed
            xt_run.close()
            #print("  completing: {}".format(run_name))
def main(concurrent=15):
    """Run the scale test with default sizing and return the passed-assert count."""
    config = xt_config.get_merged_config()
    store = Store(config=config)
    tester = SearchScaleTester(config, store)
    tester.test_scale(delay=10, duration=60, concurrent=concurrent, child_count=6, reports=5,
                      search_type="random")
    count = tester.assert_count
    return count
if __name__ == "__main__":
logger = logging.getLogger(__name__)
utils.init_logging(constants.FN_QUICK_TEST_EVENTS, logger, "XT Quick-Test")
main(concurrent=30) |
runServer.py | # encoding: utf-8
# 该文件,为无界面启动文件,以vtServer为容器,加载MainEngine
# 配置:
# self.gateway_name ,gateway 的连接名称,在vtEngine.initGateway()里面定义,对应的配置文件是 "连接名称_connect.json",
# self.strategies:启动的策略实例,须在catStrategy/CtaSetting.json 里面定义 [u'S28_RB1001', u'S28_TFT', u'S28_HCRB',u'atr_rsi']
# vtServer的ZMQ端口: 从VT_Setting.json中配置,根据AUTO_CONNCET_GW找到对应得端口配置
import sys,os
curPath = os.path.abspath(os.path.dirname(__file__))
rootPath = os.path.split(curPath)[0]
sys.path.append(rootPath)
import ctypes
from datetime import datetime, timedelta, date
from time import sleep
from threading import Thread
# 将repostory的目录i,作为根目录,添加到系统环境中。
ROOT_PATH = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..'))
sys.path.append(ROOT_PATH)
# from vnpy.trader.vtEvent import *
from vnpy.rpc import RpcServer
from vnpy.trader.vtEngine import MainEngine
from vnpy.trader.gateway import ctpGateway
from vnpy.trader.setup_logger import setup_logger,get_logger
from vnpy.trader.util_monitor import *
from vnpy.trader.vtGlobal import globalSetting
from vnpy.trader.util_gpid import *
from vnpy.trader.app import ctaStrategy,riskManager
AUTO_CONNCET_GW = 'CTP'
########################################################################
class VtServer(RpcServer):
    """vn.trader headless (no-UI) trading server exposed over RPC."""

    # ----------------------------------------------------------------------
    def __init__(self, repAddress, pubAddress):
        """Constructor"""
        super(VtServer, self).__init__(repAddress, pubAddress)
        #self.usePickle()
        # Whether the gateway is currently connected.
        self.connected = False
        # Gateway connection name, defined in vtEngine.initGateway(); the
        # matching config file is "<name>_connect.json".
        self.gateway_name = AUTO_CONNCET_GW
        # Strategy instances to start; must be defined in
        # ctaStrategy/CtaSetting.json, e.g. [u'S28_RB1001', u'S28_TFT', ...].
        self.strategies = [u'Strategy_TripleMa_01_RB_Min5']
        self.g_count = 0            # timer tick counter (check every 30 ticks)
        self.disconnect_signal = 0  # consecutive failed gateway health checks
        self.last_dt = datetime.now()
        # Create the event engine.
        # NOTE(review): EventEngine2/EVENT_TIMER come from star imports at the
        # top of the file (util_monitor/util_gpid chain) — confirm the source.
        ee = EventEngine2()
        # Create the main engine object.
        print( u'instance mainengine')
        self.engine = MainEngine(ee)
        # Add the CTP gateway (config file: CTP_Post).
        self.engine.addGateway(ctpGateway, self.gateway_name)
        # Add applications.
        self.engine.addApp(ctaStrategy)
        self.engine.addApp(riskManager)
        # Register main-engine methods as server-side RPC functions.
        self.register(self.engine.connect)
        self.register(self.engine.disconnect)
        self.register(self.engine.subscribe)
        self.register(self.engine.sendOrder)
        self.register(self.engine.cancelOrder)
        self.register(self.engine.qryAccount)
        self.register(self.engine.qryPosition)
        self.register(self.engine.checkGatewayStatus)   # check gateway connection status
        self.register(self.engine.qryStatus)            # query ctaEngine status
        self.register(self.engine.exit)
        self.register(self.engine.writeLog)
        self.register(self.engine.dbConnect)
        self.register(self.engine.dbInsert)
        self.register(self.engine.dbQuery)
        self.register(self.engine.dbUpdate)
        self.register(self.engine.getContract)
        self.register(self.engine.getAllContracts)
        self.register(self.engine.getOrder)
        self.register(self.engine.getAllWorkingOrders)
        self.register(self.engine.getAllGatewayNames)
        self.register(self.engine.saveData)
        self.register(self.engine.initStrategy)
        self.register(self.engine.startStrategy)
        self.register(self.engine.stopStrategy)
        # Register the general handler for events emitted by the event engine.
        self.engine.eventEngine.registerGeneralHandler(self.eventHandler)

    def trade_off(self):
        """Return True when the current time is outside trading hours."""
        now = datetime.now()
        # Session boundaries (local time): 02:35, 08:55, 15:30, 20:55.
        a = datetime.now().replace(hour=2, minute=35, second=0, microsecond=0)
        b = datetime.now().replace(hour=8, minute=55, second=0, microsecond=0)
        c = datetime.now().replace(hour=15, minute=30, second=0, microsecond=0)
        d = datetime.now().replace(hour=20, minute=55, second=0, microsecond=0)
        # Weekend: Saturday after 02:35, all Sunday, Monday before 08:55.
        weekend = (now.isoweekday() == 6 and now >= a) or (now.isoweekday() == 7) or (now.isoweekday() == 1 and now <=b)
        off = (a <= now <= b) or (c <= now <= d) or weekend
        return off

    def disconnect(self):
        """Disconnect the underlying gateway connection."""
        if self.engine:
            self.engine.disconnect(self.gateway_name)
            self.connected = False

    def onTimer(self, event):
        """Timer handler; the real check runs only once every 30 ticks."""
        self.g_count += 1
        if self.g_count <= 30:
            return
        self.g_count = 0
        dt = datetime.now()
        self.engine.qryStatus()
        # Hourly heartbeat line in the log.
        if dt.hour != self.last_dt.hour:
            self.last_dt = dt
            print(u'noUiMain.py checkpoint:{0}'.format(dt))
            self.engine.writeLog( u'noUiMain.py checkpoint:{0}'.format(dt))
        # Scheduled disconnect outside trading hours.
        if self.trade_off():
            """非交易时间"""
            if self.connected:
                self.engine.writeLog(u'断开连接{0}'.format(self.gateway_name))
                self.disconnect()
                self.engine.writeLog(u'清空数据引擎')
                self.engine.clearData()
                self.connected = False
                self.engine.writeNotification(u'非交易时间{0},断开连接{1}'.format(dt, self.gateway_name))
            return
        # Within trading hours: reconnect if needed, otherwise health-check.
        if not self.connected:
            self.engine.writeLog(u'启动连接{0}'.format(self.gateway_name))
            self.engine.writeLog(u'清空数据引擎')
            self.engine.clearData()
            self.engine.writeLog(u'重新连接{0}'.format(self.gateway_name))
            self.engine.connect(self.gateway_name)
            self.connected = True
            self.disconnect_signal = 0
            self.engine.writeNotification(u'{0},重新连接{1}'.format(dt, self.gateway_name))
            return
        else:
            # Five consecutive failed health checks → exit the process
            # (an external supervisor is presumably expected to restart it).
            if not self.engine.checkGatewayStatus(self.gateway_name):
                self.disconnect_signal += 1
                if self.disconnect_signal >= 5:
                    self.engine.writeWarning(u'检查连接{0}异常,超过{1}次'.format(self.gateway_name,self.disconnect_signal))
                    sys.exit(0)
            else:
                self.disconnect_signal = 0

    def start(self):
        """Start the server: load/start strategies, connect the gateway, attach monitors."""
        super(VtServer, self).start()
        # Connect the database here if needed:
        # self.mainEngine.dbConnect()
        # Load the CTA configuration.
        print( u'load cta setting')
        self.engine.ctaEngine.loadSetting()
        print(u'initialize all strategies')
        # Initialize each strategy; if there are several, initialize one by one.
        for s in self.strategies:
            print( 'init trategy {0}'.format(s))
            self.engine.ctaEngine.initStrategy(s)
            # Start each strategy in turn.
            print( 'start strategy {0}'.format(s))
            self.engine.ctaEngine.startStrategy(s)
        # Connect the configured gateway (only during trading hours).
        if not self.trade_off():
            print( u'connect gateway:{0}'.format(self.gateway_name))
            self.engine.connect(self.gateway_name)
            self.connected = True
        # Register the timer used to drive reconnect/health checks.
        self.engine.eventEngine.register(EVENT_TIMER, self.onTimer)
        # Attach all log/trade/order/position/account monitors.
        self.logM = LogMonitor(self.engine.eventEngine)
        self.errorM = ErrorMonitor(self.engine.eventEngine)
        self.tradeM = TradeMonitor(self.engine.eventEngine)
        self.orderM = OrderMonitor(self.engine.eventEngine, self.engine)
        self.positionM = PositionMonitor(self.engine.eventEngine)
        self.accountM = AccountMonitor(self.engine.eventEngine)
        self.engine.writeNotification(u'{0},服务启动{1}'.format(datetime.now(),self. gateway_name))

    # ----------------------------------------------------------------------
    def eventHandler(self, event):
        """Forward every engine event to RPC subscribers."""
        try:
            # Delegates to RpcServer.publish(); topic must be bytes-compatible.
            if isinstance(event.type_, str):
                self.publish(event.type_, event)
            else:
                self.publish(event.type_.encode('utf-8'), event)
        except Exception as ex:
            print( u'event Handler exception:{0}'.format(str(ex)))

    # ----------------------------------------------------------------------
    def stopServer(self):
        """Stop the server: shut down the engine, then the RPC thread."""
        print( 'stopServer')
        # Shut down the engine.
        self.engine.exit()
        # Stop the server thread.
        self.stop()
# ----------------------------------------------------------------------
def printLog(content):
    """Write *content* to stdout prefixed with the current HH:MM:SS time."""
    timestamp = datetime.now().strftime("%H:%M:%S")
    print(timestamp, '\t', content)
# ----------------------------------------------------------------------
def runServer():
    """Configure logging, resolve the ZMQ endpoints and start the trader server."""
    try:
        log_file_name = os.path.abspath(os.path.join(os.path.dirname(os.path.abspath(__file__)),
                                                     'logs', u'noUiMain.log'))
    except Exception as ex:
        # __file__ may be unavailable (frozen/interactive); fall back to cwd.
        print( u'Use local dict:{0}'.format(os.getcwd()))
        log_file_name = os.path.abspath(os.path.join(os.getcwd(), 'logs', u'noUiMain.log'))
    setup_logger(filename=log_file_name, debug=False)
    # Req/Publish ports: look up the gateway-specific ZMQ endpoints from the
    # global settings, falling back to defaults when not configured.
    try:
        zmqAddressDict = globalSetting['ZMQ']
        zmqAddress = zmqAddressDict[AUTO_CONNCET_GW]
        reqAddress = zmqAddress['ReqAddress']
        pubAddress = zmqAddress['PubAddress']
    except (KeyError, TypeError):
        # Narrowed from a bare `except:`, which would also have swallowed
        # SystemExit / KeyboardInterrupt.
        reqAddress = 'tcp://*:2014'
        pubAddress = 'tcp://*:2016'
    # Create and start the server.
    server = VtServer(reqAddress, pubAddress)
    server.start()
    printLog('-' * 50)
    printLog(u'Request端口:{0}'.format(reqAddress))
    printLog(u'Publish端口:{0}'.format(pubAddress))
    printLog(u'Trader服务器已启动')
if __name__ == '__main__':
    # Main entry point: run the RPC server in a background thread.
    thread = Thread(target=runServer, args=())
    thread.start()
|
comm.py | from __future__ import absolute_import
import socket
import select
import threading
import pickle
import logging
import time
import os
def create_pickle_msg(rank, cmd, data):
    """Serialize a control message (rank/cmd/data dict) as pickled bytes."""
    payload = {"rank": rank, "cmd": cmd, "data": data}
    return pickle.dumps(payload)
class CommServer():
    """A communication server which broadcasts messages to clients."""

    def __init__(self, rank, host, port, logger):
        """
        Arguments:
            rank: the rank of the worker.
            host: the IP of the server.
            port: the network port of the server.
            logger: the logging handler.
        """
        self._rank = rank
        self._host = host
        self._port = port
        self._logger = logger
        self._config = None     # last config passed to broadcast(); drained by get()
        self._exit = False      # set by shutdown() to stop the accept loop
        self._sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self._sock.bind((host, port))
        self._sock.listen(5)
        self._rsocks = [self._sock]   # sockets watched for reads (incl. listener)
        self._wsocks = []             # accepted client sockets for broadcast
        # Accept/receive loop runs on a background thread until shutdown().
        self._thread = threading.Thread(target=self._run, args=())
        self._thread.start()

    def _run(self):
        """Collect all client sockets and receive ADDNODE message."""
        while not self._exit:
            # 10s timeout so the loop can observe self._exit periodically.
            try:
                r_ready, w_ready, e_ready = select.select(self._rsocks, [], [], 10)
            except select.error as err:
                self._logger.error("Select error: {}".format(err))
                return
            if r_ready:
                for sd in r_ready:
                    if sd == self._sock:
                        # Listener is readable: a new client is connecting.
                        fd, addr = self._sock.accept()
                        self._logger.debug('accept socket: {}, addr: {}'.format(fd, addr))
                        self._rsocks.append(fd)
                        self._wsocks.append(fd)
                    else:
                        msg = sd.recv(4096)
                        if not msg:
                            continue
                        # NOTE(review): assumes the whole pickled message fits
                        # in one 4096-byte recv — no framing; confirm message
                        # sizes stay small.
                        msg = pickle.loads(msg)
                        self._logger.debug("Worker {} received message: {}".format(self._rank, msg))
                        rank = msg["rank"]
                        cmd = msg["cmd"]
                        if cmd == "ADDNODE":
                            self._logger.debug("Node {} is added.".format(rank))

    def shutdown(self):
        """Shut the server down."""
        self._exit = True
        self._thread.join()
        # Closing _rsocks also covers the listener and all accepted clients.
        for sock in self._rsocks:
            sock.close()
        self._logger.debug("shutdown worker {}".format(self._rank))

    def broadcast(self, data):
        """ Broadcast a config (partition_size, credit_size) to all other workers.
        Arguments:
            data: the configuration to be sent to other workers.
        """
        self._config = data
        msg = create_pickle_msg(self._rank, "DATA", data)
        # NOTE(review): send() may transmit fewer bytes than requested;
        # sendall() semantics are presumably intended here — confirm.
        for sock in self._wsocks:
            sock.send(msg)

    def get(self):
        """Return the received configuration (once), or implicitly None if unset."""
        if self._config is not None:
            config = self._config
            self._config = None
            return config
class CommClient():
    """A communication client which registers to server and receives messages."""

    def __init__(self, rank, host, port, logger):
        """
        Arguments:
            rank: the rank of the worker.
            host: the IP of the server.
            port: the network port of the server.
            logger: the logging handler.
        """
        self._rank = rank
        self._host = host
        self._port = port
        self._logger = logger
        self._config = None     # last config received from the server; drained by get()
        self._exit = False      # set by shutdown() to stop the receive loop
        self._sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        # Connect/receive loop runs on a background thread until shutdown().
        self._thread = threading.Thread(target=self._run, args=())
        self._thread.start()

    def shutdown(self):
        """Stop thread and close socket."""
        self._exit = True
        self._thread.join()
        self._sock.close()
        self._logger.debug("shutdown worker {}".format(self._rank))

    def _run(self):
        """Listening to `CommServer` to get configuration."""
        # Retry connecting once per second until the server is up (or exit).
        while not self._exit:
            try:
                self._sock.connect((self._host, self._port))
                break
            except Exception as err:
                self._logger.debug("Error connect to rank 0: {}".format(err))
                time.sleep(1)
        # Register with server
        msg = create_pickle_msg(self._rank, "ADDNODE", "")
        self._sock.send(msg)
        while not self._exit:
            # 10s timeout so the loop can observe self._exit periodically.
            try:
                r_ready, w_ready, e_ready = select.select([self._sock], [], [], 10)
            except select.error as err:
                self._logger.error("Select error: {}".format(err))
                return
            if r_ready:
                msg = self._sock.recv(4096)
                if not msg:
                    continue
                # NOTE(review): assumes one whole pickled message per recv —
                # no framing; confirm message sizes stay small.
                msg = pickle.loads(msg)
                self._logger.debug("Received msg: {}".format(msg))
                cmd = msg["cmd"]
                if cmd == "DATA":
                    self._config = msg["data"]
                else:
                    self._logger.error("Received unknown cmd {}".format(cmd))

    def get(self):
        """Return the received configuration (once), or implicitly None if unset."""
        if self._config is not None:
            config = self._config
            self._config = None
            return config
def create_comm(rank, host=None, port=None, logger=None):
    """Launch a server in worker 0 and a client in other workers.
    Arguments:
        host: Network IP of worker 0. If not specified, use BYTESCHEDULER_ROOT_IP.
        port: Network port of worker 0. If not specified, use BYTESCHEDULER_ROOT_PORT.
    """
    host = os.getenv("BYTESCHEDULER_ROOT_IP", "") if host is None else host
    port = int(os.getenv("BYTESCHEDULER_ROOT_PORT", -1)) if port is None else port
    assert host != "", "Unknown BYTESCHEDULER_ROOT_IP!"
    assert port >= 0, "Unknown BYTESCHEDULER_ROOT_PORT!"
    logger = logging.getLogger("ByteScheduler") if logger is None else logger
    logger.info("Comm host: {}, port: {}".format(host, port))
    # Worker 0 hosts the broadcast server; all other ranks connect as clients.
    comm_cls = CommServer if rank == 0 else CommClient
    return comm_cls(rank, host, port, logger)
|
plotdata.py | #!/usr/bin/env python
# Install pyqtgraph with:
# sudo apt install python-pyqtgraph
from pyqtgraph.Qt import QtGui, QtCore
import pyqtgraph as pg
import os, sys
import collections
import random
import time
import math
import numpy as np
import Queue
import threading
import struct
import socket
import select
print(sys.argv[1:])
# plot source: first positional argument is the data/log file to follow.
if len(sys.argv) >= 2:
    src_file = sys.argv[1]
else:
    print('Usage: plotdata.py [file] <numlines=1> <ymin=-1> <ymax=1>')
    sys.exit(0)
# plot parameters: optional curve count and fixed y-axis range.
# Each data line is expected to hold num_lines+1 columns (time + values).
if len(sys.argv) >= 3:
    num_lines = int(sys.argv[2])
else:
    num_lines = 1
print('Using %d curves. Requires %d columns.' % (num_lines, num_lines+1))
if len(sys.argv) >= 4:
    y_min = float(sys.argv[3])
else:
    y_min = -1
if len(sys.argv) >= 5:
    y_max = float(sys.argv[4])
else:
    y_max = 1
class DynamicPlotter():
    """Live strip chart of numeric columns tailed from a growing log file.

    A daemon producer thread follows `src_file` (tolerating log rotation)
    and appends parsed column values to fixed-length ring buffers; a Qt
    timer copies the buffers onto pyqtgraph curves every `sampleinterval`
    seconds.

    NOTE(review): Python 2 code (print statement in readLogLine, `Queue`
    module at file top); it will not run unmodified on Python 3.
    """
    def __init__(self, sampleinterval = 0.1, timewindow = 10.0, size=(1024, 640)):
        # Derived timing: Qt timer period in ms, and ring-buffer length =
        # number of samples covering `timewindow`.
        self._ncurve = num_lines
        self._interval = int(sampleinterval * 1000)
        self._bufsize = int(timewindow / sampleinterval)
        # One deque per curve; the maxlen argument makes each a ring buffer.
        self.databuffers = [ collections.deque([0.0]*self._bufsize, self._bufsize) for i in range(self._ncurve)]
        # Shared x axis: the last `timewindow` seconds, newest sample at 0.
        self.x = np.linspace(-timewindow, 0.0, self._bufsize)
        # NOTE(review): np.float is a deprecated alias of builtin float and
        # was removed in NumPy 1.24 -- this requires an old NumPy release.
        self.ys = [ np.zeros(self._bufsize, dtype=np.float) for i in range(self._ncurve)]
        # Set up plot
        self.app = QtGui.QApplication([])
        self.plt = pg.plot(title='Dynamic Plotting with PyQtGraph')
        self.plt.resize(*size)
        self.plt.showGrid(x=True, y=True)
        self.plt.setYRange(y_min, y_max)
        self.plt.setLabel('left', 'amplitude', 'V')
        self.plt.setLabel('bottom', 'time', 's')
        # Horizontal pan/zoom disabled, vertical enabled.
        self.plt.setMouseEnabled(False, True)
        # Eight fixed-colour curves; all start from the same zero buffer
        # (ys[0]) -- updateplot() later assigns per-curve data anyway.
        self.c0 = self.plt.plot(self.x, self.ys[0], pen=(255,0,0));
        self.c1 = self.plt.plot(self.x, self.ys[0], pen=(0 ,255,0));
        self.c2 = self.plt.plot(self.x, self.ys[0], pen=(0 ,0,255));
        self.c3 = self.plt.plot(self.x, self.ys[0], pen=(255,255,0));
        self.c4 = self.plt.plot(self.x, self.ys[0], pen=(0,255,255));
        self.c5 = self.plt.plot(self.x, self.ys[0], pen=(255,0,255));
        self.c6 = self.plt.plot(self.x, self.ys[0], pen=(255,255,255));
        # extruder temperature
        self.c7 = self.plt.plot(self.x, self.ys[0], pen=(0, 100, 0));
        # plotted curve
        self.curves = [ self.c0, self.c1, self.c2, self.c3, self.c4, self.c5, self.c6, self.c7];
        self.curves = self.curves[0:num_lines]
        # QTimer to refresh display
        self.timer = QtCore.QTimer()
        self.timer.timeout.connect(self.updateplot)
        self.timer.start(self._interval)
        # Producer thread that adds data from log file
        # NOTE(review): dataQueue is created and passed to the producer but
        # never used for transport -- samples go straight into databuffers
        # under queueLock. Presumably left over from an earlier design.
        self.dataQueue = Queue.Queue()
        self.queueLock = threading.Lock()
        t = threading.Thread(target=self.producer, args=(self.dataQueue,self.queueLock))
        t.daemon = True
        t.start()
    def openLog(self):
        # Open the log and seek to its end: only samples written after
        # startup are plotted.
        self.logfile = open(src_file, 'r')
        self.logfile.seek(0, os.SEEK_END)
    # This function uses a somewhat loosely defined heuristic.
    # We read one line at a time from the log. If the file is written into,
    # We may sometimes get an incomplete line. In that case, we probably
    # got an EOF on the current file descriptor, so, we read the current
    # file position, reopen, seek to the position and keep reading.
    # If we notice that the file got smaller, we assume the log got
    # rotated underneath us, so, we simply throw away the partial line
    # we read, reopen the file and start afresh. If we successfuly
    # reopen and seek into the file, we simply try reading the rest of
    # the line.
    def readLogLine(self):
        # Blocks (busy-waits) until a complete newline-terminated line is
        # available; returns it including the trailing '\n'.
        line = ""
        while True:
            line = line + self.logfile.readline()
            if not line:
                # Empty line. See if the log got rotated
                currentPos = self.logfile.tell()
                while True:
                    try:
                        self.logfile = open(src_file, 'r')
                        fileSize = os.stat(src_file).st_size
                        if fileSize < currentPos:
                            # File got smaller which means log got rotated,
                            # so, we need to reopen and start from scratch
                            print "Log got rotated"
                            line = ""
                            # NOTE(review): no break or currentPos reset in
                            # this branch -- while the rotated file stays
                            # smaller than currentPos this inner loop keeps
                            # reopening the file; confirm this is intended.
                        else:
                            # More data (likely) got appended. Just continue
                            # from where we left off.
                            self.logfile.seek(currentPos)
                            break
                    except:
                        # If we got an exception, we couldn't open or stat
                        # the file, which means rotation is under progress
                        pass
                continue
            if line[len(line) - 1] == '\n':
                break
        return line
    def producer(self, dataQueue, queueLock):
        # Daemon loop: parse each complete log line and append its numeric
        # columns (column 0 is skipped) to the per-curve ring buffers.
        self.openLog()
        while True:
            line = self.readLogLine()
            num = line.split()
            try:
                with queueLock:
                    print('N=%d, line=%s' % (len(num), line))
                    if len(num) >= num_lines+1:
                        for i in range(0,num_lines):
                            self.databuffers[i].append(float(num[i+1]))
            except:
                # Malformed line (e.g. non-numeric column): drop it.
                pass
    def updateplot(self):
        # Timer callback: copy each ring buffer into its plotted array
        # under the lock, then hand the data to the curve.
        # NOTE(review): filterWindow is unused -- looks like a leftover
        # from a removed smoothing step.
        filterWindow = 31
        for i in range(self._ncurve):
            with self.queueLock:
                self.ys[i][:] = self.databuffers[i]
            self.curves[i].setData(self.x, self.ys[i])
    def run(self):
        # Enter the Qt event loop; returns when the window is closed.
        self.app.exec_()
if __name__ == '__main__':
    # Build the plotter (20 s window sampled every 50 ms) and enter the
    # Qt event loop; this blocks until the window is closed.
    plotter = DynamicPlotter(sampleinterval=0.05, timewindow=20.0)
    plotter.run()
|
variable_scope.py | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A class to store named variables and a scope operator to manage sharing."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections as collections_lib
import copy
import enum # pylint: disable=g-bad-import-order
import functools
import sys
import threading
import traceback
import six
from six import iteritems
from six.moves import xrange, zip # pylint: disable=redefined-builtin
from tensorflow.python import tf2
from tensorflow.python.eager import context
from tensorflow.python.eager import monitoring
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import deprecation
from tensorflow.python.util import function_utils
from tensorflow.python.util import tf_contextlib
from tensorflow.python.util import tf_inspect
from tensorflow.python.util.tf_export import tf_export
# Public API of this module.
__all__ = [
    "AUTO_REUSE", "VariableScope", "get_variable_scope", "get_variable",
    "get_local_variable", "variable_scope", "variable_op_scope",
    "no_regularizer", "VariableSynchronization", "VariableAggregation"
]

# Telemetry gauge recording whether resource variables have been enabled;
# flipped by enable_resource_variables()/disable_resource_variables() below.
_api_usage_gauge = monitoring.BoolGauge(
    "/tensorflow/api/resource_variables",
    "Whether variable_scope.enable_resource_variables() is called.")
class _PartitionInfo(object):
    """Holds partition info used by initializer functions.

    Records the full (unpartitioned) shape of a variable and the offset of
    one particular partition within it, so per-shard initializers can slice
    out the correct region.
    """

    # `collections.Sequence` was removed from the top-level `collections`
    # module in Python 3.10; import the ABC from its Python 3 home with a
    # Python 2 fallback so isinstance checks keep working everywhere.
    try:
        from collections.abc import Sequence as _Sequence  # Python 3
    except ImportError:
        from collections import Sequence as _Sequence  # Python 2

    def __init__(self, full_shape, var_offset):
        """Constructor.

        Args:
            full_shape: Tuple or list of `int` indicating the full combined
                shape of the partitioned variables.
            var_offset: Tuple or list of `int` specifying offset of this
                partition with respect to the full variable for each
                dimension.

        Raises:
            TypeError: If `full_shape` or `var_offset` is not a sequence.
            ValueError: If `full_shape` or `var_offset` differ in length. If
                `var_offset` exceeds `full_shape` in any dimension.
        """
        if not isinstance(full_shape, self._Sequence) or isinstance(
                full_shape, six.string_types):
            raise TypeError(
                "`full_shape` must be a sequence (like tuple or list) instead of " +
                type(full_shape).__name__)
        if not isinstance(var_offset, self._Sequence) or isinstance(
                var_offset, six.string_types):
            raise TypeError(
                "`var_offset` must be a sequence (like tuple or list) instead of " +
                type(var_offset).__name__)
        if len(var_offset) != len(full_shape):
            raise ValueError(
                "Expected equal length, but `var_offset` is of length {} while "
                "full_shape is of length {}.".format(
                    len(var_offset), len(full_shape)))
        for offset, shape in zip(var_offset, full_shape):
            if offset < 0 or offset >= shape:
                raise ValueError(
                    "Expected 0 <= offset < shape but found offset={}, shape={} for "
                    "var_offset={}, full_shape={}".format(offset, shape, var_offset,
                                                          full_shape))
        self._full_shape = full_shape
        self._var_offset = var_offset

    @property
    def full_shape(self):
        # Full combined shape of the partitioned variable.
        return self._full_shape

    @property
    def var_offset(self):
        # Per-dimension offset of this partition within `full_shape`.
        return self._var_offset

    def single_offset(self, shape):
        """Returns the offset when the variable is partitioned in at most one dim.

        Args:
            shape: Tuple or list of `int` indicating the shape of one specific
                variable partition.

        Returns:
            `int` representing the offset in the dimension along which the
            variable is partitioned. Returns 0 if the variable is not being
            partitioned.

        Raises:
            ValueError: Depending on self.single_slice_dim().
        """
        single_slice_dim = self.single_slice_dim(shape)
        # If this variable is not being partitioned at all,
        # single_slice_dim() could return None.
        if single_slice_dim is None:
            return 0
        return self.var_offset[single_slice_dim]

    def single_slice_dim(self, shape):
        """Returns the slice dim when the variable is partitioned only in one dim.

        Args:
            shape: Tuple or list of `int` indicating the shape of one specific
                variable partition.

        Returns:
            `int` representing the dimension that the variable is partitioned
            in, or `None` if the variable doesn't seem to be partitioned at
            all.

        Raises:
            TypeError: If `shape` is not a sequence.
            ValueError: If `shape` is not the same length as
                `self.full_shape`. If the variable is partitioned in more
                than one dimension.
        """
        if not isinstance(shape, self._Sequence) or isinstance(
                shape, six.string_types):
            raise TypeError(
                "`shape` must be a sequence (like tuple or list) instead of " +
                type(shape).__name__)
        if len(shape) != len(self.full_shape):
            raise ValueError(
                "Expected equal length, but received shape={} of length {} while "
                "self.full_shape={} is of length {}.".format(shape, len(shape),
                                                             self.full_shape,
                                                             len(self.full_shape)))
        # First validate that the partition fits inside the full variable.
        for i, (dim, full_dim) in enumerate(zip(shape, self.full_shape)):
            if self.var_offset[i] + dim > full_dim:
                raise ValueError(
                    "With self.var_offset={}, a partition of shape={} would exceed "
                    "self.full_shape={} in dimension {}.".format(
                        self.var_offset, shape, self.full_shape, i))
        # The sliced dimension is the (single) one smaller than full_shape.
        slice_dim = None
        for i, (dim, full_dim) in enumerate(zip(shape, self.full_shape)):
            if dim == full_dim:
                continue
            if slice_dim is not None:
                raise ValueError(
                    "Cannot use single_slice_dim() with shape={} and "
                    "self.full_shape={} since slice dim could be either dimension {} "
                    "or {}.".format(shape, self.full_shape, i, slice_dim))
            slice_dim = i
        return slice_dim
class _ReuseMode(enum.Enum):
    """Mode for variable access within a variable scope.

    Only AUTO_REUSE is currently defined; plain True/False/None are still
    accepted for the `reuse` flag (see the TODO below).
    """

    # Indicates that variables are to be fetched if they already exist or
    # otherwise created.
    AUTO_REUSE = 1

    # TODO(alive): For TensorFlow 2.0, Deprecate True/False/None API in favor of
    # enum values.
    # REUSE_FALSE = 2
    # REUSE_TRUE = 3
# TODO(apassos) remove these forwarding symbols.
# Re-exported here for backwards compatibility; the canonical definitions
# live in the `variables` module.
VariableSynchronization = variables.VariableSynchronization  # pylint: disable=invalid-name
VariableAggregation = variables.VariableAggregation  # pylint: disable=invalid-name

# Module-level alias of the enum member, exported as tf.compat.v1.AUTO_REUSE.
AUTO_REUSE = _ReuseMode.AUTO_REUSE
tf_export(v1=["AUTO_REUSE"]).export_constant(__name__, "AUTO_REUSE")
AUTO_REUSE.__doc__ = """
When passed in as the value for the `reuse` flag, AUTO_REUSE indicates that
get_variable() should create the requested variable if it doesn't exist or, if
it does exist, simply return it.
"""

# Whether get_variable() creates ResourceVariables by default; initialized
# from the TF2 switch and toggled by enable/disable_resource_variables().
_DEFAULT_USE_RESOURCE = tf2.enabled()
@tf_export(v1=["enable_resource_variables"])
def enable_resource_variables():
    """Creates resource variables by default.

    Resource variables are improved versions of TensorFlow variables with a
    well-defined memory model. Accessing a resource variable reads its value,
    and all ops which access a specific read value of the variable are
    guaranteed to see the same value for that tensor. Writes which happen
    after a read (by having a control or data dependency on the read) are
    guaranteed not to affect the value of the read tensor, and similarly
    writes which happen before a read are guaranteed to affect the value. No
    guarantees are made about unordered read/write pairs.

    Calling tf.enable_resource_variables() lets you opt-in to this TensorFlow
    2.0 feature.
    """
    global _DEFAULT_USE_RESOURCE
    # Record the opt-in in the usage telemetry gauge, then flip the default.
    _api_usage_gauge.get_cell().set(True)
    _DEFAULT_USE_RESOURCE = True
@tf_export(v1=["resource_variables_enabled"])
def resource_variables_enabled():
    """Returns `True` if resource variables are enabled.

    Resource variables are improved versions of TensorFlow variables with a
    well-defined memory model. Accessing a resource variable reads its value,
    and all ops which access a specific read value of the variable are
    guaranteed to see the same value for that tensor. Writes which happen
    after a read (by having a control or data dependency on the read) are
    guaranteed not to affect the value of the read tensor, and similarly
    writes which happen before a read are guaranteed to affect the value. No
    guarantees are made about unordered read/write pairs.

    Calling tf.enable_resource_variables() lets you opt-in to this TensorFlow
    2.0 feature.
    """
    # Reading a module-level name needs no `global` declaration; the
    # original declaration here was a no-op and has been dropped.
    return _DEFAULT_USE_RESOURCE
@deprecation.deprecated(
    None, "non-resource variables are not supported in the long term")
@tf_export(v1=["disable_resource_variables"])
def disable_resource_variables():
    """Opts out of resource variables.

    If your code needs tf.disable_resource_variables() to be called to work
    properly please file a bug.
    """
    global _DEFAULT_USE_RESOURCE
    # Mirror image of enable_resource_variables(): update the telemetry
    # gauge first, then restore the legacy (non-resource) default.
    _api_usage_gauge.get_cell().set(False)
    _DEFAULT_USE_RESOURCE = False
class _VariableStore(object):
"""Variable store that carries a number of named Variables.
New variable names and new variables can be created; all stored
variables are initialized with the initializer passed to __init__.
Attributes:
vars: a dictionary with string names (same as passed in GetVar) as keys and
the corresponding TensorFlow Variables as values.
"""
def __init__(self):
    """Create an empty variable store."""
    # name -> Variable, for unpartitioned variables.
    self._vars = {}
    # name -> PartitionedVariable, for partitioned variables.
    self._partitioned_vars = {}
    # When True, eagerly-created variables are recorded in the maps above
    # as well (flipped by EagerVariableStore).
    self._store_eager_variables = False
def get_variable(self,
                 name,
                 shape=None,
                 dtype=dtypes.float32,
                 initializer=None,
                 regularizer=None,
                 reuse=None,
                 trainable=None,
                 collections=None,
                 caching_device=None,
                 partitioner=None,
                 validate_shape=True,
                 use_resource=None,
                 custom_getter=None,
                 constraint=None,
                 synchronization=VariableSynchronization.AUTO,
                 aggregation=VariableAggregation.NONE):
    """Gets an existing variable with these parameters or create a new one.

    If a variable with the given name is already stored, we return the stored
    variable. Otherwise, we create a new one.

    Set `reuse` to `True` when you only want to reuse existing Variables.
    Set `reuse` to `False` when you only want to create new Variables.
    Set `reuse` to None (the default) or tf.compat.v1.AUTO_REUSE when you want
    variables to be created if they don't exist or returned if they do.

    If initializer is `None` (the default), the default initializer passed in
    the constructor is used. If that one is `None` too, we use a new
    `glorot_uniform_initializer`. If initializer is a Tensor, we use
    it as a value and derive the shape from the initializer.

    If a partitioner is provided, a `PartitionedVariable` is returned.
    Accessing this object as a `Tensor` returns the shards concatenated along
    the partition axis.

    Some useful partitioners are available. See, e.g.,
    `variable_axis_size_partitioner` and `min_max_variable_partitioner`.

    Args:
      name: The name of the new or existing variable.
      shape: Shape of the new or existing variable.
      dtype: Type of the new or existing variable (defaults to `DT_FLOAT`).
      initializer: Initializer for the variable.
      regularizer: A (Tensor -> Tensor or None) function; the result of applying
        it on a newly created variable will be added to the collection
        GraphKeys.REGULARIZATION_LOSSES and can be used for regularization.
      reuse: a Boolean, None, or tf.AUTO_REUSE. Controls reuse or creation of
        variables. When eager execution is enabled this argument is always
        forced to be False.
      trainable: If `True` also add the variable to the graph collection
        `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`). `trainable`
        defaults to `True`, unless `synchronization` is set to `ON_READ`, in
        which case it defaults to `False`.
      collections: List of graph collections keys to add the `Variable` to.
        Defaults to `[GraphKeys.GLOBAL_VARIABLES]` (see `tf.Variable`).
      caching_device: Optional device string or function describing where the
        Variable should be cached for reading. Defaults to the Variable's
        device. If not `None`, caches on another device. Typical use is to
        cache on the device where the Ops using the `Variable` reside, to
        deduplicate copying through `Switch` and other conditional statements.
      partitioner: Optional callable that accepts a fully defined `TensorShape`
        and dtype of the `Variable` to be created, and returns a list of
        partitions for each axis (currently only one axis can be partitioned).
      validate_shape: If False, allows the variable to be initialized with a
        value of unknown shape. If True, the default, the shape of initial_value
        must be known.
      use_resource: If False, creates a regular Variable. If True, creates
        instead an experimental ResourceVariable which has well-defined
        semantics. Defaults to False (will later change to True). When eager
        execution is enabled this argument is always forced to be true.
      custom_getter: Callable that takes as a first argument the true getter,
        and allows overwriting the internal get_variable method. The signature
        of `custom_getter` should match that of this method,
        but the most future-proof version will allow for changes: `def
        custom_getter(getter, *args, **kwargs)`. Direct access to
        all `get_variable` parameters is also allowed: `def
        custom_getter(getter, name, *args, **kwargs)`. A simple identity
        custom getter that simply creates variables with modified names is:
        ```python
        def custom_getter(getter, name, *args, **kwargs): return getter(name +
        '_suffix', *args, **kwargs) ```
      constraint: An optional projection function to be applied to the variable
        after being updated by an `Optimizer` (e.g. used to implement norm
        constraints or value constraints for layer weights). The function must
        take as input the unprojected Tensor representing the value of the
        variable and return the Tensor for the projected value (which must have
        the same shape). Constraints are not safe to use when doing asynchronous
        distributed training.
      synchronization: Indicates when a distributed a variable will be
        aggregated. Accepted values are constants defined in the class
        `tf.VariableSynchronization`. By default the synchronization is set to
        `AUTO` and the current `DistributionStrategy` chooses when to
        synchronize.
      aggregation: Indicates how a distributed variable will be aggregated.
        Accepted values are constants defined in the class
        `tf.VariableAggregation`.

    Returns:
      The created or existing `Variable` (or `PartitionedVariable`, if a
      partitioner was used).

    Raises:
      ValueError: when creating a new variable and shape is not declared,
        when reusing a variable and specifying a conflicting shape,
        or when violating reuse during variable creation.
      RuntimeError: when eager execution is enabled and not called from an
        EagerVariableStore.
    """
    if custom_getter is not None and not callable(custom_getter):
        raise ValueError("Passed a custom_getter which is not callable: %s" %
                         custom_getter)
    with ops.init_scope():
        if context.executing_eagerly():
            # Variable creation and initialization takes place in `init_scope`s;
            # as such, if an `init_scope` lifts us into the eager context, then we
            # need to use `ResourceVariable`s.
            use_resource = True
    # Note that it's fine to reuse eager variables whose initialization was
    # lifted from a function-building graph into the eager context (that's why
    # the following clause is not wrapped in an `init_scope`); lifted variables
    # are tracked by the graph's `VariableStore`.
    if context.executing_eagerly():
        if not self._store_eager_variables and reuse:
            raise RuntimeError(
                "When eager execution is enabled variable reuse is only supported"
                " when an EagerVariableStore is active. See the documentation on"
                " EagerVariableStore for example usage.")
        if self._store_eager_variables:
            reuse = AUTO_REUSE
    # If a *_ref type is passed in an error would be triggered further down the
    # stack. We prevent this using base_dtype to get a non-ref version of the
    # type, before doing anything else. When _ref types are removed in favor of
    # resources, this line can be removed.
    try:
        dtype = dtype.base_dtype
    except AttributeError:
        # .base_dtype not existing means that we will try and use the raw dtype
        # which was passed in - this might be a NumPy type which is valid.
        pass
    # This is the main logic of get_variable. However, custom_getter
    # may override this logic. So we save it as a callable and pass
    # it to custom_getter.
    # Note: the parameters of _true_getter, and their documentation, match
    # *exactly* item-for-item with the docstring of this method.
    def _true_getter(  # pylint: disable=missing-docstring
            name,
            shape=None,
            dtype=dtypes.float32,
            initializer=None,
            regularizer=None,
            reuse=None,
            trainable=None,
            collections=None,
            caching_device=None,
            partitioner=None,
            validate_shape=True,
            use_resource=None,
            constraint=None,
            synchronization=VariableSynchronization.AUTO,
            aggregation=VariableAggregation.NONE):
        # A scalar has an empty (but not None) shape sequence; partitioning
        # does not apply to scalars.
        # NOTE(review): collections_lib.Sequence relies on the pre-3.10
        # `collections` alias of collections.abc.Sequence -- breaks on
        # Python 3.10+.
        is_scalar = (
            shape is not None and isinstance(shape, collections_lib.Sequence) and
            not shape)
        # Partitioned variable case
        if partitioner is not None and not is_scalar:
            if not callable(partitioner):
                raise ValueError("Partitioner must be callable, but received: %s" %
                                 partitioner)
            with ops.name_scope(None):
                return self._get_partitioned_variable(
                    name=name,
                    shape=shape,
                    dtype=dtype,
                    initializer=initializer,
                    regularizer=regularizer,
                    reuse=reuse,
                    trainable=trainable,
                    collections=collections,
                    caching_device=caching_device,
                    partitioner=partitioner,
                    validate_shape=validate_shape,
                    use_resource=use_resource,
                    constraint=constraint,
                    synchronization=synchronization,
                    aggregation=aggregation)
        # Special case for partitioned variable to allow reuse without having to
        # specify partitioner.
        if (reuse is True and partitioner is None
                and name in self._partitioned_vars):
            return self._get_partitioned_variable(
                name=name,
                shape=shape,
                dtype=dtype,
                initializer=initializer,
                regularizer=regularizer,
                reuse=reuse,
                trainable=trainable,
                collections=collections,
                caching_device=caching_device,
                partitioner=None,
                validate_shape=validate_shape,
                use_resource=use_resource,
                constraint=constraint,
                synchronization=synchronization,
                aggregation=aggregation)
        # Single variable case
        if "%s/part_0" % name in self._vars:
            raise ValueError(
                "No partitioner was provided, but a partitioned version of the "
                "variable was found: %s/part_0. Perhaps a variable of the same "
                "name was already created with partitioning?" % name)
        return self._get_single_variable(
            name=name,
            shape=shape,
            dtype=dtype,
            initializer=initializer,
            regularizer=regularizer,
            reuse=reuse,
            trainable=trainable,
            collections=collections,
            caching_device=caching_device,
            validate_shape=validate_shape,
            use_resource=use_resource,
            constraint=constraint,
            synchronization=synchronization,
            aggregation=aggregation)

    # Resolve the `trainable` default from `synchronization` (ON_READ implies
    # not trainable -- see the docstring) and validate the combination.
    synchronization, aggregation, trainable = (
        variables.validate_synchronization_aggregation_trainable(
            synchronization, aggregation, trainable, name))

    if custom_getter is not None:
        # Handle backwards compatibility with getter arguments that were added
        # to the API after users started writing custom getters.
        custom_getter_kwargs = {
            "getter": _true_getter,
            "name": name,
            "shape": shape,
            "dtype": dtype,
            "initializer": initializer,
            "regularizer": regularizer,
            "reuse": reuse,
            "trainable": trainable,
            "collections": collections,
            "caching_device": caching_device,
            "partitioner": partitioner,
            "validate_shape": validate_shape,
            "use_resource": use_resource,
            "synchronization": synchronization,
            "aggregation": aggregation,
        }
        # `fn_args` and `has_kwargs` can handle functions, `functools.partial`,
        # `lambda`.
        if ("constraint" in function_utils.fn_args(custom_getter) or
                function_utils.has_kwargs(custom_getter)):
            custom_getter_kwargs["constraint"] = constraint
        return custom_getter(**custom_getter_kwargs)
    else:
        return _true_getter(
            name,
            shape=shape,
            dtype=dtype,
            initializer=initializer,
            regularizer=regularizer,
            reuse=reuse,
            trainable=trainable,
            collections=collections,
            caching_device=caching_device,
            partitioner=partitioner,
            validate_shape=validate_shape,
            use_resource=use_resource,
            constraint=constraint,
            synchronization=synchronization,
            aggregation=aggregation)
def _get_partitioned_variable(self,
                              name,
                              partitioner,
                              shape=None,
                              dtype=dtypes.float32,
                              initializer=None,
                              regularizer=None,
                              reuse=None,
                              trainable=None,
                              collections=None,
                              caching_device=None,
                              validate_shape=True,
                              use_resource=None,
                              constraint=None,
                              synchronization=VariableSynchronization.AUTO,
                              aggregation=VariableAggregation.NONE):
    """Gets or creates a sharded variable list with these parameters.

    The `partitioner` must be a callable that accepts a fully defined
    `TensorShape` and returns a sequence of integers (the `partitions`).
    These integers describe how to partition the given sharded `Variable`
    along the given dimension. That is, `partitions[1] = 3` means split
    the `Variable` into 3 shards along dimension 1. Currently, sharding along
    only one axis is supported.

    If the list of variables with the given name (prefix) is already stored,
    we return the stored variables. Otherwise, we create a new one.

    Set `reuse` to `True` when you only want to reuse existing Variables.
    Set `reuse` to `False` when you only want to create new Variables.
    Set `reuse` to None (the default) or tf.compat.v1.AUTO_REUSE when you want
    variables to be created if they don't exist or returned if they do.

    If initializer is `None` (the default), the default initializer passed in
    the constructor is used. If that one is `None` too, we use a new
    `glorot_uniform_initializer`. If initializer is a Tensor, we use
    it as a value and derive the shape from the initializer.

    If the initializer is a callable, then it will be called for each
    shard. Otherwise the initializer should match the shape of the entire
    sharded Variable, and it will be sliced accordingly for each shard.

    Some useful partitioners are available. See, e.g.,
    `variable_axis_size_partitioner` and `min_max_variable_partitioner`.

    Args:
      name: the name of the new or existing sharded variable.
      partitioner: Optional callable that accepts a fully defined `TensorShape`
        and `dtype` of the Variable to be created, and returns a list of
        partitions for each axis (currently only one axis can be partitioned).
      shape: shape of the new or existing sharded variable.
      dtype: type of the new or existing sharded variable (defaults to
        `DT_FLOAT`).
      initializer: initializer for the sharded variable.
      regularizer: a (Tensor -> Tensor or None) function; the result of applying
        it on a newly created variable will be added to the collection
        GraphKeys.REGULARIZATION_LOSSES and can be used for regularization.
      reuse: a Boolean, None, or tf.AUTO_REUSE. Controls reuse or creation of
        variables.
      trainable: If `True` also add the variable to the graph collection
        `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
      collections: List of graph collections keys to add the Variable to.
        Defaults to `[GraphKeys.GLOBAL_VARIABLES]` (see `tf.Variable`).
      caching_device: Optional device string or function describing where the
        Variable should be cached for reading. Defaults to the Variable's
        device. If not `None`, caches on another device. Typical use is to
        cache on the device where the Ops using the Variable reside, to
        deduplicate copying through `Switch` and other conditional statements.
      validate_shape: If False, allows the variable to be initialized with a
        value of unknown shape. If True, the default, the shape of initial_value
        must be known.
      use_resource: If False, creates a regular Variable. If True, creates an
        experimental ResourceVariable which has well-defined semantics. Defaults
        to False (will later change to True).
      constraint: An optional projection function to be applied to the variable
        after being updated by an `Optimizer` (e.g. used to implement norm
        constraints or value constraints for layer weights). The function must
        take as input the unprojected Tensor representing the value of the
        variable and return the Tensor for the projected value (which must have
        the same shape). Constraints are not safe to use when doing asynchronous
        distributed training.
      synchronization: Indicates when a distributed a variable will be
        aggregated. Accepted values are constants defined in the class
        `tf.VariableSynchronization`. By default the synchronization is set to
        `AUTO` and the current `DistributionStrategy` chooses when to
        synchronize.
      aggregation: Indicates how a distributed variable will be aggregated.
        Accepted values are constants defined in the class
        `tf.VariableAggregation`.

    Returns:
      A `PartitionedVariable` object.

    Raises:
      ValueError: when creating a new variable and shape is not declared,
        when reusing a variable and specifying a conflicting shape,
        when violating reuse during variable creation, or if an existing
        sharded variable exists for the given name but with different sharding.
    """
    # A Tensor initializer carries its own shape; it is merged into `shape`
    # below and sliced per shard later.
    initializing_from_value = initializer is not None and isinstance(
        initializer, ops.Tensor)
    if name in self._vars:
        raise ValueError(
            "A partitioner was provided, but an unpartitioned version of the "
            "variable was found: %s. Perhaps a variable of the same name was "
            "already created without partitioning?" % name)
    shape = tensor_shape.as_shape(shape)
    if initializing_from_value:
        shape = shape.merge_with(initializer.get_shape())
    # Compute the sharding only when we might be creating variables, or when
    # the caller explicitly supplied a partitioner.
    partitions = None
    if not reuse or partitioner:
        partitions = _call_partitioner(partitioner, shape, dtype)
    # Reuse path: validate that shape/dtype/partitioning all match the
    # stored PartitionedVariable before returning it.
    if name in self._partitioned_vars:
        if reuse is False:
            raise ValueError(
                "Partitioned variable with name %s already exists. Did you mean to "
                "set reuse=True or reuse=tf.AUTO_REUSE in VarScope?" % name)
        existing_var = self._partitioned_vars[name]
        if not shape.is_compatible_with(existing_var.get_shape()):
            raise ValueError(
                "Trying to reuse partitioned variable %s, but specified shape %s "
                "and found shape %s." % (name, shape, existing_var.get_shape()))
        if not dtype.is_compatible_with(existing_var.dtype):
            raise ValueError(
                "Trying to reuse partitioned variable %s, but specified dtype %s "
                "and found dtype %s." % (name, dtype.name, existing_var.dtype.name))
        # pylint: disable=protected-access
        if (partitions is not None and
                existing_var._get_partitions() != partitions):
            raise ValueError(
                "Trying to reuse partitioned variable %s, but specified partitions "
                "%s and found partitions %s." %
                (name, partitions, existing_var._get_partitions()))
        # pylint: enable=protected-access
        return existing_var

    if reuse is True:
        raise ValueError("PartitionedVariable %s does not exist, or was not "
                         "created with tf.get_variable(). Did you mean to set "
                         "reuse=False or reuse=tf.AUTO_REUSE in VarScope?" % name)

    slice_dim, num_slices = _get_slice_dim_and_num_slices(partitions)

    # Guard against the partitioner disagreeing with shards already present
    # in the single-variable store.
    if "%s/part_0" % name in self._vars:
        if "%s/part_%d" % (name, num_slices - 1) not in self._vars:
            raise ValueError(
                "Partitioner returned a different partitioning than what was "
                "already found. Partitioner returned %d shards, and shard "
                "%s/part_0 was found, but %s/part_%d was not." %
                (num_slices, name, name, num_slices - 1))
        if "%s/part_%d" % (name, num_slices) in self._vars:
            raise ValueError(
                "Partitioner returned a different partitioning than what was "
                "already found. Partitioner returned %d shards, and shard "
                "%s/part_0 was found, but so was the extra shard %s/part_%d." %
                (num_slices, name, name, num_slices))

    # Create each shard via _get_single_variable; Tensor/constant
    # initializers are sliced to the shard's sub-range, callables are
    # invoked per shard with the shard's _PartitionInfo.
    vs = []
    for i, (var_offset, var_shape) in enumerate(
            _iter_slices(shape.as_list(), num_slices, slice_dim)):
        partition_info = _PartitionInfo(
            full_shape=shape.as_list(), var_offset=var_offset)
        var_full_name = "%s/part_%d" % (name, i)
        with ops.name_scope(var_full_name + "/PartitionedInitializer"):
            # Create the tensor to initialize the variable with default value.
            if initializer is None:
                init, initializing_from_value = self._get_default_initializer(
                    name=name, shape=shape, dtype=dtype)
                if initializing_from_value:
                    init_shape = None
                else:
                    init_shape = var_shape
            elif callable(initializer):
                init = initializer
                init_shape = var_shape
            elif isinstance(initializer, ops.Tensor):
                init = array_ops.slice(initializer, var_offset, var_shape)
                # Use the dtype of the given tensor.
                dtype = init.dtype.base_dtype
                init_shape = None
            else:
                init = ops.convert_to_tensor(initializer, dtype=dtype)
                init = array_ops.slice(init, var_offset, var_shape)
                init_shape = None
        with ops.name_scope(None):
            var = self._get_single_variable(
                name=var_full_name,
                shape=init_shape,
                dtype=dtype,
                initializer=init,
                partition_info=partition_info,
                regularizer=regularizer,
                reuse=reuse,
                trainable=trainable,
                collections=collections,
                caching_device=caching_device,
                validate_shape=validate_shape,
                use_resource=use_resource,
                constraint=constraint,
                synchronization=synchronization,
                aggregation=aggregation)
        # pylint: disable=protected-access
        var._set_save_slice_info(
            variables.Variable.SaveSliceInfo(name, shape.as_list(), var_offset,
                                             var_shape))
        vs.append(var)
        # pylint: enable=protected-access
    partitioned_var = variables.PartitionedVariable(
        name=name,
        shape=shape,
        dtype=dtype,
        variable_list=vs,
        partitions=partitions)
    if not context.executing_eagerly() or self._store_eager_variables:
        self._partitioned_vars[name] = partitioned_var
    return partitioned_var
  def _get_single_variable(self,
                           name,
                           shape=None,
                           dtype=dtypes.float32,
                           initializer=None,
                           regularizer=None,
                           partition_info=None,
                           reuse=None,
                           trainable=None,
                           collections=None,
                           caching_device=None,
                           validate_shape=True,
                           use_resource=None,
                           constraint=None,
                           synchronization=VariableSynchronization.AUTO,
                           aggregation=VariableAggregation.NONE):
    """Get or create a single Variable (e.g. a shard or entire variable).

    See the documentation of get_variable above (ignore partitioning components)
    for details.

    Args:
      name: see get_variable.
      shape: see get_variable.
      dtype: see get_variable.
      initializer: see get_variable.
      regularizer: see get_variable.
      partition_info: _PartitionInfo object.
      reuse: see get_variable.
      trainable: see get_variable.
      collections: see get_variable.
      caching_device: see get_variable.
      validate_shape: see get_variable.
      use_resource: see get_variable.
      constraint: see get_variable.
      synchronization: see get_variable.
      aggregation: see get_variable.

    Returns:
      A Variable. See documentation of get_variable above.

    Raises:
      ValueError: See documentation of get_variable above.
    """
    # Set to true if initializer is a constant.
    initializing_from_value = False
    if initializer is not None and not callable(initializer):
      initializing_from_value = True
    if shape is not None and initializing_from_value:
      raise ValueError("If initializer is a constant, do not specify shape.")

    dtype = dtypes.as_dtype(dtype)
    shape = tensor_shape.as_shape(shape)

    if name in self._vars:
      # Here we handle the case when returning an existing variable.
      if reuse is False:
        var = self._vars[name]
        err_msg = ("Variable %s already exists, disallowed."
                   " Did you mean to set reuse=True or "
                   "reuse=tf.AUTO_REUSE in VarScope?" % name)
        # ResourceVariables don't have an op associated with so no traceback
        if isinstance(var, resource_variable_ops.ResourceVariable):
          raise ValueError(err_msg)
        tb = var.op.traceback[::-1]
        # Throw away internal tf entries and only take a few lines. In some
        # cases the traceback can be longer (e.g. if someone uses factory
        # functions to create variables) so we take more than needed in the
        # default case.
        tb = [x for x in tb if "tensorflow/python" not in x[0]][:5]
        raise ValueError("%s Originally defined at:\n\n%s" %
                         (err_msg, "".join(traceback.format_list(tb))))
      found_var = self._vars[name]
      # Sharing is allowed only when the requested shape/dtype are compatible
      # with what was originally stored under this name.
      if not shape.is_compatible_with(found_var.get_shape()):
        raise ValueError("Trying to share variable %s, but specified shape %s"
                         " and found shape %s." %
                         (name, shape, found_var.get_shape()))
      if not dtype.is_compatible_with(found_var.dtype):
        dtype_str = dtype.name
        found_type_str = found_var.dtype.name
        raise ValueError("Trying to share variable %s, but specified dtype %s"
                         " and found dtype %s." %
                         (name, dtype_str, found_type_str))
      return found_var

    # The code below handles only the case of creating a new variable.
    if reuse is True:
      raise ValueError("Variable %s does not exist, or was not created with "
                       "tf.get_variable(). Did you mean to set "
                       "reuse=tf.AUTO_REUSE in VarScope?" % name)

    # Create the tensor to initialize the variable with default value.
    if initializer is None:
      initializer, initializing_from_value = self._get_default_initializer(
          name=name, shape=shape, dtype=dtype)
    # Enter an init scope when creating the initializer.
    with ops.init_scope():
      if initializing_from_value:
        init_val = initializer
        variable_dtype = None
      else:
        # Instantiate initializer if provided initializer is a type object.
        if tf_inspect.isclass(initializer):
          initializer = initializer()
        if shape is not None and shape.is_fully_defined():
          # Defer calling the initializer until the variable actually needs
          # its initial value; the lambda captures shape/dtype/partition info.
          init_val = lambda: initializer(  # pylint: disable=g-long-lambda
              shape.as_list(),
              dtype=dtype,
              partition_info=partition_info)
          variable_dtype = dtype.base_dtype
        elif len(tf_inspect.getargspec(initializer).args) == len(
            tf_inspect.getargspec(initializer).defaults or []):
          # All of the initializer's args have defaults, so it can be called
          # with no arguments; pass it through unchanged.
          init_val = initializer
          variable_dtype = None
        else:
          raise ValueError("The initializer passed is not valid. It should "
                           "be a callable with no arguments and the "
                           "shape should not be provided or an instance of "
                           "`tf.keras.initializers.*' and `shape` should be "
                           "fully defined.")

    # Create the variable.
    if use_resource is None:
      # Set the default value if unspecified.
      use_resource = _DEFAULT_USE_RESOURCE
    v = variables.VariableV1(
        initial_value=init_val,
        name=name,
        trainable=trainable,
        collections=collections,
        caching_device=caching_device,
        dtype=variable_dtype,
        validate_shape=validate_shape,
        constraint=constraint,
        use_resource=use_resource,
        synchronization=synchronization,
        aggregation=aggregation)
    if context.executing_eagerly() and self._store_eager_variables:
      # When eagerly storing variables, mirror the graph-mode collection
      # behavior explicitly (VariableV1 does not do this in eager mode).
      if collections:
        ops.add_to_collections(collections, v)
      else:
        ops.add_to_collection(ops.GraphKeys.GLOBAL_VARIABLES, v)
      if trainable:
        ops.add_to_collection(ops.GraphKeys.TRAINABLE_VARIABLES, v)

    if not context.executing_eagerly() or self._store_eager_variables:
      # In eager mode we do not want to keep default references to Variable
      # objects as this will prevent their memory from being released.
      self._vars[name] = v
    logging.vlog(1, "Created variable %s with shape %s and init %s", v.name,
                 format(shape), initializer)

    # Run the regularizer if requested and save the resulting loss.
    if regularizer:
      with ops.colocate_with(v):
        with ops.name_scope(name + "/Regularizer/"):
          with ops.init_scope():
            loss = regularizer(v)
          if loss is not None:
            if context.executing_eagerly():
              v_name = "v_%s" % type(v)
              loss_name = "loss_%s" % type(loss)
            else:
              v_name = v.name
              loss_name = loss.name
            logging.vlog(
                1, "Applied regularizer to %s and added the result %s "
                "to REGULARIZATION_LOSSES.", v_name, loss_name)
            ops.add_to_collection(ops.GraphKeys.REGULARIZATION_LOSSES, loss)
    return v
# Initialize variable when no initializer provided
def _get_default_initializer(self, name, shape=None, dtype=dtypes.float32):
"""Provide a default initializer and a corresponding value.
Args:
name: see get_variable.
shape: see get_variable.
dtype: see get_variable.
Returns:
initializer and initializing_from_value. See get_variable above.
Raises:
ValueError: When giving unsupported dtype.
"""
del shape
# If dtype is DT_FLOAT, provide a uniform unit scaling initializer
if dtype.is_floating:
initializer = init_ops.glorot_uniform_initializer()
initializing_from_value = False
# If dtype is DT_INT/DT_UINT, provide a default value `zero`
# If dtype is DT_BOOL, provide a default value `FALSE`
elif (dtype.is_integer or dtype.is_unsigned or dtype.is_bool or
dtype == dtypes.string):
initializer = init_ops.zeros_initializer()
initializing_from_value = False
# NOTES:Do we need to support for handling DT_STRING and DT_COMPLEX here?
else:
raise ValueError("An initializer for variable %s of %s is required" %
(name, dtype.base_dtype))
return initializer, initializing_from_value
# To stop regularization, use this regularizer
@tf_export(v1=["no_regularizer"])
def no_regularizer(_):
  """Use this function to prevent regularization of variables."""
  # Returning None signals "no regularization loss": the variable store only
  # adds a loss to REGULARIZATION_LOSSES when the regularizer's result is
  # not None (see `_get_single_variable`).
  return None
# TODO(alive): support caching devices and partitioned variables in Eager mode.
@tf_export(v1=["VariableScope"])
class VariableScope(object):
  """Variable scope object to carry defaults to provide to `get_variable`.

  Many of the arguments we need for `get_variable` in a variable store are most
  easily handled with a context. This object is used for the defaults.

  Attributes:
    name: name of the current scope, used as prefix in get_variable.
    initializer: default initializer passed to get_variable.
    regularizer: default regularizer passed to get_variable.
    reuse: Boolean, None, or tf.compat.v1.AUTO_REUSE, setting the reuse in
      get_variable. When eager execution is enabled this argument is always
      forced to be False.
    caching_device: string, callable, or None: the caching device passed to
      get_variable.
    partitioner: callable or `None`: the partitioner passed to `get_variable`.
    custom_getter: default custom getter passed to get_variable.
    name_scope: The name passed to `tf.name_scope`.
    dtype: default type passed to get_variable (defaults to DT_FLOAT).
    use_resource: if False, create a normal Variable; if True create an
      experimental ResourceVariable with well-defined semantics. Defaults to
      False (will later change to True). When eager execution is enabled this
      argument is always forced to be True.
    constraint: An optional projection function to be applied to the variable
      after being updated by an `Optimizer` (e.g. used to implement norm
      constraints or value constraints for layer weights). The function must
      take as input the unprojected Tensor representing the value of the
      variable and return the Tensor for the projected value (which must have
      the same shape). Constraints are not safe to use when doing asynchronous
      distributed training.
  """

  def __init__(self,
               reuse,
               name="",
               initializer=None,
               regularizer=None,
               caching_device=None,
               partitioner=None,
               custom_getter=None,
               name_scope="",
               dtype=dtypes.float32,
               use_resource=None,
               constraint=None):
    """Creates a new VariableScope with the given properties."""
    self._name = name
    self._initializer = initializer
    self._regularizer = regularizer
    self._reuse = reuse
    self._caching_device = caching_device
    self._partitioner = partitioner
    self._custom_getter = custom_getter
    self._name_scope = name_scope
    self._dtype = dtype
    self._use_resource = use_resource
    self._constraint = constraint
    if context.executing_eagerly():
      # In eager mode caching devices are unsupported, and variables are
      # always AUTO_REUSE resource variables regardless of the arguments.
      if self._caching_device is not None:
        raise NotImplementedError("Caching devices is not yet supported "
                                  "when eager execution is enabled.")
      self._reuse = AUTO_REUSE
      self._use_resource = True

  # Read-only views of the scope's defaults; mutation goes through the
  # set_* methods below so eager-mode restrictions can be enforced.
  @property
  def name(self):
    return self._name

  @property
  def original_name_scope(self):
    return self._name_scope

  @property
  def reuse(self):
    return self._reuse

  @property
  def initializer(self):
    return self._initializer

  @property
  def dtype(self):
    return self._dtype

  @property
  def use_resource(self):
    return self._use_resource

  @property
  def regularizer(self):
    return self._regularizer

  @property
  def caching_device(self):
    return self._caching_device

  @property
  def partitioner(self):
    return self._partitioner

  @property
  def custom_getter(self):
    return self._custom_getter

  @property
  def constraint(self):
    return self._constraint

  def reuse_variables(self):
    """Reuse variables in this scope."""
    self._reuse = True

  def set_initializer(self, initializer):
    """Set initializer for this scope."""
    self._initializer = initializer

  def set_dtype(self, dtype):
    """Set data type for this scope."""
    self._dtype = dtype

  def set_use_resource(self, use_resource):
    """Sets whether to use ResourceVariables for this scope."""
    if context.executing_eagerly() and not use_resource:
      raise ValueError("When eager execution is enabled, "
                       "use_resource cannot be set to false.")
    self._use_resource = use_resource

  def set_regularizer(self, regularizer):
    """Set regularizer for this scope."""
    self._regularizer = regularizer

  def set_caching_device(self, caching_device):
    """Set caching_device for this scope."""
    if context.executing_eagerly():
      raise NotImplementedError("Caching devices are not yet supported "
                                "when eager execution is enabled.")
    self._caching_device = caching_device

  def set_partitioner(self, partitioner):
    """Set partitioner for this scope."""
    self._partitioner = partitioner

  def set_custom_getter(self, custom_getter):
    """Set custom getter for this scope."""
    self._custom_getter = custom_getter

  def get_collection(self, name):
    """Get this scope's variables."""
    scope = self._name + "/" if self._name else ""
    return ops.get_collection(name, scope)

  def trainable_variables(self):
    """Get this scope's trainable variables."""
    return self.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)

  def global_variables(self):
    """Get this scope's global variables."""
    return self.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)

  def local_variables(self):
    """Get this scope's local variables."""
    return self.get_collection(ops.GraphKeys.LOCAL_VARIABLES)

  def get_variable(self,
                   var_store,
                   name,
                   shape=None,
                   dtype=None,
                   initializer=None,
                   regularizer=None,
                   reuse=None,
                   trainable=None,
                   collections=None,
                   caching_device=None,
                   partitioner=None,
                   validate_shape=True,
                   use_resource=None,
                   custom_getter=None,
                   constraint=None,
                   synchronization=VariableSynchronization.AUTO,
                   aggregation=VariableAggregation.NONE):
    """Gets an existing variable with this name or create a new one."""
    # Fill in unspecified arguments from this scope's defaults.
    if regularizer is None:
      regularizer = self._regularizer
    if caching_device is None:
      caching_device = self._caching_device
    if partitioner is None:
      partitioner = self._partitioner
    if custom_getter is None:
      custom_getter = self._custom_getter
    if context.executing_eagerly():
      # Eager variables are always freshly created resource variables.
      reuse = False
      use_resource = True
    else:
      if reuse is None:
        reuse = self._reuse
      if use_resource is None:
        use_resource = self._use_resource

    full_name = self.name + "/" + name if self.name else name
    # Variable names only depend on variable_scope (full_name here),
    # not name_scope, so we reset it below for the time of variable creation.
    with ops.name_scope(None):
      # Check that `initializer` dtype and `dtype` are consistent before
      # replacing them with defaults.
      if (dtype is not None and initializer is not None and
          not callable(initializer)):
        init_dtype = ops.convert_to_tensor(initializer).dtype.base_dtype
        if init_dtype != dtype:
          raise ValueError("Initializer type '%s' and explicit dtype '%s' "
                           "don't match." % (init_dtype, dtype))
      if initializer is None:
        initializer = self._initializer
      if constraint is None:
        constraint = self._constraint
      if dtype is None:
        dtype = self._dtype
      return var_store.get_variable(
          full_name,
          shape=shape,
          dtype=dtype,
          initializer=initializer,
          regularizer=regularizer,
          reuse=reuse,
          trainable=trainable,
          collections=collections,
          caching_device=caching_device,
          partitioner=partitioner,
          validate_shape=validate_shape,
          use_resource=use_resource,
          custom_getter=custom_getter,
          constraint=constraint,
          synchronization=synchronization,
          aggregation=aggregation)

  def _get_partitioned_variable(self,
                                var_store,
                                name,
                                shape=None,
                                dtype=None,
                                initializer=None,
                                regularizer=None,
                                trainable=None,
                                collections=None,
                                caching_device=None,
                                partitioner=None,
                                validate_shape=True,
                                use_resource=None,
                                constraint=None,
                                synchronization=VariableSynchronization.AUTO,
                                aggregation=VariableAggregation.NONE):
    """Gets an existing variable with this name or create a new one."""
    # Fill in unspecified arguments from this scope's defaults.
    if initializer is None:
      initializer = self._initializer
    if regularizer is None:
      regularizer = self._regularizer
    if constraint is None:
      constraint = self._constraint
    if caching_device is None:
      caching_device = self._caching_device
    if partitioner is None:
      partitioner = self._partitioner
    if dtype is None:
      dtype = self._dtype
    if use_resource is None:
      use_resource = self._use_resource

    if self._custom_getter is not None:
      raise ValueError(
          "Private access to _get_partitioned_variable is not allowed when "
          "a custom getter is set. Current custom getter: %s. "
          "It is likely that you're using create_partitioned_variables. "
          "If so, consider instead using get_variable with a non-empty "
          "partitioner parameter instead." % self._custom_getter)

    if partitioner is None:
      raise ValueError("No partitioner was specified")

    # This allows the variable scope name to be used as the variable name if
    # this function is invoked with an empty name arg, for backward
    # compatibility with create_partitioned_variables().
    full_name_list = []
    if self.name:
      full_name_list.append(self.name)
    if name:
      full_name_list.append(name)
    full_name = "/".join(full_name_list)

    # Variable names only depend on variable_scope (full_name here),
    # not name_scope, so we reset it below for the time of variable creation.
    with ops.name_scope(None):
      # pylint: disable=protected-access
      return var_store._get_partitioned_variable(
          full_name,
          shape=shape,
          dtype=dtype,
          initializer=initializer,
          regularizer=regularizer,
          reuse=self.reuse,
          trainable=trainable,
          collections=collections,
          caching_device=caching_device,
          partitioner=partitioner,
          validate_shape=validate_shape,
          use_resource=use_resource,
          constraint=constraint,
          synchronization=synchronization,
          aggregation=aggregation)
      # pylint: enable=protected-access
# Graph-collection keys under which the default `_VariableStore` and the
# per-thread `_VariableScopeStore` are stashed. Tuples are used as keys,
# presumably so they cannot clash with ordinary string collection names.
_VARSTORE_KEY = ("__variable_store",)
_VARSCOPESTORE_KEY = ("__varscope",)
class _VariableScopeStore(threading.local):
"""A thread local store for the current variable scope and scope counts."""
def __init__(self):
super(_VariableScopeStore, self).__init__()
self.current_scope = VariableScope(False)
self.variable_scopes_count = {}
def open_variable_scope(self, scope_name):
if scope_name in self.variable_scopes_count:
self.variable_scopes_count[scope_name] += 1
else:
self.variable_scopes_count[scope_name] = 1
def close_variable_subscopes(self, scope_name):
for k in list(self.variable_scopes_count.keys()):
if scope_name is None or k.startswith(scope_name + "/"):
self.variable_scopes_count[k] = 0
def variable_scope_count(self, scope_name):
return self.variable_scopes_count.get(scope_name, 0)
def get_variable_scope_store():
  """Returns the variable scope store for current thread."""
  existing = ops.get_collection(_VARSCOPESTORE_KEY)
  if existing:
    return existing[0]
  # First access on this graph: create the store and register it.
  new_store = _VariableScopeStore()
  ops.add_to_collection(_VARSCOPESTORE_KEY, new_store)
  return new_store
@tf_export(v1=["get_variable_scope"])
def get_variable_scope():
  """Returns the current variable scope."""
  scope_store = get_variable_scope_store()
  return scope_store.current_scope
def _get_default_variable_store():
  """Returns the default `_VariableStore`, creating and registering it once."""
  existing = ops.get_collection(_VARSTORE_KEY)
  if not existing:
    new_store = _VariableStore()
    ops.add_to_collection(_VARSTORE_KEY, new_store)
    return new_store
  return existing[0]
@tf_contextlib.contextmanager
def with_variable_store(store):
  """Context manager that temporarily installs `store` as the default one."""
  collection_ref = ops.get_collection_ref(_VARSTORE_KEY)
  previous = list(collection_ref)
  collection_ref[:] = [store]
  try:
    yield
  finally:
    # Restore whatever was registered before, even if the body raised.
    collection_ref[:] = previous
class EagerVariableStore(object):
  """Wrapper allowing functional layers to be used with eager execution.

  When eager execution is enabled Variables get deleted when they go out of
  scope, and are not stored in global collections by default. A lot of code
  (mostly the functional layers in tf.layers) assumes that variables are kept
  in a global list.

  EagerVariableStore can be used in conjunction with this code to make it
  eager-friendly. For example, to create a dense layer, use:

  ```
  container = tfe.EagerVariableStore()
  for input in dataset_iterator:
    with container.as_default():
      x = tf.compat.v1.layers.dense(input, name="l1")
  print(container.variables)  # Should print the variables used in the layer.
  ```
  """

  def __init__(self, store=None):
    # pylint: disable=protected-access
    if store is None:
      store = _VariableStore()
      store._store_eager_variables = True
    elif not store._store_eager_variables:
      raise ValueError("Cannot construct EagerVariableStore from a "
                       "VariableStore object that does not hold eager "
                       "variables.")
    self._store = store
    # pylint: enable=protected-access

  def as_default(self):
    """Returns a context manager making this the default variable store."""
    return with_variable_store(self._store)

  def variables(self):
    """All variables held by this store, sorted by name."""
    all_vars = list(self._store._vars.values())  # pylint: disable=protected-access
    all_vars.sort(key=lambda v: v.name)
    return all_vars

  def trainable_variables(self):
    """Trainable variables held by this store, sorted by name."""
    # pylint: disable=protected-access
    selected = [v for v in self._store._vars.values() if v.trainable]
    # pylint: enable=protected-access
    return sorted(selected, key=lambda v: v.name)

  def non_trainable_variables(self):
    """Non-trainable variables held by this store, sorted by name."""
    # pylint: disable=protected-access
    selected = [v for v in self._store._vars.values() if not v.trainable]
    # pylint: enable=protected-access
    return sorted(selected, key=lambda v: v.name)

  def copy(self):
    """Copy this variable store and all of its contents.

    Variables contained in this store will be copied over to the new variable
    store, meaning that they can be modified without affecting the variables
    in this store.

    Returns:
      A new EagerVariableStore instance containing copied variables.
    """
    # pylint: disable=protected-access
    copied_store = EagerVariableStore()
    for key, var in iteritems(self._store._vars):
      # Strip device out of variable name: "name:0" -> "name" (names
      # without a ":" pass through unchanged).
      stripped_var_name = var.name.partition(":")[0]
      # Create new variable with same value, name, and "trainable" flag.
      copied_var = resource_variable_ops.ResourceVariable(
          var.read_value(), name=stripped_var_name, trainable=var.trainable)
      copied_store._store._vars[key] = copied_var
    return copied_store
    # pylint: enable=protected-access
# The argument list for get_variable must match arguments to get_local_variable.
# So, if you are updating the arguments, also update arguments to
# get_local_variable below.
@tf_export(v1=["get_variable"])
def get_variable(name,
                 shape=None,
                 dtype=None,
                 initializer=None,
                 regularizer=None,
                 trainable=None,
                 collections=None,
                 caching_device=None,
                 partitioner=None,
                 validate_shape=True,
                 use_resource=None,
                 custom_getter=None,
                 constraint=None,
                 synchronization=VariableSynchronization.AUTO,
                 aggregation=VariableAggregation.NONE):
  # Delegate to the current scope, which applies its defaults and prefixes
  # `name`, against the process-default variable store. (The public docstring
  # is attached below from `get_variable_or_local_docstring`.)
  scope = get_variable_scope()
  store = _get_default_variable_store()
  return scope.get_variable(
      store,
      name,
      shape=shape,
      dtype=dtype,
      initializer=initializer,
      regularizer=regularizer,
      trainable=trainable,
      collections=collections,
      caching_device=caching_device,
      partitioner=partitioner,
      validate_shape=validate_shape,
      use_resource=use_resource,
      custom_getter=custom_getter,
      constraint=constraint,
      synchronization=synchronization,
      aggregation=aggregation)
get_variable_or_local_docstring = ("""%s
%sThis function prefixes the name with the current variable scope
and performs reuse checks. See the
[Variable Scope How To](https://tensorflow.org/guide/variables)
for an extensive description of how reusing works. Here is a basic example:
```python
def foo():
with tf.variable_scope("foo", reuse=tf.AUTO_REUSE):
v = tf.get_variable("v", [1])
return v
v1 = foo() # Creates v.
v2 = foo() # Gets the same, existing v.
assert v1 == v2
```
If initializer is `None` (the default), the default initializer passed in
the variable scope will be used. If that one is `None` too, a
`glorot_uniform_initializer` will be used. The initializer can also be
a Tensor, in which case the variable is initialized to this value and shape.
Similarly, if the regularizer is `None` (the default), the default regularizer
passed in the variable scope will be used (if that is `None` too,
then by default no regularization is performed).
If a partitioner is provided, a `PartitionedVariable` is returned.
Accessing this object as a `Tensor` returns the shards concatenated along
the partition axis.
Some useful partitioners are available. See, e.g.,
`variable_axis_size_partitioner` and `min_max_variable_partitioner`.
Args:
name: The name of the new or existing variable.
shape: Shape of the new or existing variable.
dtype: Type of the new or existing variable (defaults to `DT_FLOAT`).
initializer: Initializer for the variable if one is created. Can either be
an initializer object or a Tensor. If it's a Tensor, its shape must be known
unless validate_shape is False.
regularizer: A (Tensor -> Tensor or None) function; the result of
applying it on a newly created variable will be added to the collection
`tf.GraphKeys.REGULARIZATION_LOSSES` and can be used for regularization.
%scollections: List of graph collections keys to add the Variable to.
Defaults to `[%s]` (see `tf.Variable`).
caching_device: Optional device string or function describing where the
Variable should be cached for reading. Defaults to the Variable's
device. If not `None`, caches on another device. Typical use is to
cache on the device where the Ops using the Variable reside, to
deduplicate copying through `Switch` and other conditional statements.
partitioner: Optional callable that accepts a fully defined `TensorShape`
and `dtype` of the Variable to be created, and returns a list of
partitions for each axis (currently only one axis can be partitioned).
validate_shape: If False, allows the variable to be initialized with a
value of unknown shape. If True, the default, the shape of initial_value
must be known. For this to be used the initializer must be a Tensor and
not an initializer object.
use_resource: If False, creates a regular Variable. If true, creates an
experimental ResourceVariable instead with well-defined semantics.
Defaults to False (will later change to True). When eager execution is
enabled this argument is always forced to be True.
custom_getter: Callable that takes as a first argument the true getter, and
allows overwriting the internal get_variable method.
The signature of `custom_getter` should match that of this method,
but the most future-proof version will allow for changes:
`def custom_getter(getter, *args, **kwargs)`. Direct access to
all `get_variable` parameters is also allowed:
`def custom_getter(getter, name, *args, **kwargs)`. A simple identity
custom getter that simply creates variables with modified names is:
```python
def custom_getter(getter, name, *args, **kwargs):
return getter(name + '_suffix', *args, **kwargs)
```
constraint: An optional projection function to be applied to the variable
after being updated by an `Optimizer` (e.g. used to implement norm
constraints or value constraints for layer weights). The function must
take as input the unprojected Tensor representing the value of the
variable and return the Tensor for the projected value
(which must have the same shape). Constraints are not safe to
use when doing asynchronous distributed training.
synchronization: Indicates when a distributed a variable will be
aggregated. Accepted values are constants defined in the class
`tf.VariableSynchronization`. By default the synchronization is set to
`AUTO` and the current `DistributionStrategy` chooses
when to synchronize.
aggregation: Indicates how a distributed variable will be aggregated.
Accepted values are constants defined in the class
`tf.VariableAggregation`.
Returns:
The created or existing `Variable` (or `PartitionedVariable`, if a
partitioner was used).
Raises:
ValueError: when creating a new variable and shape is not declared,
when violating reuse during variable creation, or when `initializer` dtype
and `dtype` don't match. Reuse is set inside `variable_scope`.
""")
get_variable.__doc__ = get_variable_or_local_docstring % (
"Gets an existing variable with these parameters or create a new one.", "",
"trainable: If `True` also add the variable to the graph collection\n"
" `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).\n ",
"GraphKeys.GLOBAL_VARIABLES")
# The argument list for get_local_variable must match arguments to get_variable.
# So, if you are updating the arguments, also update arguments to get_variable.
@tf_export(v1=["get_local_variable"])
def get_local_variable(  # pylint: disable=missing-docstring
    name,
    shape=None,
    dtype=None,
    initializer=None,
    regularizer=None,
    trainable=False,  # pylint: disable=unused-argument
    collections=None,
    caching_device=None,
    partitioner=None,
    validate_shape=True,
    use_resource=None,
    custom_getter=None,
    constraint=None,
    synchronization=VariableSynchronization.AUTO,
    aggregation=VariableAggregation.NONE):
  # Build a fresh list instead of using `collections += [...]`, which mutated
  # the caller's list in place (a surprising side effect) and raised TypeError
  # for tuple inputs. This also accepts any iterable of collection keys.
  if collections:
    collections = list(collections) + [ops.GraphKeys.LOCAL_VARIABLES]
  else:
    collections = [ops.GraphKeys.LOCAL_VARIABLES]
  # Local variables are never trainable, regardless of the `trainable` arg.
  return get_variable(
      name,
      shape=shape,
      dtype=dtype,
      initializer=initializer,
      regularizer=regularizer,
      trainable=False,
      collections=collections,
      caching_device=caching_device,
      partitioner=partitioner,
      validate_shape=validate_shape,
      use_resource=use_resource,
      synchronization=synchronization,
      aggregation=aggregation,
      custom_getter=custom_getter,
      constraint=constraint)
# Instantiate the shared docstring template for `get_local_variable`; the
# `trainable` slot is empty because local variables are never trainable.
get_local_variable.__doc__ = get_variable_or_local_docstring % (
    "Gets an existing *local* variable or creates a new one.",
    "Behavior is the same as in `get_variable`, except that variables are\n"
    "added to the `LOCAL_VARIABLES` collection and `trainable` is set to\n"
    "`False`.\n", "", "GraphKeys.LOCAL_VARIABLES")
def _get_partitioned_variable(name,
                              shape=None,
                              dtype=None,
                              initializer=None,
                              regularizer=None,
                              trainable=True,
                              collections=None,
                              caching_device=None,
                              partitioner=None,
                              validate_shape=True,
                              use_resource=None,
                              constraint=None,
                              synchronization=VariableSynchronization.AUTO,
                              aggregation=VariableAggregation.NONE):
  """Gets or creates a sharded variable list with these parameters.

  The `partitioner` must be a callable that accepts a fully defined
  `TensorShape` and returns a sequence of integers (the `partitions`).
  These integers describe how to partition the given sharded `Variable`
  along the given dimension.  That is, `partitions[1] = 3` means split
  the `Variable` into 3 shards along dimension 1.  Currently, sharding along
  only one axis is supported.

  If the list of variables with the given name (prefix) is already stored,
  we return the stored variables. Otherwise, we create a new one.

  If initializer is `None` (the default), the default initializer passed in
  the constructor is used. If that one is `None` too, we use a new
  `glorot_uniform_initializer`. If initializer is a Tensor, we use
  it as a value and derive the shape from the initializer.

  If the initializer is a callable, then it will be called for each
  shard.  Otherwise the initializer should match the shape of the entire
  sharded Variable, and it will be sliced accordingly for each shard.

  Some useful partitioners are available.  See, e.g.,
  `variable_axis_size_partitioner` and `min_max_variable_partitioner`.

  Args:
    name: The name of the new or existing variable.
    shape: Shape of the new or existing variable.
    dtype: Type of the new or existing variable (defaults to `DT_FLOAT`).
    initializer: Initializer for the variable if one is created.
    regularizer: A (Tensor -> Tensor or None) function; the result of applying
      it on a newly created variable will be added to the collection
      GraphKeys.REGULARIZATION_LOSSES and can be used for regularization.
    trainable: If `True` also add the variable to the graph collection
      `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
    collections: List of graph collections keys to add the Variable to. Defaults
      to `[GraphKeys.GLOBAL_VARIABLES]` (see `tf.Variable`).
    caching_device: Optional device string or function describing where the
      Variable should be cached for reading. Defaults to the Variable's device.
      If not `None`, caches on another device. Typical use is to cache on the
      device where the Ops using the Variable reside, to deduplicate copying
      through `Switch` and other conditional statements.
    partitioner: Optional callable that accepts a fully defined `TensorShape`
      and `dtype` of the Variable to be created, and returns a list of
      partitions for each axis (currently only one axis can be partitioned).
    validate_shape: If False, allows the variable to be initialized with a value
      of unknown shape. If True, the default, the shape of initial_value must be
      known.
    use_resource: If False, creates a regular Variable. If True, creates an
      experimental ResourceVariable instead which has well-defined semantics.
      Defaults to False (will later change to True).
    constraint: An optional projection function to be applied to the variable
      after being updated by an `Optimizer` (e.g. used to implement norm
      constraints or value constraints for layer weights). The function must
      take as input the unprojected Tensor representing the value of the
      variable and return the Tensor for the projected value (which must have
      the same shape). Constraints are not safe to use when doing asynchronous
      distributed training.
    synchronization: Indicates when a distributed a variable will be aggregated.
      Accepted values are constants defined in the class
      `tf.VariableSynchronization`. By default the synchronization is set to
      `AUTO` and the current `DistributionStrategy` chooses when to synchronize.
    aggregation: Indicates how a distributed variable will be aggregated.
      Accepted values are constants defined in the class
      `tf.VariableAggregation`.

  Returns:
    A `PartitionedVariable` object: the variable store's
    `_get_partitioned_variable` creates (or retrieves) the shards and wraps
    them in a single `PartitionedVariable`. (The docstring previously claimed
    a `(shards, partitions)` tuple was returned, which did not match the
    implementation.)

  Raises:
    ValueError: when creating a new variable and shape is not declared,
      or when violating reuse during variable creation. Reuse is set inside
      `variable_scope`.
  """
  # pylint: disable=protected-access
  scope = get_variable_scope()
  if scope.custom_getter is not None:
    # Custom getters only see the get_variable interface; the partitioned
    # entry point would silently bypass them, so refuse up front.
    raise ValueError(
        "Private access to _get_partitioned_variable is not allowed when "
        "a custom getter is set.  Current custom getter: %s.  "
        "It is likely that you're using create_partitioned_variables.  "
        "If so, consider instead using get_variable with a non-empty "
        "partitioner parameter instead." % scope.custom_getter)
  return scope._get_partitioned_variable(
      _get_default_variable_store(),
      name,
      shape=shape,
      dtype=dtype,
      initializer=initializer,
      regularizer=regularizer,
      trainable=trainable,
      collections=collections,
      caching_device=caching_device,
      partitioner=partitioner,
      validate_shape=validate_shape,
      use_resource=use_resource,
      constraint=constraint,
      synchronization=synchronization,
      aggregation=aggregation)
  # pylint: enable=protected-access
# Named like a function for compatibility with the previous
# @tf_contextlib.contextmanager definition.
class _pure_variable_scope(object):  # pylint: disable=invalid-name
  """A context for the variable_scope, see `variable_scope` for docs.

  Unlike `variable_scope`, this context manager pushes/pops only the variable
  scope — it does NOT open a name scope or switch the default graph.  It keeps
  the thread-local scope store consistent and restores the previous scope on
  exit.
  """

  def __init__(self,
               name_or_scope,
               reuse=None,
               initializer=None,
               regularizer=None,
               caching_device=None,
               partitioner=None,
               custom_getter=None,
               old_name_scope=None,
               dtype=dtypes.float32,
               use_resource=None,
               constraint=None):
    """Creates a context for the variable_scope, see `variable_scope` for docs.

    Note: this does not create a name scope.

    Args:
      name_or_scope: `string` or `VariableScope`: the scope to open.
      reuse: `True` or None, or tf.compat.v1.AUTO_REUSE; if `None`, we inherit
        the parent scope's reuse flag.
      initializer: default initializer for variables within this scope.
      regularizer: default regularizer for variables within this scope.
      caching_device: default caching device for variables within this scope.
      partitioner: default partitioner for variables within this scope.
      custom_getter: default custom getter for variables within this scope.
      old_name_scope: the original name scope when re-entering a variable scope.
      dtype: type of the variables within this scope (defaults to `DT_FLOAT`).
      use_resource: If False, variables in this scope will be regular Variables.
        If True, experimental ResourceVariables will be created instead, with
        well-defined semantics. Defaults to False (will later change to True).
      constraint: An optional projection function to be applied to the variable
        after being updated by an `Optimizer` (e.g. used to implement norm
        constraints or value constraints for layer weights). The function must
        take as input the unprojected Tensor representing the value of the
        variable and return the Tensor for the projected value (which must have
        the same shape). Constraints are not safe to use when doing asynchronous
        distributed training.
    """
    self._name_or_scope = name_or_scope
    self._reuse = reuse
    self._initializer = initializer
    self._regularizer = regularizer
    self._caching_device = caching_device
    self._partitioner = partitioner
    self._custom_getter = custom_getter
    self._old_name_scope = old_name_scope
    self._dtype = dtype
    self._use_resource = use_resource
    self._constraint = constraint
    self._var_store = _get_default_variable_store()
    self._var_scope_store = get_variable_scope_store()
    # Used by __exit__ to detect improper (non-LIFO) nesting of scopes.
    self._last_variable_scope_object = None
    if isinstance(self._name_or_scope, VariableScope):
      self._new_name = self._name_or_scope.name
      name_scope = self._name_or_scope._name_scope  # pylint: disable=protected-access
      # Handler for the case when we jump to a shared scope. We create a new
      # VariableScope (self._var_scope_object) that contains a copy of the
      # provided shared scope, possibly with changed reuse and initializer, if
      # the user requested this.
      variable_scope_object = VariableScope(
          self._name_or_scope.reuse if not self._reuse else self._reuse,
          name=self._new_name,
          initializer=self._name_or_scope.initializer,
          regularizer=self._name_or_scope.regularizer,
          caching_device=self._name_or_scope.caching_device,
          partitioner=self._name_or_scope.partitioner,
          dtype=self._name_or_scope.dtype,
          custom_getter=self._name_or_scope.custom_getter,
          name_scope=name_scope,
          use_resource=self._name_or_scope.use_resource,
          constraint=self._constraint)
      # Explicitly-provided settings override those copied from the shared
      # scope; unset ones fall through to the copied values above.
      if self._initializer is not None:
        variable_scope_object.set_initializer(self._initializer)
      if self._regularizer is not None:
        variable_scope_object.set_regularizer(self._regularizer)
      if self._caching_device is not None:
        variable_scope_object.set_caching_device(self._caching_device)
      if self._partitioner is not None:
        variable_scope_object.set_partitioner(self._partitioner)
      if self._custom_getter is not None:
        # Chain our getter onto the shared scope's getter so both run.
        variable_scope_object.set_custom_getter(
            _maybe_wrap_custom_getter(self._custom_getter,
                                      self._name_or_scope.custom_getter))
      if self._dtype is not None:
        variable_scope_object.set_dtype(self._dtype)
      if self._use_resource is not None:
        variable_scope_object.set_use_resource(self._use_resource)
      self._cached_variable_scope_object = variable_scope_object

  def __enter__(self):
    """Begins the scope block.

    Returns:
      A VariableScope.

    Raises:
      ValueError: when trying to reuse within a create scope, or create within
        a reuse scope, or if reuse is not `None` or `True`.
      TypeError: when the types of some arguments are not appropriate.
    """
    # Remember the scope to restore on exit.
    self._old = self._var_scope_store.current_scope
    if isinstance(self._name_or_scope, VariableScope):
      # Jumping to a premade scope: use the VariableScope prepared in __init__.
      self._var_scope_store.open_variable_scope(self._new_name)
      self._old_subscopes = copy.copy(
          self._var_scope_store.variable_scopes_count)
      variable_scope_object = self._cached_variable_scope_object
    else:
      # Handler for the case when we just prolong current variable scope.
      # VariableScope with name extended by the provided one, and inherited
      # reuse and initializer (except if the user provided values to set).
      self._new_name = (
          self._old.name + "/" +
          self._name_or_scope if self._old.name else self._name_or_scope)
      self._reuse = (self._reuse or
                     self._old.reuse)  # Re-using is inherited by sub-scopes.
      if self._old_name_scope is None:
        name_scope = self._name_or_scope
      else:
        name_scope = self._old_name_scope
      variable_scope_object = VariableScope(
          self._reuse,
          name=self._new_name,
          initializer=self._old.initializer,
          regularizer=self._old.regularizer,
          caching_device=self._old.caching_device,
          partitioner=self._old.partitioner,
          dtype=self._old.dtype,
          use_resource=self._old.use_resource,
          custom_getter=self._old.custom_getter,
          name_scope=name_scope,
          constraint=self._constraint)
      # Explicit settings override the ones inherited from the parent scope.
      if self._initializer is not None:
        variable_scope_object.set_initializer(self._initializer)
      if self._regularizer is not None:
        variable_scope_object.set_regularizer(self._regularizer)
      if self._caching_device is not None:
        variable_scope_object.set_caching_device(self._caching_device)
      if self._partitioner is not None:
        variable_scope_object.set_partitioner(self._partitioner)
      if self._custom_getter is not None:
        # Chain our getter onto the parent scope's getter so both run.
        variable_scope_object.set_custom_getter(
            _maybe_wrap_custom_getter(self._custom_getter,
                                      self._old.custom_getter))
      if self._dtype is not None:
        variable_scope_object.set_dtype(self._dtype)
      if self._use_resource is not None:
        variable_scope_object.set_use_resource(self._use_resource)
      self._var_scope_store.open_variable_scope(self._new_name)
    self._var_scope_store.current_scope = variable_scope_object
    self._last_variable_scope_object = variable_scope_object
    return variable_scope_object

  def __exit__(self, type_arg, value_arg, traceback_arg):
    # Scopes must be closed in LIFO order; a mismatch here means some inner
    # scope was left open (or this one was closed twice).
    if (self._var_scope_store.current_scope is
        not self._last_variable_scope_object):
      raise RuntimeError("Improper nesting of variable_scope.")
    # If jumping out from a non-prolonged scope, restore counts.
    if isinstance(self._name_or_scope, VariableScope):
      self._var_scope_store.variable_scopes_count = self._old_subscopes
    else:
      self._var_scope_store.close_variable_subscopes(self._new_name)
    self._var_scope_store.current_scope = self._old
def _maybe_wrap_custom_getter(custom_getter, old_getter):
"""Wrap a call to a custom_getter to use the old_getter internally."""
if old_getter is None:
return custom_getter
# The new custom_getter should call the old one
def wrapped_custom_getter(getter, *args, **kwargs):
# Call:
# custom_getter(
# lambda: old_getter(true_getter, ...), *args, **kwargs)
# which means custom_getter will call old_getter, which
# will call the true_getter, perform any intermediate
# processing, and return the results to the current
# getter, which will also perform additional processing.
return custom_getter(functools.partial(old_getter, getter), *args, **kwargs)
return wrapped_custom_getter
def _get_unique_variable_scope(prefix):
  """Get a name with the given prefix unique in the current variable scope."""
  var_scope_store = get_variable_scope_store()
  current_scope = get_variable_scope()
  # Uniqueness is checked against the full path, but only the (possibly
  # suffixed) prefix is returned to the caller.
  if current_scope.name:
    candidate = current_scope.name + "/" + prefix
  else:
    candidate = prefix
  if var_scope_store.variable_scope_count(candidate) == 0:
    return prefix
  suffix = 1
  while var_scope_store.variable_scope_count("%s_%d" % (candidate, suffix)) > 0:
    suffix += 1
  return "%s_%d" % (prefix, suffix)
# Named like a function for backwards compatibility with the
# @tf_contextlib.contextmanager version, which was switched to a class to avoid
# some object creation overhead.
@tf_export(v1=["variable_scope"])  # pylint: disable=invalid-name
class variable_scope(object):
  """A context manager for defining ops that creates variables (layers).

  This context manager validates that the (optional) `values` are from the same
  graph, ensures that graph is the default graph, and pushes a name scope and a
  variable scope.

  If `name_or_scope` is not None, it is used as is. If `name_or_scope` is None,
  then `default_name` is used. In that case, if the same name has been
  previously used in the same scope, it will be made unique by appending `_N`
  to it.

  Variable scope allows you to create new variables and to share already created
  ones while providing checks to not create or share by accident. For details,
  see the [Variable Scope How To](https://tensorflow.org/guide/variables), here
  we present only a few basic examples.

  Simple example of how to create a new variable:

  ```python
  with tf.compat.v1.variable_scope("foo"):
    with tf.compat.v1.variable_scope("bar"):
      v = tf.compat.v1.get_variable("v", [1])
      assert v.name == "foo/bar/v:0"
  ```

  Simple example of how to reenter a premade variable scope safely:

  ```python
  with tf.compat.v1.variable_scope("foo") as vs:
    pass

  # Re-enter the variable scope.
  with tf.compat.v1.variable_scope(vs,
                                   auxiliary_name_scope=False) as vs1:
    # Restore the original name_scope.
    with tf.name_scope(vs1.original_name_scope):
      v = tf.compat.v1.get_variable("v", [1])
      assert v.name == "foo/v:0"
      c = tf.constant([1], name="c")
      assert c.name == "foo/c:0"
  ```

  Basic example of sharing a variable AUTO_REUSE:

  ```python
  def foo():
    with tf.compat.v1.variable_scope("foo", reuse=tf.compat.v1.AUTO_REUSE):
      v = tf.compat.v1.get_variable("v", [1])
    return v

  v1 = foo()  # Creates v.
  v2 = foo()  # Gets the same, existing v.
  assert v1 == v2
  ```

  Basic example of sharing a variable with reuse=True:

  ```python
  with tf.compat.v1.variable_scope("foo"):
    v = tf.compat.v1.get_variable("v", [1])
  with tf.compat.v1.variable_scope("foo", reuse=True):
    v1 = tf.compat.v1.get_variable("v", [1])
  assert v1 == v
  ```

  Sharing a variable by capturing a scope and setting reuse:

  ```python
  with tf.compat.v1.variable_scope("foo") as scope:
    v = tf.compat.v1.get_variable("v", [1])
    scope.reuse_variables()
    v1 = tf.compat.v1.get_variable("v", [1])
  assert v1 == v
  ```

  To prevent accidental sharing of variables, we raise an exception when getting
  an existing variable in a non-reusing scope.

  ```python
  with tf.compat.v1.variable_scope("foo"):
    v = tf.compat.v1.get_variable("v", [1])
    v1 = tf.compat.v1.get_variable("v", [1])
    #  Raises ValueError("... v already exists ...").
  ```

  Similarly, we raise an exception when trying to get a variable that does not
  exist in reuse mode.

  ```python
  with tf.compat.v1.variable_scope("foo", reuse=True):
    v = tf.compat.v1.get_variable("v", [1])
    #  Raises ValueError("... v does not exists ...").
  ```

  Note that the `reuse` flag is inherited: if we open a reusing scope, then all
  its sub-scopes become reusing as well.

  A note about name scoping: Setting `reuse` does not impact the naming of other
  ops such as mult. See related discussion on
  [github#6189](https://github.com/tensorflow/tensorflow/issues/6189)

  Note that up to and including version 1.0, it was allowed (though explicitly
  discouraged) to pass False to the reuse argument, yielding undocumented
  behaviour slightly different from None. Starting at 1.1.0 passing None and
  False as reuse has exactly the same effect.

  A note about using variable scopes in multi-threaded environment: Variable
  scopes are thread local, so one thread will not see another thread's current
  scope. Also, when using `default_name`, unique scope names are also generated
  only on a per thread basis. If the same name was used within a different
  thread, that doesn't prevent a new thread from creating the same scope.
  However, the underlying variable store is shared across threads (within the
  same graph). As such, if another thread tries to create a new variable with
  the same name as a variable created by a previous thread, it will fail unless
  reuse is True.

  Further, each thread starts with an empty variable scope. So if you wish to
  preserve name prefixes from a scope from the main thread, you should capture
  the main thread's scope and re-enter it in each thread. For e.g.

  ```
  main_thread_scope = variable_scope.get_variable_scope()

  # Thread's target function:
  def thread_target_fn(captured_scope):
    with variable_scope.variable_scope(captured_scope):
      # .... regular code for this thread

  thread = threading.Thread(target=thread_target_fn, args=(main_thread_scope,))
  ```
  """

  def __init__(self,
               name_or_scope,
               default_name=None,
               values=None,
               initializer=None,
               regularizer=None,
               caching_device=None,
               partitioner=None,
               custom_getter=None,
               reuse=None,
               dtype=None,
               use_resource=None,
               constraint=None,
               auxiliary_name_scope=True):
    """Initialize the context manager.

    Args:
      name_or_scope: `string` or `VariableScope`: the scope to open.
      default_name: The default name to use if the `name_or_scope` argument is
        `None`, this name will be uniquified. If name_or_scope is provided it
        won't be used and therefore it is not required and can be None.
      values: The list of `Tensor` arguments that are passed to the op function.
      initializer: default initializer for variables within this scope.
      regularizer: default regularizer for variables within this scope.
      caching_device: default caching device for variables within this scope.
      partitioner: default partitioner for variables within this scope.
      custom_getter: default custom getter for variables within this scope.
      reuse: `True`, None, or tf.compat.v1.AUTO_REUSE; if `True`, we go into
        reuse mode for this scope as well as all sub-scopes; if
        tf.compat.v1.AUTO_REUSE, we create variables if they do not exist, and
        return them otherwise; if None, we inherit the parent scope's reuse
        flag. When eager execution is enabled, new variables are always created
        unless an EagerVariableStore or template is currently active.
      dtype: type of variables created in this scope (defaults to the type in
        the passed scope, or inherited from parent scope).
      use_resource: If False, all variables will be regular Variables. If True,
        experimental ResourceVariables with well-defined semantics will be used
        instead. Defaults to False (will later change to True). When eager
        execution is enabled this argument is always forced to be True.
      constraint: An optional projection function to be applied to the variable
        after being updated by an `Optimizer` (e.g. used to implement norm
        constraints or value constraints for layer weights). The function must
        take as input the unprojected Tensor representing the value of the
        variable and return the Tensor for the projected value (which must have
        the same shape). Constraints are not safe to use when doing asynchronous
        distributed training.
      auxiliary_name_scope: If `True`, we create an auxiliary name scope with
        the scope. If `False`, we don't create it. Note that the argument is not
        inherited, and it only takes effect for once when creating. You should
        only use it for re-entering a premade variable scope.

    Returns:
      A scope that can be captured and reused.

    Raises:
      ValueError: when trying to reuse within a create scope, or create within
        a reuse scope.
      TypeError: when the types of some arguments are not appropriate.
    """
    self._name_or_scope = name_or_scope
    self._default_name = default_name
    self._values = values
    self._initializer = initializer
    self._regularizer = regularizer
    self._caching_device = caching_device
    self._partitioner = partitioner
    self._custom_getter = custom_getter
    self._reuse = reuse
    self._dtype = dtype
    self._use_resource = use_resource
    self._constraint = constraint
    if self._default_name is None and self._name_or_scope is None:
      raise TypeError("If default_name is None then name_or_scope is required")
    if self._reuse is False:
      # We don't allow non-inheriting scopes, False = None here.
      self._reuse = None
    if not (self._reuse is True
            or self._reuse is None
            or self._reuse is AUTO_REUSE):
      raise ValueError("The reuse parameter must be True or False or None.")
    if self._values is None:
      self._values = []
    self._in_graph_mode = not context.executing_eagerly()
    if self._in_graph_mode:
      # Pin the scope to the graph inferred from `values` so entering the
      # scope also makes that graph the default.
      self._graph = ops._get_graph_from_inputs(self._values)  # pylint: disable=protected-access
    self._cached_pure_variable_scope = None
    self._current_name_scope = None
    if not isinstance(auxiliary_name_scope, bool):
      raise TypeError("The auxiliary_name_scope must be `True` or `False`, "
                      "while get {}".format(auxiliary_name_scope))
    self._auxiliary_name_scope = auxiliary_name_scope

  def __enter__(self):
    # If the default graph is building a function, then we should not replace it
    # with the cached graph.
    if ops.get_default_graph().building_function:
      self._building_function = True
    else:
      self._building_function = False
    if self._in_graph_mode and not self._building_function:
      self._graph_context_manager = self._graph.as_default()
      self._graph_context_manager.__enter__()
    if self._cached_pure_variable_scope is not None:
      # Fast path for re-entering variable_scopes. We've held on to the pure
      # variable scope from a previous successful __enter__, so we avoid some
      # overhead by re-using that object.
      if self._current_name_scope is not None:
        self._current_name_scope.__enter__()
      return self._cached_pure_variable_scope.__enter__()

    try:
      return self._enter_scope_uncached()
    except:
      # Unwind the graph context manager before re-raising, so a failed
      # __enter__ leaves no state behind.
      if (self._in_graph_mode and not self._building_function and
          self._graph_context_manager is not None):
        self._graph_context_manager.__exit__(*sys.exc_info())
      raise

  def _enter_scope_uncached(self):
    """Enters the context manager when there is no cached scope yet.

    Returns:
      The entered variable scope.

    Raises:
      TypeError: A wrong type is passed as `scope` at __init__().
      ValueError: `reuse` is incorrectly set at __init__().
    """
    if self._auxiliary_name_scope:
      # Create a new name scope later
      current_name_scope = None
    else:
      # Reenter the current name scope
      name_scope = ops.get_name_scope()
      if name_scope:
        # Hack to reenter
        name_scope += "/"
        current_name_scope = ops.name_scope(name_scope)
      else:
        # Root scope
        current_name_scope = ops.name_scope(name_scope)

    # IMPORTANT: Only assign to self._cached_pure_variable_scope and
    # self._current_name_scope after successful __enter__() calls.
    if self._name_or_scope is not None:
      if not isinstance(self._name_or_scope,
                        (VariableScope,) + six.string_types):
        raise TypeError("VariableScope: name_or_scope must be a string or "
                        "VariableScope.")
      if isinstance(self._name_or_scope, six.string_types):
        name_scope = self._name_or_scope
      else:
        # Re-entering a premade scope: only its last path component names
        # the auxiliary name scope.
        name_scope = self._name_or_scope.name.split("/")[-1]
      if name_scope or current_name_scope:
        current_name_scope = current_name_scope or ops.name_scope(name_scope)
        try:
          current_name_scope_name = current_name_scope.__enter__()
        except:
          current_name_scope.__exit__(*sys.exc_info())
          raise
        self._current_name_scope = current_name_scope
        if isinstance(self._name_or_scope, six.string_types):
          old_name_scope = current_name_scope_name
        else:
          old_name_scope = self._name_or_scope.original_name_scope
        pure_variable_scope = _pure_variable_scope(
            self._name_or_scope,
            reuse=self._reuse,
            initializer=self._initializer,
            regularizer=self._regularizer,
            caching_device=self._caching_device,
            partitioner=self._partitioner,
            custom_getter=self._custom_getter,
            old_name_scope=old_name_scope,
            dtype=self._dtype,
            use_resource=self._use_resource,
            constraint=self._constraint)
        try:
          entered_pure_variable_scope = pure_variable_scope.__enter__()
        except:
          pure_variable_scope.__exit__(*sys.exc_info())
          raise
        self._cached_pure_variable_scope = pure_variable_scope
        return entered_pure_variable_scope
      else:
        self._current_name_scope = None
        # This can only happen if someone is entering the root variable scope.
        pure_variable_scope = _pure_variable_scope(
            self._name_or_scope,
            reuse=self._reuse,
            initializer=self._initializer,
            regularizer=self._regularizer,
            caching_device=self._caching_device,
            partitioner=self._partitioner,
            custom_getter=self._custom_getter,
            dtype=self._dtype,
            use_resource=self._use_resource,
            constraint=self._constraint)
        try:
          entered_pure_variable_scope = pure_variable_scope.__enter__()
        except:
          pure_variable_scope.__exit__(*sys.exc_info())
          raise
        self._cached_pure_variable_scope = pure_variable_scope
        return entered_pure_variable_scope
    else:  # Here name_or_scope is None. Using default name, but made unique.
      if self._reuse:
        raise ValueError("reuse=True cannot be used without a name_or_scope")
      current_name_scope = current_name_scope or ops.name_scope(
          self._default_name)
      try:
        current_name_scope_name = current_name_scope.__enter__()
      except:
        current_name_scope.__exit__(*sys.exc_info())
        raise
      self._current_name_scope = current_name_scope
      unique_default_name = _get_unique_variable_scope(self._default_name)
      pure_variable_scope = _pure_variable_scope(
          unique_default_name,
          initializer=self._initializer,
          regularizer=self._regularizer,
          caching_device=self._caching_device,
          partitioner=self._partitioner,
          custom_getter=self._custom_getter,
          old_name_scope=current_name_scope_name,
          dtype=self._dtype,
          use_resource=self._use_resource,
          constraint=self._constraint)
      try:
        entered_pure_variable_scope = pure_variable_scope.__enter__()
      except:
        pure_variable_scope.__exit__(*sys.exc_info())
        raise
      self._cached_pure_variable_scope = pure_variable_scope
      return entered_pure_variable_scope

  def __exit__(self, type_arg, value_arg, traceback_arg):
    # Exit the three nested contexts in reverse order of entry; the
    # try/finally ladder guarantees each is exited even if an earlier
    # __exit__ raises.
    try:
      self._cached_pure_variable_scope.__exit__(type_arg, value_arg,
                                                traceback_arg)
    finally:
      try:
        if self._current_name_scope:
          self._current_name_scope.__exit__(type_arg, value_arg,
                                            traceback_arg)
      finally:
        if self._in_graph_mode and not self._building_function:
          self._graph_context_manager.__exit__(type_arg, value_arg,
                                               traceback_arg)
# pylint: disable=g-doc-return-or-yield
@tf_export(v1=["variable_op_scope"])
@tf_contextlib.contextmanager
def variable_op_scope(values,
                      name_or_scope,
                      default_name=None,
                      initializer=None,
                      regularizer=None,
                      caching_device=None,
                      partitioner=None,
                      custom_getter=None,
                      reuse=None,
                      dtype=None,
                      use_resource=None,
                      constraint=None):
  """Deprecated: context manager for defining an op that creates variables."""
  # Point callers at the replacement API; otherwise this is just
  # `variable_scope` with `values` moved to the front of the signature.
  logging.warn("tf.variable_op_scope(values, name, default_name) is deprecated,"
               " use tf.variable_scope(name, default_name, values)")
  scope_manager = variable_scope(
      name_or_scope,
      default_name=default_name,
      values=values,
      initializer=initializer,
      regularizer=regularizer,
      caching_device=caching_device,
      partitioner=partitioner,
      custom_getter=custom_getter,
      reuse=reuse,
      dtype=dtype,
      use_resource=use_resource,
      constraint=constraint)
  with scope_manager as scope:
    yield scope
def _call_partitioner(partitioner, shape, dtype):
  """Call partitioner validating its inputs/output.

  Args:
    partitioner: a function mapping `Tensor` shape and dtype to a list of
      partitions.
    shape: shape of the `Tensor` to partition, must have at least two
      dimensions.
    dtype: dtype of the elements in the `Tensor`.

  Returns:
    A list with elements >=1 and exactly one >1. The index of that
    element corresponds to the partitioning axis.
  """
  # Validate the shape before even invoking the partitioner.
  if not shape.is_fully_defined():
    raise ValueError("Shape of a new partitioned variable must be "
                     "fully defined, but instead was %s." % (shape,))
  if shape.ndims < 1:
    raise ValueError("A partitioned Variable must have rank at least 1, "
                     "shape: %s" % shape)

  partitions = partitioner(shape=shape, dtype=dtype)
  # Validate the partitioner's output: a sequence of per-axis counts,
  # one per dimension, all >= 1, with at most one axis actually split.
  if not isinstance(partitions, collections_lib.Sequence):
    raise ValueError("Partitioner must return a sequence, but saw: %s" %
                     partitions)
  if len(partitions) != shape.ndims:
    raise ValueError(
        "Partitioner returned a partition list that does not match the "
        "Variable's rank: %s vs. %s" % (partitions, shape))
  if min(partitions) < 1:
    raise ValueError("Partitioner returned zero partitions for some axes: %s" %
                     partitions)
  if len([p for p in partitions if p > 1]) > 1:
    raise ValueError("Can only slice a variable along one dimension: "
                     "shape: %s, partitioning: %s" % (shape, partitions))
  return partitions
# TODO(slebedev): could be inlined, but
# `_VariableStore._get_partitioned_variable` is too complex even
# without this logic.
def _get_slice_dim_and_num_slices(slicing):
"""Get slicing dimension and number of slices from the partitioner output."""
for slice_dim, num_slices in enumerate(slicing):
if num_slices > 1:
break
else:
# Degenerate case: no partitioning applied.
slice_dim = 0
num_slices = 1
return slice_dim, num_slices
def _iter_slices(full_shape, num_slices, slice_dim):
  """Slices a given a shape along the specified dimension."""
  # Divide the extent as evenly as possible: the first `n_excess` slices
  # each absorb one extra element so the lengths sum to the full extent.
  base_len, n_excess = divmod(full_shape[slice_dim], num_slices)
  cursor = [0] * len(full_shape)
  for i in xrange(num_slices):
    piece_shape = full_shape[:]
    piece_shape[slice_dim] = base_len + (1 if i < n_excess else 0)
    yield cursor[:], piece_shape
    cursor[slice_dim] += piece_shape[slice_dim]
def default_variable_creator(next_creator=None, **kwargs):
  """Default variable creator."""
  assert next_creator is None
  get = kwargs.get
  initial_value = get("initial_value", None)
  trainable = get("trainable", None)
  collections = get("collections", None)
  validate_shape = get("validate_shape", True)
  caching_device = get("caching_device", None)
  name = get("name", None)
  variable_def = get("variable_def", None)
  dtype = get("dtype", None)
  expected_shape = get("expected_shape", None)
  import_scope = get("import_scope", None)
  constraint = get("constraint", None)
  use_resource = get("use_resource", None)
  synchronization = get("synchronization", None)
  aggregation = get("aggregation", None)
  shape = get("shape", None)

  # Resolve `use_resource`: explicit argument first, then the enclosing
  # variable scope's setting, then the module default; eager execution
  # forces resource variables on.
  if use_resource is None:
    use_resource = get_variable_scope().use_resource
  if use_resource is None:
    use_resource = _DEFAULT_USE_RESOURCE
  use_resource = use_resource or context.executing_eagerly()

  if not use_resource:
    return variables.RefVariable(
        initial_value=initial_value,
        trainable=trainable,
        collections=collections,
        validate_shape=validate_shape,
        caching_device=caching_device,
        name=name,
        dtype=dtype,
        constraint=constraint,
        variable_def=variable_def,
        expected_shape=expected_shape,
        import_scope=import_scope,
        synchronization=synchronization,
        aggregation=aggregation,
        shape=shape)

  # `distribute_strategy` is only meaningful for resource variables.
  distribute_strategy = get("distribute_strategy", None)
  return resource_variable_ops.ResourceVariable(
      initial_value=initial_value,
      trainable=trainable,
      collections=collections,
      validate_shape=validate_shape,
      caching_device=caching_device,
      name=name,
      dtype=dtype,
      constraint=constraint,
      variable_def=variable_def,
      import_scope=import_scope,
      distribute_strategy=distribute_strategy,
      synchronization=synchronization,
      aggregation=aggregation,
      shape=shape)
def default_variable_creator_v2(next_creator=None, **kwargs):
  """Default variable creator."""
  assert next_creator is None
  # V2 variables are always resource variables, so simply forward the
  # recognized keyword arguments (missing ones default to None, except
  # `validate_shape` which defaults to True).
  return resource_variable_ops.ResourceVariable(
      initial_value=kwargs.get("initial_value", None),
      trainable=kwargs.get("trainable", None),
      validate_shape=kwargs.get("validate_shape", True),
      caching_device=kwargs.get("caching_device", None),
      name=kwargs.get("name", None),
      dtype=kwargs.get("dtype", None),
      constraint=kwargs.get("constraint", None),
      variable_def=kwargs.get("variable_def", None),
      import_scope=kwargs.get("import_scope", None),
      distribute_strategy=kwargs.get("distribute_strategy", None),
      synchronization=kwargs.get("synchronization", None),
      aggregation=kwargs.get("aggregation", None),
      shape=kwargs.get("shape", None))
# Install these implementations as the default creators used by the
# `variables` module.  NOTE(review): presumably `variables` declares these
# hooks for this module to fill in at import time (avoiding a circular
# import) — confirm against the `variables` module.
variables.default_variable_creator = default_variable_creator
variables.default_variable_creator_v2 = default_variable_creator_v2
def _make_getter(captured_getter, captured_previous):
"""Gets around capturing loop variables in python being broken."""
return lambda **kwargs: captured_getter(captured_previous, **kwargs)
# TODO(apassos) remove forwarding symbol
# Backwards-compatibility alias: `variable` forwards to the V1 Variable class.
variable = variables.VariableV1
@tf_export(v1=["variable_creator_scope"])
@tf_contextlib.contextmanager
def variable_creator_scope_v1(variable_creator):
  """Scope which defines a variable creation function to be used by variable().

  variable_creator is expected to be a function with the following signature:

  ```
    def variable_creator(next_creator, **kwargs)
  ```

  The creator is supposed to eventually call the next_creator to create a
  variable if it does want to create a variable and not call Variable or
  ResourceVariable directly. This helps make creators composable. A creator may
  choose to create multiple variables, return already existing variables, or
  simply register that a variable was created and defer to the next creators in
  line. Creators can also modify the keyword arguments seen by the next
  creators.

  Custom getters in the variable scope will eventually resolve down to these
  custom creators when they do create variables.

  The valid keyword arguments in kwds are:

  * initial_value: A `Tensor`, or Python object convertible to a `Tensor`,
      which is the initial value for the Variable. The initial value must have
      a shape specified unless `validate_shape` is set to False. Can also be a
      callable with no argument that returns the initial value when called. In
      that case, `dtype` must be specified. (Note that initializer functions
      from init_ops.py must first be bound to a shape before being used here.)
  * trainable: If `True`, the default, also adds the variable to the graph
      collection `GraphKeys.TRAINABLE_VARIABLES`. This collection is used as
      the default list of variables to use by the `Optimizer` classes.
      `trainable` defaults to `True`, unless `synchronization` is
      set to `ON_READ`, in which case it defaults to `False`.
  * collections: List of graph collections keys. The new variable is added to
      these collections. Defaults to `[GraphKeys.GLOBAL_VARIABLES]`.
  * validate_shape: If `False`, allows the variable to be initialized with a
      value of unknown shape. If `True`, the default, the shape of
      `initial_value` must be known.
  * caching_device: Optional device string describing where the Variable
      should be cached for reading. Defaults to the Variable's device.
      If not `None`, caches on another device. Typical use is to cache
      on the device where the Ops using the Variable reside, to deduplicate
      copying through `Switch` and other conditional statements.
  * name: Optional name for the variable. Defaults to `'Variable'` and gets
      uniquified automatically.
  * dtype: If set, initial_value will be converted to the given type.
      If `None`, either the datatype will be kept (if `initial_value` is
      a Tensor), or `convert_to_tensor` will decide.
  * constraint: A constraint function to be applied to the variable after
      updates by some algorithms.
  * use_resource: if True, a ResourceVariable is always created.
  * synchronization: Indicates when a distributed variable will be
      aggregated. Accepted values are constants defined in the class
      `tf.VariableSynchronization`. By default the synchronization is set to
      `AUTO` and the current `DistributionStrategy` chooses
      when to synchronize.
  * aggregation: Indicates how a distributed variable will be aggregated.
      Accepted values are constants defined in the class
      `tf.VariableAggregation`.

  This set may grow over time, so it's important the signature of creators is as
  mentioned above.

  Args:
    variable_creator: the passed creator

  Yields:
    A scope in which the creator is active
  """
  # The creator stack lives on the graph; pushing/popping is delegated to the
  # graph's own (protected) context manager.
  with ops.get_default_graph()._variable_creator_scope(variable_creator):  # pylint: disable=protected-access
    yield
# Note: only the docstrings differ between this and v1.
@tf_export("variable_creator_scope", v1=[])
@tf_contextlib.contextmanager
def variable_creator_scope(variable_creator):
  """Scope which defines a variable creation function to be used by variable().

  variable_creator is expected to be a function with the following signature:

  ```
    def variable_creator(next_creator, **kwargs)
  ```

  The creator is supposed to eventually call the next_creator to create a
  variable if it does want to create a variable and not call Variable or
  ResourceVariable directly. This helps make creators composable. A creator may
  choose to create multiple variables, return already existing variables, or
  simply register that a variable was created and defer to the next creators in
  line. Creators can also modify the keyword arguments seen by the next
  creators.

  Custom getters in the variable scope will eventually resolve down to these
  custom creators when they do create variables.

  The valid keyword arguments in kwds are:

  * initial_value: A `Tensor`, or Python object convertible to a `Tensor`,
      which is the initial value for the Variable. The initial value must have
      a shape specified unless `validate_shape` is set to False. Can also be a
      callable with no argument that returns the initial value when called. In
      that case, `dtype` must be specified. (Note that initializer functions
      from init_ops.py must first be bound to a shape before being used here.)
  * trainable: If `True`, the default, GradientTapes automatically watch
      uses of this Variable.
  * validate_shape: If `False`, allows the variable to be initialized with a
      value of unknown shape. If `True`, the default, the shape of
      `initial_value` must be known.
  * caching_device: Optional device string describing where the Variable
      should be cached for reading. Defaults to the Variable's device.
      If not `None`, caches on another device. Typical use is to cache
      on the device where the Ops using the Variable reside, to deduplicate
      copying through `Switch` and other conditional statements.
  * name: Optional name for the variable. Defaults to `'Variable'` and gets
      uniquified automatically.
  * dtype: If set, initial_value will be converted to the given type.
      If `None`, either the datatype will be kept (if `initial_value` is
      a Tensor), or `convert_to_tensor` will decide.
  * constraint: A constraint function to be applied to the variable after
      updates by some algorithms.
  * synchronization: Indicates when a distributed variable will be
      aggregated. Accepted values are constants defined in the class
      `tf.VariableSynchronization`. By default the synchronization is set to
      `AUTO` and the current `DistributionStrategy` chooses
      when to synchronize.
  * aggregation: Indicates how a distributed variable will be aggregated.
      Accepted values are constants defined in the class
      `tf.VariableAggregation`.

  This set may grow over time, so it's important the signature of creators is
  as mentioned above.

  Args:
    variable_creator: the passed creator

  Yields:
    A scope in which the creator is active
  """
  # Creator scopes are stored on the graph; delegating to the (private) graph
  # API keeps this composable with nested creator scopes.
  with ops.get_default_graph()._variable_creator_scope(variable_creator):  # pylint: disable=protected-access
    yield
|
repair_test.py | import os
import os.path
import threading
import time
import re
import pytest
import logging
from collections import namedtuple
from threading import Thread
from cassandra import ConsistencyLevel
from cassandra.query import SimpleStatement
from ccmlib.node import ToolError
from dtest import FlakyRetryPolicy, Tester, create_ks, create_cf
from tools.data import insert_c1c2, query_c1c2
since = pytest.mark.since
logger = logging.getLogger(__name__)
def _repair_options(version, ks='', cf=None, sequential=True):
"""
Function for assembling appropriate repair CLI options,
based on C* version, as defaults have changed.
@param ks The keyspace to repair
@param cf The table to repair
@param sequential If the repair should be a sequential repair [vs parallel]
"""
opts = []
# since version 2.2, default is parallel, otherwise it's sequential
if sequential:
if version >= '2.2':
opts += ['-seq']
else:
if version < '2.2':
opts += ['-par']
# test with full repair
if version >= '2.2':
opts += ['-full']
if ks:
opts += [ks]
if cf:
opts += [cf]
return opts
class BaseRepairTest(Tester):
    """
    Shared plumbing for repair tests: populate a 3-node cluster so that node3
    misses exactly one key, then repair and verify what was transferred.
    """

    def check_rows_on_node(self, node_to_check, rows, found=None, missings=None, restart=True):
        """
        Function to verify the rows on a given node, without interference
        from the other nodes in the cluster

        @param node_to_check The given node to check. Should be the node, not the index
        @param rows The number of rows we expect
        @param found A list of partition keys that we expect to be on the node
        @param missings A list of partition keys we expect NOT to be on the node
        @param restart Whether or not we should restart the nodes we shut down to
            perform the assertions. Should only be False if the call to
            check_rows_on_node is the last line in the test.
        """
        if found is None:
            found = []
        if missings is None:
            missings = []
        # Stop every other running node so reads can only be served locally.
        stopped_nodes = []
        for node in list(self.cluster.nodes.values()):
            if node.is_running() and node is not node_to_check:
                stopped_nodes.append(node)
                node.stop(wait_other_notice=True)

        session = self.patient_exclusive_cql_connection(node_to_check, 'ks')
        # LIMIT rows * 2 so that an over-count is observed rather than truncated.
        result = list(session.execute("SELECT * FROM cf LIMIT {}".format(rows * 2), timeout=10))
        assert len(result) == rows

        for k in found:
            query_c1c2(session, k, ConsistencyLevel.ONE)

        for k in missings:
            query = SimpleStatement("SELECT c1, c2 FROM cf WHERE key='k{}'".format(k), consistency_level=ConsistencyLevel.ONE)
            res = list(session.execute(query))
            # Expect no (non-empty) rows for keys that must be absent.
            assert len([x for x in res if len(x) != 0]) == 0, res

        if restart:
            for node in stopped_nodes:
                node.start(wait_other_notice=True)

    def _populate_cluster(self, start=True):
        """
        Start a 3-node cluster with RF=3 and write 2001 keys, arranging for
        node3 to miss key 1000 (written at CL TWO while node3 was down).
        Note: `start` is currently unused; kept for interface compatibility.
        """
        cluster = self.cluster
        # Disable hinted handoff and set batch commit log so this doesn't
        # interfere with the test (this must be after the populate)
        cluster.set_configuration_options(values={'hinted_handoff_enabled': False})
        cluster.set_batch_commitlog(enabled=True)
        logger.debug("Starting cluster..")
        cluster.populate(3).start()
        node1, node2, node3 = cluster.nodelist()
        session = self.patient_cql_connection(node1, retry_policy=FlakyRetryPolicy(max_retries=15))
        create_ks(session, 'ks', 3)
        # read_repair must be disabled (<4.0) so reads can't heal the hole.
        if cluster.version() < '4.0':
            create_cf(session, 'cf', read_repair=0.0, columns={'c1': 'text', 'c2': 'text'})
        else:
            create_cf(session, 'cf', columns={'c1': 'text', 'c2': 'text'})

        # Insert 1000 keys, kill node 3, insert 1 key, restart node 3, insert 1000 more keys
        logger.debug("Inserting data...")
        insert_c1c2(session, n=1000, consistency=ConsistencyLevel.ALL)
        node3.flush()
        node3.stop(wait_other_notice=True)
        insert_c1c2(session, keys=(1000, ), consistency=ConsistencyLevel.TWO)
        node3.start(wait_other_notice=True, wait_for_binary_proto=True)
        insert_c1c2(session, keys=list(range(1001, 2001)), consistency=ConsistencyLevel.ALL)
        cluster.flush()

    def _repair_and_verify(self, sequential=True):
        """
        Verify the pre-repair state built by `_populate_cluster`, run a repair
        from node1, then assert exactly one range was out of sync per pair and
        that node3 received the missing key.

        @param sequential If the repair should be sequential [vs parallel]
        """
        cluster = self.cluster
        node1, node2, node3 = cluster.nodelist()

        # Verify that node3 has only 2000 keys
        logger.debug("Checking data on node3...")
        self.check_rows_on_node(node3, 2000, missings=[1000])

        # Verify that node1 has 2001 keys
        logger.debug("Checking data on node1...")
        self.check_rows_on_node(node1, 2001, found=[1000])

        # Verify that node2 has 2001 keys
        logger.debug("Checking data on node2...")
        self.check_rows_on_node(node2, 2001, found=[1000])

        time.sleep(10)  # see CASSANDRA-4373
        # Run repair
        start = time.time()
        logger.debug("starting repair...")
        node1.repair(_repair_options(self.cluster.version(), ks='ks', sequential=sequential))
        logger.debug("Repair time: {end}".format(end=time.time() - start))

        # Validate that only one range was transfered
        # (raw string so the regex escapes aren't mangled by Python)
        out_of_sync_logs = node1.grep_log(r"{} and {} have ([0-9]+) range\(s\) out of sync".format(cluster.address_regex(), cluster.address_regex()))
        assert len(out_of_sync_logs) == 2, "Lines matching: " + str([elt[0] for elt in out_of_sync_logs])

        valid_out_of_sync_pairs = [{node1.address(), node3.address()},
                                   {node2.address(), node3.address()}]

        for line, m in out_of_sync_logs:
            num_out_of_sync_ranges, out_of_sync_nodes = m.group(3), {m.group(1), m.group(2)}
            assert int(num_out_of_sync_ranges) == 1, \
                "Expecting 1 range out of sync for {}, but saw {}".format(out_of_sync_nodes, num_out_of_sync_ranges)
            # Bug fix: the original `assert out_of_sync_nodes, ...` asserted a
            # non-empty set (always truthy) and could never fail on a mismatch.
            # Actually verify the pair is one of the expected ones.
            assert out_of_sync_nodes in valid_out_of_sync_pairs, str(out_of_sync_nodes)

        # Check node3 now has the key
        self.check_rows_on_node(node3, 2001, found=[1000], restart=False)
class TestRepair(BaseRepairTest):
@since('2.2.1', max_version='4')
def test_no_anticompaction_after_dclocal_repair(self):
    """
    * Launch a four node, two DC cluster
    * Start a -local repair on node1 in dc1
    * Assert that the dc1 nodes see repair messages
    * Assert that the dc2 nodes do not see repair messages
    * Assert no nodes anticompact
    # TODO: Verify the anticompaction with sstablemetadata, not just logs
    @jira_ticket CASSANDRA-10422
    """
    cluster = self.cluster
    logger.debug("Starting cluster..")
    # Two nodes in each of two datacenters.
    cluster.populate([2, 2]).start(wait_for_binary_proto=True)
    node1_1, node2_1, node1_2, node2_2 = cluster.nodelist()
    # RF=4 so every node holds a replica of the stressed data.
    node1_1.stress(stress_options=['write', 'n=50K', 'no-warmup', 'cl=ONE', '-schema', 'replication(factor=4)', '-rate', 'threads=50'])
    node1_1.nodetool("repair -local keyspace1 standard1")
    # Both dc1 nodes must log that the repair is not global...
    assert node1_1.grep_log("Not a global repair")
    assert node2_1.grep_log("Not a global repair")
    # dc2 should not see these messages:
    assert not node1_2.grep_log("Not a global repair")
    assert not node2_2.grep_log("Not a global repair")
    # and no nodes should do anticompaction:
    for node in cluster.nodelist():
        assert not node.grep_log("Starting anticompaction")
# @pytest.mark.skipif(CASSANDRA_VERSION_FROM_BUILD == '3.9', reason="Test doesn't run on 3.9")
@pytest.mark.skip_version('3.9')
def test_nonexistent_table_repair(self):
    """
    * Check that repairing a non-existent table fails
    @jira_ticket CASSANDRA-12279
    """
    # The repair is expected to fail; keep its error out of the log scan.
    self.fixture_dtest_setup.ignore_log_patterns = [r'Unknown keyspace/cf pair']
    cluster = self.cluster
    logger.debug('Starting nodes')
    cluster.populate(2).start(wait_for_binary_proto=True)
    node1, _ = cluster.nodelist()
    logger.debug('Creating keyspace and tables')
    node1.stress(stress_options=['write', 'n=1', 'no-warmup',
                                 'cl=ONE', '-schema', 'replication(factor=2)',
                                 '-rate', 'threads=1'])
    logger.debug('Repairing non-existent table')

    def repair_non_existent_table():
        # The exception is handed back through a module-level global because
        # a Thread target cannot return a value directly.
        global nodetool_error
        try:
            # 'standard2' deliberately does not exist.
            node1.nodetool('repair keyspace1 standard2')
        except Exception as e:
            nodetool_error = e

    # Launch in a external thread so it does not hang process
    t = Thread(target=repair_non_existent_table)
    t.start()
    t.join(timeout=60)
    assert not t.is_alive(), 'Repair thread on inexistent table is still running'
    if self.cluster.version() >= '2.2':
        node1.watch_log_for("Unknown keyspace/cf pair", timeout=60)
    # Repair only finishes with error status after CASSANDRA-12508 on 3.0+
    if self.cluster.version() >= '3.0':
        # If no exception was raised the global was never set, which fails here.
        assert 'nodetool_error' in globals() and isinstance(nodetool_error, ToolError), \
            'Repair thread on inexistent table did not throw exception'
        logger.debug(repr(nodetool_error))
        assert 'Unknown keyspace/cf pair' in repr(nodetool_error),\
            'Repair thread on inexistent table did not detect inexistent table.'
@since('2.2.1', max_version='4')
def test_no_anticompaction_after_hostspecific_repair(self):
    """
    * Launch a four node, two DC cluster
    * Start a repair on all nodes, by enumerating with -hosts
    * Assert all nodes see a repair messages
    * Assert no nodes anticompact
    # TODO: Verify the anticompaction with sstablemetadata, not just logs
    @jira_ticket CASSANDRA-10422
    """
    cluster = self.cluster
    logger.debug("Starting cluster..")
    cluster.populate([2, 2]).start(wait_for_binary_proto=True)
    node1_1, node2_1, node1_2, node2_2 = cluster.nodelist()
    # RF=4: every node is a replica, so listing all four hosts makes the
    # repair cluster-wide yet still non-global.
    node1_1.stress(stress_options=['write', 'n=100K', 'no-warmup', 'cl=ONE', '-schema', 'replication(factor=4)', '-rate', 'threads=50'])
    node1_1.nodetool("repair -hosts 127.0.0.1,127.0.0.2,127.0.0.3,127.0.0.4 keyspace1 standard1")
    for node in cluster.nodelist():
        assert node.grep_log("Not a global repair")
    # A host-specific repair must not trigger anticompaction anywhere.
    for node in cluster.nodelist():
        assert not node.grep_log("Starting anticompaction")
@since('2.2.4', max_version='4')
def test_no_anticompaction_after_subrange_repair(self):
    """
    * Launch a three node, two DC cluster
    * Start a repair on a token range
    * Assert all nodes see repair messages
    * Assert no nodes anticompact
    # TODO: Verify the anticompaction with sstablemetadata, not just logs
    @jira_ticket CASSANDRA-10422
    """
    cluster = self.cluster
    logger.debug("Starting cluster..")
    cluster.populate(3).start(wait_for_binary_proto=True)
    node1, node2, node3 = cluster.nodelist()
    node1.stress(stress_options=['write', 'n=50K', 'no-warmup', 'cl=ONE', '-schema', 'replication(factor=3)', '-rate', 'threads=50'])
    # Repair only the token range (0, 1000]; subrange repairs must not anticompact.
    node1.nodetool("repair -st 0 -et 1000 keyspace1 standard1")
    for node in cluster.nodelist():
        assert node.grep_log("Not a global repair")
    for node in cluster.nodelist():
        assert not node.grep_log("Starting anticompaction")
def _get_repaired_data(self, node, keyspace):
"""
Based on incremental_repair_test.py:TestIncRepair implementation.
"""
_sstable_name = re.compile('SSTable: (.+)')
_repaired_at = re.compile('Repaired at: (\d+)')
_sstable_data = namedtuple('_sstabledata', ('name', 'repaired'))
out = node.run_sstablemetadata(keyspace=keyspace).stdout
def matches(pattern):
return [_f for _f in [pattern.match(l) for l in out.split('\n')] if _f]
names = [m.group(1) for m in matches(_sstable_name)]
repaired_times = [int(m.group(1)) for m in matches(_repaired_at)]
assert names
assert repaired_times
return [_sstable_data(*a) for a in zip(names, repaired_times)]
@since('2.2.10', max_version='4')
def test_no_anticompaction_of_already_repaired(self):
    """
    * Launch three node cluster and stress with RF2
    * Do incremental repair to have all sstables flagged as repaired
    * Stop node2, stress, start again and run full -pr repair
    * Verify that none of the already repaired sstables have been anti-compacted again
    @jira_ticket CASSANDRA-13153
    """
    cluster = self.cluster
    logger.debug("Starting cluster..")
    # disable JBOD conf since the test expects sstables to be on the same disk
    cluster.set_datadir_count(1)
    cluster.populate(3).start(wait_for_binary_proto=True)
    node1, node2, node3 = cluster.nodelist()
    # we use RF to make sure to cover only a set of sub-ranges when doing -full -pr
    node1.stress(stress_options=['write', 'n=50K', 'no-warmup', 'cl=ONE', '-schema', 'replication(factor=2)', '-rate', 'threads=50'])
    # disable compaction to make sure that we won't create any new sstables with repairedAt 0
    node1.nodetool('disableautocompaction keyspace1 standard1')
    # Do incremental repair of all ranges. All sstables are expected for have repairedAt set afterwards.
    node1.nodetool("repair keyspace1 standard1")
    meta = self._get_repaired_data(node1, 'keyspace1')
    # Every sstable must now carry a non-zero repairedAt timestamp.
    repaired = set([m for m in meta if m.repaired > 0])
    assert len(repaired) == len(meta)
    # stop node2, stress and start full repair to find out how synced ranges affect repairedAt values
    node2.stop(wait_other_notice=True)
    node1.stress(stress_options=['write', 'n=40K', 'no-warmup', 'cl=ONE', '-rate', 'threads=50'])
    node2.start(wait_for_binary_proto=True, wait_other_notice=True)
    node1.nodetool("repair -full -pr keyspace1 standard1")
    meta = self._get_repaired_data(node1, 'keyspace1')
    repairedAfterFull = set([m for m in meta if m.repaired > 0])
    # already repaired sstables must remain untouched
    assert repaired.intersection(repairedAfterFull) == repaired
@since('2.2.1', '4')
def test_anticompaction_after_normal_repair(self):
    """
    * Launch a four node, two DC cluster
    * Start a normal repair
    * Assert every node anticompacts
    @jira_ticket CASSANDRA-10422
    """
    cluster = self.cluster
    logger.debug("Starting cluster..")
    cluster.populate([2, 2]).start(wait_for_binary_proto=True)
    node1_1, node2_1, node1_2, node2_2 = cluster.nodelist()
    # RF=4 so every node replicates the stressed data.
    node1_1.stress(stress_options=['write', 'n=50K', 'no-warmup', 'cl=ONE', '-schema', 'replication(factor=4)'])
    node1_1.nodetool("repair keyspace1 standard1")
    for node in cluster.nodelist():
        # Bug fix: the original `assert "Starting anticompaction"` asserted a
        # non-empty string literal (always true) and never inspected the node.
        # Actually check the log on every node.
        assert node.grep_log("Starting anticompaction")
def test_simple_sequential_repair(self):
    """
    Calls simple repair test with a sequential repair
    (see `_simple_repair` for the full scenario).
    """
    self._simple_repair(sequential=True)
def test_simple_parallel_repair(self):
    """
    Calls simple repair test with a parallel repair
    (see `_simple_repair` for the full scenario).
    """
    self._simple_repair(sequential=False)
def test_empty_vs_gcable_sequential_repair(self):
    """
    Calls empty_vs_gcable repair test with a sequential repair
    (see `_empty_vs_gcable_no_repair`).
    """
    self._empty_vs_gcable_no_repair(sequential=True)
def test_empty_vs_gcable_parallel_repair(self):
    """
    Calls empty_vs_gcable repair test with a parallel repair
    (see `_empty_vs_gcable_no_repair`).
    """
    self._empty_vs_gcable_no_repair(sequential=False)
def test_range_tombstone_digest_sequential_repair(self):
    """
    Calls range_tombstone_digest with a sequential repair
    (see `_range_tombstone_digest`).
    """
    self._range_tombstone_digest(sequential=True)
def test_range_tombstone_digest_parallel_repair(self):
    """
    Calls range_tombstone_digest with a parallel repair
    (see `_range_tombstone_digest`).
    """
    self._range_tombstone_digest(sequential=False)
@since('2.1')
def test_shadowed_cell_digest_sequential_repair(self):
    """
    Calls _cell_shadowed_by_range_tombstone with sequential repair.
    """
    self._cell_shadowed_by_range_tombstone(sequential=True)
@since('2.1')
def test_shadowed_cell_digest_parallel_repair(self):
    """
    Calls _cell_shadowed_by_range_tombstone with parallel repair.
    """
    self._cell_shadowed_by_range_tombstone(sequential=False)
@since('3.0')
def test_shadowed_range_tombstone_digest_sequential_repair(self):
    """
    Calls _range_tombstone_shadowed_by_range_tombstone with sequential repair.
    """
    self._range_tombstone_shadowed_by_range_tombstone(sequential=True)
@since('3.0')
def test_shadowed_range_tombstone_digest_parallel_repair(self):
    """
    Calls _range_tombstone_shadowed_by_range_tombstone with parallel repair.
    """
    self._range_tombstone_shadowed_by_range_tombstone(sequential=False)
@pytest.mark.no_vnodes
def test_simple_repair_order_preserving(self):
    """
    Calls simple repair test with an order-preserving partitioner
    and sequential repair (the default).
    @jira_ticket CASSANDRA-5220
    """
    self._simple_repair(order_preserving_partitioner=True)
def _simple_repair(self, order_preserving_partitioner=False, sequential=True):
    """
    * Configure a three node cluster to not use hinted handoff, and to use batch commitlog
    * Launch the cluster
    * Create a keyspace at RF 3 and table
    * Insert one thousand rows at CL ALL
    * Flush on node3 and shut it down
    * Insert one row at CL TWO
    * Restart node3
    * Insert one thousand more rows at CL ALL
    * Flush all nodes
    * Check node3 only has 2000 keys
    * Check node1 and node2 have 2001 keys
    * Perform the repair type specified by the parent test
    * Assert the appropriate messages are logged
    * Assert node3 now has all data

    @param order_preserving_partitioner Use ByteOrderedPartitioner if True
    @param sequential If the repair should be sequential [vs parallel]
    @jira_ticket CASSANDRA-4373
    """
    # Partitioner must be set before the cluster is populated/started.
    if order_preserving_partitioner:
        self.cluster.set_partitioner('org.apache.cassandra.dht.ByteOrderedPartitioner')

    self._populate_cluster()
    self._repair_and_verify(sequential)
def _empty_vs_gcable_no_repair(self, sequential):
    """
    Repairing empty partition and tombstoned partition older than gc grace
    should be treated as the same and no repair is necessary.

    @param sequential If the repair should be sequential [vs parallel]
    @jira_ticket CASSANDRA-8979.
    """
    cluster = self.cluster
    cluster.populate(2)
    # No hints and batch commitlog, so node2's missed writes stay missed.
    cluster.set_configuration_options(values={'hinted_handoff_enabled': False})
    cluster.set_batch_commitlog(enabled=True)
    cluster.start()
    node1, node2 = cluster.nodelist()
    session = self.patient_cql_connection(node1)
    # create keyspace with RF=2 to be able to be repaired
    create_ks(session, 'ks', 2)
    # we create two tables, one has low gc grace seconds so that the data
    # can be dropped during test (but we don't actually drop them).
    # the other has default gc.
    # compaction is disabled not to purge data
    query = """
        CREATE TABLE cf1 (
            key text,
            c1 text,
            c2 text,
            PRIMARY KEY (key, c1)
        )
        WITH gc_grace_seconds=1
        AND compaction = {'class': 'SizeTieredCompactionStrategy', 'enabled': 'false'};
    """
    session.execute(query)
    time.sleep(.5)
    query = """
        CREATE TABLE cf2 (
            key text,
            c1 text,
            c2 text,
            PRIMARY KEY (key, c1)
        )
        WITH compaction = {'class': 'SizeTieredCompactionStrategy', 'enabled': 'false'};
    """
    session.execute(query)
    time.sleep(.5)

    # take down node2, so that only node1 has gc-able data
    node2.stop(wait_other_notice=True)
    for cf in ['cf1', 'cf2']:
        # insert some data
        for i in range(0, 10):
            for j in range(0, 1000):
                query = SimpleStatement("INSERT INTO {} (key, c1, c2) VALUES ('k{}', 'v{}', 'value')".format(cf, i, j), consistency_level=ConsistencyLevel.ONE)
                session.execute(query)
        node1.flush()
        # delete those data, half with row tombstone, and the rest with cell range tombstones
        for i in range(0, 5):
            query = SimpleStatement("DELETE FROM {} WHERE key='k{}'".format(cf, i), consistency_level=ConsistencyLevel.ONE)
            session.execute(query)
        node1.flush()
        for i in range(5, 10):
            for j in range(0, 1000):
                query = SimpleStatement("DELETE FROM {} WHERE key='k{}' AND c1='v{}'".format(cf, i, j), consistency_level=ConsistencyLevel.ONE)
                session.execute(query)
        node1.flush()

    # sleep until gc grace seconds pass so that cf1 can be dropped
    time.sleep(2)

    # bring up node2 and repair
    node2.start(wait_for_binary_proto=True, wait_other_notice=True)
    node2.repair(_repair_options(self.cluster.version(), ks='ks', sequential=sequential))

    # check no rows will be returned
    for cf in ['cf1', 'cf2']:
        for i in range(0, 10):
            query = SimpleStatement("SELECT c1, c2 FROM {} WHERE key='k{}'".format(cf, i), consistency_level=ConsistencyLevel.ALL)
            res = list(session.execute(query))
            assert len([x for x in res if len(x) != 0]) == 0, res

    # check log for no repair happened for gcable data
    out_of_sync_logs = node2.grep_log("{} and {} have ([0-9]+) range\(s\) out of sync for cf1".format(cluster.address_regex(), cluster.address_regex()))
    assert len(out_of_sync_logs) == 0, "GC-able data does not need to be repaired with empty data: " + str([elt[0] for elt in out_of_sync_logs])
    # check log for actual repair for non gcable data
    out_of_sync_logs = node2.grep_log("{} and {} have ([0-9]+) range\(s\) out of sync for cf2".format(cluster.address_regex(), cluster.address_regex()))
    assert len(out_of_sync_logs) > 0, "Non GC-able data should be repaired"
def _range_tombstone_digest(self, sequential):
    """
    Duplicate range tombstones on the same partition and interval must not
    cause a digest mismatch, as long as the most recent tombstone is present
    on every replica.
    @jira_ticket CASSANDRA-11349.
    """
    def mutate(session, node):
        # Write the tombstone once and flush it to disk...
        session.execute("delete from table1 where c1 = 'a' and c2 = 'b'")
        node.flush()
        # ...then write the identical tombstone again; after the repair-time
        # flush, node1 holds two copies and node2 holds one.
        session.execute("delete from table1 where c1 = 'a' and c2 = 'b'")

    self._repair_digest(sequential, mutate)
def _cell_shadowed_by_range_tombstone(self, sequential):
    """
    Cells shadowed by a range tombstone must not affect repair digests,
    given the tombstone is present on all nodes.
    @jira_ticket CASSANDRA-11349.
    """
    def mutate(session, node):
        # Flush a live cell to disk, then shadow it with a range tombstone.
        session.execute("INSERT INTO table1 (c1, c2, c3, c4) VALUES ('a', 'b', 'c', 1)")
        node.flush()
        session.execute("DELETE FROM table1 WHERE c1 = 'a' AND c2 = 'b'")

    self._repair_digest(sequential, mutate)
def _range_tombstone_shadowed_by_range_tombstone(self, sequential):
    """
    Range tombstones shadowed by wider range tombstones must not affect
    repair digests.
    @jira_ticket CASSANDRA-11349.
    """
    def mutate(session, node):
        # Narrow tombstone, flushed...
        session.execute("DELETE FROM table1 WHERE c1 = 'a' AND c2 = 'b' AND c3 = 'c'")
        node.flush()
        # ...shadowed by a wider tombstone over the whole c2 range, flushed...
        session.execute("DELETE FROM table1 WHERE c1 = 'a' AND c2 = 'b'")
        node.flush()
        # ...plus two more narrow tombstones inside the shadowed range.
        session.execute("DELETE FROM table1 WHERE c1 = 'a' AND c2 = 'b' AND c3 = 'd'")
        node.flush()
        session.execute("DELETE FROM table1 WHERE c1 = 'a' AND c2 = 'b' AND c3 = 'a'")

    self._repair_digest(sequential, mutate)
def _repair_digest(self, sequential, populate):
    """
    Shared driver for the digest tests: build a 2-node RF=2 cluster, let
    `populate(session, node1)` create the mutation pattern under test, run a
    repair from node2, and assert no range was reported out of sync.

    @param sequential If the repair should be sequential [vs parallel]
    @param populate Callable(session, node1) that writes the test data
    """
    cluster = self.cluster
    cluster.populate(2)
    # No hints and batch commitlog so the nodes only converge via repair.
    cluster.set_configuration_options(values={'hinted_handoff_enabled': False})
    cluster.set_batch_commitlog(enabled=True)
    cluster.start()
    node1, node2 = cluster.nodelist()
    session = self.patient_cql_connection(node1)
    # create keyspace with RF=2 to be able to be repaired
    create_ks(session, 'ks', 2)
    # compaction disabled so sstables keep the exact tombstone layout written.
    query = """
        CREATE TABLE IF NOT EXISTS table1 (
            c1 text,
            c2 text,
            c3 text,
            c4 float,
            PRIMARY KEY (c1, c2, c3)
        )
        WITH compaction = {'class': 'SizeTieredCompactionStrategy', 'enabled': 'false'};
    """
    session.execute(query)

    populate(session, node1)

    node2.repair(_repair_options(self.cluster.version(), ks='ks', sequential=sequential))

    # check log for no repair happened for gcable data
    out_of_sync_logs = node2.grep_log("{} and {} have ([0-9]+) range\(s\) out of sync for table1".format(cluster.address_regex(), cluster.address_regex()))
    assert len(out_of_sync_logs) == 0, "Digest mismatch for range tombstone: {}".format(str([elt[0] for elt in out_of_sync_logs]))
def test_local_dc_repair(self):
    """
    * Set up a multi DC cluster
    * Perform a -local repair on one DC
    * Assert only nodes in that DC are repaired
    """
    cluster = self._setup_multi_dc()
    node1 = cluster.nodes["node1"]
    node2 = cluster.nodes["node2"]
    logger.debug("starting repair...")
    # -local restricts the repair to node1's own datacenter (dc1).
    opts = ["-local"]
    opts += _repair_options(self.cluster.version(), ks="ks")
    node1.repair(opts)

    # Verify that only nodes in dc1 are involved in repair
    out_of_sync_logs = node1.grep_log("{} and {} have ([0-9]+) range\(s\) out of sync".format(cluster.address_regex(), cluster.address_regex()))
    # Only the node1/node2 pair (both in dc1) should have been compared.
    assert len(out_of_sync_logs) == 1, "Lines matching: {}".format(len(out_of_sync_logs))

    line, m = out_of_sync_logs[0]
    num_out_of_sync_ranges, out_of_sync_nodes = m.group(3), {m.group(1), m.group(2)}

    assert int(num_out_of_sync_ranges) == 1, "Expecting 1 range out of sync for {}, but saw {}".format(out_of_sync_nodes, num_out_of_sync_ranges)
    valid_out_of_sync_pairs = {node1.address(), node2.address()}
    assert out_of_sync_nodes == valid_out_of_sync_pairs, "Unrelated node found in local repair: {}, expected {}".format(out_of_sync_nodes, valid_out_of_sync_pairs)

    # Check node2 now has the key
    self.check_rows_on_node(node2, 2001, found=[1000], restart=False)
def test_dc_repair(self):
    """
    * Set up a multi DC cluster
    * Perform a -dc repair on two dc's
    * Assert only nodes on those dcs were repaired
    """
    cluster = self._setup_multi_dc()
    node1 = cluster.nodes["node1"]
    node2 = cluster.nodes["node2"]
    node3 = cluster.nodes["node3"]
    logger.debug("starting repair...")
    # Restrict the repair to dc1 and dc2; dc3 (node4) must not participate.
    opts = ["-dc", "dc1", "-dc", "dc2"]
    opts += _repair_options(self.cluster.version(), ks="ks")
    node1.repair(opts)

    # Verify that only nodes in dc1 and dc2 are involved in repair
    # (raw string so the regex escapes aren't mangled by Python)
    out_of_sync_logs = node1.grep_log(r"{} and {} have ([0-9]+) range\(s\) out of sync".format(cluster.address_regex(), cluster.address_regex()))
    assert len(out_of_sync_logs) == 2, "Lines matching: " + str([elt[0] for elt in out_of_sync_logs])
    valid_out_of_sync_pairs = [{node1.address(), node2.address()},
                               {node2.address(), node3.address()}]

    for line, m in out_of_sync_logs:
        num_out_of_sync_ranges, out_of_sync_nodes = m.group(3), {m.group(1), m.group(2)}
        assert int(num_out_of_sync_ranges) == 1, "Expecting 1 range out of sync for {}, but saw {}".format(out_of_sync_nodes, num_out_of_sync_ranges)
        # Bug fix: the original `assert out_of_sync_nodes, ...` asserted a
        # non-empty set (always truthy) and could never fail on a mismatch.
        assert out_of_sync_nodes in valid_out_of_sync_pairs, str(out_of_sync_nodes)

    # Check node2 now has the key
    self.check_rows_on_node(node2, 2001, found=[1000], restart=False)
def test_dc_parallel_repair(self):
    """
    * Set up a multi DC cluster
    * Perform a -dc repair on two dc's, with -dcpar
    * Assert only nodes on those dcs were repaired
    """
    cluster = self._setup_multi_dc()
    node1 = cluster.nodes["node1"]
    node2 = cluster.nodes["node2"]
    node3 = cluster.nodes["node3"]
    logger.debug("starting repair...")
    # Repair dc1 and dc2 with datacenter-parallel mode.
    opts = ["-dc", "dc1", "-dc", "dc2", "-dcpar"]
    opts += _repair_options(self.cluster.version(), ks="ks", sequential=False)
    node1.repair(opts)

    # Verify that only nodes in dc1 and dc2 are involved in repair
    # (raw string so the regex escapes aren't mangled by Python)
    out_of_sync_logs = node1.grep_log(r"{} and {} have ([0-9]+) range\(s\) out of sync".format(cluster.address_regex(), cluster.address_regex()))
    assert len(out_of_sync_logs) == 2, "Lines matching: " + str([elt[0] for elt in out_of_sync_logs])
    valid_out_of_sync_pairs = [{node1.address(), node2.address()},
                               {node2.address(), node3.address()}]

    for line, m in out_of_sync_logs:
        num_out_of_sync_ranges, out_of_sync_nodes = m.group(3), {m.group(1), m.group(2)}
        assert int(num_out_of_sync_ranges) == 1, "Expecting 1 range out of sync for {}, but saw {}".format(out_of_sync_nodes, num_out_of_sync_ranges)
        # Bug fix: the original `assert out_of_sync_nodes, ...` asserted a
        # non-empty set (always truthy) and could never fail on a mismatch.
        assert out_of_sync_nodes in valid_out_of_sync_pairs, str(out_of_sync_nodes)

    # Check node2 now has the key
    self.check_rows_on_node(node2, 2001, found=[1000], restart=False)

    # Check the repair was a dc parallel repair
    if self.cluster.version() >= '2.2':
        assert len(node1.grep_log('parallelism: dc_parallel')) == 1, str(node1.grep_log('parallelism'))
    else:
        assert len(node1.grep_log('parallelism=PARALLEL')) == 1, str(node1.grep_log('parallelism'))
def _setup_multi_dc(self):
    """
    Sets up 3 DCs (2 nodes in 'dc1', and one each in 'dc2' and 'dc3').
    After set up, node2 in dc1 lacks some data and needs to be repaired.

    @return The started cluster object.
    """
    cluster = self.cluster
    # Disable hinted handoff and set batch commit log so this doesn't
    # interfere with the test (this must be after the populate)
    cluster.set_configuration_options(values={'hinted_handoff_enabled': False})
    cluster.set_batch_commitlog(enabled=True)
    logger.debug("Starting cluster..")
    # populate 2 nodes in dc1, and one node each in dc2 and dc3
    cluster.populate([2, 1, 1]).start(wait_for_binary_proto=True)
    node1, node2, node3, node4 = cluster.nodelist()
    session = self.patient_cql_connection(node1)
    # One replica per DC plus a second in dc1, so CL THREE can succeed
    # while node2 (dc1's second replica) is down.
    session.execute("CREATE KEYSPACE ks WITH replication = {'class': 'NetworkTopologyStrategy', 'dc1': 2, 'dc2': 1, 'dc3':1}")
    session.execute("USE ks")
    if cluster.version() < '4.0':
        create_cf(session, 'cf', read_repair=0.0, columns={'c1': 'text', 'c2': 'text'})
    else:
        create_cf(session, 'cf', columns={'c1': 'text', 'c2': 'text'})
    # Insert 1000 keys, kill node 2, insert 1 key, restart node 2, insert 1000 more keys
    logger.debug("Inserting data...")
    insert_c1c2(session, n=1000, consistency=ConsistencyLevel.ALL)
    node2.flush()
    node2.stop(wait_other_notice=True)
    insert_c1c2(session, keys=(1000, ), consistency=ConsistencyLevel.THREE)
    node2.start(wait_for_binary_proto=True, wait_other_notice=True)
    node1.watch_log_for_alive(node2)
    insert_c1c2(session, keys=list(range(1001, 2001)), consistency=ConsistencyLevel.ALL)
    cluster.flush()

    # Verify that only node2 has only 2000 keys and others have 2001 keys
    logger.debug("Checking data...")
    self.check_rows_on_node(node2, 2000, missings=[1000])
    for node in [node1, node3, node4]:
        self.check_rows_on_node(node, 2001, found=[1000])
    return cluster
@since('2.2')
def parallel_table_repair_noleak(self):
    """
    @jira_ticket CASSANDRA-11215
    Tests that multiple parallel repairs on the same table isn't
    causing reference leaks.

    NOTE(review): this method lacks the `test_` prefix, so pytest will not
    collect it — confirm whether that is intentional (e.g. a known-flaky
    test deliberately disabled) or a bug.
    """
    # All of these failures are expected from the concurrent repairs;
    # suppress them from the post-test log scan.
    self.fixture_dtest_setup.ignore_log_patterns = [
        "Cannot start multiple repair sessions over the same sstables",  # The message we are expecting
        "Validation failed in",  # Expecting validation to fail
        "RMI Runtime",  # JMX Repair failures
        "Session completed with the following error",  # The nodetool repair error
        "ValidationExecutor",  # Errors by the validation executor
        "RepairJobTask"  # Errors by the repair job task
    ]
    cluster = self.cluster
    logger.debug("Starting cluster..")
    cluster.populate([3]).start(wait_for_binary_proto=True)
    node1, node2, node3 = cluster.nodelist()
    node1.stress(stress_options=['write', 'n=10k', 'no-warmup', 'cl=ONE', '-schema', 'replication(factor=3)', '-rate', 'threads=50'])
    # Start multiple repairs in parallel
    threads = []
    for i in range(3):
        t = threading.Thread(target=node1.nodetool, args=("repair keyspace1 standard1",))
        threads.append(t)
        t.start()
    # Wait for the repairs to finish
    for t in threads:
        t.join()
    found_message = False
    # All nodes should reject multiple repairs and have no reference leaks
    for node in cluster.nodelist():
        if len(node.grep_log("Cannot start multiple repair sessions over the same sstables")) > 0:
            found_message = True
            break
    assert found_message
@pytest.mark.no_vnodes
def test_token_range_repair(self):
    """
    Test repair using the -st and -et options
    * Launch a three node cluster
    * Insert some data at RF 2
    * Shut down node2, insert more data, restore node2
    * Issue a repair on a range that only belongs to node1
    * Verify that nodes 1 and 2, and only nodes 1+2, are repaired
    """
    cluster = self.cluster
    # No hints and batch commitlog so node2's missed writes stay missed.
    cluster.set_configuration_options(values={'hinted_handoff_enabled': False})
    cluster.set_batch_commitlog(enabled=True)
    logger.debug("Starting cluster..")
    cluster.populate(3).start(wait_for_binary_proto=True)
    node1, node2, node3 = cluster.nodelist()
    # (node3.initial_token, node1.initial_token] is the range node1 owns.
    self._parameterized_range_repair(repair_opts=['-st', str(node3.initial_token), '-et', str(node1.initial_token)])
@pytest.mark.no_vnodes
def test_token_range_repair_with_cf(self):
    """
    @jira_ticket CASSANDRA-11866
    Test repair using the -st and -et, and -cf options
    * Launch a three node cluster
    * Insert some data at RF 2
    * Shut down node2, insert more data, restore node2
    * Issue a repair on a range that only belongs to node1 on the wrong cf
    * Verify that the data did not get repaired
    * Issue a repair on a range that belongs to the right cf
    * Verify that the data was repaired
    """
    cluster = self.cluster
    # Hinted handoff could repair node2 behind our back; batch commitlog
    # keeps the data durable across the node2 restart below.
    cluster.set_configuration_options(values={'hinted_handoff_enabled': False})
    cluster.set_batch_commitlog(enabled=True)
    logger.debug("Starting cluster..")
    cluster.populate(3).start(wait_for_binary_proto=True)
    node1, node2, node3 = cluster.nodelist()
    # Insert data, kill node 2, insert more data, restart node 2, insert another set of data
    logger.debug("Inserting data...")
    node1.stress(['write', 'n=1k', 'no-warmup', 'cl=ALL', '-schema', 'replication(factor=2)', '-rate', 'threads=30'])
    node2.flush()
    node2.stop(wait_other_notice=True)
    # These writes miss node2 entirely, guaranteeing out-of-sync ranges.
    node1.stress(['write', 'n=1K', 'no-warmup', 'cl=ONE', '-schema', 'replication(factor=2)', '-rate', 'threads=30', '-pop', 'seq=20..40K'])
    node2.start(wait_for_binary_proto=True, wait_other_notice=True)
    node1.stress(['write', 'n=1K', 'no-warmup', 'cl=ALL', '-schema', 'replication(factor=2)', '-rate', 'threads=30', '-pop', 'seq=40..60K'])
    cluster.flush()
    # Repair only the range node 1 owns on the wrong CF, assert everything is still broke
    opts = ['-st', str(node3.initial_token), '-et', str(node1.initial_token), ]
    opts += _repair_options(self.cluster.version(), ks='keyspace1', cf='counter1', sequential=False)
    node1.repair(opts)
    assert len(node1.grep_log('are consistent for standard1')) == 0, "Nodes 1 and 2 should not be consistent."
    assert len(node3.grep_log('Repair command')) == 0, "Node 3 should not have been involved in the repair."
    out_of_sync_logs = node1.grep_log("{} and {} have ([0-9]+) range\(s\) out of sync".format(cluster.address_regex(), cluster.address_regex()))
    assert len(out_of_sync_logs) == 0, "We repaired the wrong CF == so things should still be broke"
    # Repair only the range node 1 owns on the right CF, assert everything is fixed
    opts = ['-st', str(node3.initial_token), '-et', str(node1.initial_token), ]
    opts += _repair_options(self.cluster.version(), ks='keyspace1', cf='standard1', sequential=False)
    node1.repair(opts)
    assert len(node1.grep_log('are consistent for standard1')) == 0, "Nodes 1 and 2 should not be consistent."
    assert len(node3.grep_log('Repair command')) == 0, "Node 3 should not have been involved in the repair."
    out_of_sync_logs = node1.grep_log("{} and {} have ([0-9]+) range\(s\) out of sync".format(cluster.address_regex(), cluster.address_regex()))
    _, matches = out_of_sync_logs[0]
    out_of_sync_nodes = {matches.group(1), matches.group(2)}
    valid_out_of_sync_pairs = [{node1.address(), node2.address()}]
    # BUG FIX: the original `assert out_of_sync_nodes, valid_out_of_sync_pairs in str(...)`
    # asserted only that `out_of_sync_nodes` was truthy — everything after the
    # comma was just the assert *message*. Assert the actual membership.
    assert out_of_sync_nodes in valid_out_of_sync_pairs, str(out_of_sync_nodes)
@pytest.mark.no_vnodes
def test_partitioner_range_repair(self):
    """
    Test repair using the -pr option
    * Launch a three node cluster
    * Insert some data at RF 2
    * Shut down node2, insert more data, restore node2
    * Issue a repair on a range that only belongs to node1
    * Verify that nodes 1 and 2, and only nodes 1+2, are repaired
    """
    cluster = self.cluster
    # Hinted handoff could repair replicas behind our back; batch commitlog
    # keeps writes durable across the node restart done by the helper.
    cluster.set_configuration_options(values={'hinted_handoff_enabled': False})
    cluster.set_batch_commitlog(enabled=True)
    logger.debug("Starting cluster..")
    cluster.populate(3).start(wait_for_binary_proto=True)
    node1, node2, node3 = cluster.nodelist()
    # -pr restricts the repair to the coordinator's primary range.
    self._parameterized_range_repair(repair_opts=['-pr'])
@since('3.10')
@pytest.mark.no_vnodes
def test_pull_repair(self):
    """
    Test repair using the --pull option
    @jira_ticket CASSANDRA-9876
    * Launch a three node cluster
    * Insert some data at RF 2
    * Shut down node2, insert more data, restore node2
    * Issue a pull repair on a range that only belongs to node1
    * Verify that nodes 1 and 2, and only nodes 1+2, are repaired
    * Verify that node1 only received data
    * Verify that node2 only sent data
    """
    cluster = self.cluster
    cluster.set_configuration_options(values={'hinted_handoff_enabled': False})
    cluster.set_batch_commitlog(enabled=True)
    logger.debug("Starting cluster..")
    cluster.populate(3).start(wait_for_binary_proto=True)
    node1, node2, node3 = cluster.nodelist()
    # --in-hosts takes the nodes' binary (native protocol) addresses.
    node1_address = node1.network_interfaces['binary'][0]
    node2_address = node2.network_interfaces['binary'][0]
    self._parameterized_range_repair(repair_opts=['--pull', '--in-hosts', node1_address + ',' + node2_address, '-st', str(node3.initial_token), '-et', str(node1.initial_token)])
    # Node 1 should only receive files (as we ran a pull repair on node1)
    assert len(node1.grep_log("Receiving [1-9][0-9]* files")) > 0
    assert len(node1.grep_log("sending [1-9][0-9]* files")) == 0
    assert len(node1.grep_log("sending 0 files")) > 0
    # Node 2 should only send files (as we ran a pull repair on node1)
    assert len(node2.grep_log("Receiving [1-9][0-9]* files")) == 0
    assert len(node2.grep_log("Receiving 0 files")) > 0
    assert len(node2.grep_log("sending [1-9][0-9]* files")) > 0
def _parameterized_range_repair(self, repair_opts):
    """
    @param repair_opts A list of strings which represent cli args to nodetool repair
    * Launch a three node cluster
    * Insert some data at RF 2
    * Shut down node2, insert more data, restore node2
    * Issue a repair on a range that only belongs to node1, using repair_opts
    * Verify that nodes 1 and 2, and only nodes 1+2, are repaired
    """
    cluster = self.cluster
    node1, node2, node3 = cluster.nodelist()
    # Insert data, kill node 2, insert more data, restart node 2, insert another set of data
    logger.debug("Inserting data...")
    node1.stress(['write', 'n=20K', 'no-warmup', 'cl=ALL', '-schema', 'replication(factor=2)', '-rate', 'threads=30'])
    node2.flush()
    node2.stop(wait_other_notice=True)
    # These writes miss node2 entirely, guaranteeing out-of-sync ranges.
    node1.stress(['write', 'n=20K', 'no-warmup', 'cl=ONE', '-schema', 'replication(factor=2)', '-rate', 'threads=30', '-pop', 'seq=20..40K'])
    node2.start(wait_for_binary_proto=True, wait_other_notice=True)
    node1.stress(['write', 'n=20K', 'no-warmup', 'cl=ALL', '-schema', 'replication(factor=2)', '-rate', 'threads=30', '-pop', 'seq=40..60K'])
    cluster.flush()
    # Repair only the range node 1 owns
    opts = repair_opts
    opts += _repair_options(self.cluster.version(), ks='keyspace1', cf='standard1', sequential=False)
    node1.repair(opts)
    assert len(node1.grep_log('are consistent for standard1')) == 0, "Nodes 1 and 2 should not be consistent."
    assert len(node3.grep_log('Repair command')) == 0, "Node 3 should not have been involved in the repair."
    out_of_sync_logs = node1.grep_log("{} and {} have ([0-9]+) range\(s\) out of sync".format(cluster.address_regex(), cluster.address_regex()))
    _, matches = out_of_sync_logs[0]
    out_of_sync_nodes = {matches.group(1), matches.group(2)}
    valid_out_of_sync_pairs = [{node1.address(), node2.address()}]
    # BUG FIX: the original `assert out_of_sync_nodes, valid_out_of_sync_pairs in str(...)`
    # only checked that `out_of_sync_nodes` was non-empty — everything after the
    # comma was the assert *message*. Check the real membership condition.
    assert out_of_sync_nodes in valid_out_of_sync_pairs, str(out_of_sync_nodes)
@since('2.2')
def test_trace_repair(self):
    """
    * Launch a three node cluster
    * Insert some data at RF 2
    * Shut down node2, insert more data, restore node2
    * Issue a repair on to node1, setting job threads to 2 and with tracing enabled
    * Check the trace data was written, and that the right job thread count was used
    """
    cluster = self.cluster
    cluster.set_configuration_options(values={'hinted_handoff_enabled': False})
    cluster.set_batch_commitlog(enabled=True)
    logger.debug("Starting cluster..")
    cluster.populate(3).start(wait_for_binary_proto=True)
    node1, node2, node3 = cluster.nodelist()
    logger.debug("Inserting data...")
    node1.stress(['write', 'n=20K', 'no-warmup', 'cl=ALL', '-schema', 'replication(factor=2)', '-rate', 'threads=30'])
    node2.flush()
    node2.stop(wait_other_notice=True)
    # Writes taken while node2 is down give the repair real work to trace.
    node1.stress(['write', 'n=20K', 'no-warmup', 'cl=ONE', '-schema', 'replication(factor=2)', '-rate', 'threads=30', '-pop', 'seq=20..40K'])
    node2.start(wait_for_binary_proto=True, wait_other_notice=True)
    cluster.flush()
    job_thread_count = '2'
    # -tr enables tracing into system_traces; -j sets the job thread count.
    opts = ['-tr', '-j', job_thread_count]
    opts += _repair_options(self.cluster.version(), ks='keyspace1', cf='standard1', sequential=False)
    node1.repair(opts)
    time.sleep(5)  # Give the trace table some time to populate
    session = self.patient_cql_connection(node1)
    rows = list(session.execute("SELECT activity FROM system_traces.events"))
    # This check assumes that the only (or at least first) thing to write to `system_traces.events.activity` is
    # the repair task triggered in the test.
    assert 'job threads: {}'.format(job_thread_count) in rows[0][0], \
        'Expected {} job threads in repair options. Instead we saw {}'.format(job_thread_count, rows[0][0])
@since('2.2')
def test_thread_count_repair(self):
    """
    * Launch a three node cluster
    * Insert some data at RF 2
    * Shut down node2, insert more data, restore node2
    * Issue a repair on to node1, setting job threads
    * Check the right job thread count was used
    * Repeat steps 2 through 5 with all job count options
    """
    cluster = self.cluster
    cluster.set_configuration_options(values={'hinted_handoff_enabled': False})
    cluster.set_batch_commitlog(enabled=True)
    logger.debug("Starting cluster..")
    cluster.populate(3).start(wait_for_binary_proto=True)
    node1, node2, node3 = cluster.nodelist()
    # Valid job thread counts: 1, 2, 3, and 4
    for job_thread_count in range(1, 5):
        logger.debug("Inserting data...")
        # Each iteration writes a disjoint key range (via -pop seq=...).
        node1.stress(['write', 'n=2K', 'no-warmup', 'cl=ALL', '-schema', 'replication(factor=2)', '-rate',
                      'threads=30', '-pop', 'seq={}..{}K'.format(2 * (job_thread_count - 1), 2 * job_thread_count)])
        node2.flush()
        node2.stop(wait_other_notice=True)
        node1.stress(['write', 'n=2K', 'no-warmup', 'cl=ONE', '-schema', 'replication(factor=2)', '-rate',
                      'threads=30', '-pop', 'seq={}..{}K'.format(2 * (job_thread_count), 2 * (job_thread_count + 1))])
        node2.start(wait_for_binary_proto=True, wait_other_notice=True)
        cluster.flush()
        session = self.patient_cql_connection(node1)
        # Truncate so the assertion below only sees this iteration's trace.
        session.execute("TRUNCATE system_traces.events")
        opts = ['-tr', '-j', str(job_thread_count)]
        opts += _repair_options(self.cluster.version(), ks='keyspace1', cf='standard1', sequential=False)
        node1.repair(opts)
        time.sleep(5)  # Give the trace table some time to populate
        rows = list(session.execute("SELECT activity FROM system_traces.events"))
        # This check assumes that the only (or at least first) thing to write to `system_traces.events.activity` is
        # the repair task triggered in the test.
        assert 'job threads: {}'.format(job_thread_count) in rows[0][0], \
            'Expected {} job threads in repair options. Instead we saw {}'.format(job_thread_count, rows[0][0])
@pytest.mark.no_vnodes
def test_multiple_concurrent_repairs(self):
    """
    @jira_ticket CASSANDRA-11451
    Make sure we can run sub range repairs in parallel - and verify that we actually do repair
    """
    cluster = self.cluster
    cluster.set_configuration_options(values={'hinted_handoff_enabled': False})
    cluster.populate(3).start(wait_for_binary_proto=True)
    node1, node2, node3 = cluster.nodelist()
    # All writes happen while node2 is down, so node2 only gets the data via repair.
    node2.stop(wait_other_notice=True)
    node1.stress(['write', 'n=1M', 'no-warmup', '-schema', 'replication(factor=3)', '-rate', 'threads=30'])
    node2.start(wait_for_binary_proto=True)
    # Three concurrent full repairs, one sub-range each, together covering the whole ring.
    t1 = threading.Thread(target=node1.nodetool, args=('repair keyspace1 standard1 -full -st {} -et {}'.format(str(node3.initial_token), str(node1.initial_token)),))
    t2 = threading.Thread(target=node2.nodetool, args=('repair keyspace1 standard1 -full -st {} -et {}'.format(str(node1.initial_token), str(node2.initial_token)),))
    t3 = threading.Thread(target=node3.nodetool, args=('repair keyspace1 standard1 -full -st {} -et {}'.format(str(node2.initial_token), str(node3.initial_token)),))
    t1.start()
    t2.start()
    t3.start()
    t1.join()
    t2.join()
    t3.join()
    # With node1 and node3 down, a clean full read from node2 proves it was repaired.
    node1.stop(wait_other_notice=True)
    node3.stop(wait_other_notice=True)
    _, _, rc = node2.stress(['read', 'n=1M', 'no-warmup', '-rate', 'threads=30'], whitelist=True)
    assert rc == 0
@since('4.0')
def test_wide_row_repair(self):
    """
    @jira_ticket CASSANDRA-13899
    Make sure compressed vs uncompressed blocks are handled correctly when stream decompressing
    """
    cluster = self.cluster
    cluster.set_configuration_options(values={'hinted_handoff_enabled': False})
    cluster.populate(2).start(wait_for_binary_proto=True)
    node1, node2 = cluster.nodelist()
    # All writes happen while node2 is down, forcing repair to stream everything.
    node2.stop(wait_other_notice=True)
    profile_path = os.path.join(os.getcwd(), 'stress_profiles/repair_wide_rows.yaml')
    logger.info(("yaml = " + profile_path))
    # Wide rows (100K visits per partition) exercise multi-block stream decompression.
    node1.stress(['user', 'profile=' + profile_path, 'n=50', 'ops(insert=1)', 'no-warmup', '-rate', 'threads=8',
                  '-insert', 'visits=FIXED(100K)', 'revisit=FIXED(100K)'])
    node2.start(wait_for_binary_proto=True)
    node2.repair()
def test_dead_coordinator(self):
    """
    @jira_ticket CASSANDRA-11824
    Make sure parent repair session is cleared out if the repair coordinator dies
    """
    cluster = self.cluster
    cluster.set_configuration_options(values={'hinted_handoff_enabled': False})
    cluster.populate(3).start(wait_for_binary_proto=True)
    node1, node2, node3 = cluster.nodelist()
    node1.stress(['write', 'n=100k', '-schema', 'replication(factor=3)', '-rate', 'threads=30'])

    def run_repair():
        # The repair is expected to fail once the coordinator is killed below.
        try:
            if cluster.version() >= "2.2":
                node1.repair()
            else:
                node1.nodetool('repair keyspace1 standard1 -inc -par')
        except ToolError:
            logger.debug("got expected exception during repair, ignoring")

    t1 = threading.Thread(target=run_repair)
    t1.start()
    # Wait until the repair is actually in flight before killing the coordinator.
    if cluster.version() > "2.2":
        node2.watch_log_for('Validating ValidationRequest', filename='debug.log')
    else:
        node1.watch_log_for('requesting merkle trees', filename='system.log')
    time.sleep(2)
    logger.debug("stopping node1")
    node1.stop(gently=False, wait_other_notice=True)
    t1.join()
    logger.debug("starting node1 - first repair should have failed")
    node1.start(wait_for_binary_proto=True, wait_other_notice=True)
    # A second repair succeeding shows the dead parent session was cleared.
    logger.debug("running second repair")
    if cluster.version() >= "2.2":
        node1.repair()
    else:
        node1.nodetool('repair keyspace1 standard1 -inc -par')
@since('2.2')
def test_dead_sync_initiator(self):
    """
    @jira_ticket CASSANDRA-12901
    Kill the sync initiator (node2) mid-repair; see _test_failure_during_repair.
    """
    self._test_failure_during_repair(phase='sync', initiator=True)

@since('2.2')
def test_dead_sync_participant(self):
    """
    @jira_ticket CASSANDRA-12901
    Kill a sync participant (node3) mid-repair; see _test_failure_during_repair.
    """
    self._test_failure_during_repair(phase='sync', initiator=False,)

@since('2.2', max_version='4')
def test_failure_during_anticompaction(self):
    """
    @jira_ticket CASSANDRA-12901
    Kill node3 during anticompaction (phase removed in 4.0, hence max_version).
    """
    self._test_failure_during_repair(phase='anticompaction',)

@since('2.2')
def test_failure_during_validation(self):
    """
    @jira_ticket CASSANDRA-12901
    Kill node3 during the validation phase; see _test_failure_during_repair.
    """
    self._test_failure_during_repair(phase='validation')
def _test_failure_during_repair(self, phase, initiator=False):
    """
    Shared driver for the CASSANDRA-12901 tests: kill a node during the given
    repair phase and assert the coordinator's repair command still terminates.

    @param phase one of 'sync', 'anticompaction' or 'validation'; selects the
        byteman sleep script and the log line used to time the kill
    @param initiator when phase is 'sync', kill the sync initiator (node2)
        instead of a participant (node3)
    """
    cluster = self.cluster
    # We are not interested in specific errors, but
    # that the repair session finishes on node failure without hanging
    self.fixture_dtest_setup.ignore_log_patterns = [
        "Endpoint .* died",
        "Streaming error occurred",
        "StreamReceiveTask",
        "Stream failed",
        "Session completed with the following error",
        "Repair session .* for range .* failed with error",
        "Sync failed between .* and .*",
        "failed to send a stream message/file to peer"
    ]
    # Disable hinted handoff and set batch commit log so this doesn't
    # interfere with the test (this must be after the populate)
    cluster.set_configuration_options(values={'hinted_handoff_enabled': False})
    cluster.set_batch_commitlog(enabled=True)
    logger.debug("Setting up cluster..")
    cluster.populate(3)
    node1, node2, node3 = cluster.nodelist()
    node_to_kill = node2 if (phase == 'sync' and initiator) else node3
    logger.debug("Setting up byteman on {}".format(node_to_kill.name))
    # set up byteman
    node_to_kill.byteman_port = '8100'
    node_to_kill.import_config_files()
    logger.debug("Starting cluster..")
    cluster.start(wait_other_notice=True)
    # Take node3 down while writing so repair has data to sync/validate.
    logger.debug("stopping node3")
    node3.stop(gently=False, wait_other_notice=True)
    self.patient_exclusive_cql_connection(node1)
    logger.debug("inserting data while node3 is down")
    node1.stress(stress_options=['write', 'n=1k',
                                 'no-warmup', 'cl=ONE',
                                 '-schema', 'replication(factor=3)',
                                 '-rate', 'threads=10'])
    logger.debug("bring back node3")
    node3.start(wait_other_notice=True, wait_for_binary_proto=True)
    script = 'stream_sleep.btm' if phase == 'sync' else 'repair_{}_sleep.btm'.format(phase)
    logger.debug("Submitting byteman script to {}".format(node_to_kill.name))
    # Sleep on anticompaction/stream so there will be time for node to be killed
    node_to_kill.byteman_submit(['./byteman/{}'.format(script)])

    def node1_repair():
        # Stash any nodetool failure for post-mortem inspection.
        global nodetool_error
        try:
            node1.nodetool('repair keyspace1 standard1')
        except Exception as e:
            nodetool_error = e

    logger.debug("repair node1")
    # Launch in a external thread so it does not hang process
    t = Thread(target=node1_repair)
    t.start()
    logger.debug("Will kill {} in middle of {}".format(node_to_kill.name, phase))
    # Pick the debug.log line that proves the target phase has started.
    msg_to_wait = 'streaming plan for Repair'
    if phase == 'anticompaction':
        msg_to_wait = 'Got anticompaction request'
    elif phase == 'validation':
        msg_to_wait = 'Validating'
    node_to_kill.watch_log_for(msg_to_wait, filename='debug.log')
    node_to_kill.stop(gently=False, wait_other_notice=True)
    logger.debug("Killed {}, now waiting repair to finish".format(node_to_kill.name))
    t.join(timeout=60)
    assert not t.is_alive(), 'Repair still running after sync {} was killed'\
        .format("initiator" if initiator else "participant")
    if cluster.version() < '4.0' or phase != 'sync':
        # the log entry we're watching for in the sync task came from the
        # anti compaction at the end of the repair, which has been removed in 4.0
        node1.watch_log_for('Endpoint .* died', timeout=60)
    node1.watch_log_for('Repair command .* finished', timeout=60)
# Row container pairing the two repair-history query results.
RepairTableContents = namedtuple(
    'RepairTableContents', ['parent_repair_history', 'repair_history'])
@since('2.2')
@pytest.mark.resource_intensive
class TestRepairDataSystemTable(Tester):
    """
    @jira_ticket CASSANDRA-5839
    Tests the `system_distributed.parent_repair_history` and
    `system_distributed.repair_history` tables by writing thousands of records
    to a cluster, then ensuring these tables are in valid states before and
    after running repair.
    """
    @pytest.fixture(scope='function', autouse=True)
    def fixture_set_cluster_settings(self, fixture_dtest_setup):
        """
        Prepares a cluster for tests of the repair history tables by starting
        a 5-node cluster, then inserting 5000 values with RF=3.
        """
        fixture_dtest_setup.cluster.populate(5).start(wait_for_binary_proto=True)
        self.node1 = self.cluster.nodelist()[0]
        self.session = fixture_dtest_setup.patient_cql_connection(self.node1)
        self.node1.stress(stress_options=['write', 'n=5K', 'no-warmup', 'cl=ONE', '-schema', 'replication(factor=3)'])
        # Flush so the stressed data is on disk before any repair runs.
        fixture_dtest_setup.cluster.flush()

    def repair_table_contents(self, node, include_system_keyspaces=True):
        """
        @param node the node to connect to and query
        @param include_system_keyspaces if truthy, return repair information about all keyspaces. If falsey, filter out keyspaces whose name contains 'system'
        Return a `RepairTableContents` `namedtuple` containing the rows in
        `node`'s `system_distributed.parent_repair_history` and
        `system_distributed.repair_history` tables. If `include_system_keyspaces`,
        include all results. If not `include_system_keyspaces`, filter out
        repair information about system keyspaces, or at least keyspaces with
        'system' in their names.
        """
        session = self.patient_cql_connection(node)

        def execute_with_all(stmt):
            # Read at CL.ALL so every replica agrees on the observed contents.
            return session.execute(SimpleStatement(stmt, consistency_level=ConsistencyLevel.ALL))

        parent_repair_history = execute_with_all('SELECT * FROM system_distributed.parent_repair_history;')
        repair_history = execute_with_all('SELECT * FROM system_distributed.repair_history;')
        if not include_system_keyspaces:
            parent_repair_history = [row for row in parent_repair_history
                                     if 'system' not in row.keyspace_name]
            repair_history = [row for row in repair_history if
                              'system' not in row.keyspace_name]
        return RepairTableContents(parent_repair_history=parent_repair_history,
                                   repair_history=repair_history)

    @pytest.mark.skip(reason='hangs CI')
    def test_initial_empty_repair_tables(self):
        """Before any repair runs, the non-system repair tables should be empty."""
        logger.debug('repair tables:')
        logger.debug(self.repair_table_contents(node=self.node1, include_system_keyspaces=False))
        repair_tables_dict = self.repair_table_contents(node=self.node1, include_system_keyspaces=False)._asdict()
        for table_name, table_contents in list(repair_tables_dict.items()):
            assert not table_contents, '{} is non-empty'.format(table_name)

    def test_repair_parent_table(self):
        """
        Test that `system_distributed.parent_repair_history` is properly populated
        after repair by:
        - running repair on `node` and
        - checking that there are a non-zero number of entries in `parent_repair_history`.
        """
        self.node1.repair()
        parent_repair_history, _ = self.repair_table_contents(node=self.node1, include_system_keyspaces=False)
        assert len(parent_repair_history)

    def test_repair_table(self):
        """
        Test that `system_distributed.repair_history` is properly populated
        after repair by:
        - running repair on `node` and
        - checking that there are a non-zero number of entries in `repair_history`.
        """
        self.node1.repair()
        _, repair_history = self.repair_table_contents(node=self.node1, include_system_keyspaces=False)
        assert len(repair_history)
|
face_detector_pi203_node.py | #!/usr/bin/env python
import rospy
import numpy as np
import math
from duckietown_msgs.msg import Twist2DStamped
from sensor_msgs.msg import CompressedImage, Image
from cv_bridge import CvBridge, CvBridgeError
import cv2
import sys
import time
import threading
class face_detector_wama(object):
    """Duckiebot face-detector node.

    Forwards joystick commands to ~car_cmd while no face is detected in the
    camera stream, and publishes a stop command once a face is seen.
    """
    def __init__(self):
        self.node_name = rospy.get_name()
        # Serializes image processing: frames that arrive while a detection
        # is in flight are dropped in processImage().
        self.thread_lock = threading.Lock()
        self.active = True
        # 0 = no face currently detected, 1 = face detected (robot stopped).
        self.face_detected = 0
        self.bridge = CvBridge()
        # Publication
        # Checkpoint 1: publish topic ~car_cmd, datatype Twist2DStamped
        self.pub_car_cmd = rospy.Publisher("~car_cmd", Twist2DStamped, queue_size=1)
        # Checkpoint 2: publish topic ~image_with_face, datatype Image
        self.pub_image_face = rospy.Publisher("~image_with_face", Image, queue_size=1)
        # Subscription
        # Checkpoint 3: subscribe topic ~joystick_car_cmd, callback cbJoystick
        self.sub_joystick_car_cmd = rospy.Subscriber("~joystick_car_cmd", Twist2DStamped, self.cbJoystick, queue_size=1)
        # Checkpoint 4: subscribe topic ~image (CompressedImage), callback cbImage
        self.sub_image_origin = rospy.Subscriber("~image", CompressedImage, self.cbImage, queue_size=1)
        # safe shutdown
        rospy.on_shutdown(self.custom_shutdown)
        rospy.loginfo("[%s] Initialized " % (rospy.get_name()))

    def custom_shutdown(self):
        """Send a zero-velocity command so the robot stops before shutdown."""
        rospy.loginfo("[%s] Shutting down..." % self.node_name)
        # Send stop command
        car_control_msg = Twist2DStamped()
        car_control_msg.v = 0.0
        car_control_msg.omega = 0.0
        self.publishCmd(car_control_msg)
        rospy.sleep(0.5)  # To make sure that it gets published.
        rospy.loginfo("[%s] Shutdown" % self.node_name)

    def cbImage(self, image_msg):
        """Hand each incoming compressed frame to a daemon worker thread."""
        if not self.active:
            return
        thread = threading.Thread(target=self.processImage, args=(image_msg,))
        thread.setDaemon(True)
        thread.start()

    def processImage(self, image_msg):
        """Run the face detector unless a previous frame is still being processed."""
        # Non-blocking acquire: drop this frame if the detector is busy.
        if not self.thread_lock.acquire(False):
            return
        try:
            self.cbFacedetect(image_msg)
        finally:
            self.thread_lock.release()

    def publishCmd(self, car_cmd_msg):
        """Publish a Twist2DStamped command on ~car_cmd."""
        self.pub_car_cmd.publish(car_cmd_msg)

    def cbJoystick(self, car_cmd_msg):
        """Scenario 1: while no face is detected, pass joystick commands through."""
        if self.face_detected == 0:
            # Build a fresh command message carrying the joystick values.
            car_control_msg = Twist2DStamped()
            car_control_msg.v = car_cmd_msg.v
            car_control_msg.omega = car_cmd_msg.omega
            # BUG FIX: previously published the raw incoming joystick message
            # (car_cmd_msg) instead of the command message built above, which
            # contradicted checkpoint 7 ("publish car control command").
            self.publishCmd(car_control_msg)

    def cbFacedetect(self, image_msg):
        """Detect faces in one frame; stop the robot while any face is visible."""
        # Decompress image and convert ROS image message to cv image
        narr = np.fromstring(image_msg.data, np.uint8)
        image = cv2.imdecode(narr, cv2.CV_LOAD_IMAGE_COLOR)
        # Haar cascade face detector (path is board-specific).
        faceCascade = cv2.CascadeClassifier('/home/ubuntu/duckietown/catkin_ws/src/spring2016_nctu/wama/face_detector/src/haarcascade_frontalface_default.xml')
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        # Detect faces in the image
        faces = faceCascade.detectMultiScale(gray, scaleFactor=2, minNeighbors=5, minSize=(5, 5), flags=cv2.cv.CV_HAAR_SCALE_IMAGE)
        print("Found {0} faces!".format(len(faces)))
        # Draw face detections region proposals in the image
        for (x, y, w, h) in faces:
            cv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0), 2)
        # Convert cv image to ROS image message, keeping the original timestamp
        # so downstream consumers can correlate frames.
        image_msg_out = self.bridge.cv2_to_imgmsg(image, "bgr8")
        image_msg_out.header.stamp = image_msg.header.stamp
        self.pub_image_face.publish(image_msg_out)
        car_control_msg = Twist2DStamped()
        # Scenario 2: face(s) detected -> stop the robot.
        # BUG FIX: the condition was `len(faces) == 10`, which only stopped the
        # robot when exactly ten faces were found; any detection should stop it.
        if len(faces) > 0:
            self.face_detected = 1
            car_control_msg.v = 0
            car_control_msg.omega = 0
            self.publishCmd(car_control_msg)
        # Scenario 1 again: no faces -> clear the flag so cbJoystick resumes
        # forwarding joystick commands.
        if len(faces) == 0:
            self.face_detected = 0
if __name__ == "__main__":
    #Checkpoint 15: Initial a node named "face_detector_X", X= you duckiebot name
    rospy.init_node("face_detector_pi203",anonymous=False)
    face_detector_wama_node = face_detector_wama()
    # Block until shutdown; subscriber callbacks run on ROS-managed threads.
    rospy.spin()
|
pubsubmonitor.py | # pubsubmonitor.py
# ~~~~~~~~~
# This module implements the PubSubMonitor class.
# :authors: Justin Karneges, Konstantin Bokarius.
# :copyright: (c) 2015 by Fanout, Inc.
# :license: MIT, see LICENSE for more details.
import requests
import threading
import json
import copy
import urllib
import time
import socket
import logging
from base64 import b64decode
from ssl import SSLError
from .utilities import _gen_auth_jwt_header, _ensure_unicode
from requests.exceptions import ConnectionError
logger = logging.getLogger(__name__)
try:
from http.client import IncompleteRead
except:
from httplib import IncompleteRead
# The PubSubMonitor class monitors subscriptions to channels via an HTTP interface.
class PubSubMonitor(object):
# Initialize with base stream URI, JWT auth info, and callback used for indicating
# when a subscription event occurs.
def __init__(self, base_stream_uri, auth_jwt_claim=None, auth_jwt_key=None, callback=None):
    """Set up monitor state and start the background stream thread.

    :param base_stream_uri: base URI; the 'subscriptions/stream/' and
        'subscriptions/items/' endpoints are derived from it.
    :param auth_jwt_claim: optional JWT claim dict (deep-copied below).
    :param auth_jwt_key: key used to build the Authorization header.
    :param callback: invoked on subscription events; cleared by close().
    """
    # Normalize the base URI so the endpoint concatenation below works.
    if base_stream_uri[-1:] != '/':
        base_stream_uri += '/'
    self._stream_uri = base_stream_uri + 'subscriptions/stream/'
    self._items_uri = base_stream_uri + 'subscriptions/items/'
    self._auth_jwt_claim = None
    if auth_jwt_claim:
        # Deep-copy so later caller-side mutation can't affect auth headers.
        self._auth_jwt_claim = copy.deepcopy(auth_jwt_claim)
    self._auth_jwt_key = auth_jwt_key
    self._callback = callback
    # Guards _channels / _closed / _stream_thread across threads.
    self._lock = threading.Lock()
    self._requests_session = requests.session()
    self._stream_response = None
    self._channels = set()
    self._last_cursor = None
    self._closed = False
    self._historical_fetch_thread_result = False
    self._historical_fetch_thread = None
    # Signals completion of a historical fetch to the stream thread.
    self._thread_event = threading.Event()
    # Daemon thread: must not keep the process alive on interpreter exit.
    # Started last, after all state above is initialized.
    self._stream_thread = threading.Thread(target=self._run_stream)
    self._stream_thread.daemon = True
    self._stream_thread.start()
# Determine if the specified channel has been subscribed to.
def is_channel_subscribed_to(self, channel):
found_channel = False
self._lock.acquire()
if channel in self._channels:
found_channel = True
self._lock.release()
return found_channel
# Close this instance and block until all threads are complete.
def close(self, blocking=False):
self._lock.acquire()
self._closed = True
self._callback = None
stream_thread = self._stream_thread
self._stream_thread = None
self._lock.release()
if blocking and stream_thread:
stream_thread.join()
# Determine if this instance is closed.
def is_closed(self):
return self._closed
# Run the stream connection.
def _run_stream(self):
    """Main loop of the stream thread: connect (with backoff), kick off a
    historical catch-up, then consume the stream until closed or errored.

    NOTE(review): the original indentation was lost in this copy; the loop
    nesting below is a reconstruction — confirm against the upstream file.
    """
    logger.debug('stream thread started')
    while not self._closed:
        # --- connect phase, with linearly increasing backoff ---
        wait_interval = 0
        retry_connection = True
        while retry_connection:
            time.sleep(wait_interval)
            wait_interval = PubSubMonitor._increase_wait_interval(wait_interval)
            try:
                logger.debug('stream get %s' % self._stream_uri)
                # (connect timeout, read timeout) in seconds.
                timeout = (5,60)
                headers = {}
                headers['Authorization'] = _gen_auth_jwt_header(
                    self._auth_jwt_claim, self._auth_jwt_key)
                self._stream_response = self._requests_session.get(
                    self._stream_uri, headers=headers, stream=True,
                    timeout=timeout)
                # No concern about a race condition here since there's 5 full
                # seconds between the .get() method above returning and the
                # timeout exception being thrown. The lines below are guaranteed
                # to execute within 5 seconds.
                if (self._stream_response.status_code >= 200 and
                        self._stream_response.status_code < 300):
                    retry_connection = False
                elif (self._stream_response.status_code < 500 or
                        self._stream_response.status_code == 501 or
                        self._stream_response.status_code >= 600):
                    # Non-retryable status: give up permanently.
                    self.close()
                    raise ValueError(
                        'pubsubmonitor stream connection resulted in status code: %d' %
                        self._stream_response.status_code)
                else:
                    # Retryable 5xx: loop again with a longer wait.
                    continue
                logger.debug('stream open')
                # Catch up on events missed while disconnected.
                self._try_historical_fetch()
            except (socket.timeout, requests.exceptions.RequestException):
                continue
        # --- consume phase ---
        got_subscribers = False
        while not self._closed:
            try:
                # Wait for the historical fetch to finish before monitoring.
                self._thread_event.wait()
                if not got_subscribers and self._historical_fetch_thread_result:
                    got_subscribers = True
                if not got_subscribers:
                    break
                self._monitor()
                break
            except (socket.timeout, requests.exceptions.Timeout, IncompleteRead):
                logger.debug('stream timed out')
                break
            except (SSLError, OSError, ConnectionError) as e:
                # Some SSL/socket stacks report read timeouts via these types.
                if 'timed out' in str(e):
                    logger.debug('stream timed out')
                    break
                self._callback = None
                raise
            except:
                logger.exception('error processing stream')
        self._stream_response.close()
    logger.debug('stream thread ended')
# Monitor the stream connection.
def _monitor(self):
    """Consume stream lines, handling catch-up and cursor continuity.

    Each JSON line carries an item plus 'cursor' / optional 'prev_cursor'
    fields; a prev_cursor that doesn't match our last cursor means we missed
    events and must re-run a historical fetch.
    """
    for line in self._stream_response.iter_lines(chunk_size=1):
        if self._closed:
            break
        now = time.time()
        # Give up catching up after 60s and force a reconnect.
        if (self._catch_stream_up_to_last_cursor and
                now >= self._catch_stream_up_start_time + 60):
            logger.debug('timed out waiting to catch up')
            break
        # Blank lines are keep-alives.
        if not line:
            continue
        content = json.loads(_ensure_unicode(line))
        last_cursor_parsed = PubSubMonitor._parse_cursor(
            self._last_cursor)
        prev_cursor_parsed = None
        if 'prev_cursor' in content:
            prev_cursor_parsed = PubSubMonitor._parse_cursor(
                content['prev_cursor'])
        if self._catch_stream_up_to_last_cursor:
            # Skip events until one chains onto the cursor the historical
            # fetch left us at.
            if (prev_cursor_parsed and
                    prev_cursor_parsed != last_cursor_parsed):
                continue
            logger.debug('stream caught up to last cursor')
            self._catch_stream_up_to_last_cursor = False
        if (prev_cursor_parsed and
                prev_cursor_parsed != last_cursor_parsed):
            # Gap detected: re-fetch history and wait for it to finish.
            logger.debug('stream cursor mismatch: got=%s expected=%s' % (
                prev_cursor_parsed, last_cursor_parsed))
            self._try_historical_fetch()
            self._thread_event.wait()
            if not self._historical_fetch_thread_result:
                break
        else:
            # In sequence: process the item and advance the cursor.
            self._parse_items([content['item']])
            self._last_cursor = content['cursor']
            logger.debug('last cursor: %s' %
                PubSubMonitor._parse_cursor(self._last_cursor))
# Try to complete the historical fetch.
def _try_historical_fetch(self):
self._thread_event.clear()
self._historical_fetch_thread_result = False
self._historical_fetch_thread = threading.Thread(target=self._run_historical_fetch)
self._historical_fetch_thread.daemon = True
self._historical_fetch_thread.start()
# Run the historical fetch.
def _run_historical_fetch(self):
    """Page through the items endpoint from the last cursor, then arm the
    stream catch-up state. Always sets _thread_event so waiters wake up;
    _historical_fetch_thread_result reports success/failure."""
    try:
        self._last_stream_cursor = None
        logger.debug('catching up')
        items = []
        more_items_available = True
        while more_items_available:
            uri = self._items_uri
            if self._last_cursor:
                # urllib.urlencode on Python 2, urllib.parse.urlencode on Python 3.
                try:
                    uri += "?" + urllib.urlencode({'since': 'cursor:%s' % self._last_cursor})
                except AttributeError:
                    uri += "?" + urllib.parse.urlencode({'since': 'cursor:%s' % self._last_cursor})
            # Retry each page with increasing backoff; give up at 64s.
            wait_interval = 0
            retry_connection = True
            while retry_connection:
                if wait_interval == 64:
                    self._historical_fetch_thread_result = False
                    return
                time.sleep(wait_interval)
                wait_interval = PubSubMonitor._increase_wait_interval(wait_interval)
                try:
                    if self._last_cursor:
                        logger.debug('history get %s (%s)' % (
                            uri, PubSubMonitor._parse_cursor(
                                self._last_cursor)))
                    else:
                        logger.debug('history get %s' % uri)
                    headers = {}
                    headers['Authorization'] = _gen_auth_jwt_header(
                        self._auth_jwt_claim, self._auth_jwt_key)
                    res = self._requests_session.get(uri, headers=headers,
                        timeout=30)
                    if (res.status_code >= 200 and
                            res.status_code < 300):
                        retry_connection = False
                    elif res.status_code == 404:
                        # Endpoint gone: clear all channel state and report failure.
                        self._unsub_and_clear_channels()
                        self._historical_fetch_thread_result = False
                        return
                    elif (res.status_code < 500 or
                            res.status_code == 501 or
                            res.status_code >= 600):
                        # Non-retryable status: close the monitor entirely.
                        self._historical_fetch_thread_result = False
                        self.close()
                        raise ValueError(
                            'pubsubmonitor historical fetch connection resulted in status code: %d' %
                            res.status_code)
                except (socket.timeout, requests.exceptions.RequestException):
                    pass
            content = json.loads(_ensure_unicode(res.content))
            self._last_cursor = content['last_cursor']
            # An empty page means we've drained the backlog.
            if not content['items']:
                more_items_available = False
            else:
                items.extend(content['items'])
        self._parse_items(items)
        self._historical_fetch_thread_result = True
        # Tell _monitor() to skip stream events until it reaches this cursor.
        self._catch_stream_up_to_last_cursor = True
        self._catch_stream_up_start_time = time.time()
        logger.debug('last cursor: %s' % PubSubMonitor._parse_cursor(self._last_cursor))
    finally:
        # Wake anyone blocked on the fetch, success or not.
        self._thread_event.set()
# Unsubscribe from and clear all channels.
def _unsub_and_clear_channels(self):
    """Fire an 'unsub' callback for every known channel, then clear the
    channel set and the last cursor.

    Callback errors are logged and swallowed so one bad handler cannot
    prevent cleanup.
    """
    logger.debug('unsubbing and clearing channels')
    if self._callback:
        for channel in self._channels:
            try:
                self._callback('unsub', channel)
            except Exception:
                logger.exception('error calling callback')
    # Use a context manager instead of bare acquire()/release(): the lock
    # is guaranteed to be released even if the mutation raises.
    with self._lock:
        self._channels.clear()
        self._last_cursor = None
# Parse the specified items by updating the internal list and calling callbacks.
def _parse_items(self, items):
    """Apply subscribe/unsubscribe item events to the channel set.

    For each item, a 'subscribed' state adds the channel (if new) and a
    'unsubscribed' state removes it (if present); the callback is invoked
    before the set is mutated. Callback errors are logged and swallowed.
    """
    for item in items:
        if (item['state'] == 'subscribed' and
                item['channel'] not in self._channels):
            logger.debug('added %s' % item['channel'])
            if self._callback:
                try:
                    self._callback('sub', item['channel'])
                except Exception:
                    logger.exception('error calling callback')
            # `with` guarantees the lock is released even if the set
            # operation raises (the bare acquire()/release() pair did not).
            with self._lock:
                self._channels.add(item['channel'])
        elif (item['state'] == 'unsubscribed' and
                item['channel'] in self._channels):
            logger.debug('removed %s' % item['channel'])
            if self._callback:
                try:
                    self._callback('unsub', item['channel'])
                except Exception:
                    logger.exception('error calling callback')
            with self._lock:
                self._channels.remove(item['channel'])
# Parse the specified cursor.
@staticmethod
def _parse_cursor(raw_cursor):
decoded_cursor = b64decode(raw_cursor).decode('UTF-8')
return decoded_cursor[decoded_cursor.index('_')+1:]
# Increase the wait interval from the specified interval.
@staticmethod
def _increase_wait_interval(wait_interval):
if wait_interval <= 1:
return wait_interval + 1
elif wait_interval == 64:
return wait_interval
return wait_interval * 2
|
test.py | """Tests for wasser module"""
from multiprocessing import Process
from time import sleep
import unittest
import json
from wasser import Wasser
from simple_ssl_server import SimpleServer
class TestServer(SimpleServer):
    """Server for tests.

    Serves canned HTTP responses over the SSL socket for the paths
    registered in the test runner ('/' and '/second').
    """
    def __init__(self, *args, **kwargs):
        super(TestServer, self).__init__(*args, **kwargs)
    def get(self, path):
        """Handle a GET request by sending a fixed response for the path."""
        if path == '/':
            response = """HTTP/1.0 200 OK
Content-Type: text/html
<head>Test message ...</head>
<body>Hello there, general Kenobi</body>
"""
            self.ssl_socket.send(response)
        elif path == '/second':
            # Bug fix: this branch previously assigned `reponse` (typo) and
            # then sent the undefined name `response`, raising NameError.
            response = """HTTP/1.1 200 OK
Content-Type: text/plain
Hello there"""
            self.ssl_socket.send(response)
    def post(self, path):
        """Handle a POST request, echoing self.message back as JSON when it
        is a dict, otherwise as plain text."""
        if path == '/':
            if isinstance(self.message, dict):
                json_string = json.dumps(self.message)
                message_len = len(json_string)
                response = "HTTP/1.0 200 OK\nContent-Type: application/json\nContent-Length: {0}\n\n{1}".format(message_len, json_string)
            else:
                message = str(self.message)
                message_len = len(message)
                response = "HTTP/1.0 200 OK\nContent-Type: text/plain\nContent-Length: {0}\n\n{1}".format(message_len, message)
            self.ssl_socket.send(response)
class TestWasserRequest(unittest.TestCase):
    """Test for wasser requests"""
    def setUp(self):
        # One client per test, configured with the client cert/key and CA.
        self.request = Wasser('certs/user.crt', 'certs/user.key', 'certs/CAcert.pem')
    def test_post_json_success(self):
        """Test for POST application/json success"""
        test_json = {'wasser':'stein'}
        json_string = json.dumps(test_json)
        message_len = len(json_string)
        # NOTE(review): TestServer.post replies with Content-Type
        # application/json when self.message is a dict, but this expectation
        # uses text/plain — confirm whether Wasser serializes the dict to a
        # string before sending (which would hit the text/plain branch).
        expecting_response = "HTTP/1.0 200 OK\nContent-Type: text/plain\nContent-Length: {0}\n\n{1}".format(message_len, json_string)
        wasser_post_json_response = self.request.post('https://localhost:1027/', test_json)
        self.assertEqual(expecting_response, wasser_post_json_response)
    def test_post_text_success(self):
        """Test for POST text/plain success"""
        message = 'How are you'
        message_len = len(message)
        expecting_response = "HTTP/1.0 200 OK\nContent-Type: text/plain\nContent-Length: {0}\n\n{1}".format(message_len, message)
        wasser_post_text_response = self.request.post('https://localhost:1027/', message)
        self.assertEqual(expecting_response, wasser_post_text_response)
    def test_get_success(self):
        """Test for GET */* success"""
        expecting_response = """HTTP/1.0 200 OK
Content-Type: text/html
<head>Test message ...</head>
<body>Hello there, general Kenobi</body>
"""
        wasser_get_response = self.request.get('https://localhost:1027/')
        self.assertEqual(expecting_response, wasser_get_response)
    def tearDown(self):
        # Nothing to clean up; the server process is managed by the runner.
        pass
if __name__ == '__main__':
    # Spin up the TLS test server in a child process, give it a moment to
    # bind, run the test suite against it, then tear the process down.
    bind_address = ('127.0.0.1', 1027)
    test_server = TestServer(bind_address, 'certs/server.crt',
                             'certs/server.key', 'certs/CAcert.pem',
                             ['/', '/second'])
    child = Process(target=test_server.listen)
    child.start()
    sleep(1)
    tests = unittest.defaultTestLoader.loadTestsFromTestCase(TestWasserRequest)
    unittest.TextTestRunner(verbosity=2).run(tests)
    child.terminate()
|
rabbit_mq_client.py | # Copyright 2016 deepsense.ai (CodiLime, Inc)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import time
from threading import Thread
import pika
from pika.exceptions import ConnectionClosed
from utils import Logging
class RabbitMQClient(Logging):
    """Thin wrapper around a blocking pika connection/channel for a single
    exchange, with a daemon consumer thread for subscriptions."""
    # Lazily created channel; populated on first access via the `_channel`
    # property (assignment there creates an instance attribute shadowing this).
    _channel_impl = None
    def __init__(self, address, credentials, exchange, exchange_type='topic'):
        super(RabbitMQClient, self).__init__()
        self._address = address
        self._exchange = exchange
        self._credentials = credentials
        self._exchange_type = exchange_type
        # Create (but don't start) the consumer thread; subscribe() starts it.
        self._reset_consumer_thread(start=False)
        self._declare_exchange()
    def send(self, topic, message):
        # Publish `message` to the exchange with `topic` as the routing key.
        self._channel.basic_publish(exchange=self._exchange,
                                    routing_key=topic,
                                    body=message)
    def subscribe(self, topic, handler):
        # Exclusive, server-named queue bound to the topic; `handler` receives
        # deliveries on the consumer thread.
        queue_name = self._channel.queue_declare(queue='', exclusive=True).method.queue
        self.logger.info('===+++===+++=== SUBSCRIBE {} ===+++===+++==='.format(queue_name))
        self._channel.queue_bind(exchange=self._exchange,
                                 queue=queue_name,
                                 routing_key=topic)
        self._channel.basic_consume(queue_name, handler)
        if not self._consumer_thread.is_alive():
            self._reset_consumer_thread(start=True)
    def consume(self, inactivity_timeout, handler, timeout_handler):
        # Blocking consume loop: `handler` per message, `timeout_handler`
        # whenever `inactivity_timeout` elapses with no delivery (pika yields
        # None in that case).
        queue_name = self._channel.queue_declare(queue='', exclusive=True).method.queue
        self.logger.info('===+++===+++=== CONSUME {} ===+++===+++==='.format(queue_name))
        self._channel.queue_bind(exchange=self._exchange,
                                 queue=queue_name)
        for message in self._channel.consume(queue=queue_name,
                                             inactivity_timeout=inactivity_timeout):
            if message is not None:
                handler(self._channel, message)
            else:
                timeout_handler()
    def _declare_exchange(self):
        # Idempotent declaration of the configured exchange.
        self._channel.exchange_declare(exchange=self._exchange,
                                       exchange_type=self._exchange_type)
    def _reset_consumer_thread(self, start):
        # Daemon thread running pika's start_consuming loop; recreated after
        # the previous thread dies.
        self._consumer_thread = Thread(target=self._channel.start_consuming)
        self._consumer_thread.daemon = True
        if start:
            assert not self._consumer_thread.is_alive()
            self._consumer_thread.start()
    @property
    def _channel(self):
        # Lazily connect and open a channel on first use.
        if not self._channel_impl:
            connection = self._establish_connection_to_mq(self._address, self._credentials)
            self._channel_impl = connection.channel()
        return self._channel_impl
    @staticmethod
    def _establish_connection_to_mq(address, credentials):
        # Retry forever, once per second, until the broker accepts the
        # connection.
        while True:
            try:
                return pika.BlockingConnection(
                    pika.ConnectionParameters(host=address[0], port=address[1],
                                              credentials=pika.PlainCredentials(credentials[0], credentials[1])))
            except ConnectionClosed:
                time.sleep(1)
class RabbitMQJsonSender(Logging):
    """Serializes messages to JSON and publishes them on a fixed topic."""
    def __init__(self, rabbit_mq_client, topic):
        super(RabbitMQJsonSender, self).__init__()
        self._rabbit_mq_client = rabbit_mq_client
        self._topic = topic
    def send(self, message):
        """JSON-encode `message` and publish it; unserializable messages are
        logged and dropped rather than raised."""
        try:
            payload = json.dumps(message)
        except Exception as e:
            self.logger.debug('JSON serialization failed: {}. Message: {}'.format(e, message))
            return
        self._rabbit_mq_client.send(topic=self._topic, message=payload)
class RabbitMQJsonReceiver(Logging):
    """Subscribes handlers that receive JSON-decoded message bodies."""
    def __init__(self, rabbit_mq_client):
        super(RabbitMQJsonReceiver, self).__init__()
        self._rabbit_mq_client = rabbit_mq_client
    def subscribe(self, topic, handler):
        """Bind `handler` to `topic`; the handler is called with the decoded
        JSON payload instead of the raw pika delivery arguments."""
        self._rabbit_mq_client.subscribe(topic, self._wrapped_handler(handler))
        self.logger.debug('Subscribed to topic {}'.format(topic))
    @staticmethod
    def _wrapped_handler(actual_handler):
        # Adapt pika's (channel, method, properties, body) signature to a
        # single decoded-message argument.
        # noinspection PyUnusedLocal
        def handle(ch, method, properties, body):
            decoded = json.loads(body)
            return actual_handler(decoded)
        return handle
|
test_protocol_cybinary.py | # -*- coding: utf-8 -*-
import collections
import multiprocessing
import os
import time
import pytest
from thriftpy._compat import u
from thriftpy.thrift import TType, TPayload, TDecodeException
from thriftpy.transport import TSocket, TServerSocket
from thriftpy.utils import hexlify
from thriftpy._compat import PYPY
# Skip the entire module on PyPy: cybin is a CPython C extension.
pytestmark = pytest.mark.skipif(PYPY,
                                reason="cython not enabled in pypy.")
if not PYPY:
    # Import the C-accelerated protocol and transports only where available.
    from thriftpy.protocol import cybin as proto
    from thriftpy.transport.memory import TCyMemoryBuffer
    from thriftpy.transport.buffered import TCyBufferedTransport
class TItem(TPayload):
    """Sample payload used throughout these tests: an i32 id plus a list of
    string phone numbers."""
    thrift_spec = {
        1: (TType.I32, "id", False),
        2: (TType.LIST, "phones", TType.STRING, False),
    }
    default_spec = [("id", None), ("phones", None)]
# --- Primitive type round-trips: each write_* test pins the exact wire
# --- bytes (big-endian) and each read_* test decodes hand-built buffers.
def test_write_bool():
    b = TCyMemoryBuffer()
    proto.write_val(b, TType.BOOL, 1)
    b.flush()
    assert "01" == hexlify(b.getvalue())
def test_read_bool():
    b = TCyMemoryBuffer(b'\x01')
    val = proto.read_val(b, TType.BOOL)
    assert True is val
def test_write_i8():
    b = TCyMemoryBuffer()
    proto.write_val(b, TType.I08, 123)
    b.flush()
    assert "7b" == hexlify(b.getvalue())
def test_read_i8():
    b = TCyMemoryBuffer(b'\x7b')
    val = proto.read_val(b, TType.I08)
    assert 123 == val
def test_write_i16():
    b = TCyMemoryBuffer()
    proto.write_val(b, TType.I16, 12345)
    b.flush()
    assert "30 39" == hexlify(b.getvalue())
def test_read_i16():
    # b"09" is the two ASCII bytes 0x30 0x39, i.e. big-endian 12345.
    b = TCyMemoryBuffer(b"09")
    val = proto.read_val(b, TType.I16)
    assert 12345 == val
def test_byteswap_i16():
    # Write-then-read must round-trip regardless of host byte order.
    i = 128
    b = TCyMemoryBuffer()
    proto.write_val(b, TType.I16, i)
    b.flush()
    v = proto.read_val(b, TType.I16)
    assert v == i
def test_write_i32():
    b = TCyMemoryBuffer()
    proto.write_val(b, TType.I32, 1234567890)
    b.flush()
    assert "49 96 02 d2" == hexlify(b.getvalue())
def test_read_i32():
    b = TCyMemoryBuffer(b"I\x96\x02\xd2")
    assert 1234567890 == proto.read_val(b, TType.I32)
def test_write_i64():
    b = TCyMemoryBuffer()
    proto.write_val(b, TType.I64, 1234567890123456789)
    b.flush()
    assert "11 22 10 f4 7d e9 81 15" == hexlify(b.getvalue())
def test_read_i64():
    b = TCyMemoryBuffer(b"\x11\"\x10\xf4}\xe9\x81\x15")
    assert 1234567890123456789 == proto.read_val(b, TType.I64)
def test_write_double():
    b = TCyMemoryBuffer()
    proto.write_val(b, TType.DOUBLE, 1234567890.1234567890)
    b.flush()
    assert "41 d2 65 80 b4 87 e6 b7" == hexlify(b.getvalue())
def test_read_double():
    b = TCyMemoryBuffer(b"A\xd2e\x80\xb4\x87\xe6\xb7")
    assert 1234567890.1234567890 == proto.read_val(b, TType.DOUBLE)
def test_write_string():
    # Strings are written as a 4-byte length prefix plus UTF-8 bytes.
    b = TCyMemoryBuffer()
    proto.write_val(b, TType.STRING, "hello world!")
    b.flush()
    assert "00 00 00 0c 68 65 6c 6c 6f 20 77 6f 72 6c 64 21" == \
        hexlify(b.getvalue())
    b = TCyMemoryBuffer()
    proto.write_val(b, TType.STRING, u("你好世界"))
    b.flush()
    assert "00 00 00 0c e4 bd a0 e5 a5 bd e4 b8 96 e7 95 8c" == \
        hexlify(b.getvalue())
def test_read_string():
    b = TCyMemoryBuffer(b"\x00\x00\x00\x0c"
                        b"\xe4\xbd\xa0\xe5\xa5\xbd\xe4\xb8\x96\xe7\x95\x8c")
    assert u("你好世界") == proto.read_val(b, TType.STRING)
# --- Message framing and struct encode/decode, in both strict and
# --- non-strict binary protocol modes.
def test_write_message_begin():
    trans = TCyMemoryBuffer()
    b = proto.TCyBinaryProtocol(trans)
    b.write_message_begin("test", TType.STRING, 1)
    b.write_message_end()
    # Strict framing: version word 0x8001 then type, name, seqid.
    assert "80 01 00 0b 00 00 00 04 74 65 73 74 00 00 00 01" == \
        hexlify(trans.getvalue())
def test_write_message_begin_no_strict():
    trans = TCyMemoryBuffer()
    b = proto.TCyBinaryProtocol(trans, strict_write=False)
    b.write_message_begin("test", TType.STRING, 1)
    b.write_message_end()
    # Old (non-strict) framing: name first, then type byte and seqid.
    assert "00 00 00 04 74 65 73 74 0b 00 00 00 01" == \
        hexlify(trans.getvalue())
def test_read_message_begin():
    b = TCyMemoryBuffer(b"\x80\x01\x00\x0b\x00\x00\x00\x04test"
                        b"\x00\x00\x00\x01")
    res = proto.TCyBinaryProtocol(b).read_message_begin()
    assert res == ("test", TType.STRING, 1)
def test_read_message_begin_not_strict():
    b = TCyMemoryBuffer(b"\x00\x00\x00\x04test\x0b\x00\x00\x00\x01")
    res = proto.TCyBinaryProtocol(b, strict_read=False).read_message_begin()
    assert res == ("test", TType.STRING, 1)
def test_write_struct():
    trans = TCyMemoryBuffer()
    b = proto.TCyBinaryProtocol(trans)
    item = TItem(id=123, phones=["123456", "abcdef"])
    b.write_struct(item)
    b.write_message_end()
    assert ("08 00 01 00 00 00 7b 0f 00 02 0b 00 00 00 02 00 00 00 "
            "06 31 32 33 34 35 36 00 00 00 06 61 62 63 64 65 66 00") == \
        hexlify(trans.getvalue())
def test_read_struct():
    b = TCyMemoryBuffer(b"\x08\x00\x01\x00\x00\x00{"
                        b"\x0f\x00\x02\x0b\x00\x00\x00"
                        b"\x02\x00\x00\x00\x06123456"
                        b"\x00\x00\x00\x06abcdef\x00")
    b = proto.TCyBinaryProtocol(b)
    _item = TItem(id=123, phones=["123456", "abcdef"])
    _item2 = TItem()
    b.read_struct(_item2)
    assert _item == _item2
def test_write_empty_struct():
    trans = TCyMemoryBuffer()
    b = proto.TCyBinaryProtocol(trans)
    item = TItem()
    b.write_struct(item)
    b.write_message_end()
    # An empty struct is just the STOP field byte.
    assert "00" == hexlify(trans.getvalue())
def test_read_empty_struct():
    b = TCyMemoryBuffer(b"\x00")
    b = proto.TCyBinaryProtocol(b)
    _item = TItem()
    _item2 = TItem()
    b.read_struct(_item2)
    assert _item == _item2
# --- Large payload smoke tests: no assertions, just "does not crash".
def test_write_huge_struct():
    b = TCyMemoryBuffer()
    b = proto.TCyBinaryProtocol(b)
    item = TItem(id=12345, phones=["1234567890"] * 100000)
    b.write_struct(item)
    b.write_message_end()
def test_read_huge_args():
    class Hello(TPayload):
        thrift_spec = {
            1: (TType.STRING, "name", False),
            2: (TType.STRING, "world", False),
        }
        default_spec = [("name", None), ("world", None)]
    b = TCyMemoryBuffer()
    # Multi-byte UTF-8 payload large enough to span internal buffer chunks.
    item = Hello(name='我' * 326, world='你' * 1365)
    p = proto.TCyBinaryProtocol(b)
    p.write_struct(item)
    p.write_message_end()
    item2 = Hello()
    p.read_struct(item2)
# --- skip() tests: write a value of the skipped type followed by an i32
# --- marker, skip the first value, and verify the marker is read intact.
def test_skip_bool():
    b = TCyMemoryBuffer()
    proto.write_val(b, TType.BOOL, 1)
    proto.write_val(b, TType.I32, 123)
    b.flush()
    proto.skip(b, TType.BOOL)
    assert 123 == proto.read_val(b, TType.I32)
def test_skip_double():
    b = TCyMemoryBuffer()
    proto.write_val(b, TType.DOUBLE, 0.123425897)
    proto.write_val(b, TType.I32, 123)
    b.flush()
    proto.skip(b, TType.DOUBLE)
    assert 123 == proto.read_val(b, TType.I32)
def test_skip_string():
    b = TCyMemoryBuffer()
    proto.write_val(b, TType.STRING, "hello world")
    proto.write_val(b, TType.I32, 123)
    b.flush()
    proto.skip(b, TType.STRING)
    assert 123 == proto.read_val(b, TType.I32)
def test_skip_list():
    b = TCyMemoryBuffer()
    proto.write_val(b, TType.LIST, [5, 6, 7, 8, 9], spec=TType.I32)
    proto.write_val(b, TType.I32, 123)
    b.flush()
    proto.skip(b, TType.LIST)
    assert 123 == proto.read_val(b, TType.I32)
def test_skip_map():
    b = TCyMemoryBuffer()
    proto.write_val(b, TType.MAP, {"hello": 0.3456},
                    spec=(TType.STRING, TType.DOUBLE))
    proto.write_val(b, TType.I32, 123)
    b.flush()
    proto.skip(b, TType.MAP)
    assert 123 == proto.read_val(b, TType.I32)
def test_skip_struct():
    b = TCyMemoryBuffer()
    p = proto.TCyBinaryProtocol(b)
    item = TItem(id=123, phones=["123456", "abcdef"])
    p.write_struct(item)
    p.write_message_end()
    proto.write_val(b, TType.I32, 123)
    b.flush()
    proto.skip(b, TType.STRUCT)
    assert 123 == proto.read_val(b, TType.I32)
def test_read_long_data():
    """Stream a ~97 KiB string over a unix-domain socket between a forked
    server process and this client, exercising buffered reads that span
    many transport chunks."""
    val = 'z' * 97 * 1024
    unix_sock = "/tmp/thriftpy_test.sock"
    def serve():
        server_sock = TServerSocket(unix_socket=unix_sock)
        server_sock.listen()
        client = server_sock.accept()
        t = TCyBufferedTransport(client)
        proto.write_val(t, TType.STRING, val)
        t.flush()
        # wait for client to read
        time.sleep(1)
    p = multiprocessing.Process(target=serve)
    p.start()
    # Give the server a moment to bind the socket before connecting.
    time.sleep(0.1)
    try:
        sock = TSocket(unix_socket=unix_sock)
        b = TCyBufferedTransport(sock)
        b.open()
        assert val == proto.read_val(b, TType.STRING)
        sock.close()
    finally:
        p.terminate()
        try:
            os.remove(unix_sock)
        except IOError:
            pass
# --- Error-recovery tests: after a type error, the protocol must still
# --- encode/decode subsequent structs correctly.
def test_write_wrong_arg_type():
    trans = TCyMemoryBuffer()
    b = proto.TCyBinaryProtocol(trans)
    # First write fails (id is a string where I32 is declared)...
    item = TItem(id="wrong type", phones=["123456", "abcdef"])
    try:
        b.write_struct(item)
    except Exception:
        pass
    b.write_message_end()
    # ...but a following valid struct still produces the expected bytes.
    item2 = TItem(id=123, phones=["123456", "abcdef"])
    b.write_struct(item2)
    b.write_message_end()
    assert ("08 00 01 00 00 00 7b 0f 00 02 0b 00 00 00 02 00 00 00 "
            "06 31 32 33 34 35 36 00 00 00 06 61 62 63 64 65 66 00") == \
        hexlify(trans.getvalue())
def test_read_wrong_arg_type():
    class TWrongTypeItem(TPayload):
        thrift_spec = {
            1: (TType.STRING, "id", False),
            2: (TType.LIST, "phones", TType.STRING, False),
        }
        default_spec = [("id", None), ("phones", None)]
    trans = TCyMemoryBuffer()
    b = proto.TCyBinaryProtocol(trans)
    item = TItem(id=58, phones=["123456", "abcdef"])
    b.write_struct(item)
    b.write_message_end()
    # Reading with a mismatched spec fails...
    item2 = TWrongTypeItem()
    try:
        b.read_struct(item2)
    except Exception:
        pass
    # ...yet the protocol remains usable for the next round-trip.
    item3 = TItem(id=123, phones=["123456", "abcdef"])
    b.write_struct(item3)
    b.write_message_end()
    item4 = TItem()
    b.read_struct(item4)
    assert item3 == item4
def test_multiple_read_struct():
    # Two structs written back-to-back must read back in order.
    t = TCyMemoryBuffer()
    p = proto.TCyBinaryProtocol(t)
    item1 = TItem(id=123, phones=["123456", "abcdef"])
    item2 = TItem(id=234, phones=["110", "120"])
    p.write_struct(item1)
    p.write_struct(item2)
    p.write_message_end()
    _item1 = TItem()
    _item2 = TItem()
    p.read_struct(_item1)
    p.read_struct(_item2)
    assert _item1 == item1 and _item2 == item2
def test_write_decode_error():
    # Type mismatches must raise TDecodeException with a precise message
    # naming the field, its declared type, and the offending value.
    t = TCyMemoryBuffer()
    p = proto.TCyBinaryProtocol(t)
    class T(TPayload):
        thrift_spec = {
            1: (TType.I32, "id", False),
            2: (TType.LIST, "phones", TType.STRING, False),
            3: (TType.STRUCT, "item", TItem, False),
            4: (TType.MAP, "mm", (TType.STRING, (TType.STRUCT, TItem)), False)
        }
        default_spec = [("id", None), ("phones", None), ("item", None),
                        ("mm", None)]
    cases = [
        (T(id="hello"), "Field 'id(1)' of 'T' needs type 'I32', but the value is `'hello'`"),  # noqa
        (T(phones=[90, 12]), "Field 'phones(2)' of 'T' needs type 'LIST<STRING>', but the value is `[90, 12]`"),  # noqa
        (T(item=12), "Field 'item(3)' of 'T' needs type 'TItem', but the value is `12`"),  # noqa
        (T(mm=[45, 56]), "Field 'mm(4)' of 'T' needs type 'MAP<STRING, TItem>', but the value is `[45, 56]`")  # noqa
    ]
    for obj, res in cases:
        with pytest.raises(TDecodeException) as exc:
            p.write_struct(obj)
        assert str(exc.value) == res
def test_type_tolerance():
    # The encoder should accept any iterable/mapping shape that matches the
    # declared container type, not just list/dict literals.
    t = TCyMemoryBuffer()
    p = proto.TCyBinaryProtocol(t)
    class T(TPayload):
        thrift_spec = {
            1: (TType.LIST, "phones", TType.STRING, False),
            2: (TType.MAP, "mm", (TType.I32, (TType.LIST, TType.I32)), False)
        }
        default_spec = [("phones", None), ("mm", None)]
    defaultdict = collections.defaultdict(list)
    defaultdict.update({234: [3, 4, 5], 123: [6, 7, 8]})
    cases = [
        T(phones=["123", "234"]),
        T(phones=("123", "234")),
        T(phones=set(["123", "234"])),
        T(phones={"123": 'a', "234": 'b'}),
        T(mm={234: [3, 4, 5], 123: [6, 7, 8]}),
        T(mm=collections.defaultdict(list)),
        T(mm=defaultdict)
    ]
    for obj in cases:
        p.write_struct(obj)
|
na_santricity_discover.py | #!/usr/bin/python
# (c) 2020, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = """
---
module: na_santricity_discover
short_description: NetApp E-Series discover E-Series storage systems
description: Module searches a subnet range and returns any available E-Series storage systems.
author: Nathan Swartz (@ndswartz)
options:
subnet_mask:
description:
- This is the IPv4 search range for discovering E-Series storage arrays.
- IPv4 subnet mask specified in CIDR form. Example 192.168.1.0/24 would search the range 192.168.1.0 to 192.168.1.255.
- Be sure to include all management paths in the search range.
type: str
required: true
ports:
description:
- This option specifies which ports to be tested during the discovery process.
- The first usable port will be used in the returned API url.
type: list
default: [8443]
required: false
proxy_url:
description:
- Web Services Proxy REST API URL. Example https://192.168.1.100:8443/devmgr/v2/
type: str
required: false
proxy_username:
description:
- Web Service Proxy username
type: str
required: false
proxy_password:
description:
- Web Service Proxy user password
type: str
required: false
proxy_validate_certs:
description:
- Whether to validate Web Service Proxy SSL certificate
type: bool
default: true
required: false
prefer_embedded:
description:
- Give preference to Web Services Embedded when an option exists for both Web Services Proxy and Embedded.
- Web Services Proxy will be utilized when available by default.
type: bool
default: false
required: false
notes:
- Only available for platforms E2800 or later (SANtricity Web Services Embedded REST API must be available).
- All E-Series storage systems with SANtricity version 11.62 or later will be discovered.
- Only E-Series storage systems without a set admin password running SANtricity versions prior to 11.62 will be discovered.
- Use SANtricity Web Services Proxy to discover all systems regardless of SANricity version or password.
requirements:
- ipaddress
"""
EXAMPLES = """
- name: Discover all E-Series storage systems on the network.
na_santricity_discover:
subnet_mask: 192.168.1.0/24
"""
RETURN = """
systems_found:
description: Success message
returned: on success
type: dict
sample: '{"012341234123": {
"addresses": ["192.168.1.184", "192.168.1.185"],
"api_urls": ["https://192.168.1.184:8443/devmgr/v2/", "https://192.168.1.185:8443/devmgr/v2/"],
"label": "ExampleArray01",
"proxy_ssid: "",
"proxy_required": false},
"012341234567": {
"addresses": ["192.168.1.23", "192.168.1.24"],
"api_urls": ["https://192.168.1.100:8443/devmgr/v2/"],
"label": "ExampleArray02",
"proxy_ssid": "array_ssid",
"proxy_required": true}}'
"""
import json
import multiprocessing
import threading
from time import sleep
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.netapp_eseries.santricity.plugins.module_utils.santricity import request
from ansible.module_utils._text import to_native
try:
import ipaddress
except ImportError:
HAS_IPADDRESS = False
else:
HAS_IPADDRESS = True
try:
import urlparse
except ImportError:
import urllib.parse as urlparse
class NetAppESeriesDiscover:
    """Discover E-Series storage systems."""
    MAX_THREAD_POOL_SIZE = 256
    CPU_THREAD_MULTIPLE = 32
    SEARCH_TIMEOUT = 30
    DEFAULT_CONNECTION_TIMEOUT_SEC = 30
    DEFAULT_DISCOVERY_TIMEOUT_SEC = 300
    def __init__(self):
        """Parse module arguments and normalize the proxy URLs and port list."""
        ansible_options = dict(subnet_mask=dict(type="str", required=True),
                               ports=dict(type="list", required=False, default=[8443]),
                               proxy_url=dict(type="str", required=False),
                               proxy_username=dict(type="str", required=False),
                               proxy_password=dict(type="str", required=False, no_log=True),
                               proxy_validate_certs=dict(type="bool", default=True, required=False),
                               prefer_embedded=dict(type="bool", default=False, required=False))
        required_together = [["proxy_url", "proxy_username", "proxy_password"]]
        self.module = AnsibleModule(argument_spec=ansible_options, required_together=required_together)
        args = self.module.params
        self.subnet_mask = args["subnet_mask"]
        self.prefer_embedded = args["prefer_embedded"]
        self.ports = []
        self.proxy_url = args["proxy_url"]
        if args["proxy_url"]:
            # Rewrite the supplied proxy URL into both the /utils/about probe
            # URL and the canonical /devmgr/v2/ API base.
            parsed_url = list(urlparse.urlparse(args["proxy_url"]))
            parsed_url[2] = "/devmgr/utils/about"
            self.proxy_about_url = urlparse.urlunparse(parsed_url)
            parsed_url[2] = "/devmgr/v2/"
            self.proxy_url = urlparse.urlunparse(parsed_url)
            self.proxy_username = args["proxy_username"]
            self.proxy_password = args["proxy_password"]
            self.proxy_validate_certs = args["proxy_validate_certs"]
        for port in args["ports"]:
            # Bug fix: Ansible 'list' options can deliver elements as strings;
            # the original `0 < port < 2 ** 16` comparison raised TypeError on
            # a string port in Python 3. Coerce to int before range checking.
            try:
                port_number = int(port)
            except (TypeError, ValueError):
                port_number = -1
            if 0 < port_number < 2 ** 16:
                self.ports.append(str(port_number))
            else:
                self.module.fail_json(msg="Invalid port! Ports must be positive numbers between 0 and 65536.")
        self.systems_found = {}
    def check_ip_address(self, systems_found, address):
        """Determine where an E-Series storage system is available at a specific ip address."""
        for port in self.ports:
            # Port 8080 is assumed to be plain HTTP; everything else HTTPS.
            if port == "8080":
                url = "http://%s:%s/" % (address, port)
            else:
                url = "https://%s:%s/" % (address, port)
            try:
                # NOTE(review): unlike the other probes this request passes no
                # timeout — confirm whether `request` applies its own default.
                rc, about = request(url + "devmgr/v2/storage-systems/1/about", validate_certs=False, force_basic_auth=False, ignore_errors=True)
                if about["serialNumber"] in systems_found:
                    systems_found[about["serialNumber"]]["api_urls"].append(url)
                else:
                    systems_found.update({about["serialNumber"]: {"api_urls": [url], "label": about["name"],
                                                                  "addresses": [], "proxy_ssid": "", "proxy_required": False}})
                break
            except Exception as error:
                try:
                    rc, sa_data = request(url + "devmgr/v2/storage-systems/1/symbol/getSAData", validate_certs=False, force_basic_auth=False,
                                          ignore_errors=True)
                    if rc == 401:  # Unauthorized
                        self.module.warn(
                            "Fail over and discover any storage system without a set admin password. This will discover systems without a set password"
                            " such as newly deployed storage systems. Address [%s]." % address)
                        # Fail over and discover any storage system without a set admin password. This will cover newly deployed systems.
                        rc, graph = request(url + "graph", validate_certs=False, url_username="admin", url_password="", timeout=self.SEARCH_TIMEOUT)
                        sa_data = graph["sa"]["saData"]
                    if sa_data["chassisSerialNumber"] in systems_found:
                        systems_found[sa_data["chassisSerialNumber"]]["api_urls"].append(url)
                    else:
                        systems_found.update({sa_data["chassisSerialNumber"]: {"api_urls": [url], "label": sa_data["storageArrayLabel"],
                                                                               "addresses": [], "proxy_ssid": "", "proxy_required": False}})
                    break
                except Exception as error:
                    # Nothing listening (or not an E-Series system) at this
                    # address/port; try the next port.
                    pass
    def no_proxy_discover(self):
        """Discover E-Series storage systems using embedded web services."""
        thread_pool_size = min(multiprocessing.cpu_count() * self.CPU_THREAD_MULTIPLE, self.MAX_THREAD_POOL_SIZE)
        subnet = list(ipaddress.ip_network(u"%s" % self.subnet_mask))
        thread_pool = []
        search_count = len(subnet)
        # Probe the subnet in batches of thread_pool_size addresses.
        for start in range(0, search_count, thread_pool_size):
            end = search_count if (search_count - start) < thread_pool_size else start + thread_pool_size
            for address in subnet[start:end]:
                thread = threading.Thread(target=self.check_ip_address, args=(self.systems_found, address))
                thread_pool.append(thread)
                thread.start()
        for thread in thread_pool:
            thread.join()
    def verify_proxy_service(self):
        """Verify proxy url points to a web services proxy."""
        try:
            rc, about = request(self.proxy_about_url, validate_certs=self.proxy_validate_certs)
            if not about["runningAsProxy"]:
                self.module.fail_json(msg="Web Services is not running as a proxy!")
        except Exception as error:
            self.module.fail_json(msg="Proxy is not available! Check proxy_url. Error [%s]." % to_native(error))
    def test_systems_found(self, systems_found, serial, label, addresses):
        """Verify and build api urls."""
        api_urls = []
        for address in addresses:
            for port in self.ports:
                if port == "8080":
                    url = "http://%s:%s/devmgr/" % (address, port)
                else:
                    url = "https://%s:%s/devmgr/" % (address, port)
                try:
                    rc, response = request(url + "utils/about", validate_certs=False, timeout=self.SEARCH_TIMEOUT)
                    api_urls.append(url + "v2/")
                    break
                except Exception as error:
                    pass
        systems_found.update({serial: {"api_urls": api_urls,
                                       "label": label,
                                       "addresses": addresses,
                                       "proxy_ssid": "",
                                       "proxy_required": False}})
    def proxy_discover(self):
        """Search for array using it's chassis serial from web services proxy."""
        self.verify_proxy_service()
        subnet = ipaddress.ip_network(u"%s" % self.subnet_mask)
        try:
            rc, request_id = request(self.proxy_url + "discovery", method="POST", validate_certs=self.proxy_validate_certs,
                                     force_basic_auth=True, url_username=self.proxy_username, url_password=self.proxy_password,
                                     data=json.dumps({"startIP": str(subnet[0]), "endIP": str(subnet[-1]),
                                                      "connectionTimeout": self.DEFAULT_CONNECTION_TIMEOUT_SEC}))
            # Wait for discover to complete
            try:
                # Poll once per second up to DEFAULT_DISCOVERY_TIMEOUT_SEC.
                for iteration in range(self.DEFAULT_DISCOVERY_TIMEOUT_SEC):
                    rc, discovered_systems = request(self.proxy_url + "discovery?requestId=%s" % request_id["requestId"],
                                                     validate_certs=self.proxy_validate_certs,
                                                     force_basic_auth=True, url_username=self.proxy_username, url_password=self.proxy_password)
                    if not discovered_systems["discoverProcessRunning"]:
                        thread_pool = []
                        for discovered_system in discovered_systems["storageSystems"]:
                            addresses = []
                            for controller in discovered_system["controllers"]:
                                addresses.extend(controller["ipAddresses"])
                            # Storage systems with embedded web services.
                            if "https" in discovered_system["supportedManagementPorts"] and self.prefer_embedded:
                                thread = threading.Thread(target=self.test_systems_found,
                                                          args=(self.systems_found, discovered_system["serialNumber"], discovered_system["label"], addresses))
                                thread_pool.append(thread)
                                thread.start()
                            # Storage systems without embedded web services.
                            else:
                                self.systems_found.update({discovered_system["serialNumber"]: {"api_urls": [self.proxy_url],
                                                                                               "label": discovered_system["label"],
                                                                                               "addresses": addresses,
                                                                                               "proxy_ssid": "",
                                                                                               "proxy_required": True}})
                        for thread in thread_pool:
                            thread.join()
                        break
                    sleep(1)
                else:
                    self.module.fail_json(msg="Timeout waiting for array discovery process. Subnet [%s]" % self.subnet_mask)
            except Exception as error:
                self.module.fail_json(msg="Failed to get the discovery results. Error [%s]." % to_native(error))
        except Exception as error:
            self.module.fail_json(msg="Failed to initiate array discovery. Error [%s]." % to_native(error))
    def update_proxy_with_proxy_ssid(self):
        """Determine the current proxy ssid for all discovered-proxy_required storage systems."""
        # Discover all added storage systems to the proxy.
        systems = []
        try:
            rc, systems = request(self.proxy_url + "storage-systems", validate_certs=self.proxy_validate_certs,
                                  force_basic_auth=True, url_username=self.proxy_username, url_password=self.proxy_password)
        except Exception as error:
            self.module.fail_json(msg="Failed to ascertain storage systems added to Web Services Proxy.")
        # Match discovered systems to proxy-registered systems by chassis serial.
        for system_key, system_info in self.systems_found.items():
            if self.systems_found[system_key]["proxy_required"]:
                for system in systems:
                    if system_key == system["chassisSerialNumber"]:
                        self.systems_found[system_key]["proxy_ssid"] = system["id"]
    def discover(self):
        """Discover E-Series storage systems."""
        missing_packages = []
        if not HAS_IPADDRESS:
            missing_packages.append("ipaddress")
        if missing_packages:
            self.module.fail_json(msg="Python packages are missing! Packages [%s]." % ", ".join(missing_packages))
        if self.proxy_url:
            self.proxy_discover()
            self.update_proxy_with_proxy_ssid()
        else:
            self.no_proxy_discover()
        self.module.exit_json(msg="Discover process complete.", systems_found=self.systems_found, changed=False)
def main():
    """Module entry point: run discovery and exit through Ansible's module interface."""
    NetAppESeriesDiscover().discover()


if __name__ == "__main__":
    main()
|
tunnel.py | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# pylint: disable=import-error,unused-import
#
# Modified from tunnel.py in appservice module
#
import sys
import ssl
import json
import socket
import time
import traceback
import logging as logs
from contextlib import closing
from datetime import datetime
from threading import Thread
import websocket
from websocket import create_connection, WebSocket
from msrestazure.azure_exceptions import CloudError
from azure.cli.core._profile import Profile
from azure.cli.core.util import should_disable_connection_verify
import requests
import urllib3
from knack.util import CLIError
from knack.log import get_logger
logger = get_logger(__name__)
# pylint: disable=no-member,too-many-instance-attributes,bare-except,no-self-use
class TunnelServer:
    """Forwards connections from a local TCP port to a remote host/port through
    an Azure Bastion websocket tunnel.

    Lifecycle: construct (binds the local listening socket), then
    ``start_server()`` to accept connections. Each accepted connection gets a
    fresh bastion token, a websocket, and a pair of pump threads (one per
    direction); ``cleanup()`` deletes the bastion session afterwards.
    """

    def __init__(self, cli_ctx, local_addr, local_port, bastion, remote_host, remote_port):
        self.local_addr = local_addr
        self.local_port = int(local_port)
        # Port 0 means "pick any free port"; an explicit port must be free now.
        if self.local_port != 0 and not self.is_port_open():
            raise CLIError('Defined port is currently unavailable')
        self.bastion = bastion
        self.remote_host = remote_host
        self.remote_port = remote_port
        self.client = None
        self.ws = None
        self.last_token = None  # current bastion session token; cleared in cleanup()
        self.node_id = None     # bastion node affinity, echoed back as X-Node-Id
        self.cli_ctx = cli_ctx
        logger.info('Creating a socket on port: %s', self.local_port)
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        logger.info('Setting socket options')
        self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        logger.info('Binding to socket on local address and port')
        self.sock.bind((self.local_addr, self.local_port))
        if self.local_port == 0:
            self.local_port = self.sock.getsockname()[1]
            logger.info('Auto-selecting port: %s', self.local_port)
        logger.info('Finished initialization')

    def is_port_open(self):
        """Return True when nothing is currently listening on self.local_port.

        Despite the name, "open" here means "available for binding".
        """
        is_port_open = False
        with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as sock:
            # connect_ex returning 0 means something accepted the connection,
            # i.e. the port is already in use.
            if sock.connect_ex(('', self.local_port)) == 0:
                logger.info('Port %s is NOT open', self.local_port)
            else:
                logger.info('Port %s is open', self.local_port)
                is_port_open = True
        return is_port_open

    def _get_auth_token(self):
        """Exchange an Azure access token for a bastion tunnel token.

        Also records the bastion node id so follow-up requests stick to the
        same node. Raises CloudError on non-200 responses.
        """
        profile = Profile(cli_ctx=self.cli_ctx)
        # Generate an Azure token with the VSTS resource app id
        auth_token, _, _ = profile.get_raw_token()
        content = {
            'resourceId': self.remote_host,
            'protocol': 'tcptunnel',
            'workloadHostPort': self.remote_port,
            'aztoken': auth_token[1],
            'token': self.last_token,
        }
        if self.node_id:
            custom_header = {'X-Node-Id': self.node_id}
        else:
            custom_header = {}
        web_address = 'https://{}/api/tokens'.format(self.bastion.dns_name)
        response = requests.post(web_address, data=content, headers=custom_header,
                                 verify=(not should_disable_connection_verify()))
        response_json = None
        # NOTE(review): requests' Response.content is never None, so this
        # always parses; an empty body would raise a JSON decode error here.
        if response.content is not None:
            response_json = json.loads(response.content.decode("utf-8"))
        if response.status_code not in [200]:
            if response_json is not None and response_json["message"] is not None:
                exp = CloudError(response, error=response_json["message"])
            else:
                exp = CloudError(response)
            raise exp
        self.last_token = response_json["authToken"]
        self.node_id = response_json["nodeId"]
        return self.last_token

    def _listen(self):
        """Accept loop. One connection is tunnelled at a time: the joins below
        block the loop until the current connection's pump threads finish."""
        self.sock.setblocking(True)
        self.sock.listen(100)
        index = 0
        while True:
            self.client, _address = self.sock.accept()
            auth_token = self._get_auth_token()
            host = 'wss://{}/webtunnel/{}?X-Node-Id={}'.format(self.bastion.dns_name, auth_token, self.node_id)
            self.ws = create_connection(host,
                                        sockopt=((socket.IPPROTO_TCP, socket.TCP_NODELAY, 1),),
                                        sslopt={'cert_reqs': ssl.CERT_NONE},
                                        enable_multithread=True)
            logger.info('Websocket, connected status: %s', self.ws.connected)
            index = index + 1
            logger.info('Got debugger connection... index: %s', index)
            debugger_thread = Thread(target=self._listen_to_client, args=(self.client, self.ws, index))
            web_socket_thread = Thread(target=self._listen_to_web_socket, args=(self.client, self.ws, index))
            debugger_thread.start()
            web_socket_thread.start()
            logger.info('Both debugger and websocket threads started...')
            logger.info('Successfully connected to local server..')
            debugger_thread.join()
            web_socket_thread.join()
            self.cleanup()
            logger.info('Both debugger and websocket threads stopped...')
            logger.info('Stopped local server..')

    def _listen_to_web_socket(self, client, ws_socket, index):
        """Pump: websocket -> local client, until either side closes."""
        try:
            while True:
                logger.info('Waiting for websocket data, connection status: %s, index: %s', ws_socket.connected, index)
                data = ws_socket.recv()
                logger.info('Received websocket index: %s', index)
                if data:
                    # Set the response to echo back the received data
                    response = data
                    logger.info('Sending to debugger, index: %s', index)
                    # BUG FIX: use sendall — a plain send() may transmit only
                    # part of the buffer and silently drop the remainder.
                    client.sendall(response)
                    logger.info('Done sending to debugger, index: %s', index)
                else:
                    logger.info('Websocket close, index: %s', index)
                    break
        except Exception as ex:  # pylint: disable=broad-except
            logger.info(ex)
        finally:
            logger.info('Client disconnected!, index: %s', index)
            client.close()
            ws_socket.close()

    def _listen_to_client(self, client, ws_socket, index):
        """Pump: local client -> websocket, until either side closes."""
        try:
            buf = bytearray(4096)
            while True:
                logger.info('Waiting for debugger data, index: %s', index)
                nbytes = client.recv_into(buf, len(buf))
                logger.info('Received debugger data, nbytes: %s, index: %s', nbytes, index)
                if nbytes > 0:
                    responseData = buf[0:nbytes]
                    logger.info('Sending to websocket, index: %s', index)
                    ws_socket.send_binary(responseData)
                    logger.info('Done sending to websocket, index: %s', index)
                else:
                    logger.info('Client close, index: %s', index)
                    break
        except Exception as ex:  # pylint: disable=broad-except
            logger.info(ex)
        finally:
            logger.info('Client disconnected %s', index)
            client.close()
            ws_socket.close()

    def start_server(self):
        """Run the accept loop (blocks indefinitely)."""
        self._listen()

    def cleanup(self):
        """Delete the current bastion session token, if any."""
        if self.last_token:
            logger.info('Cleaning up session')
            if self.node_id:
                custom_header = {'X-Node-Id': self.node_id}
            else:
                custom_header = {}
            web_address = 'https://{}/api/tokens/{}'.format(self.bastion.dns_name, self.last_token)
            response = requests.delete(web_address, headers=custom_header,
                                       verify=(not should_disable_connection_verify()))
            if response.status_code == 404:
                logger.info('Session already deleted')
            elif response.status_code not in [200, 204]:
                exp = CloudError(response)
                raise exp
            self.last_token = None
            self.node_id = None
        else:
            logger.debug('Nothing to clean up')

    def get_port(self):
        """Return the local port actually bound (useful after auto-select)."""
        return self.local_port
|
parametersearch.py | """
Distributed Parameter Search
============================
This library distributes parameter searches over many clients.
It's designed for simplicity and hackability first.
Its author is Thomas Unterthiner.
Simple Usage
------------
This is a single-file implementation, so just copy ```parametersearch.py``` to your source directory.
From there, do `from parametersearch import ParameterSearch` to use it.
`ParameterSearch` can be used to define all the different hyperparameter settings you want to try out.
As example, this piece of code defines two settings of different learning rates:
ps = ParameterSearch(output_file="results.csv") # results will be stored in results.csv
ps.add_parameter_setting({"learning_rate": 1e-2})
ps.add_parameter_setting({"learning_rate": 1e-3})
or you can use ```define_search_grid``` to set up a grid search:
param_grid = [{
'n_estimators': [20, 50],
'max_features': [14, 28]
}]
ps = define_search_grid(param_grid, output_file="results.csv")
Then, you can iterate over the created ParameterSearch instance to process the different settings, and
use the ```submit_result``` method to report the results back to the ParameterSearch object:
for (job_id, hyperparams) in ps:
print("Working on job %d: %s" % (job_id, hyperparams), flush=True)
model = sklearn.ensemble.RandomForestClassifier(**hyperparams)
model.fit(x_tr, y_tr)
p_va = model.predict(x_va)
accuracy_va = metrics.accuracy_score(y_va, p_va)
ps.submit_result(job_id, accuracy_va)
Distributed Usage
-----------------
You can distribute your hyperparameter search over several machines. To do this, set up your ParameterSearch
as usual in your server process, then call ```ParameterSearch.start_server(...)``` to make your
hyperparameter search available to the outside world.
Next start up any client processes: these create ParameterSearch instances that connect to the server process:
ps = ParameterSearch(host="my.server.com", port=5732)
And then use the ParameterSearch as usual. It will connect to the server and receive parameter settings defined
there. See ```example.py``` for a simple example.
License
-------
Distributed Parameter Search is copyrighted (c) 2019 by Thomas Unterthiner and licensed under the
`General Public License (GPL) Version 2 or higher <http://www.gnu.org/licenses/gpl-2.0.html>`_.
See ``LICENSE.md`` for the full details.
"""
import csv
import json
import logging
import socket
import struct
import socketserver
import threading
from sklearn.model_selection import ParameterGrid
import signal
logging.basicConfig(format='%(asctime)s - %(message)s', level=logging.INFO)
def define_search_grid(param_grid, output_file=None):
    """Creates a grid search.

    param_grid: specifies the points in the grid. This uses the same format as GridSearchCV in sklearn, see
                https://scikit-learn.org/stable/modules/grid_search.html#grid-search
    output_file: A CSV file that will be used to store the results of a hyperparameter search (optional)

    Returns: a ParameterSearch object
    """
    search = ParameterSearch(output_file=output_file)
    # Every point of the expanded grid becomes one pending job.
    for setting in ParameterGrid(param_grid):
        search.add_parameter_setting(setting)
    return search
class ParameterSearch(object):
    """A ParameterSearch stores the settings that will be tried out, as well as their eventual results.

    A ParameterSearch can work over a network, with one instance acting as server that hands out parameter
    settings and accepts results from remote client instances.
    Note: settings are not stored in order, i.e., the order in which you add them to the ParameterSearch does not
    necessarily coincide with the order in which they are handed out.
    """

    def __init__(self, host=None, port=None, output_file=None):
        """Creates a new ParameterSearch instance.

        If this is a client instance that gets its parameters from a remote instance, specify their host/port here.
        host: host name or IP address of server ParameterSearch instance (optional, also requires a port)
        port: port of server ParameterSearch instance port (optional, also requires a host)
        output_file: A CSV file that will be used to store the results of a hyperparameter search
        """
        self.waiting_jobs = []   # job ids not yet handed out (server side)
        self.running_jobs = []   # job ids handed out, result pending (server side)
        self.working_jobs = []   # job ids this process is working on (client side)
        self.log = logging.getLogger('dipasearch')
        self.log.setLevel(logging.INFO)
        self.is_serving = False
        if host is not None and port is None:
            raise RuntimeError("passed address but no port")
        elif port is not None and host is None:
            raise RuntimeError("passed port but no address")
        if host is not None and port is not None and output_file is not None:
            raise RuntimeError("client instances cannot store output files")
        self.is_client = host is not None and port is not None
        if not self.is_client:
            self.database = Database(output_file)
            self.database_lock = threading.Lock()
        else:
            self.host = socket.gethostbyname(host)
            self.port = port

    def add_parameter_setting(self, setting):
        """Adds a setting to the search.

        setting: a dictionary that maps setting-names to the values they take
        """
        job = self.database.add_job(setting)
        self.waiting_jobs.append(job.id)

    def start_server(self, host, port, as_thread=False):
        """Starts accepting remote requests for jobs and waits for replies.

        host: the IP address or hostname from which to serve from
        port: the port from which to serve from
        as_thread: if true, start the server in a separate thread
        """
        assert not self.is_client, "Clients cannot act as Servers"
        self.log.info('Starting up server on %s:%d' % (host, port))
        self.is_serving = True

        def _server_loop(host_, port_, param_search_server):
            """The event loop for the server thread"""
            h = socket.gethostbyname(host_)
            socketserver.ThreadingTCPServer.allow_reuse_address = True
            with socketserver.ThreadingTCPServer((h, port_), _ServerRequestHandler(param_search_server)) as server:
                # Short timeout so the loop can notice is_serving flipping to False.
                server.timeout = 1  # seconds until we'll check if we need to stop serving
                while param_search_server.is_serving:
                    server.handle_request()

        if as_thread:
            t = threading.Thread(target=_server_loop, args=(host, port, self, ))
            t.start()
        else:
            _server_loop(host, port, self)

    def __iter__(self):
        return self

    def __next__(self):
        job_id, params = self.get_next_setting()
        if job_id is None:
            raise StopIteration()
        else:
            return job_id, params

    def get_results(self):
        """Return all (id, parameters, result) tuples recorded so far (server side only)."""
        assert not self.is_client, "Clients don't have access to the result list"
        return list(self.database.get_all_jobs())

    def get_next_setting(self):
        """Gets the next untried hyperparameter setting.

        Optionally, the setting can be requested from a remote ParameterSearch instance running on another
        host/port.
        Returns:
            a pair of job_id and the setting to try out, or (None, None) if there are no more settings
        """
        if self.is_client:
            job_id, data = self._request_remote_parameter_set()
            if job_id is not None:
                self.working_jobs.append(job_id)
            return job_id, data
        if not self.waiting_jobs:
            return None, None
        with self.database_lock:
            job_id = self.waiting_jobs.pop(0)
            self.running_jobs.append(job_id)
            job = self.database.get_job(job_id)
            return job.id, job.data

    def submit_result(self, job_id, result):
        """Submits the results of a job.

        NOTE(review): a result of 0 marks the job completed; any other value is
        treated as a failure and the job is re-queued (exit-code semantics).
        This conflicts with the module docstring's example of submitting an
        accuracy score — confirm the intended semantics.
        """
        if self.is_client:
            if job_id not in self.working_jobs:
                raise RuntimeError(f'This client is not working on job {job_id}')
            self._submit_remote_job(job_id, result)
            self.working_jobs.remove(job_id)
            return
        with self.database_lock:
            if job_id not in self.running_jobs:
                self.log.info(f"submission rejected, job {job_id}: Job not running.")
                raise RuntimeError("Job not running.")
            self.running_jobs.remove(job_id)
            if result == 0:
                self.database.complete_job(job_id, result)
            else:
                self.log.info(f"job failed, job {job_id}: reattaching to waiting jobs")
                self.waiting_jobs.append(job_id)
            # Debug output left in deliberately; remove once the protocol is stable.
            print('Running: ', self.running_jobs)
            print('Waiting: ', self.waiting_jobs)
            if not self.running_jobs and not self.waiting_jobs:
                self.log.info("All jobs finished, sending server shutdown signal")
                self.is_serving = False

    def _submit_remote_job(self, job_id, result):
        """Send a job result to the server; raises RuntimeError when the server rejects it."""
        with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
            data = {'id': job_id, 'result': result}
            sock.connect((self.host, self.port))
            # BUG FIX: use sendall — plain send() may write only part of the
            # buffer on a busy socket, corrupting the framed protocol.
            sock.sendall(b'S')
            data = json.dumps(data).encode("utf8")
            self.log.info("submitting data: %s" % data)
            sock.sendall(struct.pack("<I", len(data)))
            sock.sendall(data)
            is_ok = struct.unpack("b", sock.recv(1))[0]
            if not is_ok:
                with sock.makefile() as f:
                    error_msg = f.read()
                raise RuntimeError("Result submission failed: %s" % error_msg)
            return

    def _request_remote_parameter_set(self, retries=0):
        """Ask the server for the next job.

        Returns (None, None) when no work is left or the connection keeps
        being refused after *retries* attempts.
        """
        try:
            with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
                sock.connect((self.host, self.port))
                sock.sendall(b'R')
                with sock.makefile() as f:
                    data = f.read()
                if data is None or not data:
                    # Empty payload means the server has no jobs left.
                    return None, None
                d = json.loads(data)
                job_id = d['id']
                data = d['data']
                return job_id, data
        except ConnectionRefusedError as err:
            self.log.warning("connection refused: %s (retries: %d)" % (err, retries))
            if retries > 0:
                # BUG FIX: ``time`` was never imported at module level, so the
                # retry path crashed with NameError; import it locally here.
                import time
                time.sleep(1)
                return self._request_remote_parameter_set(retries-1)
            else:
                return None, None
class _ServerRequestHandler(socketserver.StreamRequestHandler):
"""Handles the requests of a ParameterSearch server instance"""
def __init__(self, parameter_search):
self.parameter_search = parameter_search
# see https://stackoverflow.com/questions/15889241/send-a-variable-to-a-tcphandler-in-python
def __call__(self, request, client_address, server):
h = _ServerRequestHandler(self.parameter_search)
socketserver.StreamRequestHandler.__init__(h, request, client_address, server)
def handle(self):
msgtype = self.rfile.read(1)
if msgtype == b'R': # request job
job_id, job_data = self.parameter_search.get_next_setting()
if job_id is None:
self.parameter_search.log.warning("no jobs left for current request")
return
self.parameter_search.log.info("new request, sending job %d" % job_id)
data = {'id': job_id, 'data': job_data}
data = json.dumps(data).encode("utf8")
self.wfile.write(data)
elif msgtype == b"S": # finished a job
self.parameter_search.log.debug("preparing to receive submission")
buflen = struct.unpack("<I", self.rfile.read(4))[0]
data = self.rfile.read(buflen).decode("utf8")
data = json.loads(data)
self.parameter_search.log.info("new submission, job %d, result: %s" % (data['id'], data['result']))
try:
self.parameter_search.submit_result(data['id'], data['result'])
except RuntimeError as err:
self.wfile.write(struct.pack("b", 0))
self.wfile.write(str(err).encode("utf8"))
else:
self.wfile.write(struct.pack("b", 1))
self.wfile.flush()
else:
self.parameter_search.error("Unknown message type: %s" % msgtype)
raise RuntimeError("Unknown message type: %s" % msgtype)
class ParameterClient(object):
    """Lightweight client that pulls parameter settings from a remote ParameterSearch server."""

    def __init__(self, host=None, port=None):
        """Creates a new ParameterClient instance.

        host: host name or IP address of the server ParameterSearch instance (also requires a port)
        port: port of the server ParameterSearch instance (also requires a host)
        """
        self.working_jobs = []
        self.log = logging.getLogger('dipasearch')
        self.log.setLevel(logging.INFO)
        # we are client
        self.is_serving = False
        if host is not None and port is None:
            raise RuntimeError("passed address but no port")
        elif port is not None and host is None:
            raise RuntimeError("passed port but no address")
        self.is_client = True
        self.host = socket.gethostbyname(host)
        self.port = port

    def __iter__(self):
        return self

    def __next__(self):
        job_id, params = self.get_next_setting()
        if job_id is None:
            raise StopIteration()
        else:
            return job_id, params

    def get_next_setting(self):
        """Gets the next untried hyperparameter setting from the remote server.

        Returns:
            a pair of job_id and the setting to try out, or (None, None) if there are no more settings
        """
        if self.is_client:
            job_id, data = self._request_remote_parameter_set()
            if job_id is not None:
                self.working_jobs.append(job_id)
            return job_id, data

    def _request_remote_parameter_set(self, retries=0):
        """Ask the server for the next job; (None, None) when no work is left.

        BUG FIX: this method was missing entirely, so get_next_setting crashed
        with AttributeError; implementation mirrors ParameterSearch's client path.
        """
        try:
            with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
                sock.connect((self.host, self.port))
                sock.sendall(b'R')
                with sock.makefile() as f:
                    data = f.read()
                if not data:
                    # Empty payload means the server has no jobs left.
                    return None, None
                d = json.loads(data)
                return d['id'], d['data']
        except ConnectionRefusedError as err:
            self.log.warning("connection refused: %s (retries: %d)" % (err, retries))
            if retries > 0:
                import time  # not imported at module level in this file
                time.sleep(1)
                return self._request_remote_parameter_set(retries - 1)
            return None, None
class _Job(object):
"""A Job is the internal representation of a hyperparameter setting."""
def __init__(self, id, data, result):
self.id = id
self.data = data
self.result = result
class Database(object):
    """In-memory registry of every _Job, with optional CSV persistence."""

    def __init__(self, output_file=None):
        # job id -> _Job; ids are handed out sequentially starting at 1
        self.data = {}
        self._max_idx = 1
        self.output_file = output_file

    def add_job(self, job_data):
        """Register a new parameter setting and return its freshly created _Job."""
        new_job = _Job(self._max_idx, job_data, None)
        self.data[new_job.id] = new_job
        self._max_idx += 1
        return new_job

    @property
    def n_jobs(self):
        """Number of jobs ever registered."""
        return len(self.data)

    def get_job(self, job_id):
        """Look up a job by id (raises KeyError for unknown ids)."""
        return self.data[job_id]

    def complete_job(self, job_id, result):
        """Record *result* for a job and persist all results to disk."""
        if job_id not in self.data:
            raise RuntimeError("Job does not exist")
        job = self.data[job_id]
        if job.result is not None:
            raise RuntimeError("Job already completed")
        job.result = result
        self._save_results()

    def _save_results(self):
        """Rewrite the CSV output file with the current state of every job."""
        if self.output_file is None:
            return
        with open(self.output_file, "w", newline='') as fh:
            writer = csv.DictWriter(fh, fieldnames=["id", "parameters", "result"])
            writer.writeheader()
            writer.writerows(
                {"id": job.id, "parameters": json.dumps(job.data), "result": job.result}
                for job in self.data.values()
            )

    def get_all_jobs(self):
        """Yield (id, parameters, result) for every registered job."""
        for job in self.data.values():
            yield (job.id, job.data, job.result)
|
graph.py | from config import api_key
from Queue import Queue
import sys
import requests
import threading
import networkx as nx
import matplotlib.pyplot as plt
# Shared HTTP session; the Steam API key is attached to every request as a query parameter.
session = requests.Session()
session.params = {"key": api_key}
def get_request_json(route, params):
    """GET a Steam Web API *route* and return the decoded JSON body.

    Raises requests.exceptions.HTTPError for non-2xx responses
    (e.g. private profiles).
    """
    response = session.get("https://api.steampowered.com/" + route, params=params)
    response.raise_for_status()
    return response.json()
# Shared state for the worker threads
user_data = {}  # steam_id -> {"friends": [...], "games": {appid: playtime_forever}}
ids_to_process = Queue()  # steam ids awaiting a fetch
lock = threading.Lock()  # guards writes to user_data
working = True  # True while the main thread may still enqueue more ids
def add_user_data():
    """Worker loop: pull steam ids off the queue and fetch their friends/games.

    Runs until the queue is empty AND the main thread has cleared ``working``.
    NOTE(review): ``empty()`` followed by ``get()`` is racy — a worker can
    block forever on ``get()`` once the queue drains; the daemon flag on the
    threads is what lets the process exit anyway.
    """
    while not ids_to_process.empty() or working:
        steam_id = ids_to_process.get()
        try:
            # Get friends and individual game playtimes for the user
            params = {"steamid": steam_id}
            friends = [friend["steamid"] for friend in get_request_json("ISteamUser/GetFriendList/v0001", params)["friendslist"]["friends"]]
            games_response = get_request_json("IPlayerService/GetOwnedGames/v0001", params)["response"]
            games = {game_info["appid"]: game_info["playtime_forever"] for game_info in games_response["games"]} if "games" in games_response else {}
            # Publish results under the lock so concurrent workers don't clobber the dict.
            lock.acquire()
            user_data[steam_id] = {}
            user_data[steam_id]["friends"] = friends
            user_data[steam_id]["games"] = games
            lock.release()
        except requests.exceptions.HTTPError:
            # Ignore private profiles
            pass
        ids_to_process.task_done()
if len(sys.argv) != 3:
    print "Incorrect number of input arguments"
    print "Usage: python graph.py <username> <degrees of separation>"
    sys.exit(1)

# Spawn worker threads (daemonized so the process can exit while they block on the queue)
for _ in range(20):
    thread = threading.Thread(target=add_user_data)
    thread.daemon = True
    thread.start()

# Get user data for the specified username
username = sys.argv[1]
steam_id_response = get_request_json("ISteamUser/ResolveVanityURL/v0001", {"vanityurl": username})["response"]
if "steamid" not in steam_id_response:
    print "Invalid steam username; user either does not exist or has a private profile"
    sys.exit(1)
ids_to_process.put(steam_id_response["steamid"])
ids_to_process.join()

# For the number of degrees of separation specified, gather data for friends
degrees_of_separation = int(sys.argv[2])
for i in range(degrees_of_separation):
    # Add user data for all current users' friends that are not in user_data yet
    for user in set([friend for friend_list in [user["friends"] for user in user_data.values()] for friend in friend_list if friend not in user_data]):
        ids_to_process.put(user)
    # Keep the workers' loop condition true until the final round has been enqueued
    working = i != degrees_of_separation - 1
    ids_to_process.join()
# Calculate the total play time for each
# user and add the users to the graph
graph = nx.Graph()
for steam_id, data in user_data.items():
    data["total_playtime"] = sum(data["games"].values())
    graph.add_node(steam_id)

# Keep track of the number of links between users with different
# game interests and the total number of links for calculating homophily
cross_links = 0
total_links = 0

# Add edges of different colors between friends based
# on the similarity of their taste in games
for user, data in user_data.items():
    for friend in data["friends"]:
        # If we have game info for the friend and there isn't already an edge between them
        if friend in user_data and friend not in graph[user]:
            # Calculate the amount of time spent playing games in common
            # (weighted twice so it is comparable to the sum of both users' totals below)
            common_playtime = 0
            for appid, friend_playtime in user_data[friend]["games"].items():
                if appid in user_data[user]["games"]:
                    common_playtime += 2 * min(friend_playtime, user_data[user]["games"][appid])
            # Calculate similarity ratio
            total_playtime = user_data[user]["total_playtime"] + user_data[friend]["total_playtime"]
            similarity = float(common_playtime) / total_playtime if total_playtime > 0 else 0
            # Add an edge between the user and their friend with
            # a different color to indicate whether they have
            # very similar interests, similar interests,
            # or not similar interests
            if similarity > .25:
                graph.add_edge(user, friend, color='r')
            elif similarity > .125:
                # Only the least-similar ("yellow") links count as cross links
                graph.add_edge(user, friend, color='b')
            else:
                cross_links += 1
                graph.add_edge(user, friend, color='y')
            total_links += 1

# Calculate and print homophily value
print "Homophily index (max of 1) of this graph is: "+(str(1 - float(cross_links) / total_links) if total_links > 0 else "0")

# Display the graph
# NOTE(review): the ``edges=`` keyword is only honoured by old networkx
# versions of draw(); confirm the pinned networkx release.
pos = nx.shell_layout(graph)
edges = graph.edges()
edge_colors = [graph[u][v]['color'] for u, v in edges]
node_colors = ['black' for node in graph.nodes()]
node_sizes = [10 for node in graph.nodes()]
nx.draw(graph, pos, edges=edges, edge_color=edge_colors, node_color=node_colors, node_size=node_sizes)
plt.show()
|
demo2.py | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
import sys, os
import time
from pynfc import *
from threading import Thread
# ANSI escape sequences for colored terminal output
RED='\033[01;31m'
GREEN='\033[01;32m'
NC='\033[0m' # No Color
def read_adc_channel(channel):
    """Return the voltage on ADC *channel*, averaged over 10 raw samples.

    Raw sysfs readings are scaled as 12-bit (0..4095) against what is
    presumably a 1.85 V reference — confirm against the board's ADC spec.
    """
    n_times = 10
    raw_sum = 0
    for i in xrange(n_times):
        raw = int(open('/sys/bus/iio/devices/iio:device0/in_voltage%d_raw' % channel).read())
        raw_sum += raw
    raw_mean = float(raw_sum) / n_times
    return raw_mean / 4096. * 1.85
def get_vin():
    """Supply voltage: ADC channel 1 scaled by 7.061 (presumably the on-board divider ratio — confirm with schematic)."""
    return read_adc_channel(1) * 7.061
def get_vbat():
    """Battery voltage: ADC channel 7 scaled by 4 (presumably the divider ratio — confirm with schematic)."""
    return read_adc_channel(7) * 4
def read_ultralight(n):
    """Read 16 pages from a Mifare Ultralight tag and return them as one hex string.

    Sends four '30' commands (appears to be the Ultralight READ command — confirm
    against pynfc's sendAPDU semantics), each addressed at page i*4.
    """
    hex_data = ''
    for i in xrange(4):
        status, data = n.sendAPDU(['30', hex(i * 4)[2:].zfill(2)])
        hex_data += data
    return hex_data
# sysfs GPIO numbers for the attached hardware
RED_LED = 3
GREEN_LED = 2
BUZZER = 33
#~ BUZZER = 32
LOCK_GPIO = 60  # drives the door lock; grant_access() writes 0 to open it
EXIT_BUTTON_GPIO = 4  # exit button input; handle_exit_button() treats low as pressed
def export_gpio(gpio):
    """Make *gpio* visible in /sys/class/gpio via the sysfs export file."""
    with open('/sys/class/gpio/export', 'wt') as export_file:
        export_file.write("%d\n" % gpio)
def gpio_set_direction(gpio, direction):
    """Configure *gpio* as "in" or "out" through its sysfs direction file."""
    with open('/sys/class/gpio/gpio%d/direction' % gpio, 'wt') as direction_file:
        direction_file.write(direction + "\n")
def init_gpios():
    """Export every pin used by this script and set its sysfs direction."""
    outputs = (RED_LED, GREEN_LED, BUZZER, LOCK_GPIO)
    inputs = (EXIT_BUTTON_GPIO,)
    for pin in outputs + inputs:
        export_gpio(pin)
        gpio_set_direction(pin, "out" if pin in outputs else "in")
def gpio_set_value(gpio, value):
    """Drive an exported output *gpio* to 0 or 1."""
    with open('/sys/class/gpio/gpio%d/value' % gpio, 'wt') as value_file:
        value_file.write("%d\n" % value)
def gpio_get_value(gpio):
    """Read an exported *gpio* and return its level as a bool."""
    raw_level = open('/sys/class/gpio/gpio%d/value' % gpio).read()
    return bool(int(raw_level.strip()))
def handle_exit_button():
    """Poll the exit button once per second; a low level means pressed and opens the door."""
    while True:
        if not gpio_get_value(EXIT_BUTTON_GPIO):
            grant_access()
        time.sleep(1)
def grant_access():
    """ Opens lock for a couple of seconds"""
    print "grant access"
    # Switch indicators to "granted": green LED on, short buzzer chirp
    gpio_set_value(RED_LED, 0)
    gpio_set_value(GREEN_LED, 1)
    gpio_set_value(BUZZER, 1)
    gpio_set_value(LOCK_GPIO, 0) # open lock
    time.sleep(0.5)
    gpio_set_value(BUZZER, 0)
    time.sleep(3)
    gpio_set_value(LOCK_GPIO, 1) # close lock
    # Back to the idle state: red LED on
    gpio_set_value(GREEN_LED, 0)
    gpio_set_value(RED_LED, 1)
if __name__ == '__main__':
    init_gpios()
    # Background poller for the physical exit button (daemon so Ctrl-C still exits)
    exit_button_thread = Thread(target = handle_exit_button)
    exit_button_thread.daemon = True
    exit_button_thread.start()
    nfc = NFC(0) # Select first NFC device
    nfc.powerOn()
    prev_result = None
    led_color = 0  # NOTE(review): written but never read
    # Idle state: red LED on, lock engaged
    gpio_set_value(RED_LED, 1)
    gpio_set_value(GREEN_LED, 0)
    gpio_set_value(BUZZER, 0)
    gpio_set_value(LOCK_GPIO, 1)
    while True:
        # Select card
        c = nfc.selectISO14443A()
        access_granted = False
        card_result = c.uid if c else None
        # Only react on a change so a card held against the reader doesn't retrigger
        if card_result != prev_result:
            print "\033c"
            if c:
                print GREEN + "Card: " + NC + "[%s] %s" % (c.atqa, c.uid)
                if c.atqa == '0044':
                    print "Found Mifare Ultralight card"
                    try:
                        ul_data = read_ultralight(nfc)
                        # NOTE(review): hex digits 37..44 presumably hold the metro card number — confirm card layout
                        metro_num = int(ul_data[37:45], 16)
                        print 'Metro UL card: ' + RED + str(metro_num) + NC
                        # Demo rule: even card numbers are allowed in
                        if (metro_num % 2 == 0):
                            access_granted = True
                    except:
                        print "Error"
                    if access_granted:
                        print "access granted"
                        grant_access()
                        print "end"
                    else:
                        # Denied: three short red/buzzer blinks
                        for i in xrange(3):
                            gpio_set_value(BUZZER, 1)
                            gpio_set_value(RED_LED, 1)
                            time.sleep(0.1)
                            gpio_set_value(BUZZER, 0)
                            gpio_set_value(RED_LED, 0)
                            time.sleep(0.1)
                        gpio_set_value(RED_LED, 1)
            else:
                print "No card in field"
                led_color = 0
        prev_result = card_result
|
restful_template.py | from befh.restful_api_socket import RESTfulApiSocket
from befh.exchanges.gateway import ExchangeGateway
from befh.market_data import L2Depth, Trade
from befh.util import Logger
from befh.instrument import Instrument
from befh.clients.sql_template import SqlClientTemplate
from functools import partial
from datetime import datetime
from threading import Thread
import time
class ExchGwApiTemplate(RESTfulApiSocket):
    """
    Exchange gateway RESTfulApi

    Template for polling order books and trades from a REST endpoint; the
    field-name classmethods describe the exchange's JSON schema.
    """

    def __init__(self):
        RESTfulApiSocket.__init__(self)

    @classmethod
    def get_timestamp_offset(cls):
        # Divisor applied to raw exchange timestamps to obtain seconds.
        return 1

    @classmethod
    def get_order_book_timestamp_field_name(cls):
        return 'date'

    @classmethod
    def get_trades_timestamp_field_name(cls):
        return 'date'

    @classmethod
    def get_bids_field_name(cls):
        return 'bids'

    @classmethod
    def get_asks_field_name(cls):
        return 'asks'

    @classmethod
    def get_trade_side_field_name(cls):
        return 'type'

    @classmethod
    def get_trade_id_field_name(cls):
        return 'tid'

    @classmethod
    def get_trade_price_field_name(cls):
        return 'price'

    @classmethod
    def get_trade_volume_field_name(cls):
        return 'amount'

    @classmethod
    def get_order_book_link(cls, instmt):
        """URL for a 5-level order book snapshot of the instrument."""
        return "https://data.btcchina.com/data/orderbook?limit=5&market=%s" % instmt.get_instmt_code()

    @classmethod
    def get_trades_link(cls, instmt):
        """URL for trades newer than the last seen trade id (or the latest 100 on first poll)."""
        if int(instmt.get_exch_trade_id()) > 0:
            return "https://data.btcchina.com/data/historydata?market=%s&since=%s" % \
                   (instmt.get_instmt_code(), instmt.get_exch_trade_id())
        else:
            return "https://data.btcchina.com/data/historydata?limit=100&market=%s" % \
                   (instmt.get_instmt_code())

    @classmethod
    def parse_l2_depth(cls, instmt, raw):
        """
        Parse raw data to L2 depth
        :param instmt: Instrument
        :param raw: Raw data in JSON
        :return: L2Depth
        :raises Exception: when the expected keys are missing from the payload
        """
        l2_depth = L2Depth()
        keys = list(raw.keys())
        if cls.get_order_book_timestamp_field_name() in keys and \
           cls.get_bids_field_name() in keys and \
           cls.get_asks_field_name() in keys:
            # Date time
            date_time = float(raw[cls.get_order_book_timestamp_field_name()])
            date_time = date_time / cls.get_timestamp_offset()
            l2_depth.date_time = datetime.utcfromtimestamp(date_time).strftime("%Y%m%d %H:%M:%S.%f")
            # Bids: highest price first. float() is a no-op on floats, so the
            # original's conditional conversion was redundant.
            # NOTE(review): assumes at least 5 levels per side — confirm the
            # endpoint always honours limit=5.
            bids = sorted(raw[cls.get_bids_field_name()], key=lambda x: x[0], reverse=True)
            for i in range(0, 5):
                l2_depth.bids[i].price = float(bids[i][0])
                l2_depth.bids[i].volume = float(bids[i][1])
            # Asks: lowest price first.
            asks = sorted(raw[cls.get_asks_field_name()], key=lambda x: x[0])
            for i in range(0, 5):
                l2_depth.asks[i].price = float(asks[i][0])
                l2_depth.asks[i].volume = float(asks[i][1])
        else:
            raise Exception('Does not contain order book keys in instmt %s-%s.\nOriginal:\n%s' % \
                (instmt.get_exchange_name(), instmt.get_instmt_name(), \
                 raw))
        return l2_depth

    @classmethod
    def parse_trade(cls, instmt, raw):
        """
        Parse raw data to a trade
        :param instmt: Instrument
        :param raw: Raw data in JSON
        :return: Trade
        :raises Exception: when the expected keys are missing from the payload
        """
        trade = Trade()
        keys = list(raw.keys())
        if cls.get_trades_timestamp_field_name() in keys and \
           cls.get_trade_id_field_name() in keys and \
           cls.get_trade_price_field_name() in keys and \
           cls.get_trade_volume_field_name() in keys:
            # Date time
            date_time = float(raw[cls.get_trades_timestamp_field_name()])
            date_time = date_time / cls.get_timestamp_offset()
            trade.date_time = datetime.utcfromtimestamp(date_time).strftime("%Y%m%d %H:%M:%S.%f")
            # Trade side
            # NOTE(review): hard-coded to 1 even though get_trade_side_field_name()
            # exists — the template leaves side parsing to concrete gateways.
            trade.trade_side = 1
            # Trade id
            trade.trade_id = str(raw[cls.get_trade_id_field_name()])
            # Trade price (float(str(x)) was redundant; float() accepts numbers
            # and numeric strings alike)
            trade.trade_price = float(raw[cls.get_trade_price_field_name()])
            # Trade volume
            trade.trade_volume = float(raw[cls.get_trade_volume_field_name()])
        else:
            raise Exception('Does not contain trade keys in instmt %s-%s.\nOriginal:\n%s' % \
                (instmt.get_exchange_name(), instmt.get_instmt_name(), \
                 raw))
        return trade

    @classmethod
    def get_order_book(cls, instmt):
        """
        Get order book
        :param instmt: Instrument
        :return: Object L2Depth, or None when the request returned nothing
        """
        res = cls.request(cls.get_order_book_link(instmt))
        if res:
            return cls.parse_l2_depth(instmt=instmt, raw=res)
        return None

    @classmethod
    def get_trades(cls, instmt):
        """
        Get trades
        :param instmt: Instrument
        :return: List of trades (empty when the request returned nothing)
        """
        res = cls.request(cls.get_trades_link(instmt))
        return [cls.parse_trade(instmt=instmt, raw=t) for t in res]
class ExchGwTemplate(ExchangeGateway):
    """
    Exchange gateway template: polls order book and trades on worker
    threads and stores snapshots through the configured database clients.
    """
    def __init__(self, db_clients):
        """
        Constructor
        :param db_clients: List of database clients
        """
        ExchangeGateway.__init__(self, ExchGwApiTemplate(), db_clients)

    @classmethod
    def get_exchange_name(cls):
        """
        Get exchange name
        :return: Exchange name string
        """
        return 'Template'

    def get_order_book_worker(self, instmt):
        """
        Order book worker: polls once per second forever and stores a new
        snapshot whenever the depth differs from the previous one.
        :param instmt: Instrument
        """
        while True:
            try:
                l2_depth = self.api_socket.get_order_book(instmt)
                if l2_depth is not None and l2_depth.is_diff(instmt.get_l2_depth()):
                    instmt.set_prev_l2_depth(instmt.get_l2_depth())
                    instmt.set_l2_depth(l2_depth)
                    instmt.incr_order_book_id()
                    self.insert_order_book(instmt)
            except Exception as e:
                Logger.error(self.__class__.__name__, "Error in order book: %s" % e)
            time.sleep(1)

    def get_trades_worker(self, instmt):
        """
        Trades worker: polls once per second forever and stores every trade
        with an id newer than the last one seen on the exchange.
        :param instmt: Instrument
        """
        while True:
            try:
                ret = self.api_socket.get_trades(instmt)
                if ret is None or len(ret) == 0:
                    time.sleep(1)
                    continue
            except Exception as e:
                Logger.error(self.__class__.__name__, "Error in trades: %s" % e)
                # BUGFIX: on a failed request 'ret' is unbound; previously
                # execution fell through to the processing loop below and
                # raised NameError. Retry after the usual delay instead.
                time.sleep(1)
                continue
            for trade in ret:
                assert isinstance(trade.trade_id, str), "trade.trade_id(%s) = %s" % (type(trade.trade_id), trade.trade_id)
                assert isinstance(instmt.get_exch_trade_id(), str), \
                    "instmt.get_exch_trade_id()(%s) = %s" % (type(instmt.get_exch_trade_id()), instmt.get_exch_trade_id())
                # Only store trades strictly newer than the last processed id.
                if int(trade.trade_id) > int(instmt.get_exch_trade_id()):
                    instmt.set_exch_trade_id(trade.trade_id)
                    instmt.incr_trade_id()
                    self.insert_trade(instmt, trade)
            # After the first time of getting the trade, indicate the instrument
            # is recovered
            if not instmt.get_recovered():
                instmt.set_recovered(True)
            time.sleep(1)

    def start(self, instmt):
        """
        Start the exchange gateway: initialize depth/snapshot state and spawn
        one thread per worker.
        :param instmt: Instrument
        :return: List of started threads
        """
        instmt.set_l2_depth(L2Depth(5))
        instmt.set_prev_l2_depth(L2Depth(5))
        instmt.set_instmt_snapshot_table_name(self.get_instmt_snapshot_table_name(instmt.get_exchange_name(),
                                                                                  instmt.get_instmt_name()))
        self.init_instmt_snapshot_table(instmt)
        instmt.set_recovered(False)
        t1 = Thread(target=partial(self.get_order_book_worker, instmt))
        t2 = Thread(target=partial(self.get_trades_worker, instmt))
        t1.start()
        t2.start()
        return [t1, t2]
if __name__ == '__main__':
    # Smoke-test driver: construct one instrument and run the workers inline
    # (bypasses start(), so no snapshot table is created here).
    Logger.init_log()
    exchange_name = 'Template'
    instmt_name = 'BTCCNY'
    instmt_code = 'btccny'
    instmt = Instrument(exchange_name, instmt_name, instmt_code)
    db_client = SqlClientTemplate()
    exch = ExchGwTemplate([db_client])
    instmt.set_l2_depth(L2Depth(5))
    instmt.set_prev_l2_depth(L2Depth(5))
    instmt.set_recovered(False)
    # NOTE(review): get_order_book_worker loops forever, so the trades worker
    # below is never reached — run them on threads (or exch.start) if both
    # are actually wanted.
    exch.get_order_book_worker(instmt)
    exch.get_trades_worker(instmt)
|
libipmi.py | import threading
import sqlite3 as sql
import subprocess
import time
import csv
import shutil
from powerboard import summary_v2
import tensorflow as tf
import os
# Epoch timestamp captured in start(); readings are stored relative to it.
time_start = 0
# Run flag for the collection loop; stop() flips it to False.
glob_var = True
# saveDB worker threads spawned by mainThread (never joined here).
thread_list = []
# Stop the data reading using a global variable
def stop():
    """Signal the collection loop in mainThread() to finish."""
    global glob_var
    glob_var = False
# Function for threading the data saving on the Database
def saveDB(single_buffer, threadID):
    """
    Persist one buffer of sensor readings into the SQLite database.

    Runs on a worker thread so collection is not blocked by disk writes.
    :param single_buffer: list of [sensor_output, elapsed_seconds] pairs
    :param threadID: numeric id of the buffer/thread (logging only)
    """
    print("\n" + "ThreadID:" + str(threadID) + "\n")
    conn = sql.connect('ipmi_data.db')
    c = conn.cursor()
    for inst in single_buffer:
        # BUGFIX: the previous string-built INSERT broke (and was SQL
        # injectable) whenever the sensor output contained a quote.
        # Parameterized placeholders bind the values safely.
        c.execute("INSERT INTO SensorData VALUES (?, ?)",
                  (str(inst[0]).replace('\n', '\\n'), inst[1]))
    conn.commit()
    conn.close()
# The function in which runs the loop that collects the data
def _flush_remaining(buf):
    """Write any readings left in *buf* straight into the database."""
    conn = sql.connect('ipmi_data.db')
    c = conn.cursor()
    for inst in buf:
        # Parameterized so output containing quotes cannot break the INSERT.
        c.execute("INSERT INTO SensorData VALUES (?, ?)",
                  (str(inst[0]).replace('\n', '\\n'), inst[1]))
    conn.commit()
    conn.close()

def mainThread():
    """
    Collection loop: repeatedly read the 'Total Power' sensor via ipmitool
    and double-buffer the readings, handing each full buffer to a saveDB
    worker thread. Runs until stop() clears glob_var.
    """
    global glob_var
    global thread_list
    # Delete current database file so every run starts fresh.
    try:
        os.remove("ipmi_data.db")
    except OSError:
        pass
    # Create database schema.
    conn = sql.connect('ipmi_data.db')
    c = conn.cursor()
    c.execute("DROP TABLE IF EXISTS SensorData")
    c.execute("CREATE TABLE SensorData (sensorRead TEXT, dataTime FLOAT)")
    conn.commit()
    # BUGFIX: close the setup connection instead of leaking it for the
    # whole run (saveDB workers open their own connections).
    conn.close()
    # Two buffers: fill one while the other is being written to disk.
    buffers = [[], []]
    bufferNumber = 0
    i = 0
    while glob_var:
        # Read one sensor sample from ipmitool.
        sens = 'Total Power'
        command = ['sudo', 'ipmitool', 'sensor', 'get', sens]
        process = subprocess.run(
            command,
            stdout=subprocess.PIPE,
            universal_newlines=True)
        output = process.stdout.replace('\n', '\\n')
        # Store the raw output with the time elapsed since start().
        buffers[bufferNumber].append([output, time.time() - time_start])
        # When the active buffer is full, save it on a worker thread and
        # switch to (and reset) the other buffer.
        if len(buffers[bufferNumber]) > 10:
            thread = threading.Thread(target=saveDB, args=(buffers[bufferNumber], bufferNumber,))
            thread_list.append(thread)
            thread.start()
            bufferNumber = 1 - bufferNumber
            buffers[bufferNumber] = []
        i += 1
    # At the end of the process, save the buffer data remaining in the
    # database; at most one buffer still holds unsaved readings.
    if len(buffers[0]) > 0:
        _flush_remaining(buffers[0])
    elif len(buffers[1]) > 0:
        _flush_remaining(buffers[1])
# Starts the main thread and sets the timestamp
def start():
    """Record the start timestamp and launch the collection thread.

    The initial 'sudo echo' presumably primes sudo's credential cache so the
    later 'sudo ipmitool' calls in mainThread don't prompt — TODO confirm.
    """
    command = ['sudo', 'echo']
    process = subprocess.run(
        command,
        stdout=subprocess.PIPE,
        universal_newlines=True)
    global time_start
    time_start = time.time()
    main_thread = threading.Thread(target=mainThread)
    main_thread.start()
# Saves the raw database data on a nice CSV file
def dbToCSV(path):
    """
    Convert the raw SensorData table into ipmi_data.csv and copy it to *path*.

    Each stored ipmitool output block is split into the columns listed in
    first_row, plus the elapsed-time column.
    :param path: directory to create (if needed) and copy the CSV into
    """
    try:
        os.mkdir(path)
    # BUGFIX: the bare 'except:' also swallowed SystemExit/KeyboardInterrupt;
    # only directory-creation failures should be tolerated here.
    except OSError:
        print ("Creation of the directory %s failed" % path)
    else:
        print ("Successfully created the directory %s " % path)
    writer = tf.summary.create_file_writer("demo_logs")
    with writer.as_default():
        summary_v2.greeting("ipmi_data.csv", path, step=0)
    conn = sql.connect('ipmi_data.db')
    c = conn.cursor()
    sens = 'Total Power'
    with open('ipmi_data' + '.csv', 'w') as file_out:
        write = csv.writer(file_out)
        first_row = ['Sensor_ID', 'Entity_ID', 'Sensor_Type_Threshold_',
                     'Sensor_Reading', 'Status',
                     'Lower_Non_Recoverable', 'Lower_Critical',
                     'Lower_Non_Critical', 'Upper_Non_Critical', 'Upper_Critical',
                     'Upper_Non_Recoverable', 'Positive_Hysteresis',
                     'Negative_Hysteresis', 'Assertion_Events',
                     'Assertions_Enabled', 'Time_elapsed']
        write.writerow(first_row)
        for db_row in c.execute("SELECT * FROM SensorData"):
            output = str(db_row[0])
            read_time_value = float(db_row[1])
            # Undo the '\n' escaping applied when the reading was stored,
            # then drop the header/footer lines of the ipmitool block.
            output = output.replace('\\n', '\n')
            output = output.split('\n')[2:-2]
            current_row = []
            current_row.append(sens)
            for i in range(len(output)):
                # Each line is "Name : value [extra]"; keep the first token
                # of every value field.
                output[i] = output[i].split(':')
                output[i][0] = output[i][0].replace(' ', '')
                for j in range(1, len(output[i])):
                    output[i][j] = output[i][j].split()
                    if len(output[i][j]) > 0:
                        current_row.append(output[i][j][0])
                    else:
                        current_row.append('Empty_string')
            current_row.append("{:.5f}".format(read_time_value))
            write.writerow(current_row)
    # Copy the CSV file into the directory passed in argument.
    shutil.copy("ipmi_data.csv", path)
|
chatcommunicate.py | from chatexchange import events
from chatexchange.browser import LoginError
from chatexchange.messages import Message
from chatexchange_extension import Client
import collections
import itertools
import os
import os.path
import pickle
import queue
import regex
import requests
import sys
import threading
import time
import yaml
import datahandling
import metasmoke
import classes.feedback
from helpers import log
from excepthook import log_exception
from globalvars import GlobalVars
from parsing import fetch_post_id_and_site_from_url, fetch_post_url_from_msg_content, fetch_owner_url_from_msg_content
from tasks import Tasks
from socketscience import SocketScience
# Per-run message history: 'messages' maps (host, room_id) -> deque of recent
# message ids; 'reports' maps (host, message_id) -> report data tuple.
LastMessages = collections.namedtuple("LastMessages", ["messages", "reports"])
class RoomData:
    """Per-room state: the chat room handle, the timestamp until which the
    room is blocked (-1 = not blocked) and whether a deletion watcher is
    attached to messages posted there."""

    def __init__(self, room, block_time, deletion_watcher):
        self.room, self.block_time, self.deletion_watcher = \
            room, block_time, deletion_watcher
class CmdException(Exception):
    """Raised inside command handlers to report a user-facing error;
    ChatCommand.__call__ converts it into the reply text."""
    pass
# Registries filled by the @command decorator: "!!/" prefix commands and
# reply-triggered commands, each mapping name/alias -> (ChatCommand, arity).
_prefix_commands = {}
_reply_commands = {}
# One chat Client per host; populated by init().
_clients = {
    "stackexchange.com": None,
    "stackoverflow.com": None,
    "meta.stackexchange.com": None
}
# (site, roomid) sets loaded from the room config by parse_room_config().
_command_rooms = set()
_watcher_rooms = set()
# role/message-type name -> set of (site, roomid) subscribed to it.
_room_roles = {}
# (site, roomid) -> set of privileged user ids.
_privileges = {}
# Timestamp until which ALL rooms are blocked; -1 means not blocked.
_global_block = -1
# (site, roomid) -> RoomData for every joined room.
_rooms = {}
_last_messages = LastMessages({}, collections.OrderedDict())
# Outgoing (room, text, report_data) items consumed by send_messages().
_msg_queue = queue.Queue()
# Event that wakes pickle_last_messages() to persist _last_messages.
_pickle_run = threading.Event()
def init(username, password, try_cookies=True):
    """Log into every chat host, load room config, restore message history
    and start the sender/pickler daemon threads.

    :param username: chat account e-mail
    :param password: chat account password
    :param try_cookies: when True, try a cached-cookie login first and fall
        back to credential login if the cookie is missing or rejected
    """
    global _clients
    global _rooms
    global _room_data
    global _last_messages
    for site in _clients.keys():
        client = Client(site)
        logged_in = False
        if try_cookies:
            if GlobalVars.cookies is None:
                # No cookie jar yet: drop any stale pickle and start fresh.
                datahandling._remove_pickle("cookies.p")
                GlobalVars.cookies = {}
            else:
                cookies = GlobalVars.cookies
                try:
                    if site in cookies and cookies[site] is not None:
                        client.login_with_cookie(cookies[site])
                        logged_in = True
                        log('debug', 'Logged in using cached cookies')
                except LoginError as e:
                    exc_type, exc_obj, exc_tb = sys.exc_info()
                    log('debug', 'Login error {}: {}'.format(exc_type.__name__, exc_obj))
                    log('debug', 'Falling back to credential-based login')
                    # The cached cookie is invalid: discard it and re-persist.
                    del cookies[site]
                    datahandling.dump_cookies()
        if not logged_in:
            # Credential login, up to 3 attempts.
            for retry in range(3):
                try:
                    GlobalVars.cookies[site] = client.login(username, password)
                    break
                except Exception as e:
                    exc_type, exc_obj, exc_tb = sys.exc_info()
                    log('debug', 'Login error {}: {}'.format(exc_type.__name__, exc_obj))
            else:
                # for/else: reached only when no attempt broke out (all failed).
                raise Exception("Failed to log into " + site + ", max retries exceeded")
        _clients[site] = client
    # A site-local rooms_custom.yml overrides the stock room list.
    if os.path.exists("rooms_custom.yml"):
        parse_room_config("rooms_custom.yml")
    else:
        parse_room_config("rooms.yml")
    if not GlobalVars.standby_mode:
        join_command_rooms()
    # Restore message ids / report data persisted by pickle_last_messages().
    if os.path.isfile("messageData.p"):
        try:
            _last_messages = pickle.load(open("messageData.p", "rb"))
        except EOFError:
            pass
    threading.Thread(name="pickle ---rick--- runner", target=pickle_last_messages, daemon=True).start()
    threading.Thread(name="message sender", target=send_messages, daemon=True).start()
    if try_cookies:
        datahandling.dump_cookies()
def join_command_rooms():
    """Join every configured command room and hook up the message handler."""
    for site, roomid in _command_rooms:
        chat_room = _clients[site].get_room(roomid)
        chat_room.join()
        chat_room.watch_socket(on_msg)
        watched = (site, roomid) in _watcher_rooms
        _rooms[(site, roomid)] = RoomData(chat_room, -1, watched)
def parse_room_config(path):
    """Load room/privilege configuration from *path* plus users.yml.

    Populates the module-level _privileges, _command_rooms, _watcher_rooms
    and _room_roles structures, then resolves privilege inheritance between
    rooms once every room's own privileges are known.

    :param path: YAML room-configuration file to read
    """
    # BUGFIX/safety: yaml.load without an explicit Loader is deprecated and
    # can construct arbitrary Python objects; these are plain-data config
    # files, so safe_load is the correct call.
    with open(path, "r") as room_config:
        room_dict = yaml.safe_load(room_config.read())
    with open("users.yml", "r") as user_config:
        user_data = yaml.safe_load(user_config.read())
    inherits = []
    rooms = {}
    # Column index of each host's user id within a users.yml row.
    host_fields = {'stackexchange.com': 1, 'meta.stackexchange.com': 2, 'stackoverflow.com': 3}
    for site, site_rooms in room_dict.items():
        for roomid, room in site_rooms.items():
            room_identifier = (site, roomid)
            print("Process {}".format(room_identifier))
            rooms[room_identifier] = room
            if "privileges" in room and "inherit" in room["privileges"]:
                # Defer inheritance until all rooms have been read.
                inherits.append({'from': (room["privileges"]["inherit"]["site"],
                                          room["privileges"]["inherit"]["room"]), 'to': room_identifier})
                if "additional" in room["privileges"]:
                    _privileges[room_identifier] = \
                        set([user_data[x][host_fields[site]] for x in room["privileges"]["additional"]])
            elif "privileges" in room:
                _privileges[room_identifier] = set([user_data[x][host_fields[site]] for x in room["privileges"]])
            else:
                _privileges[room_identifier] = set()
            if "commands" in room and room["commands"]:
                _command_rooms.add(room_identifier)
            if "watcher" in room and room["watcher"]:
                _watcher_rooms.add(room_identifier)
            if "msg_types" in room:
                add_room(room_identifier, room["msg_types"])
    for inherit in inherits:
        if inherit["from"] in rooms:
            from_privs = _privileges[inherit["from"]]
            # Map source-room user ids back to account names, then to ids on
            # the inheriting room's host.
            from_accounts = [k for k, v in user_data.items() if v[host_fields[inherit["from"][0]]] in from_privs]
            inherit_from = set([user_data[x][host_fields[inherit["to"][0]]] for x in from_accounts])
            if inherit["to"] in _privileges:
                before = _privileges[inherit["to"]]
                _privileges[inherit["to"]] = _privileges[inherit["to"]] | inherit_from
                log('debug', '{} inheriting privs from {} with additional: before {}, after {}'.format(
                    inherit["to"], inherit["from"], before, _privileges[inherit["to"]]))
            else:
                _privileges[inherit["to"]] = inherit_from
        else:
            # BUGFIX: the second placeholder previously repeated the room id;
            # it should name the site the inheriting room is on.
            log('warn', 'Room {} on {} specified privilege inheritance from {}, but no such room exists'.format(
                inherit["to"][1], inherit["to"][0], inherit["from"][1]))
def add_room(room, roles):
    """Register *room* under each role name in *roles* in _room_roles."""
    for role in roles:
        _room_roles.setdefault(role, set()).add(room)
def pickle_last_messages():
    """Daemon loop: persist _last_messages to messageData.p every time the
    _pickle_run event is set (by send_messages after a successful post)."""
    while True:
        _pickle_run.wait()
        _pickle_run.clear()
        with open("messageData.p", "wb") as pickle_file:
            pickle.dump(_last_messages, pickle_file)
def send_messages():
    """Daemon loop: take (room, text, report_data) items off _msg_queue and
    post them to chat, recording posted message ids for later lookup."""
    while True:
        room, msg, report_data = _msg_queue.get()
        if len(msg) > 500 and "\n" not in msg:
            # Chat rejects single-line messages over 500 characters; drop them.
            log('warn', 'Discarded the following message because it was over 500 characters')
            log('warn', msg)
            _msg_queue.task_done()
            continue
        full_retries = 0
        while full_retries < 3:
            try:
                response = room.room._client._do_action_despite_throttling(("send", room.room.id, msg)).json()
                if "id" in response:
                    identifier = (room.room._client.host, room.room.id)
                    message_id = response["id"]
                    # Remember roughly the last 100 message ids per room.
                    if identifier not in _last_messages.messages:
                        _last_messages.messages[identifier] = collections.deque((message_id,))
                    else:
                        last = _last_messages.messages[identifier]
                        if len(last) > 100:
                            last.popleft()
                        last.append(message_id)
                    if report_data:
                        # Map the posted message to its report (bounded at 50).
                        _last_messages.reports[(room.room._client.host, message_id)] = report_data
                        if len(_last_messages.reports) > 50:
                            _last_messages.reports.popitem(last=False)
                        if room.deletion_watcher:
                            # Delete our report message if the reported post
                            # gets deleted within the watch window.
                            callback = room.room._client.get_message(message_id).delete
                            GlobalVars.deletion_watcher.subscribe(report_data[0], callback=callback, timeout=120)
                    # Wake the pickling thread to persist the history.
                    _pickle_run.set()
                    break
                # NOTE(review): when the response has no "id" and no exception
                # is raised, this inner loop re-sends without incrementing
                # full_retries — confirm that situation cannot loop forever.
            except requests.exceptions.HTTPError:
                full_retries += 1
        _msg_queue.task_done()
def on_msg(msg, client):
    """Chat-event callback: route an incoming (posted or edited) message.

    Dispatches, in order: reply commands (replies to our own messages),
    "sd ..." shorthand, "!!/" prefix commands, bare feedback from privileged
    users shortly after a report, and SocketScience payloads in 'direct' rooms.
    """
    global _room_roles
    if not isinstance(msg, events.MessagePosted) and not isinstance(msg, events.MessageEdited):
        return
    message = msg.message
    room_ident = (client.host, message.room.id)
    room_data = _rooms[room_ident]
    if message.owner.id == client._br.user_id:
        # Our own message: only relevant as a SocketScience carrier.
        if 'direct' in _room_roles and room_ident in _room_roles['direct']:
            SocketScience.receive(message.content_source.replace("\u200B", "").replace("\u200C", ""))
        return
    # Strip the wrapper div chat adds around partial message renderings.
    if message.content.startswith("<div class='partial'>"):
        message.content = message.content[21:]
    if message.content.endswith("</div>"):
        message.content = message.content[:-6]
    if message.parent:
        try:
            if message.parent.owner.id == client._br.user_id:
                # Reply to one of our messages: strip the @-mention span and
                # treat the remainder as a reply command.
                strip_mention = regex.sub("^(<span class=(\"|')mention(\"|')>)?@.*?(</span>)? ", "", message.content)
                cmd = GlobalVars.parser.unescape(strip_mention)
                result = dispatch_reply_command(message.parent, message, cmd)
                if result:
                    _msg_queue.put((room_data, ":{} {}".format(message.id, result), None))
        except ValueError:
            pass
    elif message.content.lower().startswith("sd "):
        result = dispatch_shorthand_command(message)
        if result:
            _msg_queue.put((room_data, ":{} {}".format(message.id, result), None))
    elif message.content.startswith("!!/"):
        result = dispatch_command(message)
        if result:
            _msg_queue.put((room_data, ":{} {}".format(message.id, result), None))
    elif classes.feedback.FEEDBACK_REGEX.search(message.content) \
            and is_privileged(message.owner, message.room) and datahandling.last_feedbacked:
        # Bare feedback (e.g. "tp") from a privileged user within the window
        # after the last report: forward it to metasmoke.
        ids, expires_in = datahandling.last_feedbacked
        if time.time() < expires_in:
            Tasks.do(metasmoke.Metasmoke.post_auto_comment, message.content_source, message.owner, ids=ids)
    elif 'direct' in _room_roles and room_ident in _room_roles['direct']:
        SocketScience.receive(message.content_source.replace("\u200B", "").replace("\u200C", ""))
def tell_rooms_with(prop, msg, notify_site="", report_data=None):
    """Send *msg* to every room carrying the role *prop*."""
    tell_rooms(msg, has=(prop,), hasnt=(), notify_site=notify_site, report_data=report_data)
def tell_rooms_without(prop, msg, notify_site="", report_data=None):
    """Send *msg* to every room NOT carrying the role *prop*."""
    tell_rooms(msg, has=(), hasnt=(prop,), notify_site=notify_site, report_data=report_data)
def tell_rooms(msg, has, hasnt, notify_site="", report_data=None):
    """Send *msg* to all rooms that have every role in *has* and none in *hasnt*.

    Rooms are joined lazily on first use. Per-room and global blocks are
    honored, notification pings for *notify_site* are appended, and reports
    to 'delay' rooms are deferred 300s and only posted if the reported post
    is still valid then.
    """
    global _rooms
    msg = msg.rstrip()
    target_rooms = set()
    for prop_has in has:
        if isinstance(prop_has, tuple):
            # A literal (site, roomid) target rather than a role name.
            target_rooms.add(prop_has)
        if prop_has not in _room_roles:
            continue
        for room in _room_roles[prop_has]:
            if all(map(lambda prop: prop not in _room_roles or room not in _room_roles[prop], hasnt)):
                if room not in _rooms:
                    # Not joined yet: join and register the room on demand.
                    site, roomid = room
                    deletion_watcher = room in _watcher_rooms
                    new_room = _clients[site].get_room(roomid)
                    new_room.join()
                    _rooms[room] = RoomData(new_room, -1, deletion_watcher)
                target_rooms.add(room)
    for room_id in target_rooms:
        room = _rooms[room_id]
        if notify_site:
            pings = datahandling.get_user_names_on_notification_list(room.room._client.host,
                                                                     room.room.id,
                                                                     notify_site,
                                                                     room.room._client)
            msg_pings = datahandling.append_pings(msg, pings)
        else:
            msg_pings = msg
        timestamp = time.time()
        if room.block_time < timestamp and _global_block < timestamp:
            if report_data and "delay" in _room_roles and room_id in _room_roles["delay"]:
                # Delay room: queue the report in 300s unless the post has
                # been marked FP/ignored (or deleted) in the meantime.
                def callback(room=room, msg=msg_pings):
                    post = fetch_post_id_and_site_from_url(report_data[0])[0:2]
                    if not datahandling.is_false_positive(post) and not datahandling.is_ignored_post(post):
                        _msg_queue.put((room, msg, report_data))
                task = Tasks.later(callback, after=300)
                GlobalVars.deletion_watcher.subscribe(report_data[0], callback=task.cancel)
            else:
                _msg_queue.put((room, msg_pings, report_data))
def get_last_messages(room, count):
    """Yield up to *count* of the most recently posted messages in *room*,
    newest first; yields nothing when no history is recorded for the room."""
    identifier = (room._client.host, room.id)
    if identifier not in _last_messages.messages:
        return
    recent_ids = reversed(_last_messages.messages[identifier])
    for msg_id in itertools.islice(recent_ids, count):
        yield room._client.get_message(msg_id)
def get_report_data(message):
    """Return (post_url, owner_url) for a report chat message.

    Prefers the recorded report data for the message id; otherwise tries to
    parse the URLs out of the message text. Returns None when neither works.
    """
    identifier = (message._client.host, message.id)
    if identifier in _last_messages.reports:
        return _last_messages.reports[identifier]
    post_url = fetch_post_url_from_msg_content(message.content_source)
    if post_url:
        return (post_url, fetch_owner_url_from_msg_content(message.content_source))
def is_privileged(user, room):
    """Return True when *user* may run privileged commands in *room*.

    A user qualifies by being in the room's privilege list or by being a
    moderator.
    """
    # BUGFIX: removed a leftover debug print of the entire _privileges dict
    # that fired on every privilege check.
    return user.id in _privileges[(room._client.host, room.id)] or user.is_moderator
def block_room(room_id, site, time):
    """Block messages to one room — or all rooms when room_id is None —
    until the given timestamp.

    NOTE: the 'time' parameter shadows the 'time' module inside this
    function; it is the epoch timestamp until which sending is blocked.
    """
    global _global_block
    if room_id is None:
        _global_block = time
    else:
        _rooms[(site, room_id)].block_time = time
class ChatCommand:
    """Callable wrapper around a chat-command function.

    Holds dispatch metadata (argument coercers, reply/privilege flags,
    arity, aliases) and, when invoked, performs the privilege check,
    argument coercion and error-to-reply conversion.
    """
    def __init__(self, type_signature, reply=False, whole_msg=False, privileged=False,
                 arity=None, aliases=None, give_name=False):
        # type_signature: coercer callables applied to positional arguments.
        self.type_signature = type_signature
        # reply: triggered by replying to our message rather than "!!/".
        self.reply = reply
        # whole_msg: pass the originating Message as the first argument.
        self.whole_msg = whole_msg
        # privileged: restrict to users privileged in the room.
        self.privileged = privileged
        # arity: optional (min, max) argument-count override.
        self.arity = arity
        self.aliases = aliases or []
        # give_name: pass alias_used= through to the wrapped function.
        self.give_name = give_name
        # Set by the @command decorator to the wrapped function.
        self.__func__ = None

    def __call__(self, *args, original_msg=None, alias_used=None, quiet_action=False):
        if self.privileged and not is_privileged(original_msg.owner, original_msg.room):
            return GlobalVars.not_privileged_warning
        if self.whole_msg:
            processed_args = [original_msg]
        else:
            processed_args = []
        try:
            try:
                # Coerce each argument with its matching type-signature entry;
                # falsy arguments (None/"") pass through untouched.
                processed_args.extend(
                    [(coerce(arg) if arg else arg) for coerce, arg in zip(self.type_signature, args)])
            except ValueError as e:
                return "Invalid input type given for an argument"
            if self.give_name:
                result = self.__func__(*processed_args, alias_used=alias_used)
            else:
                result = self.__func__(*processed_args)
            # Quiet invocations ("cmd-") suppress the normal reply.
            return result if not quiet_action else ""
        except CmdException as e:
            # Command-raised, user-facing error message.
            return str(e)
        except Exception:  # Everything else
            log_exception(*sys.exc_info())
            return "I hit an error while trying to run that command; run `!!/errorlogs` for details."

    def __repr__(self):
        return "{}({}, reply={}, whole_msg={}, privileged={}, arity={}, aliases={}, give_name={})" \
            .format(
                self.__class__.__name__, ", ".join([s.__name__ for s in self.type_signature]), self.reply,
                self.whole_msg, self.privileged,
                self.arity, self.aliases, self.give_name
            )
def command(*type_signature, reply=False, whole_msg=False, privileged=False, arity=None, aliases=None, give_name=False):
    """Decorator factory: wrap a function in a ChatCommand and register it
    (and its aliases) in the prefix- or reply-command registry."""
    aliases = aliases or []

    def decorator(func):
        wrapper = ChatCommand(type_signature, reply, whole_msg, privileged, arity, aliases, give_name)
        wrapper.__func__ = func
        # Default arity: exactly as many arguments as coercers.
        bounds = arity if arity else (len(type_signature), len(type_signature))
        entry = (wrapper, bounds)
        registry = _reply_commands if reply else _prefix_commands
        registry[func.__name__] = entry
        for alias in aliases:
            registry[alias] = entry
        return wrapper
    return decorator
def message(msg):
    """Type-signature coercer used by @command: assert the argument is a
    chat Message and return it unchanged."""
    assert isinstance(msg, Message)
    return msg
def get_message(id, host="stackexchange.com"):
    """Fetch a chat message by numeric id from the given chat host.

    :raises ValueError: when *host* is not one of the known chat hosts
    """
    try:
        client = _clients[host]
    except KeyError:
        raise ValueError("Invalid host")
    return client.get_message(int(id))
def dispatch_command(msg):
    """Parse and execute a "!!/command args" chat message.

    :param msg: chat Message whose content starts with "!!/"
    :return: reply text, or None when there is nothing to reply
    """
    command_parts = GlobalVars.parser.unescape(msg.content).split(" ", 1)
    if len(command_parts) == 2:
        cmd, args = command_parts
    else:
        cmd, = command_parts
        args = ""
    if len(cmd) == 3:
        # Just "!!/" with no command name.
        return
    command_name = cmd[3:].lower()
    # A trailing "-" requests a quiet invocation (suppress the usual reply).
    quiet_action = command_name[-1] == "-"
    command_name = regex.sub(r"[[:punct:]]*$", "", command_name)
    if command_name not in _prefix_commands:
        return "No such command '{}'.".format(command_name)
    else:
        func, (min_arity, max_arity) = _prefix_commands[command_name]
        if max_arity == 0:
            return func(original_msg=msg, alias_used=command_name, quiet_action=quiet_action)
        elif max_arity == 1:
            if min_arity == 1 and not args:
                return "Missing an argument."
            # Single-argument commands get the whole remainder unsplit.
            return func(args or None, original_msg=msg, alias_used=command_name, quiet_action=quiet_action)
        else:
            args = args.split()
            if len(args) < min_arity:
                return "Too few arguments."
            elif len(args) > max_arity:
                return "Too many arguments."
            else:
                # Pad optional trailing arguments with None.
                args.extend([None] * (max_arity - len(args)))
                return func(*args, original_msg=msg, alias_used=command_name, quiet_action=quiet_action)
def dispatch_reply_command(msg, reply, full_cmd):
    """Execute a command given as a reply to one of our messages.

    :param msg: our original message being replied to
    :param reply: the user's reply message carrying the command
    :param full_cmd: reply text with the @-mention already stripped
    :return: reply text, or None
    """
    command_parts = full_cmd.lower().split(" ", 1)
    if len(command_parts) == 2:
        cmd, args = command_parts
    else:
        cmd, = command_parts
        args = ""
    # A trailing "-" requests a quiet invocation.
    quiet_action = cmd[-1] == "-"
    cmd = regex.sub(r"\W*$", "", cmd)
    if cmd in _reply_commands:
        func, (min_arity, max_arity) = _reply_commands[cmd]
        # Reply commands always receive the replied-to message first.
        assert min_arity == 1
        if max_arity == 1:
            return func(msg, original_msg=reply, alias_used=cmd, quiet_action=quiet_action)
        elif max_arity == 2:
            return func(msg, args, original_msg=reply, alias_used=cmd, quiet_action=quiet_action)
        else:
            args = args.split()
            args.extend([None] * (max_arity - len(args)))
            return func(msg, *args, original_msg=reply, alias_used=cmd, quiet_action=quiet_action)
    elif is_privileged(reply.owner, reply.room):
        # Unknown command from a privileged user: treat the reply text as a
        # comment to post on the report via metasmoke.
        post_data = get_report_data(msg)
        if post_data:
            Tasks.do(metasmoke.Metasmoke.post_auto_comment, full_cmd, reply.owner, url=post_data[0])
def dispatch_shorthand_command(msg):
    """Execute an "sd cmd1 cmd2 ..." shorthand message.

    Each token applies to the corresponding most-recent message in the room
    (newest first). A numeric prefix repeats a command ("2k" -> "k k") and
    "-" skips a message.
    :return: combined reply text, or "" when no command produced output
    """
    commands = GlobalVars.parser.unescape(msg.content[3:]).lower().split()
    if len(commands) == 0:
        return
    output = []
    processed_commands = []
    for cmd in commands:
        # Split an optional repeat count off the front of the token.
        count, cmd = regex.match(r"^(\d*)(.*)", cmd).groups()
        for _ in range(int(count) if count else 1):
            processed_commands.append(cmd)
    should_return_output = False
    # NOTE: the loop variable 'message' shadows the module-level message()
    # coercer within this function.
    for current_command, message in zip(processed_commands, get_last_messages(msg.room, len(processed_commands))):
        if current_command == "-":
            output.append("[:{}] <skipped>".format(message.id))
        else:
            result = dispatch_reply_command(message, msg, current_command)
            if result:
                should_return_output = True
                output.append("[:{}] {}".format(message.id, result))
            else:
                output.append("[:{}] <processed without return value>".format(message.id))
    return "\n".join(output) if should_return_output else ""
|
server.py | #########
# Copyright (c) 2014 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
import traceback
import tempfile
import re
import collections
import json
import threading
import socket
from Queue import Queue
from StringIO import StringIO
from wsgiref.simple_server import WSGIServer, WSGIRequestHandler
from wsgiref.simple_server import make_server as make_wsgi_server
import bottle
from cloudify.proxy.client import ScriptException
from cloudify.state import current_ctx
class CtxProxy(object):
    """Base proxy that executes serialized ctx requests against a ctx object.

    Subclasses provide the transport (HTTP/ZMQ); this class implements only
    request processing. NOTE: this file uses Python 2 syntax
    ('except Exception, e').
    """
    def __init__(self, ctx, socket_url):
        # ctx: the context object the proxied calls are dispatched to.
        self.ctx = ctx
        # socket_url: address clients should connect to for this proxy.
        self.socket_url = socket_url

    def process(self, request):
        """Decode a JSON request, evaluate it against ctx and return a JSON
        reply whose 'type' is 'result', 'stop_operation' (ScriptException)
        or 'error' (exception details including a traceback)."""
        try:
            typed_request = json.loads(request)
            args = typed_request['args']
            payload = process_ctx_request(self.ctx, args)
            result_type = 'result'
            if isinstance(payload, ScriptException):
                payload = dict(message=str(payload))
                result_type = 'stop_operation'
            result = json.dumps({
                'type': result_type,
                'payload': payload
            })
        except Exception, e:
            # Any failure is serialized back to the caller, not raised here.
            tb = StringIO()
            traceback.print_exc(file=tb)
            payload = {
                'type': type(e).__name__,
                'message': str(e),
                'traceback': tb.getvalue()
            }
            result = json.dumps({
                'type': 'error',
                'payload': payload
            })
        return result

    def close(self):
        # Subclasses release their transport resources here.
        pass
class HTTPCtxProxy(CtxProxy):
    """CtxProxy served over HTTP on localhost by a bottle app running in a
    daemon thread."""

    def __init__(self, ctx, port=None):
        port = port or get_unused_port()
        socket_url = 'http://localhost:{0}'.format(port)
        super(HTTPCtxProxy, self).__init__(ctx, socket_url)
        self.port = port
        # 1-slot queue used purely as a "server is up" handshake.
        self._started = Queue(1)
        self.thread = self._start_server()
        # Block until the WSGI server signals readiness (5s max).
        self._started.get(timeout=5)

    def _start_server(self):
        proxy = self

        class BottleServerAdapter(bottle.ServerAdapter):
            # Custom adapter so the underlying WSGIServer instance is
            # reachable for shutdown and can signal startup.
            def run(self, app):
                class Server(WSGIServer):
                    # Allow quick restarts on the same port.
                    allow_reuse_address = True

                    def handle_error(self, request, client_address):
                        # Swallow per-request errors to keep serving.
                        pass

                class Handler(WSGIRequestHandler):
                    def address_string(self):
                        # Skip reverse DNS; log the raw client IP.
                        return self.client_address[0]

                    def log_request(*args, **kwargs):
                        # NOTE: 'self' here resolves via closure to the
                        # enclosing adapter's run(self, app), not the handler.
                        if not self.quiet:
                            return WSGIRequestHandler.log_request(
                                *args, **kwargs)
                self.srv = make_wsgi_server(
                    self.host,
                    self.port,
                    app,
                    Server,
                    Handler)
                proxy.server = self.srv
                self.port = self.srv.server_port
                # Unblock __init__: the server is about to enter its loop.
                proxy._started.put(True)
                self.srv.serve_forever(poll_interval=0.1)

        bottle.post('/', callback=self._request_handler)

        def serve():
            bottle.run(
                host='localhost',
                port=self.port,
                quiet=True,
                server=BottleServerAdapter)
        thread = threading.Thread(target=serve)
        thread.daemon = True
        thread.start()
        return thread

    def close(self):
        self.server.shutdown()
        self.server.server_close()

    def _request_handler(self):
        # Bottle callback for POST /: the body is the serialized request.
        request = bottle.request.body.read()
        with current_ctx.push(self.ctx):
            response = self.process(request)
        return bottle.LocalResponse(
            body=response,
            status=200,
            headers={'content-type': 'application/json'})
class ZMQCtxProxy(CtxProxy):
    """CtxProxy over a ZeroMQ REP socket; the caller drives it by polling."""

    def __init__(self, ctx, socket_url):
        super(ZMQCtxProxy, self).__init__(ctx, socket_url)
        # Imported lazily so zmq is only required when this transport is used.
        import zmq
        self.z_context = zmq.Context(io_threads=1)
        self.sock = self.z_context.socket(zmq.REP)
        self.sock.bind(self.socket_url)
        self.poller = zmq.Poller()
        self.poller.register(self.sock, zmq.POLLIN)

    def poll_and_process(self, timeout=1):
        """Wait up to *timeout* seconds for one request and answer it.

        :return: True when a request was processed, False on timeout
        """
        import zmq
        state = dict(self.poller.poll(1000 * timeout)).get(self.sock)
        if not state == zmq.POLLIN:
            return False
        request = self.sock.recv()
        response = self.process(request)
        self.sock.send(response)
        return True

    def close(self):
        self.sock.close()
        self.z_context.term()
class UnixCtxProxy(ZMQCtxProxy):
    """ZMQ ctx proxy bound to a unix domain (ipc://) socket."""

    def __init__(self, ctx, socket_path=None):
        if not socket_path:
            # NOTE(review): mktemp only reserves a name, not the file itself;
            # acceptable for an ipc socket path but inherently race-prone.
            socket_path = tempfile.mktemp(prefix='ctx-', suffix='.socket')
        socket_url = 'ipc://{0}'.format(socket_path)
        super(UnixCtxProxy, self).__init__(ctx, socket_url)
class TCPCtxProxy(ZMQCtxProxy):
    """ZMQ ctx proxy bound to a TCP address (loopback by default)."""

    def __init__(self, ctx, ip='127.0.0.1', port=None):
        # Pick a free port when none is given.
        port = port or get_unused_port()
        socket_url = 'tcp://{0}:{1}'.format(ip, port)
        super(TCPCtxProxy, self).__init__(ctx, socket_url)
class StubCtxProxy(object):
    """Placeholder proxy for when no real ctx proxy should be started;
    exposes the same minimal interface (socket_url attribute, close())."""
    socket_url = ''

    def close(self):
        # Nothing to release.
        pass
def process_ctx_request(ctx, args):
    """Walk *args* against *ctx*, resolving attributes, dict paths and calls.

    Each arg either names an attribute of the current object, addresses into
    a mapping by dotted path (with an optional trailing value arg meaning
    "set"), or — when the current object is callable — becomes a call
    argument. A bare trailing callable is invoked with no arguments.
    """
    current = ctx
    num_args = len(args)
    index = 0
    while index < num_args:
        arg = args[index]
        desugared_attr = _desugar_attr(current, arg)
        if desugared_attr:
            current = getattr(current, desugared_attr)
        elif isinstance(current, collections.MutableMapping):
            key = arg
            path_dict = PathDictAccess(current)
            if index + 1 == num_args:
                # read dict prop by path
                value = path_dict.get(key)
                current = value
            elif index + 2 == num_args:
                # set dict prop by path
                value = args[index + 1]
                current = path_dict.set(key, value)
            else:
                raise RuntimeError('Illegal argument while accessing dict')
            break
        elif callable(current):
            kwargs = {}
            remaining_args = args[index:]
            # A trailing mapping is interpreted as keyword arguments.
            if isinstance(remaining_args[-1], collections.MutableMapping):
                kwargs = remaining_args[-1]
                remaining_args = remaining_args[:-1]
            current = current(*remaining_args, **kwargs)
            break
        else:
            raise RuntimeError('{0} cannot be processed in {1}'
                               .format(arg, args))
        index += 1
    if callable(current):
        current = current()
    return current
def _desugar_attr(obj, attr):
    """Return the attribute name on *obj* matching *attr*, also trying a
    dash-to-underscore translation; None when nothing matches.

    (Python 2: 'basestring' covers both str and unicode.)
    """
    if not isinstance(attr, basestring):
        return None
    if hasattr(obj, attr):
        return attr
    attr = attr.replace('-', '_')
    if hasattr(obj, attr):
        return attr
    return None
class PathDictAccess(object):
    """Read/write values in a nested dict addressed by dotted paths.

    Paths look like ``a.b[2].c``: each dot descends into a sub-dict and a
    ``[n]`` suffix indexes into a list stored under that key.
    """

    pattern = re.compile(r"(.+)\[(\d+)\]")

    def __init__(self, obj):
        self.obj = obj

    def set(self, prop_path, value):
        """Set the value at *prop_path*, creating intermediate dicts."""
        container, leaf = self._get_parent_obj_prop_name_by_path(prop_path)
        container[leaf] = value

    def get(self, prop_path):
        """Return the value at *prop_path*; raise on a missing segment."""
        return self._get_object_by_path(prop_path)

    def _get_object_by_path(self, prop_path, fail_on_missing=True):
        # when setting a nested object, make sure to also set all the
        # intermediate path objects
        node = self.obj
        for segment in prop_path.split('.'):
            indexed = self.pattern.match(segment)
            if indexed:
                key = indexed.group(1)
                pos = int(indexed.group(2))
                if key not in node:
                    self._raise_illegal(prop_path)
                if type(node[key]) != list:
                    self._raise_illegal(prop_path)
                node = node[key][pos]
                continue
            if segment not in node:
                if fail_on_missing:
                    self._raise_illegal(prop_path)
                node[segment] = {}
            node = node[segment]
        return node

    def _get_parent_obj_prop_name_by_path(self, prop_path):
        segments = prop_path.split('.')
        if len(segments) == 1:
            return self.obj, prop_path
        parent = self._get_object_by_path('.'.join(segments[:-1]),
                                          fail_on_missing=False)
        return parent, segments[-1]

    @staticmethod
    def _raise_illegal(prop_path):
        raise RuntimeError('illegal path: {0}'.format(prop_path))
def get_unused_port():
    """Ask the OS for a currently-free TCP port by binding port 0 on
    loopback, then release the socket and return the assigned port number.
    """
    probe = socket.socket()
    try:
        probe.bind(('127.0.0.1', 0))
        return probe.getsockname()[1]
    finally:
        probe.close()
|
misc.py | """
Misc module contains stateless functions that could be used during pytest execution,
or outside during setup/teardown of the integration tests environment.
"""
import contextlib
import errno
import multiprocessing
import os
import re
import shutil
import stat
import sys
import tempfile
import time
import warnings
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.asymmetric import ec
from cryptography.hazmat.primitives.serialization import Encoding
from cryptography.hazmat.primitives.serialization import NoEncryption
from cryptography.hazmat.primitives.serialization import PrivateFormat
from cryptography.x509 import load_pem_x509_certificate
from OpenSSL import crypto
import pkg_resources
import requests
from six.moves import SimpleHTTPServer
from six.moves import socketserver
from certbot_integration_tests.utils.constants import \
PEBBLE_ALTERNATE_ROOTS, PEBBLE_MANAGEMENT_URL
RSA_KEY_TYPE = 'rsa'
ECDSA_KEY_TYPE = 'ecdsa'
def _suppress_x509_verification_warnings():
    """Silence urllib3's InsecureRequestWarning so verify=False requests do
    not spam the test output."""
    try:
        import urllib3
    except ImportError:
        # Handle old versions of requests with vendorized urllib3
        from requests.packages.urllib3.exceptions import InsecureRequestWarning
        requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
    else:
        urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
def check_until_timeout(url, attempts=30):
    """
    Wait and block until given url responds with status 200, or raise an exception
    after the specified number of attempts.
    :param str url: the URL to test
    :param int attempts: the number of times to try to connect to the URL
    :raise ValueError: exception raised if unable to reach the URL
    """
    _suppress_x509_verification_warnings()
    for _ in range(attempts):
        # Poll once per second; connection refusals just mean "not up yet".
        time.sleep(1)
        try:
            response = requests.get(url, verify=False)
        except requests.exceptions.ConnectionError:
            continue
        if response.status_code == 200:
            return
    raise ValueError('Error, url did not respond after {0} attempts: {1}'.format(attempts, url))
class GracefulTCPServer(socketserver.TCPServer):
    """TCPServer that sets SO_REUSEADDR so a port just released by a previous
    instance (still in TIME_WAIT) can be bound again immediately."""
    allow_reuse_address = True
def _run_server(port):
    # Subprocess target: serve the current working directory over HTTP on
    # *port* forever; the parent terminates this process on context exit.
    GracefulTCPServer(('', port), SimpleHTTPServer.SimpleHTTPRequestHandler).serve_forever()
@contextlib.contextmanager
def create_http_server(port):
    """
    Setup and start an HTTP server for the given TCP port.
    This server stays active for the lifetime of the context, and is automatically
    stopped with context exit, while its temporary webroot is deleted.
    :param int port: the TCP port to use
    :return str: the temporary webroot attached to this server
    """
    current_cwd = os.getcwd()
    webroot = tempfile.mkdtemp()
    process = multiprocessing.Process(target=_run_server, args=(port,))
    try:
        # SimpleHTTPServer is designed to serve files from the current working directory at the
        # time it starts. So we temporarily change the cwd to our crafted webroot before launch.
        try:
            os.chdir(webroot)
            process.start()
        finally:
            # Restore cwd no matter what; the child keeps serving the webroot.
            os.chdir(current_cwd)
        # Block until the server actually answers (up to ~30 s).
        check_until_timeout('http://localhost:{0}/'.format(port))
        yield webroot
    finally:
        # Tear down the child process and the webroot, even on error.
        try:
            if process.is_alive():
                process.terminate()
                process.join()  # Block until process is effectively terminated
        finally:
            shutil.rmtree(webroot)
def list_renewal_hooks_dirs(config_dir):
    """
    Find and return paths of all hook directories for the given certbot config directory
    :param str config_dir: path to the certbot config directory
    :return str[]: list of path to the standard hooks directory for this certbot instance
    """
    hooks_root = os.path.join(config_dir, 'renewal-hooks')
    return [os.path.join(hooks_root, subdir) for subdir in ('pre', 'deploy', 'post')]
def generate_test_file_hooks(config_dir, hook_probe):
    """
    Create a suite of certbot hook scripts and put them in the relevant hook directory
    for the given certbot configuration directory. These scripts, when executed, will write
    specific verbs in the given hook_probe file to allow asserting they have effectively
    been executed. The deploy hook also checks that the renewal environment variables are set.
    :param str config_dir: current certbot config directory
    :param hook_probe: path to the hook probe to test hook scripts execution
    """
    hook_path = pkg_resources.resource_filename('certbot_integration_tests', 'assets/hook.py')
    for hook_dir in list_renewal_hooks_dirs(config_dir):
        # We want an equivalent of bash `chmod -p $HOOK_DIR`, that does not fail if one folder of
        # the hierarchy already exists. It is not the case of os.makedirs. Python 3 has an
        # optional parameter `exists_ok` to not fail on existing dir, but Python 2.7 does not.
        # So we pass through a try except pass for it. To be removed with dropped support on py27.
        try:
            os.makedirs(hook_dir)
        except OSError as error:
            if error.errno != errno.EEXIST:
                raise
        if os.name != 'nt':
            # POSIX: a small bash wrapper that invokes the python hook script
            # and appends its output to the probe file.
            entrypoint_script_path = os.path.join(hook_dir, 'entrypoint.sh')
            entrypoint_script = '''\
#!/usr/bin/env bash
set -e
"{0}" "{1}" "{2}" >> "{3}"
'''.format(sys.executable, hook_path, entrypoint_script_path, hook_probe)
        else:
            # Windows: same idea, as a PowerShell wrapper.
            entrypoint_script_path = os.path.join(hook_dir, 'entrypoint.ps1')
            entrypoint_script = '''\
& "{0}" "{1}" "{2}" >> "{3}"
'''.format(sys.executable, hook_path, entrypoint_script_path, hook_probe)
        with open(entrypoint_script_path, 'w') as file_h:
            file_h.write(entrypoint_script)
        # Mark the entrypoint executable for its owner.
        os.chmod(entrypoint_script_path, os.stat(entrypoint_script_path).st_mode | stat.S_IEXEC)
@contextlib.contextmanager
def manual_http_hooks(http_server_root, http_port):
    """
    Generate suitable http-01 hooks command for test purpose in the given HTTP
    server webroot directory. These hooks command use temporary python scripts
    that are deleted upon context exit.
    :param str http_server_root: path to the HTTP server configured to serve http-01 challenges
    :param int http_port: HTTP port that the HTTP server listen on
    :return (str, str): a tuple containing the authentication hook and cleanup hook commands
    """
    tempdir = tempfile.mkdtemp()
    try:
        # Auth hook: writes the challenge file into the webroot, then polls
        # the local HTTP server until the challenge URL answers 200.
        auth_script_path = os.path.join(tempdir, 'auth.py')
        with open(auth_script_path, 'w') as file_h:
            # Bug fix: the generated script previously called `request.get`
            # (NameError) instead of `requests.get`, so the self-check loop
            # crashed on its first iteration.
            file_h.write('''\
#!/usr/bin/env python
import os
import requests
import time
import sys
challenge_dir = os.path.join('{0}', '.well-known', 'acme-challenge')
os.makedirs(challenge_dir)
challenge_file = os.path.join(challenge_dir, os.environ.get('CERTBOT_TOKEN'))
with open(challenge_file, 'w') as file_h:
    file_h.write(os.environ.get('CERTBOT_VALIDATION'))

url = 'http://localhost:{1}/.well-known/acme-challenge/' + os.environ.get('CERTBOT_TOKEN')
for _ in range(0, 10):
    time.sleep(1)
    try:
        if requests.get(url).status_code == 200:
            sys.exit(0)
    except requests.exceptions.ConnectionError:
        pass

raise ValueError('Error, url did not respond after 10 attempts: {{0}}'.format(url))
'''.format(http_server_root.replace('\\', '\\\\'), http_port))
        os.chmod(auth_script_path, 0o755)

        # Cleanup hook: removes the whole .well-known tree from the webroot.
        cleanup_script_path = os.path.join(tempdir, 'cleanup.py')
        with open(cleanup_script_path, 'w') as file_h:
            file_h.write('''\
#!/usr/bin/env python
import os
import shutil
well_known = os.path.join('{0}', '.well-known')
shutil.rmtree(well_known)
'''.format(http_server_root.replace('\\', '\\\\')))
        os.chmod(cleanup_script_path, 0o755)

        yield ('{0} {1}'.format(sys.executable, auth_script_path),
               '{0} {1}'.format(sys.executable, cleanup_script_path))
    finally:
        # Hook scripts are single-use: remove them on context exit.
        shutil.rmtree(tempdir)
def generate_csr(domains, key_path, csr_path, key_type=RSA_KEY_TYPE):
    """
    Generate a private key, and a CSR for the given domains using this key.
    :param domains: the domain names to include in the CSR
    :type domains: `list` of `str`
    :param str key_path: path to the private key that will be generated
    :param str csr_path: path to the CSR that will be generated
    :param str key_type: type of the key (misc.RSA_KEY_TYPE or misc.ECDSA_KEY_TYPE)
    """
    if key_type == RSA_KEY_TYPE:
        key = crypto.PKey()
        key.generate_key(crypto.TYPE_RSA, 2048)
    elif key_type == ECDSA_KEY_TYPE:
        with warnings.catch_warnings():
            # Ignore a warning on some old versions of cryptography
            warnings.simplefilter('ignore', category=PendingDeprecationWarning)
            ecdsa_key = ec.generate_private_key(ec.SECP384R1(), default_backend())
        # Round-trip through PEM so pyOpenSSL can consume the cryptography key.
        pem_bytes = ecdsa_key.private_bytes(
            encoding=Encoding.PEM,
            format=PrivateFormat.TraditionalOpenSSL,
            encryption_algorithm=NoEncryption())
        key = crypto.load_privatekey(crypto.FILETYPE_PEM, pem_bytes)
    else:
        raise ValueError('Invalid key type: {0}'.format(key_type))

    with open(key_path, 'wb') as file_h:
        file_h.write(crypto.dump_privatekey(crypto.FILETYPE_PEM, key))

    # Build a CSR carrying every domain as a subjectAltName DNS entry.
    req = crypto.X509Req()
    san = ', '.join('DNS:{0}'.format(item) for item in domains)
    req.add_extensions([crypto.X509Extension(b'subjectAltName', False, san.encode('utf-8'))])
    req.set_pubkey(key)
    req.set_version(2)
    req.sign(key, 'sha256')

    with open(csr_path, 'wb') as file_h:
        file_h.write(crypto.dump_certificate_request(crypto.FILETYPE_ASN1, req))
def read_certificate(cert_path):
    """
    Load the certificate from the provided path, and return a human readable version of it (TEXT mode).
    :param str cert_path: the path to the certificate
    :returns: the TEXT version of the certificate, as it would be displayed by openssl binary
    """
    # Local renamed from `file` to avoid shadowing the builtin.
    with open(cert_path, 'rb') as cert_file:
        pem_data = cert_file.read()
    certificate = crypto.load_certificate(crypto.FILETYPE_PEM, pem_data)
    return crypto.dump_certificate(crypto.FILETYPE_TEXT, certificate).decode('utf-8')
def load_sample_data_path(workspace):
    """
    Load the certbot configuration example designed to make OCSP tests, and return its path
    :param str workspace: current test workspace directory path
    :returns: the path to the loaded sample data directory
    :rtype: str
    """
    source = pkg_resources.resource_filename('certbot_integration_tests', 'assets/sample-config')
    destination = os.path.join(workspace, 'sample-config')
    shutil.copytree(source, destination, symlinks=True)

    if os.name == 'nt':
        # Fix the symlinks on Windows if GIT is not configured to create them upon checkout
        for lineage in ('a.encryption-example.com',
                        'b.encryption-example.com',
                        'c.encryption-example.com'):
            live_dir = os.path.join(destination, 'live', lineage)
            for entry in os.listdir(live_dir):
                if entry == 'README':
                    continue
                entry_path = os.path.join(live_dir, entry)
                if os.path.islink(entry_path):
                    continue
                # Git materialised the link as a plain file whose content is
                # the link target: replace it with a real symlink.
                with open(entry_path) as file_h:
                    target = file_h.read()
                os.unlink(entry_path)
                os.symlink(os.path.join(live_dir, target), entry_path)
    return destination
def echo(keyword, path=None):
    """
    Generate a platform independent executable command
    that echoes the given keyword into the given file.
    :param keyword: the keyword to echo (must be a single keyword)
    :param path: path to the file were keyword is echoed
    :return: the executable command
    """
    if not re.match(r'^\w+$', keyword):
        raise ValueError('Error, keyword `{0}` is not a single keyword.'
                         .format(keyword))
    redirect = ' >> "{0}"'.format(path) if path else ''
    return '{0} -c "from __future__ import print_function; print(\'{1}\')"{2}'.format(
        os.path.basename(sys.executable), keyword, redirect)
def get_acme_issuers(context):
    """Gets the list of one or more issuer certificates from the ACME server used by the
    context.
    :param context: the testing context.
    :return: the `list of x509.Certificate` representing the list of issuers.
    """
    # TODO: in fact, Boulder has alternate chains in config-next/, just not yet in config/.
    if context.acme_server != "pebble":
        raise NotImplementedError()
    _suppress_x509_verification_warnings()
    # One intermediate per root: the default chain plus each alternate one.
    issuers = []
    for index in range(PEBBLE_ALTERNATE_ROOTS + 1):
        response = requests.get(PEBBLE_MANAGEMENT_URL + '/intermediates/{}'.format(index),
                                verify=False)
        issuers.append(load_pem_x509_certificate(response.content, default_backend()))
    return issuers
|
music-player.py | """ Music Player
----------------------------------------
"""
import os
import threading
import time
import tkinter.messagebox
from tkinter import *
from tkinter import filedialog
from tkinter import ttk
from ttkthemes import themed_tk as tk
from mutagen.mp3 import MP3
from pygame import mixer
# Themed root window: ttkthemes wraps Tk so ttk widgets can use "radiance".
root = tk.ThemedTk()
root.get_themes()  # Returns a list of all themes that can be set
root.set_theme("radiance")  # Sets an available theme

# Fonts - Arial(corresponds to Helvetica), Courier New(Courier), Comic Sans MS, Fixedsys,
# MS Sans Serif, MS Serif, Symbol, System, Times New Roman(Times), and Verdana
#
# Styles - normal, bold, roman, italic, underline, and overstrike.

# Status bar pinned to the bottom; updated by the playback handlers.
statusbar = ttk.Label(root, text="Welcome to Melody",
                      relief=SUNKEN, anchor=W, font='Times 10 italic')
statusbar.pack(side=BOTTOM, fill=X)

# Create the menubar
menubar = Menu(root)
root.config(menu=menubar)

# Create the submenu
subMenu = Menu(menubar, tearoff=0)

playlist = []
# playlist - contains the full path + filename
# playlistbox - contains just the filename
# Fullpath + filename is required to play the music inside play_music load function
def browse_file():
    # "Open" menu / "+ Add" button handler: ask the user for a file, list it
    # in the playlist UI, and queue it in the mixer.
    global filename_path  # remembered module-wide for the playlist bookkeeping
    filename_path = filedialog.askopenfilename()
    add_to_playlist(filename_path)
    mixer.music.queue(filename_path)
def add_to_playlist(filename):
    """Insert *filename* at the top of the playlist and its UI listbox.

    The listbox shows only the basename; ``playlist`` keeps the full path,
    which play_music() needs to load the track.
    """
    # Bug fix: the original read the global ``filename_path`` instead of its
    # own parameter (and kept a dead ``index`` counter), so it only worked
    # when called right after browse_file().
    playlist.insert(0, filename)
    playlistbox.insert(0, os.path.basename(filename))
# File menu: open a track / quit the app.
menubar.add_cascade(label="File", menu=subMenu)
subMenu.add_command(label="Open", command=browse_file)
subMenu.add_command(label="Exit", command=root.destroy)
def about_us():
    # "About Us" menu handler: modal information box.
    tkinter.messagebox.showinfo(
        'About Melody', 'This is a music player build using Python Tkinter by @attreyabhatt')
# Help menu (the subMenu name is rebound to a fresh Menu instance).
subMenu = Menu(menubar, tearoff=0)
menubar.add_cascade(label="Help", menu=subMenu)
subMenu.add_command(label="About Us", command=about_us)

mixer.init()  # initializing the mixer

root.title("Melody")
root.iconbitmap(r'images/melody.ico')

# Root Window - StatusBar, LeftFrame, RightFrame
# LeftFrame - The listbox(playlist)
# RightFrame - TopFrame, MiddleFrame and the BottomFrame
leftframe = Frame(root)
leftframe.pack(side=LEFT, padx=30, pady=30)

playlistbox = Listbox(leftframe)
playlistbox.pack()

addBtn = ttk.Button(leftframe, text="+ Add", command=browse_file)
addBtn.pack(side=LEFT)
def del_song():
    # "- Del" handler: drop the selected track from both the UI listbox and
    # the backing playlist (same index in both by construction).
    selection = int(playlistbox.curselection()[0])
    playlistbox.delete(selection)
    playlist.pop(selection)
delBtn = ttk.Button(leftframe, text="- Del", command=del_song)
delBtn.pack(side=LEFT)

# Right side: time labels (top), transport buttons (middle), volume (bottom).
rightframe = Frame(root)
rightframe.pack(pady=30)

topframe = Frame(rightframe)
topframe.pack()

lengthlabel = ttk.Label(topframe, text='Total Length : --:--')
lengthlabel.pack(pady=5)

currenttimelabel = ttk.Label(
    topframe, text='Current Time : --:--', relief=GROOVE)
currenttimelabel.pack()
def show_details(play_song):
    # Determine the track length: mutagen reads mp3 metadata, pygame's Sound
    # handles everything else.
    extension = os.path.splitext(play_song)[1]
    if extension == '.mp3':
        total_length = MP3(play_song).info.length
    else:
        total_length = mixer.Sound(play_song).get_length()

    # div - total_length/60, mod - total_length % 60
    minutes, seconds = divmod(total_length, 60)
    timeformat = '{:02d}:{:02d}'.format(round(minutes), round(seconds))
    lengthlabel['text'] = "Total Length" + ' - ' + timeformat

    # Tick the "current time" label from a background thread.
    counter_thread = threading.Thread(target=start_count, args=(total_length,))
    counter_thread.start()
def start_count(t):
    """Background loop updating the 'Current Time' label once per second.

    Stops when the elapsed time exceeds *t* (the track length) or when
    mixer.music.get_busy() reports playback has stopped.
    """
    global paused
    # mixer.music.get_busy(): - Returns FALSE when we press the stop button(music stop playing)
    current_time = 0
    while current_time <= t and mixer.music.get_busy():
        if paused:
            # Bug fix: the original `continue` without sleeping busy-spun at
            # 100% CPU for the whole time the track was paused.
            time.sleep(1)
            continue
        mins, secs = divmod(current_time, 60)
        timeformat = '{:02d}:{:02d}'.format(round(mins), round(secs))
        currenttimelabel['text'] = "Current Time" + ' - ' + timeformat
        time.sleep(1)
        current_time += 1
def play_music():
    """Play button handler: resume if paused, else (re)start the selection."""
    global paused
    if paused:
        mixer.music.unpause()
        statusbar['text'] = "Music Resumed"
        paused = FALSE
    else:
        try:
            stop_music()
            time.sleep(1)
            selected_song = int(playlistbox.curselection()[0])
            play_it = playlist[selected_song]
            mixer.music.load(play_it)
            mixer.music.play()
            statusbar['text'] = "Playing music" + \
                ' - ' + os.path.basename(play_it)
            show_details(play_it)
        except Exception:
            # Narrowed from a bare `except:`, which also swallowed
            # SystemExit/KeyboardInterrupt. Typical failures here are an
            # empty selection (IndexError) or a missing/unreadable file.
            tkinter.messagebox.showerror(
                'File not found', 'Melody could not find the file. Please check again.')
def stop_music():
    """Stop playback and clear the module-level paused flag."""
    # Bug fix: without `global`, the original assignment created a throwaway
    # local, so `paused` stayed True after stopping a paused track and the
    # next Play press tried to unpause a stopped mixer.
    global paused
    mixer.music.stop()
    statusbar['text'] = "Music Stopped"
    paused = FALSE
def pause_music():
    # Pause button handler: raise the flag read by play_music/start_count,
    # then pause the mixer.
    global paused
    paused = TRUE
    mixer.music.pause()
    statusbar['text'] = "Music Paused"
def rewind_music():
    # "Rewind" restarts the selected track via play_music.
    # NOTE(review): if playback is currently paused, play_music only
    # unpauses instead of restarting - confirm that is intended.
    play_music()
    statusbar['text'] = "Music Rewinded"
def set_vol(val):
    # Scale callback: the widget reports 0-100, the mixer expects 0.0-1.0.
    mixer.music.set_volume(float(val) / 100)
# set_volume of mixer takes value only from 0 to 1. Example - 0, 0.1, 0.55, 0.54.0.99, 1

# Mute-state flag toggled by mute_music().
muted = FALSE
def mute_music():
    # Toggle between muted (volume 0) and a fixed 70% volume, keeping the
    # button icon and the volume slider in sync with the mixer state.
    global muted
    if muted:
        # Unmute the music
        mixer.music.set_volume(0.7)
        volumeBtn.configure(image=volumePhoto)
        scale.set(70)
        muted = FALSE
    else:
        # mute the music
        mixer.music.set_volume(0)
        volumeBtn.configure(image=mutePhoto)
        scale.set(0)
        muted = TRUE
# Transport buttons (play / stop / pause) with image icons. PhotoImage
# references are kept at module level so Tk does not garbage-collect them.
middleframe = Frame(rightframe)
middleframe.pack(pady=30, padx=30)

playPhoto = PhotoImage(file='images/play.png')
playBtn = ttk.Button(middleframe, image=playPhoto, command=play_music)
playBtn.grid(row=0, column=0, padx=10)

stopPhoto = PhotoImage(file='images/stop.png')
stopBtn = ttk.Button(middleframe, image=stopPhoto, command=stop_music)
stopBtn.grid(row=0, column=1, padx=10)

pausePhoto = PhotoImage(file='images/pause.png')
pauseBtn = ttk.Button(middleframe, image=pausePhoto, command=pause_music)
pauseBtn.grid(row=0, column=2, padx=10)

# Bottom Frame for volume, rewind, mute etc.
bottomframe = Frame(rightframe)
bottomframe.pack()

rewindPhoto = PhotoImage(file='images/rewind.png')
rewindBtn = ttk.Button(bottomframe, image=rewindPhoto, command=rewind_music)
rewindBtn.grid(row=0, column=0)

mutePhoto = PhotoImage(file='images/mute.png')
volumePhoto = PhotoImage(file='images/volume.png')
volumeBtn = ttk.Button(bottomframe, image=volumePhoto, command=mute_music)
volumeBtn.grid(row=0, column=1)

scale = ttk.Scale(bottomframe, from_=0, to=100,
                  orient=HORIZONTAL, command=set_vol)
scale.set(70)  # implement the default value of scale when music player starts
mixer.music.set_volume(0.7)
scale.grid(row=0, column=2, pady=15, padx=30)
def on_closing():
    # Window-close handler: stop playback before tearing down the UI.
    stop_music()
    root.destroy()
# Route the titlebar close button through on_closing, then enter the UI loop.
root.protocol("WM_DELETE_WINDOW", on_closing)
root.mainloop()
|
Blog2EpubKivy.py | #!/usr/bin/env python3
# -*- coding : utf-8 -*-
import sys
import os
import platform
import yaml
import subprocess
import logging
from pathlib import Path
from urllib import parse
from datetime import datetime
import threading
import webbrowser
from blog2epub.Blog2Epub import Blog2Epub
from blog2epub.crawlers.Crawler import EmptyInterface
from kivy.app import App
from kivy.uix.image import Image
from kivy.uix.button import Button
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.stacklayout import StackLayout
from kivy.uix.label import Label
from kivy.uix.textinput import TextInput
from kivy.uix.popup import Popup
from kivy.core.window import Window
from kivy.config import Config
from kivy.metrics import Metrics, dp
# UI scale factors derived from the display density.
# NOTE(review): SIZE divides by Metrics.density twice while F_SIZE divides
# once - confirm the double division is deliberate.
SIZE = 3 / Metrics.density / Metrics.density
F_SIZE = 3 / Metrics.density

# Right mouse button should not spawn multitouch emulation dots.
Config.set('input', 'mouse', 'mouse,multitouch_on_demand')

# One timestamped log file per run under ~/.blog2epub/.
now = datetime.now()
date_time = now.strftime("%Y-%m-%d[%H.%M.%S]")
logging_filename = os.path.join(str(Path.home()), '.blog2epub', 'blog2epub_{}.log'.format(date_time))
logging.basicConfig(
    filename=logging_filename,
    encoding='utf-8',  # basicConfig(encoding=...) requires Python 3.9+
    level=logging.DEBUG,
    format='%(asctime)s - %(levelname)s - %(message)s'
)
def get_image_file(filename):
    """Locate a bundled image file.

    Looks in the macOS .app Resources directory (derived from the executable
    path) and in the source tree's ``../images`` folder, preferring the
    source-tree copy when both exist.

    :param filename: image file name, e.g. ``'blog2epub.png'``
    :return: absolute path to the file, or ``False`` when not found
    """
    in_osx_app = os.path.join(
        os.path.dirname(sys.executable).replace('/Contents/MacOS', '/Contents/Resources'),
        filename
    )
    in_sources = os.path.join(Path(__file__).parent.resolve(), '..', 'images', filename)
    result = False
    if os.path.isfile(in_osx_app):
        result = in_osx_app
    if os.path.isfile(in_sources):
        result = in_sources
    # Fix: debug print() replaced with logging so windowed/packaged builds
    # (which may lack a usable stdout) do not misbehave.
    logging.debug('get_image_file(%r) -> %r', filename, result)
    return result
class StyledLabel(Label):
    """Fixed-width label using the app-wide monospace font."""
    def __init__(self, **kwargs):
        super(StyledLabel, self).__init__(**kwargs)
        self.font_name = 'RobotoMono-Regular'
        self.font_size = dp(10 * F_SIZE)
        self.size_hint = (None, 1)
        self.width = dp(40 * F_SIZE)
class StyledTextInput(TextInput):
    """Text input with the app-wide monospace font and centered content."""
    def __init__(self, **kwargs):
        super(StyledTextInput, self).__init__(**kwargs)
        self.font_name = 'RobotoMono-Regular'
        self.font_size = dp(8 * F_SIZE)
        self.halign = 'center'
        self.valign = 'middle'
        # Callers may override these via kwargs; otherwise use the defaults.
        self.size_hint = kwargs.get('size_hint', (0.25, 1))
        self.text = kwargs.get('text', '')
class StyledButton(Button):
    """Fixed-width button styled with the app-wide monospace font."""
    def __init__(self, **kwargs):
        super(StyledButton, self).__init__(**kwargs)
        self.font_name = 'RobotoMono-Regular'
        self.font_size = dp(10 * F_SIZE)
        self.size_hint = (None, 1)
        self.width = dp(80 * F_SIZE)
class Blog2EpubKivyWindow(BoxLayout):
    """Main window layout: url row, limit/skip row, and a console output."""

    def __init__(self, **kwargs):
        super(Blog2EpubKivyWindow, self).__init__(**kwargs)
        self.orientation = 'vertical'
        self.padding = dp(6*SIZE)
        self.spacing = dp(2*SIZE)

        # Persisted url/limit/skip values (~/.blog2epub/blog2epub.yml).
        self.settings = Blog2EpubSettings()

        # Row 1: url entry + Download button.
        self.row1 = BoxLayout(
            orientation='horizontal',
            size_hint=(1, 0.1),
            spacing=dp(2*SIZE)
        )
        self.add_widget(self.row1)
        self.row1.add_widget(StyledLabel(text='Url:'))
        self.url_entry = StyledTextInput(
            size_hint=(0.8, 1),
            text=self.settings.get('url')
        )
        self.row1.add_widget(self.url_entry)
        self.download_button = StyledButton(text='Download')
        self.download_button.bind(on_press=self.download)
        self.row1.add_widget(self.download_button)

        # Row 2: article limit / skip counters + About button.
        self.row2 = BoxLayout(
            orientation='horizontal',
            size_hint=(1, 0.1),
            spacing=dp(2*SIZE)
        )
        self.add_widget(self.row2)
        self.row2.add_widget(StyledLabel(text='Limit:'))
        self.limit_entry = StyledTextInput(text=self.settings.get('limit'))
        self.row2.add_widget(self.limit_entry)
        self.row2.add_widget(StyledLabel(text='Skip:'))
        self.skip_entry = StyledTextInput(text=self.settings.get('skip'))
        self.row2.add_widget(self.skip_entry)
        self.about_button = StyledButton(text='About')
        self.about_button.bind(on_press=self.about_popup)
        self.row2.add_widget(self.about_button)

        # Read-only console mirroring the crawler's log output.
        self.console_output = TextInput(
            font_size=dp(6*F_SIZE),
            font_name='RobotoMono-Regular',
            background_color='black',
            foreground_color='white',
            size_hint=(1, 0.88),
            readonly=True
        )
        self.add_widget(self.console_output)
        self.interface = KivyInterface(self.console_output)

    def _get_url(self):
        # NOTE(review): urlparse() returns a (truthy) result object for
        # nearly any string, so this validation is weaker than it looks.
        if parse.urlparse(self.url_entry.text):
            return self.url_entry.text
        raise Exception('Blog url is not valid.')

    @staticmethod
    def _is_int(value):
        # Best-effort int conversion; None for empty/invalid fields.
        try:
            int(value)
            return int(value)
        except:
            return None

    def _get_params(self):
        # Parameter dict handed to the Blog2Epub crawler.
        return {
            'interface': self.interface,
            'url': self._get_url(),
            'include_images': True,
            'images_height': 800,
            'images_width': 600,
            'images_quality': 40,
            'start': None,
            'end': None,
            'limit': self._is_int(self.limit_entry.text),
            'skip': self._is_int(self.skip_entry.text),
            'force_download': False,
            'file_name': None,
            'cache_folder': os.path.join(str(Path.home()), '.blog2epub'),
            'destination_folder': str(Path.home()),
        }

    def _download_ebook(self, blog2epub):
        # Worker-thread body: run the blocking download, re-enable the button.
        self.interface.print('Downloading...')
        blog2epub.download()
        self.download_button.disabled = False

    def download(self, instance):
        """Download-button handler: persist settings, spawn the crawler thread."""
        self.interface.clear()
        self.download_button.disabled = True
        try:
            self.saveSettings()
            download_thread = threading.Thread(target=self._download_ebook, kwargs={'blog2epub':Blog2Epub(self._get_params())})
            download_thread.start()
        except Exception as e:
            self.download_button.disabled = False
            self.interface.exception(e)

    def saveSettings(self):
        # Persist the current form values to disk.
        self.settings.set('url', self.url_entry.text)
        self.settings.set('limit', self.limit_entry.text)
        self.settings.set('skip', self.skip_entry.text)
        self.settings.save()

    def about_popup(self, instance):
        """Show the About dialog (logo, version, author, project link)."""
        about_content = BoxLayout(orientation='vertical')
        about_content.add_widget(Image(
            source = get_image_file('blog2epub.png'),
            allow_stretch = True,
            size_hint=(1, 0.7)
        ))
        about_content.add_widget(AboutPopupLabel(
            text = 'v. {}'.format(Blog2Epub.VERSION)
        ))
        about_content.add_widget(AboutPopupLabel(
            text = 'by Bohdan Bobrowski'
        ))

        def about_url_click(instance):
            # Open the project page in the default browser.
            webbrowser.open("https://github.com/bohdanbobrowski/blog2epub")

        about_content.add_widget(Button(
            text='github.com/bohdanbobrowski/blog2epub',
            font_size=dp(8*F_SIZE),
            font_name='RobotoMono-Regular',
            size_hint=(1, 0.1),
            on_press=about_url_click
        ))
        about_popup = Popup(
            title='Blog2Epub',
            title_size=dp(10*F_SIZE),
            title_font='RobotoMono-Regular',
            content=about_content,
            size_hint=(None, None),
            size=(dp(210*F_SIZE), dp(180*F_SIZE)),
        )
        about_popup.open()
class AboutPopupLabel(Label):
    """One line of text inside the About popup."""
    def __init__(self, **kwargs):
        super(AboutPopupLabel, self).__init__(**kwargs)
        self.font_name = 'RobotoMono-Regular'
        self.font_size = dp(8 * F_SIZE)
        self.size_hint = (1, 0.1)
class KivyInterface(EmptyInterface):
    """Crawler interface that logs to file and mirrors output in the UI console."""

    def __init__(self, console):
        # console: the read-only TextInput used as an on-screen log.
        self.console = console

    def print(self, text):
        """Append *text* to the log file and the UI console."""
        logging.info(text)
        self.console.text = self.console.text + text + "\n"

    def notify(self, title, subtitle, message, cover):
        """Show a desktop notification (terminal-notifier / notify-send)."""
        if platform.system() == "Darwin":
            # Bug fix: the executable name was both the first list element AND
            # the format prefix, producing "terminal-notifier terminal-notifier
            # -title ..." and misparsed options.
            command = [
                '-title {!r}'.format(title),
                '-subtitle {!r}'.format(subtitle),
                '-message {!r}'.format(message),
                '-contentImage {!r}'.format(cover),
                '-sound chime',
                '-appIcon {!r}'.format(os.path.join(os.path.dirname(sys.executable), 'blogspot.png')),
                '-open file:{!r}'.format(message),
            ]
            os.system('terminal-notifier {}'.format(' '.join(command)))
        if platform.system() == "Linux":
            subprocess.Popen(['notify-send', subtitle + ': ' + message])

    def exception(self, e):
        """Log and display an exception."""
        logging.error("Exception: " + str(e))
        self.print("Exception: " + str(e))

    def clear(self):
        """Empty the UI console."""
        self.console.text = ""
class Blog2EpubSettings(object):
    """Persists the url/limit/skip form fields in ~/.blog2epub/blog2epub.yml."""

    def __init__(self):
        self.path = os.path.join(str(Path.home()), '.blog2epub')
        self._prepare_path()
        self.fname = os.path.join(self.path, 'blog2epub.yml')
        self._data = self._read()

    def _prepare_path(self):
        # exist_ok avoids the check-then-create race of the original version.
        os.makedirs(self.path, exist_ok=True)

    def _read(self):
        """Load settings from disk, creating the file with defaults on first run."""
        if not os.path.isfile(self.fname):
            self._data = self._get_default()
            self.save()
        with open(self.fname, 'rb') as stream:
            # Bug fix: an empty YAML file loads as None, which made the
            # membership test below raise TypeError.
            data_in_file = yaml.safe_load(stream) or {}
        # Only accept known keys; unknown keys in the file are ignored.
        data = self._get_default()
        for key in data:
            if key in data_in_file:
                data[key] = data_in_file[key]
        return data

    def _get_default(self):
        # Defaults for every supported setting key.
        return {
            'url': '',
            'limit': '',
            'skip': ''
        }

    def save(self):
        """Write the current settings to disk."""
        with open(self.fname, 'w') as outfile:
            yaml.dump(self._data, outfile, default_flow_style=False)

    def set(self, key, value):
        self._data[key] = value

    def get(self, key):
        # None for unknown keys instead of raising KeyError.
        return self._data.get(key)
class Blog2EpubKivy(App):
    """Application shell: a fixed-size window hosting Blog2EpubKivyWindow."""

    def __init__(self, **kwargs):
        super(Blog2EpubKivy, self).__init__(**kwargs)
        self.icon = get_image_file('blog2epub.icns')
        self.title = 'blog2epub - v. {}'.format(Blog2Epub.VERSION)

    def build(self):
        # Fixed-size, non-resizable main window.
        Window.size = (dp(300*SIZE), dp(200*SIZE))
        Window.resizable = False
        return Blog2EpubKivyWindow()
def main():
    """Application entry point: start the Kivy event loop."""
    Blog2EpubKivy().run()


if __name__ == "__main__":
    main()
|
loggers.py | """PC est magique - Custom Flask Loggers"""
import os
import logging
from logging import StreamHandler
from logging.handlers import TimedRotatingFileHandler
import threading
import flask
from discord_webhook import DiscordWebhook
import requests
from app import PCEstMagiqueApp
from app.tools import typing
def _execute_webhook(app: PCEstMagiqueApp,
                     webhook: DiscordWebhook) -> None:
    # To be called in a separate tread (see DiscordHandler.emit); needs an
    # app context because the failure path logs through app.logger.
    with app.app_context():
        response = webhook.execute()  # type: ignore
        if not response:
            # Falsy response == HTTP failure; log loudly with the details.
            app.logger.error(
                f"ATTENTION : Échec lors de l'envoi du webhook {webhook.url} "
                f"({webhook.content}): {response.code} {response.text}"
            )
class DiscordHandler(StreamHandler):
    """Logging handler using a webhook to send log to a Discord server

    Args:
        webhook: Webhook URL to use
            (``"https://discord.com/api/webhooks/<server>/<id>"``)
    """
    def __init__(self, webhook: str) -> None:
        """Initializes self."""
        super().__init__()
        self.webhook = webhook

    def emit(self, record: logging.LogRecord) -> None:
        """Method called to make this handler send a record.

        Fire-and-forget: the webhook runs in a background thread, so nothing
        is returned (the original ``-> requests.Response`` annotation was
        wrong — logging handlers' emit() returns None).
        """
        content = self.format(record)
        webhook = DiscordWebhook(url=self.webhook, content=content)
        # Send in a separate thread so a slow Discord API never blocks the
        # request that triggered the log.
        app = typing.cast(PCEstMagiqueApp, flask.current_app._get_current_object())
        threading.Thread(target=_execute_webhook, args=(app, webhook)).start()
class InfoErrorFormatter(logging.Formatter):
    """Utility formatter allowing to use different formats for info and errors.

    Args:
        info_fmt: Formatter format to use for levels DEBUG and INFO.
        error_fmt: Formatter format to use for levels above INFO.
        *args, **kwargs: Passed to :class:`logging.Formatter`
    """
    def __init__(self, info_fmt: str, error_fmt: str, *args, **kwargs) -> None:
        """Initializes self."""
        super().__init__(info_fmt, *args, **kwargs)
        self.info_fmt = info_fmt
        self.error_fmt = error_fmt

    def format(self, record: logging.LogRecord) -> str:
        """Method called to make this formatter format a record."""
        fmt = self.error_fmt if record.levelno > logging.INFO else self.info_fmt
        # Bug fix: logging.Formatter delegates formatting to self._style, so
        # with style="{" (as used in configure_logging) swapping self._fmt
        # alone had no effect and error_fmt was silently ignored. Swap both,
        # and restore them even if formatting raises.
        orig_fmt, orig_style_fmt = self._fmt, self._style._fmt
        self._fmt = self._style._fmt = fmt
        try:
            result = super().format(record)
        finally:
            self._fmt, self._style._fmt = orig_fmt, orig_style_fmt
        # Logger name is left-padded so multi-logger output lines up.
        return record.name.ljust(12) + result
class DiscordFormatter(logging.Formatter):
"""Base formatter preparing a record to be send to a Discord server.
Args:
role_id (str): Optional 18-digit ID used to mention a specific role
in subclasses using :attr:`DiscordFormatter.role_mention`.
"""
def __init__(self, role_id: str | None = None) -> None:
"""Initializes self."""
super().__init__()
self.role_mention = f"<@&{role_id}> " if role_id else ""
def format(self, record: logging.LogRecord) -> str:
"""Method called to make this formatter format a record.
Truncates message to 1900 characters (Discord limits to 2000).
"""
msg = record.getMessage()
if len(msg) > 1900: # Discord limitation
msg = "[...]\n" + msg[-1900:]
return msg
class DiscordErrorFormatter(DiscordFormatter):
    """:class:`.DiscordFormatter` subclass used to transmit error messages."""
    def format(self, record: logging.LogRecord) -> str:
        """Method called to make this formatter format a record.

        Retrieves request IP and logged-in pceen name if applicable.
        """
        msg = super().format(record)
        # X-Real-Ip is set by the reverse proxy; fall back to a placeholder.
        remote_ip = flask.request.headers.get("X-Real-Ip", "<unknown IP>")
        try:
            if flask.g.logged_in:
                remote_ip += f" / {flask.g.pceen.full_name[:25]}"
        except AttributeError:
            # flask.g not populated yet (e.g. error before request setup).
            pass
        return (f"{self.role_mention}ALED ça a planté ! (chez {remote_ip})\n"
                f"```{msg}```")
# Formatter for the actions logger (routine, non-error action messages)
class DiscordLoggingFormatter(DiscordFormatter):
    """:class:`.DiscordFormatter` subclass used to transmit actions infos."""
    def format(self, record: logging.LogRecord) -> str:
        """Method called to make this formatter format a record.

        Retrieves logged-in pceen and doaser names if applicable.
        """
        msg = super().format(record)
        try:
            if flask.g.logged_in:
                user = flask.g.logged_in_user.username
                if flask.g.doas:
                    # Acting on behalf of another account ("do-as").
                    user += f" AS {flask.g.pceen.username}"
            else:
                user = "(anonymous)"
        except AttributeError:
            # flask.g not populated yet (log emitted outside a request).
            user = "(before context)"
        # Errors additionally mention the configured role to ping someone.
        if record.levelno > logging.INFO:
            return f"`{user}: {record.levelname}: {msg}` ({self.role_mention})"
        else:
            return f"`{user}: {msg}`"
def configure_logging(app: PCEstMagiqueApp) -> None:
    """Configure logging for the PC est magique web app.

    Setup :attr:`app.logger <flask.Flask.logger>` to log errors to
    ``app.config["ERROR_WEBHOOK"]`` Discord webhook and everything to a
    journalized file, and adds a child logger ``app.actions_logger``
    ("app.actions") logging actions to ``app.config["LOGGING_WEBHOOK"]``.
    """
    if app.config["ERROR_WEBHOOK"] and not (app.debug or app.testing):
        # Alert messages for errors (disabled while debugging / testing).
        discord_errors_handler = DiscordHandler(app.config["ERROR_WEBHOOK"])
        discord_errors_handler.setLevel(logging.ERROR)
        discord_errors_handler.setFormatter(DiscordErrorFormatter(
            app.config.get("GRI_ROLE_ID")
        ))
        app.logger.addHandler(discord_errors_handler)

    if app.config["LOGGING_WEBHOOK"]:
        # Logging messages for actions
        discord_actions_handler = DiscordHandler(app.config["LOGGING_WEBHOOK"])
        discord_actions_handler.setLevel(logging.DEBUG if app.debug
                                         else logging.INFO)
        discord_actions_handler.setFormatter(DiscordLoggingFormatter(
            app.config.get("GRI_ROLE_ID")
        ))
        app.actions_logger.addHandler(discord_actions_handler)

    # File logging: daily-rotated file under logs/, with distinct formats
    # for info vs errors (see InfoErrorFormatter).
    if not os.path.exists("logs"):
        os.mkdir("logs")
    file_handler = TimedRotatingFileHandler("logs/pc-est-magique.log",
                                            when="D")
    file_handler.setFormatter(InfoErrorFormatter(
        "{asctime} {levelname}: {message}",
        "{asctime} {levelname}: {message} [in {pathname}:{lineno}]",
        style="{",
    ))
    file_handler.setLevel(logging.INFO)
    app.logger.addHandler(file_handler)

    # Start logs
    app.logger.setLevel(logging.INFO)
    app.actions_logger.setLevel(logging.INFO)
|
08_pendulum_d4pg_main.py | # https://github.com/openai/gym/blob/master/gym/envs/classic_control/pendulum.py
# https://mspries.github.io/jimmy_pendulum.html
#!/usr/bin/env python3
import time
import torch
import torch.multiprocessing as mp
import os, sys
print("PyTorch Version", torch.__version__)
current_path = os.path.dirname(os.path.realpath(__file__))
PROJECT_HOME = os.path.abspath(os.path.join(current_path, os.pardir, os.pardir))
if PROJECT_HOME not in sys.path:
sys.path.append(PROJECT_HOME)
from common.logger import get_logger
from rl_main import rl_utils
from common.fast_rl.rl_agent import float32_preprocessor
from common.fast_rl import actions, rl_agent, experience_single
from common.fast_rl.common import statistics, utils
from config.parameters import PARAMETERS as params
# Saved model checkpoints live under out/model_save_files.
MODEL_SAVE_DIR = os.path.join(PROJECT_HOME, "out", "model_save_files")
if not os.path.exists(MODEL_SAVE_DIR):
    os.makedirs(MODEL_SAVE_DIR)

# Work around duplicate OpenMP runtime loading (common with PyTorch setups).
os.environ['KMP_DUPLICATE_LIB_OK'] = 'True'

# Use CUDA only when available AND enabled in the config.
if torch.cuda.is_available():
    device = torch.device("cuda" if params.CUDA else "cpu")
else:
    device = torch.device("cpu")

my_logger = get_logger("openai_pendulum_d4pg")

# Width of one atom bin of the distributional (D4PG/C51-style) value support.
DELTA_Z = (params.V_MAX - params.V_MIN) / (params.N_ATOMS - 1)
def play_func(exp_queue, env, net):
    """Actor loop run in a child process.

    Rolls out episodes with a D4PG agent on `env` and pushes every
    transition into `exp_queue` for the learner in the main process.
    Enqueues a `None` sentinel on exit so the consumer knows to stop.
    """
    print(env.action_space.low[0], env.action_space.high[0])
    action_min = env.action_space.low[0]
    action_max = env.action_space.high[0]
    action_selector = actions.EpsilonGreedyD4PGActionSelector(epsilon=params.EPSILON_INIT)
    # Anneals epsilon from EPSILON_INIT to EPSILON_MIN over EPSILON_MIN_STEP steps.
    epsilon_tracker = actions.EpsilonTracker(
        action_selector=action_selector,
        eps_start=params.EPSILON_INIT,
        eps_final=params.EPSILON_MIN,
        eps_frames=params.EPSILON_MIN_STEP
    )
    agent = rl_agent.AgentD4PG(
        net, n_actions=1, action_selector=action_selector,
        action_min=action_min, action_max=action_max, device=device, preprocessor=float32_preprocessor
    )
    experience_source = experience_single.ExperienceSourceSingleEnvFirstLast(
        env, agent, gamma=params.GAMMA, steps_count=params.N_STEP, step_length=-1
    )
    exp_source_iter = iter(experience_source)
    if params.DRAW_VIZ:
        stat = statistics.StatisticsForPolicyBasedRL(method="policy_gradient")
    else:
        stat = None
    step_idx = 0
    best_mean_episode_reward = 0.0
    with utils.RewardTracker(params=params, frame=False, stat=stat) as reward_tracker:
        while step_idx < params.MAX_GLOBAL_STEP:
            # Advance one step and put the experience into exp_queue.
            step_idx += 1
            exp = next(exp_source_iter)
            exp_queue.put(exp)
            # NOTE(review): 'udpate' looks like a typo for 'update' -- confirm
            # against the EpsilonTracker API before renaming.
            epsilon_tracker.udpate(step_idx)
            episode_rewards = experience_source.pop_episode_reward_lst()
            if episode_rewards:
                current_episode_reward = episode_rewards[0]
                solved, mean_episode_reward = reward_tracker.set_episode_reward(
                    current_episode_reward, step_idx, epsilon=action_selector.epsilon
                )
                # Save only once the mean reward improves AND exploration has decayed.
                model_save_condition = [
                    reward_tracker.mean_episode_reward > best_mean_episode_reward,
                    step_idx > params.EPSILON_MIN_STEP
                ]
                if reward_tracker.mean_episode_reward > best_mean_episode_reward:
                    best_mean_episode_reward = reward_tracker.mean_episode_reward
                if all(model_save_condition) or solved:
                    rl_agent.save_model(
                        MODEL_SAVE_DIR, params.ENVIRONMENT_ID.value, net.__name__, net, step_idx, mean_episode_reward
                    )
                if solved:
                    break
    # Sentinel: tells the training loop no more experience will arrive.
    exp_queue.put(None)
def main():
    """Learner loop: consume experience from the actor process and train.

    Spawns `play_func` as a child process, drains `exp_queue` in batches of
    TRAIN_STEP_FREQ, fills the replay buffer, and runs training steps once
    the buffer holds at least MIN_REPLAY_SIZE_FOR_TRAIN transitions.
    """
    mp.set_start_method('spawn')
    env = rl_utils.get_environment(owner="worker", params=params)
    print("env:", params.ENVIRONMENT_ID)
    print("observation_space:", env.observation_space)
    print("action_space:", env.action_space)
    rl_algorithm = rl_utils.get_rl_algorithm(env=env, worker_id=0, logger=my_logger, params=params)
    exp_queue = mp.Queue(maxsize=params.TRAIN_STEP_FREQ * 2)
    play_proc = mp.Process(target=play_func, args=(exp_queue, env, rl_algorithm.model))
    play_proc.start()
    time.sleep(0.5)
    step_idx = 0
    while play_proc.is_alive():
        step_idx += params.TRAIN_STEP_FREQ
        exp = None
        for _ in range(params.TRAIN_STEP_FREQ):
            exp = exp_queue.get()
            # `None` is the actor's shutdown sentinel.
            if exp is None:
                play_proc.join()
                break
            # NOTE(review): uses the buffer's private `_add` -- confirm there
            # is no public insertion API before relying on this.
            rl_algorithm.buffer._add(exp)
        if len(rl_algorithm.buffer) < params.MIN_REPLAY_SIZE_FOR_TRAIN:
            continue
        # Train at episode boundaries (last_state None marks an episode end).
        if exp is not None and exp.last_state is None:
            for _ in range(3):
                rl_algorithm.train_net(step_idx=step_idx)
if __name__ == "__main__":
    main()
|
_debugger_case_multiprocessing_2.py | from multiprocessing import Process, Queue
class Foo:
    # Simple picklable payload; its constructor runs in the child process so
    # the debugger test can verify the breakpoint below fires there.
    def __init__(self, value):
        self.value = value  # break 2 here
def f(q):
    # Child-process entry point: constructing Foo should hit "break 2".
    q.put(Foo(1))
if __name__ == '__main__':
    # Round-trip a Foo through a multiprocessing Queue so the debugger test
    # can break in both the parent ("break 1") and the child ("break 2").
    q = Queue()
    p = Process(target=f, args=(q,))
    p.start()
    print(q.get().value)  # break 1 here
    # NOTE(review): the harness likely greps for this exact (misspelled)
    # string -- do not "fix" the spelling.
    print('TEST SUCEEDED!')
    p.join()
|
keep_alive.py | from flask import Flask
from threading import Thread
app = Flask('')
@app.route('/')
def home():
    # Health-check endpoint, presumably polled by an external uptime pinger
    # to keep the host awake -- confirm against the deployment setup.
    return "Server Host Connected and Synced to Main Code."
def run():
    # Blocking: serves on all interfaces, port 1215, until the process exits.
    app.run(host='0.0.0.0',port=1215)
def keep_alive():
    """Start the Flask server on a background thread and return immediately."""
    server_thread = Thread(target=run)
    server_thread.start()
_debugger_case_thread_creation_deadlock.py | from threading import Thread, Event
def create_thread():
    """Spawn a short-lived thread, wait for its signal, and report success.

    Returns 'create_thread:ok' on success; on failure prints the traceback
    and returns None (the caller is a debugger-evaluation test).
    """
    done = Event()
    flag = [False]  # mutable cell so the worker can signal back through the closure

    def worker():
        flag[0] = True
        done.set()

    Thread(target=worker).start()
    try:
        done.wait(5)
        # note: not using `assert done.wait(5)` for py2.6 compatibility.
        assert flag[0]
        print('TEST SUCEEDED!')
        return 'create_thread:ok'
    except:
        import traceback;traceback.print_exc()
# Module-level anchor for the debugger test: set a breakpoint on this line
# and evaluate `create_thread()` interactively.
a = 10  # Add breakpoint here and evaluate create_thread()
|
trezor.py | import traceback
import sys
from typing import NamedTuple, Any, Optional, Dict, Union, List, Tuple, TYPE_CHECKING
from electrum.util import bfh, bh2u, versiontuple, UserCancelled, UserFacingException
from electrum.bip32 import BIP32Node, convert_bip32_path_to_list_of_uint32 as parse_path
from electrum import constants
from electrum.i18n import _
from electrum.plugin import Device, runs_in_hwd_thread
from electrum.transaction import Transaction, PartialTransaction, PartialTxInput, PartialTxOutput
from electrum.keystore import Hardware_KeyStore
from electrum.base_wizard import ScriptTypeNotSupported, HWD_SETUP_NEW_WALLET
from electrum.logging import get_logger
from ..hw_wallet import HW_PluginBase
from ..hw_wallet.plugin import (is_any_tx_output_on_change_branch, trezor_validate_op_return_output_and_get_data,
LibraryFoundButUnusable, OutdatedHwFirmwareException,
get_xpubs_and_der_suffixes_from_txinout)
_logger = get_logger(__name__)
# Import trezorlib if available; otherwise install stub enum objects so the
# rest of the module can still be imported (with TREZORLIB = False).
try:
    import trezorlib
    import trezorlib.transport
    from trezorlib.transport.bridge import BridgeTransport, call_bridge
    from .clientbase import TrezorClientBase
    from trezorlib.messages import (
        Capability, BackupType, RecoveryDeviceType, HDNodeType, HDNodePathType,
        InputScriptType, OutputScriptType, MultisigRedeemScriptType,
        TxInputType, TxOutputType, TxOutputBinType, TransactionType, SignTx)
    from trezorlib.client import PASSPHRASE_ON_DEVICE
    TREZORLIB = True
except Exception as e:
    # A plain missing trezorlib is expected; anything else is worth logging.
    if not (isinstance(e, ModuleNotFoundError) and e.name == 'trezorlib'):
        _logger.exception('error importing trezor plugin deps')
    TREZORLIB = False
    class _EnumMissing:
        # Stand-in for trezorlib enums: hands out stable, distinct integer
        # values for whatever attribute names are requested.
        def __init__(self):
            self.counter = 0
            self.values = {}
        def __getattr__(self, key):
            if key not in self.values:
                self.values[key] = self.counter
                self.counter += 1
            return self.values[key]
    Capability = _EnumMissing()
    BackupType = _EnumMissing()
    RecoveryDeviceType = _EnumMissing()
    PASSPHRASE_ON_DEVICE = object()
# Trezor initialization methods
TIM_NEW, TIM_RECOVER = range(2)
TREZOR_PRODUCT_KEY = 'Trezor'
class TrezorKeyStore(Hardware_KeyStore):
    """Keystore backed by a Trezor hardware device."""

    hw_type = 'trezor'
    device = TREZOR_PRODUCT_KEY
    plugin: 'TrezorPlugin'

    def get_client(self, force_pair=True):
        # Client acquisition is handled entirely by the owning plugin.
        return self.plugin.get_client(self, force_pair)

    def decrypt_message(self, sequence, message, password):
        # The device exposes no message encryption/decryption primitive.
        raise UserFacingException(_('Encryption and decryption are not implemented by {}').format(self.device))

    def sign_message(self, sequence, message, password):
        # Build the full derivation path "<prefix>/<change>/<index>".
        derivation_path = self.get_derivation_prefix() + "/%d/%d"%sequence
        signed = self.get_client().sign_message(derivation_path, message)
        return signed.signature

    def sign_transaction(self, tx, password):
        if tx.is_complete():
            return
        # Collect the previous transaction backing each input; the device
        # needs them to verify input amounts.
        previous_txs = {}
        for txin in tx.inputs():
            input_txid = txin.prevout.txid.hex()
            if txin.utxo is None:
                raise UserFacingException(_('Missing previous tx.'))
            previous_txs[input_txid] = txin.utxo
        self.plugin.sign_transaction(self, tx, previous_txs)
class TrezorInitSettings(NamedTuple):
    """User choices collected by the wizard for initializing a Trezor."""
    word_count: int               # seed length in words
    label: str                    # device label shown on its screen
    pin_enabled: bool             # protect the device with a PIN
    passphrase_enabled: bool      # enable passphrase protection
    recovery_type: Any = None     # RecoveryDeviceType value (recovery flow only)
    backup_type: int = BackupType.Bip39
    no_backup: bool = False       # create a "no backup" (seedless) device
class TrezorPlugin(HW_PluginBase):
    """Electrum hardware-wallet plugin driving Trezor devices via trezorlib."""
    # Derived classes provide:
    #
    # class-static variables: client_class, firmware_URL, handler_class,
    # libraries_available, libraries_URL, minimum_firmware,
    # wallet_class, types
    firmware_URL = 'https://wallet.trezor.io'
    libraries_URL = 'https://pypi.org/project/trezor/'
    minimum_firmware = (1, 5, 2)
    keystore_class = TrezorKeyStore
    minimum_library = (0, 12, 0)
    maximum_library = (0, 13)
    SUPPORTED_XTYPES = ('standard', 'p2wpkh-p2sh', 'p2wpkh', 'p2wsh-p2sh', 'p2wsh')
    DEVICE_IDS = (TREZOR_PRODUCT_KEY,)
    MAX_LABEL_LEN = 32
    def __init__(self, parent, config, name):
        super().__init__(parent, config, name)
        self.libraries_available = self.check_libraries_available()
        if not self.libraries_available:
            return
        self.device_manager().register_enumerate_func(self.enumerate)
        # Cached result of the (slow) Bridge probe; None means "not yet tested".
        self._is_bridge_available = None
    def get_library_version(self):
        """Return the trezorlib version string, raising if it is unusable."""
        import trezorlib
        try:
            version = trezorlib.__version__
        except Exception:
            version = 'unknown'
        if TREZORLIB:
            return version
        else:
            raise LibraryFoundButUnusable(library_version=version)
    @runs_in_hwd_thread
    def is_bridge_available(self) -> bool:
        # Testing whether the Bridge is available can take several seconds
        # (when it is not), as it is slow to timeout, hence we cache it.
        if self._is_bridge_available is None:
            try:
                call_bridge("enumerate")
            except Exception:
                self._is_bridge_available = False
                # never again try with Bridge due to slow timeout
                BridgeTransport.ENABLED = False
            else:
                self._is_bridge_available = True
        return self._is_bridge_available
    @runs_in_hwd_thread
    def enumerate(self):
        """List connected Trezor devices as generic `Device` records."""
        # If there is a bridge, prefer that.
        # On Windows, the bridge runs as Admin (and Electrum usually does not),
        # so the bridge has better chances of finding devices. see #5420
        # This also avoids duplicate entries.
        if self.is_bridge_available():
            devices = BridgeTransport.enumerate()
        else:
            devices = trezorlib.transport.enumerate_devices()
        return [Device(path=d.get_path(),
                       interface_number=-1,
                       id_=d.get_path(),
                       product_key=TREZOR_PRODUCT_KEY,
                       usage_page=0,
                       transport_ui_string=d.get_path())
                for d in devices]
    @runs_in_hwd_thread
    def create_client(self, device, handler):
        """Open a transport to `device` and wrap it in a TrezorClientBase.

        Returns None when the transport cannot be opened.
        """
        try:
            self.logger.info(f"connecting to device at {device.path}")
            transport = trezorlib.transport.get_transport(device.path)
        except BaseException as e:
            self.logger.info(f"cannot connect at {device.path} {e}")
            return None
        if not transport:
            self.logger.info(f"cannot connect at {device.path}")
            return
        self.logger.info(f"connected to device at {device.path}")
        # note that this call can still raise!
        return TrezorClientBase(transport, handler, self)
    @runs_in_hwd_thread
    def get_client(self, keystore, force_pair=True, *,
                   devices=None, allow_user_interaction=True) -> Optional['TrezorClientBase']:
        client = super().get_client(keystore, force_pair,
                                    devices=devices,
                                    allow_user_interaction=allow_user_interaction)
        # returns the client for a given keystore. can use xpub
        if client:
            client.used()
        return client
    def get_coin_name(self):
        # Coin name passed to the device when signing.
        return "Testnet" if constants.net.TESTNET else "UraniumX"
    def initialize_device(self, device_id, wizard, handler):
        """Ask the user how to initialize the device and run that flow.

        Raises UserCancelled if initialization did not complete.
        """
        # Initialization method
        # NOTE(review): format() is given two args for one placeholder; the
        # second is silently ignored -- probably a leftover.
        msg = _("Choose how you want to initialize your {}.").format(self.device, self.device)
        choices = [
            # Must be short as QT doesn't word-wrap radio button text
            (TIM_NEW, _("Let the device generate a completely new seed randomly")),
            (TIM_RECOVER, _("Recover from a seed you have previously written down")),
        ]
        def f(method):
            import threading
            settings = self.request_trezor_init_settings(wizard, method, device_id)
            t = threading.Thread(target=self._initialize_device_safe, args=(settings, method, device_id, wizard, handler))
            # NOTE(review): setDaemon() is deprecated in favour of the
            # `daemon` attribute -- kept as-is here.
            t.setDaemon(True)
            t.start()
            exit_code = wizard.loop.exec_()
            if exit_code != 0:
                # this method (initialize_device) was called with the expectation
                # of leaving the device in an initialized state when finishing.
                # signal that this is not the case:
                raise UserCancelled()
        wizard.choice_dialog(title=_('Initialize Device'), message=msg, choices=choices, run_next=f)
    def _initialize_device_safe(self, settings, method, device_id, wizard, handler):
        """Thread wrapper: run _initialize_device, report errors, exit the wizard loop."""
        exit_code = 0
        try:
            self._initialize_device(settings, method, device_id, wizard, handler)
        except UserCancelled:
            exit_code = 1
        except BaseException as e:
            self.logger.exception('')
            handler.show_error(repr(e))
            exit_code = 1
        finally:
            wizard.loop.exit(exit_code)
    @runs_in_hwd_thread
    def _initialize_device(self, settings: TrezorInitSettings, method, device_id, wizard, handler):
        """Drive the device's reset (TIM_NEW) or recovery (TIM_RECOVER) flow."""
        if method == TIM_RECOVER and settings.recovery_type == RecoveryDeviceType.ScrambledWords:
            handler.show_error(_(
                "You will be asked to enter 24 words regardless of your "
                "seed's actual length.  If you enter a word incorrectly or "
                "misspell it, you cannot change it or go back - you will need "
                "to start again from the beginning.\n\nSo please enter "
                "the words carefully!"),
                blocking=True)
        devmgr = self.device_manager()
        client = devmgr.client_by_id(device_id)
        if not client:
            raise Exception(_("The device was disconnected."))
        if method == TIM_NEW:
            # NOTE(review): the 20- and 33-word entries look like SLIP39 share
            # lengths -- confirm against request_trezor_init_settings.
            strength_from_word_count = {12: 128, 18: 192, 20: 128, 24: 256, 33: 256}
            client.reset_device(
                strength=strength_from_word_count[settings.word_count],
                passphrase_protection=settings.passphrase_enabled,
                pin_protection=settings.pin_enabled,
                label=settings.label,
                backup_type=settings.backup_type,
                no_backup=settings.no_backup)
        elif method == TIM_RECOVER:
            client.recover_device(
                recovery_type=settings.recovery_type,
                word_count=settings.word_count,
                passphrase_protection=settings.passphrase_enabled,
                pin_protection=settings.pin_enabled,
                label=settings.label)
            if settings.recovery_type == RecoveryDeviceType.Matrix:
                handler.close_matrix_dialog()
        else:
            raise RuntimeError("Unsupported recovery method")
    def _make_node_path(self, xpub, address_n):
        """Convert an xpub + derivation suffix into a trezorlib HDNodePathType."""
        bip32node = BIP32Node.from_xkey(xpub)
        node = HDNodeType(
            depth=bip32node.depth,
            fingerprint=int.from_bytes(bip32node.fingerprint, 'big'),
            child_num=int.from_bytes(bip32node.child_number, 'big'),
            chain_code=bip32node.chaincode,
            public_key=bip32node.eckey.get_public_key_bytes(compressed=True),
        )
        return HDNodePathType(node=node, address_n=address_n)
    def setup_device(self, device_info, wizard, purpose):
        """Pair with a device for the wizard, initializing it if needed."""
        device_id = device_info.device.id_
        client = self.scan_and_create_client_for_device(device_id=device_id, wizard=wizard)
        if not client.is_uptodate():
            msg = (_('Outdated {} firmware for device labelled {}. Please '
                     'download the updated firmware from {}')
                   .format(self.device, client.label(), self.firmware_URL))
            raise OutdatedHwFirmwareException(msg)
        if not device_info.initialized:
            self.initialize_device(device_id, wizard, client.handler)
        is_creating_wallet = purpose == HWD_SETUP_NEW_WALLET
        wizard.run_task_without_blocking_gui(
            task=lambda: client.get_xpub('m', 'standard', creating=is_creating_wallet))
        client.used()
        return client
    def get_xpub(self, device_id, derivation, xtype, wizard):
        """Fetch the xpub at `derivation` from the device identified by `device_id`."""
        if xtype not in self.SUPPORTED_XTYPES:
            raise ScriptTypeNotSupported(_('This type of script is not supported with {}.').format(self.device))
        client = self.scan_and_create_client_for_device(device_id=device_id, wizard=wizard)
        xpub = client.get_xpub(derivation, xtype)
        client.used()
        return xpub
    def get_trezor_input_script_type(self, electrum_txin_type: str):
        """Map an Electrum txin type to the trezorlib InputScriptType enum."""
        if electrum_txin_type in ('p2wpkh', 'p2wsh'):
            return InputScriptType.SPENDWITNESS
        if electrum_txin_type in ('p2wpkh-p2sh', 'p2wsh-p2sh'):
            return InputScriptType.SPENDP2SHWITNESS
        if electrum_txin_type in ('p2pkh',):
            return InputScriptType.SPENDADDRESS
        if electrum_txin_type in ('p2sh',):
            return InputScriptType.SPENDMULTISIG
        raise ValueError('unexpected txin type: {}'.format(electrum_txin_type))
    def get_trezor_output_script_type(self, electrum_txin_type: str):
        """Map an Electrum txin type to the trezorlib OutputScriptType enum."""
        if electrum_txin_type in ('p2wpkh', 'p2wsh'):
            return OutputScriptType.PAYTOWITNESS
        if electrum_txin_type in ('p2wpkh-p2sh', 'p2wsh-p2sh'):
            return OutputScriptType.PAYTOP2SHWITNESS
        if electrum_txin_type in ('p2pkh',):
            return OutputScriptType.PAYTOADDRESS
        if electrum_txin_type in ('p2sh',):
            return OutputScriptType.PAYTOMULTISIG
        raise ValueError('unexpected txin type: {}'.format(electrum_txin_type))
    @runs_in_hwd_thread
    def sign_transaction(self, keystore, tx: PartialTransaction, prev_tx):
        """Have the device sign `tx` and write the signatures back into it."""
        prev_tx = {bfh(txhash): self.electrum_tx_to_txtype(tx) for txhash, tx in prev_tx.items()}
        client = self.get_client(keystore)
        inputs = self.tx_inputs(tx, for_sig=True, keystore=keystore)
        outputs = self.tx_outputs(tx, keystore=keystore)
        details = SignTx(lock_time=tx.locktime, version=tx.version)
        # note: `_` here locally shadows the i18n gettext alias.
        signatures, _ = client.sign_tx(self.get_coin_name(), inputs, outputs, details=details, prev_txes=prev_tx)
        # Append SIGHASH_ALL (0x01) to each DER signature.
        signatures = [(bh2u(x) + '01') for x in signatures]
        tx.update_signatures(signatures)
    @runs_in_hwd_thread
    def show_address(self, wallet, address, keystore=None):
        """Display `address` on the device screen for user verification."""
        if keystore is None:
            keystore = wallet.get_keystore()
        if not self.show_address_helper(wallet, address, keystore):
            return
        deriv_suffix = wallet.get_address_index(address)
        derivation = keystore.get_derivation_prefix()
        address_path = "%s/%d/%d"%(derivation, *deriv_suffix)
        script_type = self.get_trezor_input_script_type(wallet.txin_type)
        # prepare multisig, if available:
        xpubs = wallet.get_master_public_keys()
        if len(xpubs) > 1:
            pubkeys = wallet.get_public_keys(address)
            # sort xpubs using the order of pubkeys
            sorted_pairs = sorted(zip(pubkeys, xpubs))
            multisig = self._make_multisig(
                wallet.m,
                [(xpub, deriv_suffix) for pubkey, xpub in sorted_pairs])
        else:
            multisig = None
        client = self.get_client(keystore)
        client.show_address(address_path, script_type, multisig)
    def tx_inputs(self, tx: Transaction, *, for_sig=False, keystore: 'TrezorKeyStore' = None):
        """Convert Electrum tx inputs to trezorlib TxInputType records.

        When `for_sig` is True, also attaches script types, multisig info and
        the keystore's derivation path so the device can sign.
        """
        inputs = []
        for txin in tx.inputs():
            txinputtype = TxInputType()
            if txin.is_coinbase_input():
                prev_hash = b"\x00"*32
                prev_index = 0xffffffff  # signed int -1
            else:
                if for_sig:
                    assert isinstance(tx, PartialTransaction)
                    assert isinstance(txin, PartialTxInput)
                    assert keystore
                    if len(txin.pubkeys) > 1:
                        xpubs_and_deriv_suffixes = get_xpubs_and_der_suffixes_from_txinout(tx, txin)
                        multisig = self._make_multisig(txin.num_sig, xpubs_and_deriv_suffixes)
                    else:
                        multisig = None
                    script_type = self.get_trezor_input_script_type(txin.script_type)
                    txinputtype = TxInputType(
                        script_type=script_type,
                        multisig=multisig)
                    my_pubkey, full_path = keystore.find_my_pubkey_in_txinout(txin)
                    if full_path:
                        txinputtype.address_n = full_path
                prev_hash = txin.prevout.txid
                prev_index = txin.prevout.out_idx
            if txin.value_sats() is not None:
                txinputtype.amount = txin.value_sats()
            txinputtype.prev_hash = prev_hash
            txinputtype.prev_index = prev_index
            if txin.script_sig is not None:
                txinputtype.script_sig = txin.script_sig
            txinputtype.sequence = txin.nsequence
            inputs.append(txinputtype)
        return inputs
    def _make_multisig(self, m, xpubs):
        """Build an m-of-n MultisigRedeemScriptType, or None for single-sig."""
        if len(xpubs) == 1:
            return None
        pubkeys = [self._make_node_path(xpub, deriv) for xpub, deriv in xpubs]
        return MultisigRedeemScriptType(
            pubkeys=pubkeys,
            signatures=[b''] * len(pubkeys),
            m=m)
    def tx_outputs(self, tx: PartialTransaction, *, keystore: 'TrezorKeyStore'):
        """Convert Electrum tx outputs to trezorlib TxOutputType records.

        At most one of our own change outputs is sent by derivation path (so
        the device hides it from confirmation); all others go by address.
        """
        def create_output_by_derivation():
            # Output owned by this keystore: identified by derivation path.
            script_type = self.get_trezor_output_script_type(txout.script_type)
            if len(txout.pubkeys) > 1:
                xpubs_and_deriv_suffixes = get_xpubs_and_der_suffixes_from_txinout(tx, txout)
                multisig = self._make_multisig(txout.num_sig, xpubs_and_deriv_suffixes)
            else:
                multisig = None
            my_pubkey, full_path = keystore.find_my_pubkey_in_txinout(txout)
            assert full_path
            txoutputtype = TxOutputType(
                multisig=multisig,
                amount=txout.value,
                address_n=full_path,
                script_type=script_type)
            return txoutputtype
        def create_output_by_address():
            # Plain output: either pay-to-address or an OP_RETURN payload.
            txoutputtype = TxOutputType()
            txoutputtype.amount = txout.value
            if address:
                txoutputtype.script_type = OutputScriptType.PAYTOADDRESS
                txoutputtype.address = address
            else:
                txoutputtype.script_type = OutputScriptType.PAYTOOPRETURN
                txoutputtype.op_return_data = trezor_validate_op_return_output_and_get_data(txout)
            return txoutputtype
        outputs = []
        has_change = False
        any_output_on_change_branch = is_any_tx_output_on_change_branch(tx)
        for txout in tx.outputs():
            address = txout.address
            use_create_by_derivation = False
            if txout.is_mine and not has_change:
                # prioritise hiding outputs on the 'change' branch from user
                # because no more than one change address allowed
                # note: ^ restriction can be removed once we require fw
                # that has https://github.com/trezor/trezor-mcu/pull/306
                if txout.is_change == any_output_on_change_branch:
                    use_create_by_derivation = True
                    has_change = True
            if use_create_by_derivation:
                txoutputtype = create_output_by_derivation()
            else:
                txoutputtype = create_output_by_address()
            outputs.append(txoutputtype)
        return outputs
    def electrum_tx_to_txtype(self, tx: Optional[Transaction]):
        """Convert an Electrum Transaction into a trezorlib TransactionType."""
        t = TransactionType()
        if tx is None:
            # probably for segwit input and we don't need this prev txn
            return t
        tx.deserialize()
        t.version = tx.version
        t.lock_time = tx.locktime
        t.inputs = self.tx_inputs(tx)
        t.bin_outputs = [
            TxOutputBinType(amount=o.value, script_pubkey=o.scriptpubkey)
            for o in tx.outputs()
        ]
        return t
|
MusicCmd.py | import copy
import json
import os
import sys
import threading
from cmd import Cmd
import mpv
from wcwidth import wcswidth
from api import (MiguMusicAPI, NeteaseCloudMusicAPI, QQMusicApi,
convert_interval, convert_singerlist)
class MusicShell(Cmd):
''' Main command shell '''
prompt = 'BowenMusic> '
with open('src/intro.txt', 'r') as f:
intro = f.read() # welcome
    def __init__(self):
        """Set up shell state: empty search cache, default QQ api, playlists."""
        Cmd.__init__(self)
        # Last search: 'source' is the api name, 'content' the raw result list.
        self.search_result = {
            'source': '',
            'content': []
        }
        self.api = QQMusicApi()  # default qq
        self.current_song = None  # dict()
        self.playlist = list()
        # self.player = mpv.MPV(ytdl=True)
        # playing signal
        self.playing_s = False
        self.playing_pl = False
        self.interrupt = False
        self.playing = False
        # initialize playlist info
        self._get_playlist_info()
# command #
    def do_pl(self, arg):
        ''' show playlist
        > pl        # all playlists overview
        > pl 2      # show songs in 2nd playlist
        '''
        # check playlist exists
        self._get_playlist_info()
        if len(self.playlist) == 0:
            print('No playlist found, use `cpl` to create one.')
            return
        # no arg, show all playlists' names
        if arg == '':
            # NOTE(review): this initial assignment is dead -- the for-loop
            # below rebinds `index` immediately.
            index = 1
            for index in range(len(self.playlist)):
                print('{}. {}'.format(index+1, self.playlist[index]))
            return
        try:  # if arg is `int` and in range
            pl_index = int(arg)
            assert (pl_index >= 1 and pl_index <= len(self.playlist))
        except:
            print('Arguments should be in [1, {}].'.format(len(self.playlist)))
            return
        # Header row: song / artist / album columns padded to display width.
        print('\n歌单: {}'.format(self.playlist[pl_index-1]))
        print('-'*15)
        print('{}{}{}{}'.format(
            self._rpad('', 4),
            self._rpad('歌曲', 30),
            self._rpad('歌手', 20),
            self._rpad('专辑', 30)))
        print('-'*80)
        with open(os.path.join(self.LIST_DIR,
                '{}.json'.format(self.playlist[pl_index-1])), 'r') as f:
            song_list = json.load(f)
        for index in range(len(song_list)):
            print('{}{}{}{}'.format(
                self._rpad(str(index+1)+'.', 4),
                self._rpad(song_list[index]['song_name'], 30),
                self._rpad(song_list[index]['singers'], 20),
                self._rpad(song_list[index]['album_name'], 30)))
        print('-'*80)
    def do_cpl(self, arg):
        ''' create playlist '''
        # Prompt interactively for the name; refuse duplicates.
        print('Please input the name of new playlist: ', end='')
        pl_name = input()
        for file_path in os.listdir(self.LIST_DIR):
            if pl_name == file_path.split('.')[0]:
                print('Playlist {} already exists.'.format(pl_name))
                return
        # create playlist json file (an empty JSON array)
        with open('{}/{}.json'.format(self.LIST_DIR, pl_name), 'w') as f:
            f.write('[]')
        print('Playlist {} created.'.format(pl_name))
        self._get_playlist_info()
    def do_rpl(self, arg):
        ''' rename playlist '''
        # check playlist exists
        if len(self.playlist) == 0:
            print('No playlist found, use `cpl` to create one.')
            return
        # arg format
        if arg == '':
            print('Not enough arguments, the index of playlist should be offered.')
            return
        try:  # if arg is `int` and in range
            pl_index = int(arg)
            assert (pl_index >= 1 and pl_index <= len(self.playlist))
        except:
            print('Arguments should be in [1, {}].'.format(len(self.playlist)))
            return
        # check if the playlist exists
        if os.path.exists('{}/{}.json'.format(self.LIST_DIR, self.playlist[pl_index-1])):
            old_path = os.path.join(self.LIST_DIR, self.playlist[pl_index-1]+'.json')
        else:
            print('Playlist {} doesn\'t exist.'.format(self.playlist[pl_index-1]))
            return
        # get new name from user
        print('Please input a new name: ', end='')
        pl_name = input()
        # check if new name is the same as the old name
        if pl_name == self.playlist[pl_index-1]:
            print('Name not changed.')
            return
        new_path = os.path.join(self.LIST_DIR, pl_name+'.json')
        os.rename(old_path, new_path)
        print('Playlist {} is renamed to {}.'.format(self.playlist[pl_index-1], pl_name))
        self._get_playlist_info()
def do_dpl(self, arg):
''' delete playlist '''
# check playlist exists
if len(self.playlist) == 0:
print('No playlist found, user `cpl` to create one.')
return
# arg format
if arg == '':
print('Not enough arguments, the index of playlist should be offered.')
return
try: # if arg is `int` and in range
pl_index = int(arg)
assert (pl_index >= 1 and pl_index <= len(self.playlist))
except:
print('Arguments should be in [1, {}].'.format(len(self.playlist)))
return
# check if the playlist exists
if os.path.exists('{}/{}.json'.format(self.LIST_DIR, self.playlist[pl_index-1])):
delete_path = os.path.join(self.LIST_DIR, self.playlist[pl_index-1]+'.json')
else:
print('Playlist {} doesn\'t exist.'.format(self.playlist[pl_index-1]))
return
os.remove(delete_path)
print('Playlist {} is deleted.'.format(self.playlist[pl_index-1]))
self._get_playlist_info()
    def do_search(self, keyword):
        ''' search according to keyword
        results are stored in `list` self.search_result
        '''
        # check arg
        if keyword == '':
            print('Not enough arguments, keywords should be offered.')
            return
        # Each api has a different search() signature, hence the branches.
        if self.api.name == 'qq':
            self.search_result['source'] = 'qq'
            self.search_result['content'] = self.api.search(1, keyword)
            if len(self.search_result['content']) != 0:  # result exists
                self._show_search()
            else:
                print('No result found.')
        elif self.api.name == 'netease':
            self.search_result['source'] = 'netease'
            self.search_result['content'] = self.api.search(keyword, 1)
            if len(self.search_result['content']) != 0:
                self._show_search()
            else:
                print('No result found.')
        elif self.api.name == 'migu':
            self.search_result['source'] = 'migu'
            self.search_result['content'] = self.api.search(keyword)
            if len(self.search_result['content']) != 0:
                self._show_search()
            else:
                print('No result found.')
        else:
            print('api name not found.')
            return
    def do_add(self, arg):
        ''' add songs in last search result to playlist
        > add 10 1      # 10 is index in search result, 1 is index of playlist
        '''
        # check search result
        if len(self.search_result['content']) == 0:
            print('No search result, do search first.')
            return
        # check playlist exists
        if len(self.playlist) == 0:
            print('No playlist found, use `cpl` to create one.')
            return
        # check arguments format
        arg_list = arg.split()  # check number of arguments
        if len(arg_list) < 2:
            print('Not enough arguments, the number of arguments should be 2.')
            return
        elif len(arg_list) > 2:
            print('Too much arguments, the number of arguments should be 2.')
            return
        try:  # check if arguments are number
            s_index = int(arg_list[0])  # index in search result
            pl_index = int(arg_list[1])  # index in playlist
        except:
            print('Arguments should be [the index in search result] [the index of playlist].')
            return
        # check if index is out of range (only the first 10 results are shown)
        s_index_upper_limit = len(self.search_result['content']) if len(self.search_result['content']) <= 10 else 10
        if s_index < 1 or s_index > s_index_upper_limit:
            print('The index in search result must be in [1, {}].'.format(s_index_upper_limit))
            return
        if pl_index < 1 or pl_index > len(self.playlist):
            print('The index of playlist must be in [1, {}].'.format(len(self.playlist)))
            return
        # add song to playlist
        add_song = self.search_result['content'][s_index-1]
        source = self.search_result['source']
        pl_name = self.playlist[pl_index-1]
        # if playlist name exists locally
        if os.path.exists('{}/{}.json'.format(self.LIST_DIR, pl_name)):
            full_path = '{}/{}.json'.format(self.LIST_DIR, pl_name)
        else:
            print('playlist {} dosen\'t exist.'.format(pl_name))
            return
        # generate stored info -- migu results carry a direct url and no
        # interval, so fields are normalized per source here.
        music_info = dict()
        music_info['source'] = source
        music_info['song_name'] = add_song['song_name']
        music_info['singers'] = add_song['singer_list'] if source == 'migu' else convert_singerlist(add_song['singer_list'])
        music_info['album_name'] = add_song['album_name']
        music_info['interval'] = '' if source == 'migu' else convert_interval(add_song['interval'])
        music_info['song_mid'] = add_song['song_mid']
        music_info['url'] = add_song['url'] if source == 'migu' else ''
        # add to playlist (read, append, rewrite the whole json file)
        with open(full_path, 'r') as f:
            song_list = json.load(f)
        song_list.append(music_info)
        with open(full_path, 'w') as f:
            json.dump(song_list, f, indent=1)
        print('Add {} to playlist {} successfully.'.format(add_song['song_name'], pl_name))
def do_play(self, arg):
''' play songs
> play # default play from playlist 1
> play -pl 2 # play from playlist 2
> play -s 2 # play the 2nd song in the last search result
'''
# update playlist
self._get_playlist_info()
# play from playlist 1 by default
if arg == '':
if len(self.playlist) == 0:
print('No playlist found, use `cpl` to create one.')
return
pl_name = self.playlist[0]
pl_path = os.path.join(self.LIST_DIR, pl_name+'.json')
with open(pl_path, 'r') as f:
song_list = json.load(f)
if len(song_list) == 0:
print('No song found in playlist {}.'.format(pl_name))
return
play_pl_thread = threading.Thread(target=self._play_from_playlist, args=(song_list,))
play_pl_thread.start()
# play from playlist or search
else:
# args number
args = arg.split()
if len(args) < 2:
print('Not enough arguments, use `help play` for more information.')
return
if len(args) > 2:
print('Too much arguments, use `help play` for more information.')
return
# check mode
mode = args[0]
if mode not in ['-pl', '-s']:
print('Arguments error, use `help play` for more information.')
return
# play from playlist
if mode == '-pl':
if len(self.playlist) == 0:
print('No playlist found, use `cpl` to create one.')
return
try:
pl_index = int(args[1])
assert pl_index >= 1 and pl_index <= len(self.playlist)
except:
print('playlist index should be in [1, {}].'.format(len(self.playlist)))
return
pl_name = self.playlist[pl_index-1]
pl_path = os.path.join(self.LIST_DIR, pl_name+'.json')
with open(pl_path, 'r') as f:
song_list = json.load(f)
if len(song_list) == 0:
print('No song found in playlist {}.'.format(pl_name))
return
play_pl_thread = threading.Thread(
target=self._play_from_playlist,
args=(song_list,))
play_pl_thread.start()
# play from search result
elif mode == '-s':
if len(self.search_result['content']) == 0:
print('No search result, do search first.')
return
s_index_upper_limit = len(self.search_result['content']) if len(self.search_result['content']) <= 10 else 10
try:
s_index = int(args[1])
assert s_index >= 1 and s_index <= s_index_upper_limit
except:
print('The index in search result must be in [1, {}].'.format(s_index_upper_limit))
# song info
play_song = copy.deepcopy(self.search_result['content'][s_index-1])
play_song['source'] = self.search_result['source']
play_song['singers'] = play_song['singer_list'] if play_song['source'] == 'migu' else convert_singerlist(play_song['singer_list'])
play_song['interval'] = '' if play_song['source'] == 'migu' else convert_interval(play_song['interval'])
play_s_thread = threading.Thread(target=self._play_from_search, args=(play_song,))
play_s_thread.start()
    def do_m(self, arg):
        ''' pause or continue the player '''
        # Toggle mpv's 'pause' property; self.player only exists once
        # something has been played, hence the broad except.
        try:
            if self.player._get_property('pause') == False:
                self.player._set_property('pause', True)
                print('pause.')
            else:
                self.player._set_property('pause', False)
                print('continue')
        except:
            print('No song is being played, use `play` first.')
def _play_from_search(self, song):
# song url
if song['source'] == 'migu':
url = song['url']
elif song['source'] == 'qq':
url = QQMusicApi().get_url(song['song_mid'])
elif song['source'] == 'netease':
url = NeteaseCloudMusicAPI().get_url(song['song_mid'])
# check url
if url == '':
print('You need {} vip to listen {}.'.format(
self.search_result['source'],
play_song['song_name']))
return
# player is occupied by playing pl
if self.playing == True:
self.interrupt = True
self.player.terminate()
self.playing = True
self.player = mpv.MPV(ytdl=True)
self.event_s = threading.Event()
play_thread = threading.Thread(target=self.__play_search_base, args=(url,))
play_thread.start()
self.current_song = song
self.event_s.wait()
if self.interrupt == True:
self.interrupt = False
return
self.playing = False
self.player.terminate()
    def __play_search_base(self, url):
        # Worker-thread body: stream `url` to completion, then signal the
        # waiting _play_from_search via event_s.
        self.player.play(url)
        self.player.wait_for_playback()
        self.current_song = None
        self.event_s.set()
def _play_from_playlist(self, song_list):
if self.playing == True:
self.interrupt = True
self.player.terminate()
self.playing = True
self.player = mpv.MPV(ytdl=True)
for song in song_list:
if song['source'] == 'migu':
url = song['url']
elif song['source'] == 'qq':
url = QQMusicApi().get_url(song['song_mid'])
elif song['source'] == 'netease':
url = NeteaseCloudMusicAPI().get_url(song['song_mid'])
if url == '':
print('You need {} vip to listen {}.'.format(
song['source'],
song['song_name']))
continue
self.event_pl = threading.Event()
play_thread = threading.Thread(target=self.__play_pl_base, args=(url,))
play_thread.start()
self.current_song = song
self.event_pl.wait()
# interrupt by playing search
if self.interrupt == True:
self.interrupt = False
return
self.playing = False
self.player.terminate()
def __play_pl_base(self, url):
    """Worker-thread body: play `url`, then signal event_pl when done."""
    self.player.play(url)
    self.player.wait_for_playback()
    # wake _play_from_playlist so it can advance to the next song
    self.current_song = None
    self.event_pl.set()
def do_i(self, arg):
    ''' print current playing song's info '''
    # `is None` instead of `== None`; identity is the correct check
    if self.current_song is None:
        print('No song is playing.')
        return
    try:
        if self.current_song['source'] == 'migu':
            # migu results carry no duration information
            total_time = ''
        else:
            total_time = '/' + self.current_song['interval']
        print('Playing {} - {} {}{}'.format(
            self.current_song['song_name'],
            self.current_song['singers'],
            convert_interval(int(self.player._get_property('time-pos'))),
            total_time))
    except Exception:
        # narrowed from a bare except: the player may have no
        # 'time-pos' yet, or playback may already have ended
        print('No song is playing.')
def do_cs(self, arg):
    """Switch the active search source.

    `arg` must be one of 'qq', 'netease' or 'migu'; an empty arg
    keeps the current source unchanged.
    """
    if arg == '':
        # no argument given: stay on the current api
        return
    # map each source name to its api factory and confirmation text
    sources = {
        'qq': (QQMusicApi, 'Source is changed to qq music.'),
        'netease': (NeteaseCloudMusicAPI,
                    'Source is changed to netease cloud music.'),
        'migu': (MiguMusicAPI, 'Source is changed to migu music.'),
    }
    if arg in sources:
        factory, message = sources[arg]
        self.api = factory()
        print(message)
    else:
        print('Music source must be in qq, netease or migu.')
def do_s(self, arg):
    ''' print current music source '''
    # self.api is one of the *MusicApi objects; each exposes a .name
    print('Current source is \'{}\''.format(self.api.name))
def do_quit(self, arg):
    """Leave the shell; returning True stops cmd.Cmd's cmdloop."""
    print('Wish you good luck.')
    return True
def do_bye(self, arg):
    """Alias for the quit command."""
    return self.do_quit(arg)
def do_exit(self, arg):
    """Alias for the quit command."""
    return self.do_quit(arg)
def default(self, arg):
    """cmd.Cmd hook: fallback for unrecognised commands."""
    print('Command not defined, use help for more information.')
def _get_playlist_info(self):
    """Refresh self.playlist with the playlist names found on disk.

    Each file under LIST_DIR is one playlist; the name is the file
    name without its extension.
    """
    self.LIST_DIR = 'userdata/playlist'
    self.playlist.clear()
    for file_path in os.listdir(self.LIST_DIR):
        # strip only the extension (was split('.')[0], which truncated
        # playlist names that themselves contain a dot)
        playlist_name = os.path.splitext(file_path)[0]
        self.playlist.append(playlist_name)
def _show_search(self):
    ''' show first 10 search result from `list` self.search_result '''
    # header row: index, song, singer, album, duration
    # (_rpad pads by display width so CJK columns line up)
    print('{}{}{}{}{}'.format(
        self._rpad('', 4),
        self._rpad('歌曲', 30),
        self._rpad('歌手', 20),
        self._rpad('专辑', 30),
        self._rpad('时长', 10)))
    print('-'*100)
    index = 1
    for result in self.search_result['content']:
        # qq/netease return structured singer lists that need flattening;
        # migu already provides a plain string
        if self.search_result['source'] != 'migu':
            singers = convert_singerlist(result['singer_list'])
        else:
            singers = result['singer_list']
        print('{}{}{}{}{}'.format(
            self._rpad(str(index)+'.', 4),
            self._rpad(result['song_name'], 30),
            self._rpad(singers, 20),
            self._rpad(result['album_name'], 30),
            self._rpad(convert_interval(result['interval']), 10)))
        index += 1
        if index == 11:  # show only ten results
            break
    print('-'*100)
def _lpad(self, text, length, padding=' '):
    ''' left pad for printing, not used
    wcswidth measures display width (CJK chars count double)
    '''
    return padding * max(0, (length-wcswidth(text))) + text
def _rpad(self, text, length, padding=' '):
    ''' right pad for printing
    if len(text) is larger than length, shorten the text
    '''
    # NOTE(review): the <= (rather than <) truncates even when the text
    # exactly fills `length`, always leaving at least one padding column
    # between table cells -- presumably intentional; confirm
    while length - wcswidth(text) <= 0:
        text = text[0: len(text)-1]
    return text + padding * max(0, (length-wcswidth(text)))
# Script entry point: run the interactive music shell (cmd.Cmd loop).
if __name__ == '__main__':
    MusicShell().cmdloop()
|
server__handle_stdin__input.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'ipetrash'
import logging
import time
from threading import Thread
from flask import Flask
app = Flask(__name__)
logging.basicConfig(level=logging.DEBUG)
text = "Hello World!"
def go():
    """Console loop: repeatedly replace the served text with stdin input.

    Runs in a background thread next to the flask server; whatever is
    typed becomes the new response body of the "/" route.
    """
    # give flask a moment to print its startup banner first
    time.sleep(2)
    print('\n')
    global text
    while True:
        # fixed prompt typo: "New next" -> "New text"
        text = input(f'Current text is "{text}". New text: ')
@app.route("/")
def index():
    """Serve the current value of the module-global `text`."""
    return text
if __name__ == '__main__':
    # NOTE(review): the thread is not a daemon, so the process keeps
    # running until the input() loop is killed -- confirm this is intended
    thread = Thread(target=go)
    thread.start()
    # app.debug = True
    # Localhost
    # port=0 -- random free port
    # app.run(port=0)
    app.run(port=5000)
    # # Public IP
    # app.run(host='0.0.0.0')
|
multiprocessing_test.py | #!usr/bin/python
# -*- coding:utf8 -*-
# import os
# import time
# # fork只能用于linux下面
# pid = os.fork()
# print("bobby")
# if pid == 0:
# print('子进程 {}, 父进程是: {}'.format(os.getpid(), os.getppid()))
# else:
# print('我是父进程: {}'.format(pid))
#
# time.sleep(2)
from concurrent.futures import ProcessPoolExecutor
import multiprocessing
# 多进程编程
import time
def get_html(n):
    """Simulated page fetch: block for `n` seconds, report, echo `n` back."""
    delay = n
    time.sleep(delay)
    print("sub_progress success")
    return delay
# class MyProgress(multiprocessing.Process):
# def run(self):
if __name__ == "__main__":
    progress = multiprocessing.Process(target=get_html, args=(2, ))
    # not started yet, so the pid printed here is None
    print(progress.pid)
    progress.start()
    print(progress.pid)
    progress.join()
    print('main progress end')
    # use a pool of worker processes
    pool = multiprocessing.Pool(multiprocessing.cpu_count())
    # result = pool.apply_async(get_html, args=(3,))
    #
    # # wait for all tasks to finish
    # pool.close()
    # pool.join()
    #
    # print(result.get())
    # imap
    # for result in pool.imap(get_html, [1,5,3]):
    #     print("{} sleep success".format(result))
    # imap_unordered yields results in completion order, not input order
    for result in pool.imap_unordered(get_html, [1,5,3]):
        print("{} sleep success".format(result))
|
presenter_agent.py | """
@Date: 2020-12-09 23:28:01
@LastEditTime: 2020-12-20 22:59:23
@FilePath: /colorization_video_python/atlas_utils/presenteragent/presenter_agent.py
# !/usr/bin/env python
# -*- coding:utf-8 -*-
"""
import time
#from threading import Thread
import threading
import os
import sys
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
from ..acl_logger import log_error, log_info
from .socket_client import AgentSocket
from . import presenter_message as pm
from . import presenter_datatype as datatype
class PresenterAgent(object):
    """
    Connection process: owns the socket to the presenter server and the
    background heartbeat that keeps the channel alive.
    """
    def __init__(self, server_ip, port):
        self.socket = AgentSocket(server_ip, port)
        # set True by exit(); checked by the heartbeat loop
        self._closed = False

    def connect_server(self):
        """
        connect server; returns the socket's connect status code
        """
        return self.socket.connect()

    def start_heard_beat_thread(self):
        """
        start heard beat thread
        """
        self.heart_beat_thread = threading.Thread(target=self._keep_alive)
        self.heart_beat_thread.start()

    def _keep_alive(self):
        """
        keep alive and judge whether to quit: send a heartbeat every
        2 seconds until the agent is closed
        """
        msg = pm.heartbeat_message()
        while True:
            if self._closed:
                log_error("Heard beat thread exit")
                break
            self.socket.send_msg(msg)
            time.sleep(2)

    def exit(self):
        """
        close socket
        """
        # flag the shutdown *before* closing the socket so the heartbeat
        # thread cannot attempt a send on an already-closed socket
        # (the original closed first, leaving a small race window)
        self._closed = True
        self.socket.close()
def StartPresenterAgent(msg_queue, server_ip, port, open_status, data_respone_counter):
    """
    Connection status and response.
    Runs the agent loop: connect, forward queued frames to the server,
    and track channel/image responses in the shared status values.
    """
    agent = PresenterAgent(server_ip, port)
    ret = agent.connect_server()
    if ret:
        log_error("Connect server failed, ret = %d " % ret)
        return
    open_status.value = datatype.STATUS_CONNECTED
    while True:
        # blocks until the producer process enqueues data
        data = msg_queue.get()
        if open_status.value == datatype.STATUS_EXITING:
            open_status.value = datatype.STATUS_EXITTED
            agent.exit()
            break
        if data:
            agent.socket.send_msg(data)
        msg_name, msg_body = agent.socket.recv_msg()
        if (msg_name is None) or (msg_body is None):
            log_error("Recv invalid message, message name %s " % msg_name)
            continue
        # first response after connecting opens the channel and starts
        # the heartbeat thread
        if ((open_status.value == datatype.STATUS_CONNECTED) and
                pm.is_open_channel_response(msg_name)):
            log_info("Received open channel respone")
            open_status.value = datatype.STATUS_OPENED
            agent.start_heard_beat_thread()
            log_info("presenter agent change connect_status to ", open_status.value)
        # count acknowledged image frames once the channel is open
        if ((open_status.value == datatype.STATUS_OPENED) and
                pm.is_image_frame_response(msg_name)):
            data_respone_counter.value += 1
            #log_info("send ok ", data_respone_counter.value)
|
test_event_log.py | import os
import sys
import tempfile
import traceback
import pytest
import sqlalchemy
from dagster.core.errors import DagsterEventLogInvalidForRun
from dagster.core.storage.event_log import (
ConsolidatedSqliteEventLogStorage,
InMemoryEventLogStorage,
SqlEventLogStorageMetadata,
SqlEventLogStorageTable,
SqliteEventLogStorage,
)
from dagster.core.storage.sql import create_engine
from dagster.seven import multiprocessing
from .utils.event_log_storage import TestEventLogStorage
class TestInMemoryEventLogStorage(TestEventLogStorage):
    """Run the shared event-log suite against the in-memory backend."""
    __test__ = True

    @pytest.fixture(scope="function", name="storage")
    def event_log_storage(self):  # pylint: disable=arguments-differ
        return InMemoryEventLogStorage()
class TestSqliteEventLogStorage(TestEventLogStorage):
    """Run the shared suite against the per-run sharded sqlite backend,
    plus corruption and concurrency cases specific to it."""
    __test__ = True

    @pytest.fixture(scope="function", name="storage")
    def event_log_storage(self):  # pylint: disable=arguments-differ
        # make the temp dir in the cwd since default temp roots
        # have issues with FS notif based event log watching
        with tempfile.TemporaryDirectory(dir=os.getcwd()) as tmpdir_path:
            storage = SqliteEventLogStorage(tmpdir_path)
            try:
                yield storage
            finally:
                storage.dispose()

    def test_filesystem_event_log_storage_run_corrupted(self, storage):
        # URL begins sqlite:///
        # pylint: disable=protected-access
        # overwrite the shard's db file with garbage bytes
        with open(os.path.abspath(storage.conn_string_for_shard("foo")[10:]), "w") as fd:
            fd.write("some nonsense")
        with pytest.raises(sqlalchemy.exc.DatabaseError):
            storage.get_logs_for_run("foo")

    def test_filesystem_event_log_storage_run_corrupted_bad_data(self, storage):
        # valid sqlite schema, but event rows that do not deserialize
        SqlEventLogStorageMetadata.create_all(create_engine(storage.conn_string_for_shard("foo")))
        with storage.run_connection("foo") as conn:
            event_insert = (
                SqlEventLogStorageTable.insert().values(  # pylint: disable=no-value-for-parameter
                    run_id="foo", event="{bar}", dagster_event_type=None, timestamp=None
                )
            )
            conn.execute(event_insert)
        with pytest.raises(DagsterEventLogInvalidForRun):
            storage.get_logs_for_run("foo")
        SqlEventLogStorageMetadata.create_all(create_engine(storage.conn_string_for_shard("bar")))
        with storage.run_connection("bar") as conn:
            event_insert = (
                SqlEventLogStorageTable.insert().values(  # pylint: disable=no-value-for-parameter
                    run_id="bar", event="3", dagster_event_type=None, timestamp=None
                )
            )
            conn.execute(event_insert)
        with pytest.raises(DagsterEventLogInvalidForRun):
            storage.get_logs_for_run("bar")

    def cmd(self, exceptions, tmpdir_path):
        # subprocess body: any exception is shipped back through the queue
        storage = SqliteEventLogStorage(tmpdir_path)
        try:
            storage.get_logs_for_run_by_log_id("foo")
        except Exception as exc:  # pylint: disable=broad-except
            exceptions.put(exc)
            exc_info = sys.exc_info()
            traceback.print_tb(exc_info[2])

    def test_concurrent_sqlite_event_log_connections(self, storage):
        # five processes read the same shard concurrently; none may fail
        tmpdir_path = storage._base_dir  # pylint: disable=protected-access
        exceptions = multiprocessing.Queue()
        ps = []
        for _ in range(5):
            ps.append(multiprocessing.Process(target=self.cmd, args=(exceptions, tmpdir_path)))
        for p in ps:
            p.start()
        j = 0
        for p in ps:
            p.join()
            j += 1
        assert j == 5
        excs = []
        while not exceptions.empty():
            excs.append(exceptions.get())
        assert not excs, excs
class TestConsolidatedSqliteEventLogStorage(TestEventLogStorage):
    """Run the shared event-log suite against the single-db sqlite backend."""
    __test__ = True

    @pytest.fixture(scope="function", name="storage")
    def event_log_storage(self):  # pylint: disable=arguments-differ
        # make the temp dir in the cwd since default temp roots
        # have issues with FS notif based event log watching
        with tempfile.TemporaryDirectory(dir=os.getcwd()) as tmpdir_path:
            storage = ConsolidatedSqliteEventLogStorage(tmpdir_path)
            try:
                yield storage
            finally:
                storage.dispose()
|
server_main.py | """server_main module.
This module contains classes, containing server code.
Classes and their functions:
* GameServer - main loop, listening socket event handler, synchronization
event handlers.
* Resources - information about game resource pack.
* ResourceServer - HTTP server, used for resources downloading.
* HTTPHandler - HTTP request handler, used by ResourceServer.
* Player - information about player, player socket event handlers.
* PlayerList - Player objects storage, player state check.
* CLI - command line interface.
* GameState - global server state.
"""
import threading
import readline
from select import select
import socket
from http.server import ThreadingHTTPServer, SimpleHTTPRequestHandler
from random import shuffle, randrange
import os
import gettext
import json
from .monitor import Monitor
from .connection import connection
from . import environment as env
class Resources(Monitor):
    """Information about game resources.
    Arguments:
    * res_name (str): resources version.
    * res_link (str): link for resources downloading.
    * logger (logging.Logger): Logger object.
    Object attributes:
    * name (str): resources version name.
    * link (str): resources version address.
    * configuration(dict): number of cards in each card set.
    """
    def __init__(self, res_name, res_link, logger):
        Monitor.__init__(self)
        self.name = res_name
        self.link = res_link
        self.logger = logger
        self.configuration = None
        path_prefix = os.path.dirname(os.path.abspath(__file__))
        conf_path = path_prefix + "/resources/sets.json"
        ver_path = path_prefix + "/resources/version.json"
        # both files are optional: a load failure is logged and the
        # defaults above (configuration=None, name=res_name) are kept
        try:
            with open(conf_path) as conf:
                self.configuration = json.load(conf)
        except Exception:
            self.logger.error("failed to load file resources/sets.json.")
        try:
            with open(ver_path) as ver:
                self.name = json.load(ver)
        except Exception:
            self.logger.error("failed to load file resources/version.json.")
class HTTPHandler(SimpleHTTPRequestHandler):
    """Handler for HTTP requests."""
    # NOTE(review): `logger` is attached to this class externally
    # (see ResourceServer.main) before any request is handled
    def __init__(self, *args, **kwargs):
        SimpleHTTPRequestHandler.__init__(self, *args, **kwargs)

    def log_message(self, format, *args):
        """Log message from request handler.
        Arguments:
        * format (str): format string.
        * args (list): arguments for format string.
        """
        self.logger.info("HTTP: " + (format % args))
    # route error logging through the same logger
    log_error = log_message
class ResourceServer(Monitor):
    """Server for resource pack downloading.
    Arguments:
    * logger (Logger): logging.Logger object.
    Object attributes:
    * logger (Logger): logger for handling log messages.
    * server (ThreadingHTTPServer): server object.
    * thread (Thread): thread for main server function.
    * active (bool): server state.
    """
    def __init__(self, logger):
        Monitor.__init__(self)
        self.logger = logger
        self.server = None
        self.thread = None
        self.active = False

    def main(self):
        """Main server function.
        Server initialization and start.
        """
        ip_addr = env.get_ip()
        port = env.get_res_port()
        handler = HTTPHandler
        # the handler class needs the logger before requests arrive
        handler.logger = self.logger
        file_path = os.path.dirname(os.path.abspath(__file__)) +\
            "/resources/cards"
        # the lambda injects `directory` so the handler serves card files
        self.server = ThreadingHTTPServer((ip_addr, port),
                                          (lambda *args, **kwargs:
                                           handler(*args,
                                                   directory=file_path,
                                                   **kwargs)))
        # blocks until shutdown() is called from stop()
        self.server.serve_forever(poll_interval=0.5)
        self.server.server_close()

    def start(self):
        """Start main server function in new thread."""
        self.thread = threading.Thread(target=ResourceServer.main,
                                       args=(self,))
        self.thread.start()
        self.active = True

    def stop(self):
        """Stop server thread."""
        self.server.shutdown()
        self.thread.join()
        self.active = False
class GameState(Monitor):
    """Global, shared server state.

    Arguments:
    * initial_state (str): state for object initialization.
    Object attributes:
    * state (str): current server state.
    * card_set (str): current card set name.
    """
    def __init__(self, initial_state):
        Monitor.__init__(self)
        # start in the caller-supplied state; card set "0" is the
        # default until the master picks one
        self.state = initial_state
        self.card_set = "0"
class Player(Monitor):
    """Player class is used for player information handling.
    Arguments:
    * sock (socket): player socket.
    * status (str): player status (MASTER or PLAYER).
    * res (Resources): game resource pack information.
    * game_st (GameState): global server state.
    * plist (PlayerList): player list.
    * number (int): player number.
    * logger (logging.Logger): Logger object.
    Object attributes:
    * player_socket (socket): socket, connected to player.
    * status (str): player status: MASTER or PLAYER.
    * state (str): current player state.
    * conn (connection): connection object, containing player_socket.
    * valid (bool): error indicator.
    * name (str): player name.
    * res (Resources): resource pack information.
    * game_st (GameState): server state.
    * score (int): current player score.
    * plist (PlayerList): PlayerList object.
    * get_broadcast (bool): indicates, will this player receive broadcast
      messages or not.
    * cards (list): list of player's cards.
    * number (int): player's number.
    * current_card (int): current player card.
    * selected_card (int): current selected card.
    * buffer (list): list of messages waiting to be sent.
    * has_buffer (bool): is True if buffer is not empty.
    * has_turn (bool): is True if player is leader now.
    """
    def __init__(self, sock, status, res, game_st, plist, number, logger):
        Monitor.__init__(self)
        self.player_socket = sock
        self.status = status
        self.state = "VER_CHECK"
        self.conn = connection(self.player_socket)
        self.valid = True
        self.name = "Player"
        self.res = res
        self.game_st = game_st
        self.score = 0
        self.plist = plist
        self.get_broadcast = False
        self.cards = list()
        self.number = number
        self.logger = logger
        self.current_card = None
        self.selected_card = None
        self.buffer = list()
        self.has_buffer = False
        self.has_turn = False

    def __hash__(self):
        # players are identified by their unique sequence number
        return self.number

    def stop(self):
        """Disconnect player."""
        self.valid = False
        if self.conn is not None:
            self.conn.close()
            self.conn = None

    def verify(self):
        """Check if there is no errors in Player object."""
        return self.valid and self.conn.status

    def send_message(self, data):
        """Put message into output buffer.
        Arguments:
        * data (str): message.
        """
        # actual sending happens later, when select() reports writability
        self.buffer.append(data)
        self.has_buffer = True

    def push_buffer(self):
        """Send all queued messages."""
        while len(self.buffer) > 0:
            self.conn.send(self.buffer.pop(0))
        self.has_buffer = False

    # Player states:
    #
    #      |
    #      v
    #  VER_CHECK
    #      v      [Send version info][hndl]
    #  VER_WAIT
    #      v      [Get OK <name>][msg]
    #  START_WAIT
    #      v      [game_state -> GAME][hndl][msg]
    #  BEGIN_SYNC
    #      v      [All reached sync][main]
    #  READY_WAIT
    #      v      [Get READY][msg]
    #  TURN_SYNC <---------------------------------\
    #      v      [All reached sync][main]         |
    #  WAIT_ASSOC                                  |
    #      v      [Get assoc, sent to all][msg]    |
    #  WAIT_SELF_CARD                              |
    #      v      [Get self card][hndl][msg]       |
    #  SELF_SYNC                                   |
    #      v      [All reached sync][main]         |
    #  WAIT_VOTE                                   |
    #      v      [Get vote card][hndl][msg]       |
    #  VOTE_SYNC                                   |
    #      v      [All reached sync][main]         |
    #  WAIT_NEXT_TURN                              |
    #      v      [Get NEXT_TURN][msg]             |
    #  SYNC_NEXT_TURN                              |
    #      |      [All reached sync][main]         |
    #      |----------------------------------------/
    #      v
    def handle_message(self):
        """Receive and handle a message.
        Performs player state changes.
        """
        res = self.conn.get().split()
        if self.state == "VER_WAIT":
            # expect "OK <name>" in reply to the VERSION message
            if len(res) != 2 or res[0] != "OK":
                self.valid = False
                self.log_message("version check failed")
                return
            self.name = res[1]
            self.get_broadcast = True
            self.plist.broadcast("#PLAYER_LIST")
            self.state = "START_WAIT"
        elif self.state == "START_WAIT":
            # only the MASTER may start the game
            if self.status != "MASTER":
                self.valid = False
                self.log_message("receive START_GAME message")
                return
            if len(res) != 2 or res[0] != "START_GAME":
                self.valid = False
                self.log_message("expected START_GAME message")
                return
            self.game_st.state = "GAME"
            self.game_st.card_set = res[1]
            self.state = "BEGIN_SYNC"
        elif self.state == "READY_WAIT":
            if len(res) != 1 or res[0] != "READY":
                self.valid = False
                self.log_message("did not receive READY")
                return
            self.state = "TURN_SYNC"
        elif self.state == "WAIT_ASSOC":
            # only the current leader may send the association
            if not self.has_turn:
                self.valid = False
                self.log_message("receive association from wrong player")
                return
            if len(res) < 3 or res[0] != "TURN" or not res[1].isnumeric():
                # malformed association: roll everyone back to TURN_SYNC
                self.valid = False
                for player in self.plist:
                    player.state = "TURN_SYNC"
                self.log_message("expected association message")
                return
            self.current_card = int(res[1])
            self.selected_card = -1
            for player in self.plist:
                player.state = "WAIT_SELF_CARD"
            self.plist.broadcast("ASSOC " + " ".join(res[2:]))
        elif self.state == "WAIT_SELF_CARD":
            # the leader already chose a card; only others answer here
            if not self.has_turn:
                if len(res) < 2 or res[0] != "CARD" or not res[1].isnumeric():
                    self.valid = False
                    self.log_message("expected CARD message")
                    return
                self.current_card = int(res[1])
                self.plist.broadcast("#SELF", self)
                self.state = "SELF_SYNC"
            else:
                self.valid = False
                self.log_message("received unexpected message")
        elif self.state == "WAIT_VOTE":
            # the leader does not vote
            if not self.has_turn:
                if len(res) < 2 or res[0] != "CARD" or not res[1].isnumeric():
                    self.valid = False
                    self.log_message("expected CARD message")
                    return
                self.selected_card = int(res[1])
                self.state = "VOTE_SYNC"
            else:
                self.valid = False
                self.log_message("received unexpected message")
        elif self.state == "WAIT_NEXT_TURN":
            if len(res) != 1 or res[0] != "NEXT_TURN":
                self.valid = False
                self.log_message("expected NEXT_TURN message")
                return
            self.state = "SYNC_NEXT_TURN"
        else:
            self.log_message("error state reached: " + self.state)
            self.valid = False

    def handle_state(self):
        """Player state handling.
        Check player and server states and change player state if necessary.
        """
        if self.state == "VER_CHECK":
            self.send_message("VERSION %s %s %s %s" % (str(self.number),
                              self.status, self.res.name, self.res.link))
            self.state = "VER_WAIT"
        # the game started while this player was waiting for the master
        if self.state == "START_WAIT" and self.game_st.state == "GAME":
            self.state = "BEGIN_SYNC"
        # too late: the game started before the version handshake finished
        if self.state == "VER_WAIT" and self.game_st.state == "GAME":
            self.valid = False
        # the leader skips the self-card and vote phases
        if self.state == "WAIT_SELF_CARD" and self.has_turn:
            self.state = "SELF_SYNC"
            self.plist.broadcast("#SELF", self)
        if self.state == "WAIT_VOTE" and self.has_turn:
            self.state = "VOTE_SYNC"

    def log_message(self, message):
        """Print message into log file.
        Arguments:
        * message (str): logged message.
        """
        self.logger.info("Player %d,%s: %s.", self.number, self.name, message)
class CLI(Monitor):
    """Game server command line interface.
    Arguments:
    * players (PlayerList): player list.
    * server (GameServer): server object.
    * game_st (GameState): server state.
    Object attributes:
    * players (PlayerList): player list.
    * server (GameServer): server object.
    * game_st (GameState): server state.
    * thread (Thread): Thread object, containing information about
      command line interface thread.
    * work (bool): flag, indicating operation of command line interface
      main loop.
    """
    def __init__(self, players, server, game_st):
        Monitor.__init__(self)
        self.players = players
        self.server = server
        self.game_st = game_st
        # readline provides history and tab completion for input()
        readline.set_completer(self.completer)
        readline.set_completer_delims("")
        gettext.install("server", os.path.dirname(os.path.abspath(__file__)),
                        names=("ngettext",))
        readline.parse_and_bind("tab: complete")
        self.thread = None
        self.work = False

    def start(self):
        """Start thread for command line interface."""
        self.thread = threading.Thread(target=CLI.main, args=(self,))
        self.thread.start()

    def stop(self):
        """Stop command line interface thread."""
        self.work = False
        self.thread.join()

    def main(self):
        """Main command line interface function."""
        self.work = True
        print("CLI started")
        while self.work:
            try:
                # prompt shows the current server state in green
                cmdline = input("\x1b[1;32m[%s]\x1b[0m$ " %
                                self.game_st.state).split()
            except Exception as ex:
                print("error:", str(ex))
                continue
            if not self.work:
                break
            if len(cmdline) == 0:
                continue
            try:
                if cmdline[0] == "help":
                    self.comm_help()
                elif cmdline[0] == "players":
                    self.comm_players()
                elif cmdline[0] == "start":
                    self.comm_start(cmdline)
                elif cmdline[0] == "stop":
                    self.comm_stop()
                elif cmdline[0] == "end":
                    self.comm_end()
                else:
                    print("error: unknown command")
            except Exception as ex:
                print("error: " + str(ex))

    def completer(self, text, state):
        """Function for command completion.
        Arguments:
        * text (str): current input buffer.
        * state (int): match number.
        """
        commands = ["help", "players", "stop", "end", "start "]
        # "start " is completed with the known card set names
        if text.startswith("start "):
            if self.server.resources is not None and\
                    self.server.resources.configuration is not None:
                commands.clear()
                for i in self.server.resources.configuration:
                    commands.append("start %s" % i)
        for i in commands:
            if i.startswith(text):
                if state == 0:
                    return i
                state = state - 1
        return None

    def comm_help(self):
        """Execute 'help' command."""
        print("%s:\n\nhelp\nplayers\nstart <card set>\nend\nstop" %
              _("commands"))

    def comm_players(self):
        """Execute 'players' command."""
        if self.players is not None:
            self.players.acquire()
            print(len(self.players), ngettext("player", "players",
                                              int(len(self.players))))
            out = list()
            str_num, str_name, str_score = _("number"), _("name"), _("score")
            # column widths grow to fit the longest value in each column
            m_len = [len(str_num), len(str_name), len(str_score)]
            for i in self.players:
                out.append((str(i.number), i.name, str(i.score),
                            i.status == "MASTER"))
                m_len = [max(m_len[0], len(str(i.number))),
                         max(m_len[1], len(i.name)),
                         max(m_len[2], len(str(i.score)))]
            if len(out) > 0:
                print(str_num + " " * (m_len[0] - len(str_num)),
                      str_name + " " * (m_len[1] - len(str_name)),
                      str_score + " " * (m_len[2] - len(str_score)))
            for i in out:
                string = ""
                for idx in range(3):
                    string += i[idx] + " " * (m_len[idx] - len(i[idx]) + 1)
                if i[3]:
                    string += _("master")
                print(string.strip())
            self.players.release()
        else:
            print(_("error: player list is not available"))

    def comm_start(self, cmdline):
        """Execute 'start' command.
        Arguments:
        * cmdline (list): split command line.
        """
        # starting is only allowed while players are still connecting
        if self.game_st.state != "PLAYER_CONN":
            return
        if len(cmdline) == 2:
            self.game_st.card_set = cmdline[1]
            self.game_st.state = "GAME"
            print(_("Starting game."))
        else:
            print(_("error: expected start <card set>"))

    def comm_end(self):
        """Execute 'end' command."""
        if self.game_st.state == "GAME":
            self.players.acquire()
            # clearing all cards makes the game loop finish the session
            for player in self.players:
                player.cards.clear()
            self.server.cards.clear()
            self.players.release()
        else:
            print(_("error: game is not started"))

    def comm_stop(self):
        """Execute 'stop' command."""
        self.game_st.state = "SHUTDOWN"
        print(_("exit"))
        self.work = False
class PlayerList(Monitor):
    """Player objects container.
    Arguments:
    * logger (logging.Logger): Logger object.
    * game_st (GameState): server state.
    Object attributes:
    * players (list): Player objects list.
    * sockets (dict): association between sockets and their players.
    * game_st (GameState): server state.
    * sem (Semaphore): semaphore for operations on this object.
    * seq_number (int): number for new player.
    * have_master (bool): is True if there is MASTER player in the list.
    * logger (logging.Logger): Logger object.
    """
    def __init__(self, logger, game_st):
        Monitor.__init__(self)
        self.players = list()
        self.sockets = dict()
        self.logger = logger
        self.game_st = game_st
        self.sem = threading.Semaphore(1)
        self.have_master = False
        self.seq_number = 0

    def __iter__(self):
        # iteration silently skips players that were marked invalid
        for player in self.players:
            if player.valid:
                yield player

    def __len__(self):
        return len(tuple(iter(self)))

    def acquire(self):
        """Acquire semaphore."""
        self.sem.acquire()

    def release(self):
        """Release semaphore."""
        self.sem.release()

    def check(self):
        """Check for Player objects to be removed."""
        for player in self.players:
            if not player.verify():
                player.push_buffer()
                player.stop()
                self.sockets.pop(player.player_socket)
                # losing the master before the game starts aborts the session
                if player.status == "MASTER" and\
                        self.game_st.state == "PLAYER_CONN":
                    self.game_st.state = "ERROR"
                self.players.remove(player)
                if self.game_st.state == "PLAYER_CONN":
                    self.broadcast("#PLAYER_LIST")
                # removing while iterating: stop here, next call rescans
                break

    def next_player(self, player):
        """Get next player in player sequence.
        Arguments:
        * player (Player): current player.
        """
        p_idx = 0
        for i in range(len(self.players)):
            if self.players[i] is player:
                p_idx = i
                break
        # scan at most two full rounds for the next valid player
        iteration = 0
        while iteration < len(self.players) * 2:
            p_idx += 1
            iteration += 1
            if p_idx >= len(self.players):
                p_idx = 0
            if self.players[p_idx].valid:
                return self.players[p_idx]
        return None

    def stop(self):
        """Disconnect all players and delete all Player objects."""
        for i in range(len(self.players)):
            self.players[i].push_buffer()
            self.players[i].stop()
        self.players.clear()
        self.sockets.clear()

    def add_player(self, res, sock):
        """Add new player to PlayerList.
        Arguments:
        * res (Resources): resource pack information.
        * sock (socket): player socket.
        """
        # the first player to connect becomes the MASTER
        new_player = Player(sock, "PLAYER" if self.have_master else "MASTER",
                            res, self.game_st, self,
                            self.seq_number, self.logger)
        self.players.append(new_player)
        self.sockets[sock] = new_player
        self.have_master = True
        self.seq_number += 1

    def broadcast(self, data, info=None):
        """Send broadcast message.
        Arguments:
        * data (str): message text.
        * info (Player): additional info for message.
        """
        # '#'-prefixed pseudo-messages are expanded into protocol text
        if data == "#PLAYER_LIST":
            data = "PLAYER_LIST " + ",".join([str(i.number) + ";" + i.name
                                              for i in self
                                              if i.get_broadcast])
        if data == "#SELF":
            data = "PLAYER " + str(info.number)
        for i in self:
            if i.get_broadcast:
                i.send_message(data)
class GameServer:
"""Main game server class.
Arguments:
* listening_socket (socket): listening socket, that will be used
by server.
* logger (logging.Logger): Logger object.
Object attributes:
* logger (logging.Logger): Logger object.
* game_state (GameState): server state.
* players (PlayerList): player list.
* cards (list): current list of cards.
* resource_server (ResourceServer): HTTP server for resources
downloading.
* resources (Resources): resource pack information.
* cli (CLI): command line interface object.
"""
def __init__(self, listening_socket, logger):
    # most attributes stay None/empty until prepare() builds a session
    self.listening_socket = listening_socket
    self.logger = logger
    self.game_state = None
    self.players = None
    self.cards = list()
    self.resource_server = None
    self.resources = None
    self.cli = None
    # player whose turn it is (the "leader") during a game
    self.current_player = None
    # incremented after every finished session; used in log lines
    self.session_id = 0
def main(self):
    """Game server main function.
    Contains main loop and handlers' calls.
    """
    self.game_state = GameState("PLAYER_CONN")
    self.cli = CLI(None, self, self.game_state)
    self.cli.start()
    # outer loop: one iteration per game session
    while self.game_state.state != "SHUTDOWN":
        self.prepare()
        work = True
        self.logger.info("Starting session %d." % self.session_id)
        while work:
            self.check_resource_server()
            # any state other than PLAYER_CONN/GAME ends the session
            if self.game_state.state != "PLAYER_CONN" and\
                    self.game_state.state != "GAME":
                self.logger.info("Detected state %s, exit.",
                                 self.game_state.state)
                work = False
                continue
            if len(self.players) == 0 and\
                    self.game_state.state != "PLAYER_CONN":
                self.logger.info("No players left in game, exit.")
                work = False
                continue
            # wait up to 0.5s for readable sockets / writable buffers
            rlist = [player.player_socket for player in self.players]
            rlist.append(self.listening_socket)
            wlist = [player.player_socket for player in self.players
                     if player.has_buffer]
            flows = select(rlist, wlist, list(), 0.5)
            self.players.acquire()
            # Push buffers
            for flow in flows[1]:
                player = self.players.sockets[flow]
                player.conn.send(player.buffer.pop(0))
                player.has_buffer = len(player.buffer) > 0
            # Handle input
            for flow in flows[0]:
                if flow is self.listening_socket:
                    self.accept_connection()
                else:
                    self.players.sockets[flow].handle_message()
            # Do player events according to state
            for player in self.players:
                player.handle_state()
            # Global operations
            self.global_operations()
            self.players.check()
            self.players.release()
        self.players.stop()
        self.logger.info("Closing session %d." % self.session_id)
        self.session_id += 1
    self.cli.stop()
def get_sync_state(self):
    """Return the shared state name if every player is in it, else None.

    An empty player list also yields None.
    """
    states = {player.state for player in self.players}
    return states.pop() if len(states) == 1 else None
def prepare(self):
    """Initialize components before players' connection."""
    self.game_state.state = "PLAYER_CONN"
    self.players = PlayerList(self.logger, self.game_state)
    self.resources = Resources(env.get_res_name(), env.get_res_link(),
                               self.logger)
    # the CLI keeps a reference to the fresh player list
    self.cli.players = self.players
    self.resource_server = ResourceServer(self.logger)
    self.current_player = None
def accept_connection(self):
    """Accept a new connection.
    Accepts a new connection and checks if the player can be added
    to the game.
    """
    new_conn = self.listening_socket.accept()
    # players may join only before the game starts, up to 7 of them
    if self.game_state.state == "PLAYER_CONN":
        if len(self.players) < 7:
            self.players.add_player(self.resources, new_conn[0])
            return
    # game already running or table full: refuse the connection
    self.logger.info("Disconnected: %s.", str(new_conn[1]))
    new_conn[0].shutdown(socket.SHUT_RDWR)
    new_conn[0].close()
def begin_game(self):
    """Setup before game start."""
    try:
        card_num = self.resources.configuration[self.game_state.card_set]
    except Exception:
        # unknown card set (or no configuration): fall back to 50 cards
        card_num = 50
        self.logger.error("no configuration entry for '%s'." %
                          self.game_state.card_set)
    self.cards = list(range(card_num))
    shuffle(self.cards)
    self.logger.info("Card set: %s, number of cards: %s." %
                     (self.game_state.card_set, str(card_num)))
    # trim the deck depending on the player count
    # NOTE(review): 2/23/26 look like tuned game-balance constants --
    # confirm against the game rules
    if len(self.players) == 4:
        self.cards = self.cards[:len(self.cards) - 2]
    elif len(self.players) == 5:
        self.cards = self.cards[:len(self.cards) - 23]
    elif len(self.players) == 6:
        self.cards = self.cards[:len(self.cards) - 26]
    # deal six cards to each player from the top of the deck
    for player in self.players:
        player.cards = self.cards[:6]
        self.cards = self.cards[6:]
def calculate_result(self):
    """Update players' score."""
    current_card = self.current_player.current_card
    # result[p] = number of votes p's own card received
    result = {p: 0 for p in self.players}
    for i in self.players:
        for player in self.players:
            if player is not self.current_player:
                if i.current_card == player.selected_card:
                    result[i] += 1
    if result[self.current_player] == len(self.players) - 1:
        # everyone found the leader's card: leader scores nothing,
        # all other players get 3
        for player in self.players:
            if player is not self.current_player:
                player.score += 3
    else:
        if result[self.current_player] != 0:
            # some (but not all) guessed right: the leader and the
            # correct guessers each get 3
            for player in self.players:
                if player is not self.current_player:
                    if player.selected_card == current_card:
                        player.score += 3
            self.current_player.score += 3
    # everyone also scores one point per vote their own card received
    for i in self.players:
        i.score += result[i]
    def global_operations(self):
        """Check player state changes in synchronization points.

        Drives the server-side game state machine: depending on the
        synchronization condition reported by get_sync_state(), this
        starts the game, advances the turn, distributes shuffled vote
        cards, broadcasts round results, or deals replacement cards and
        ends the game.
        """
        if self.current_player is not None:
            # If the current player dropped out mid-turn, restart the turn
            # for everybody.
            if not self.current_player.verify():
                for player in self.players:
                    player.state = "TURN_SYNC"
        cond = self.get_sync_state()
        if cond == "BEGIN_SYNC":
            # All players connected and ready: deal cards, announce start.
            if len(self.players) > 0:
                self.begin_game()
                # NOTE(review): `i.get_broadcast` is accessed without being
                # called -- if it is a method (not a property) the filter is
                # always truthy; confirm against the Player class.
                player_lst = ",".join([str(i.number) + ";" + i.name
                                       for i in self.players
                                       if i.get_broadcast])
                for player in self.players:
                    player.state = "READY_WAIT"
                    player.send_message("BEGIN %s %s %s" %
                                        (self.game_state.card_set,
                                         ",".join(map(str, player.cards)),
                                         player_lst))
                # Pick a random player to take the first turn.
                self.current_player = self.players.next_player(
                    self.players.players[randrange(len(
                        self.players.players))])
            else:
                self.logger.info("Started game without players, exit.")
                self.game_state.state = "ERROR"
        elif cond == "TURN_SYNC":
            # Previous round done: hand the turn to the next player.
            self.current_player = self.players.next_player(self.current_player)
            for player in self.players:
                player.has_turn = player is self.current_player
                player.state = "WAIT_ASSOC"
            self.players.broadcast("TURN %s" %
                                   str(self.current_player.number))
        elif cond == "SELF_SYNC":
            # Everyone picked a card: publish the shuffled candidate list
            # so card ownership cannot be deduced from ordering.
            card_list = [player.current_card
                         for player in self.players]
            shuffle(card_list)
            self.players.broadcast("VOTE %s" %
                                   ",".join(map(str, card_list)))
            for player in self.players:
                player.state = "WAIT_VOTE"
        elif cond == "VOTE_SYNC":
            # All votes are in: score the round and broadcast the outcome.
            self.calculate_result()
            card_list = ["%s;%s;%s" % (str(player.number),
                                       str(player.current_card),
                                       str(player.selected_card))
                         for player in self.players]
            score_list = ["%s;%s" % (str(player.number),
                                     str(player.score))
                          for player in self.players]
            self.players.broadcast("STATUS %s %s %s" %
                                   (str(self.current_player.current_card),
                                    ",".join(card_list),
                                    ",".join(score_list)))
            for player in self.players:
                player.state = "WAIT_NEXT_TURN"
        elif cond == "SYNC_NEXT_TURN":
            # Remove each player's played card, deal replacements while the
            # deck lasts, and end the game once the hands run empty.
            for player in self.players:
                for card in player.cards:
                    if card == player.current_card:
                        player.cards.remove(card)
                        break
            if len(self.cards) >= len(self.players):
                for player in self.players:
                    player.cards.append(self.cards.pop(0))
            if len(tuple(self.players)[0].cards) > 0:
                for player in self.players:
                    player.send_message("CARDS %s" %
                                        ",".join(map(str, player.cards)))
                    player.state = "TURN_SYNC"
            else:
                self.players.broadcast("END_GAME")
                for player in self.players:
                    player.valid = False
def check_resource_server(self):
"""Check game state and start or stop resource server if necessary."""
if self.game_state.state == "PLAYER_CONN" and\
not self.resource_server.active:
self.logger.info("Start resource server.")
self.resource_server.start()
if self.game_state.state != "PLAYER_CONN" and\
self.resource_server.active:
self.logger.info("Stop resource server.")
self.resource_server.stop()
|
gdal2tiles.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# ******************************************************************************
# $Id$
#
# Project: Google Summer of Code 2007, 2008 (http://code.google.com/soc/)
# Support: BRGM (http://www.brgm.fr)
# Purpose: Convert a raster into TMS (Tile Map Service) tiles in a directory.
# - generate Google Earth metadata (KML SuperOverlay)
# - generate simple HTML viewer based on Google Maps and OpenLayers
# - support of global tiles (Spherical Mercator) for compatibility
# with interactive web maps a la Google Maps
# Author: Klokan Petr Pridal, klokan at klokan dot cz
# Web: http://www.klokan.cz/projects/gdal2tiles/
# GUI: http://www.maptiler.org/
#
###############################################################################
# Copyright (c) 2008, Klokan Petr Pridal
# Copyright (c) 2010-2013, Even Rouault <even dot rouault at mines-paris dot org>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
# ******************************************************************************
from __future__ import print_function, division
import math
from multiprocessing import Pipe, Pool, Process, Manager
import os
import tempfile
import shutil
import sys
import time
from uuid import uuid4
from xml.etree import ElementTree
from osgeo import gdal
from osgeo import osr
try:
from PIL import Image
import numpy
import osgeo.gdal_array as gdalarray
except Exception:
# 'antialias' resampling is not available
pass
__version__ = "$Id$"
# Accepted values for the --resampling, --profile and --webviewer
# command-line options of gdal2tiles.
resampling_list = ('average', 'near', 'bilinear', 'cubic', 'cubicspline', 'lanczos', 'antialias')
profile_list = ('mercator', 'geodetic', 'raster')
webviewer_list = ('all', 'google', 'openlayers', 'leaflet', 'none')
# =============================================================================
# =============================================================================
# =============================================================================
__doc__globalmaptiles = """
globalmaptiles.py
Global Map Tiles as defined in Tile Map Service (TMS) Profiles
==============================================================
Functions necessary for generation of global tiles used on the web.
It contains classes implementing coordinate conversions for:
- GlobalMercator (based on EPSG:3857)
for Google Maps, Yahoo Maps, Bing Maps compatible tiles
- GlobalGeodetic (based on EPSG:4326)
for OpenLayers Base Map and Google Earth compatible tiles
More info at:
http://wiki.osgeo.org/wiki/Tile_Map_Service_Specification
http://wiki.osgeo.org/wiki/WMS_Tiling_Client_Recommendation
http://msdn.microsoft.com/en-us/library/bb259689.aspx
http://code.google.com/apis/maps/documentation/overlays.html#Google_Maps_Coordinates
Created by Klokan Petr Pridal on 2008-07-03.
Google Summer of Code 2008, project GDAL2Tiles for OSGEO.
In case you use this class in your product, translate it to another language
or find it useful for your project please let me know.
My email: klokan at klokan dot cz.
I would like to know where it was used.
Class is available under the open-source GDAL license (www.gdal.org).
"""
MAXZOOMLEVEL = 32
class GlobalMercator(object):
    r"""
    TMS Global Mercator Profile
    ---------------------------

    Functions necessary for generation of tiles in Spherical Mercator projection,
    EPSG:3857.

    Such tiles are compatible with Google Maps, Bing Maps, Yahoo Maps,
    UK Ordnance Survey OpenSpace API, ...
    and you can overlay them on top of base maps of those web mapping applications.

    Pixel and tile coordinates are in TMS notation (origin [0,0] in bottom-left).

    What coordinate conversions do we need for TMS Global Mercator tiles::

         LatLon      <->       Meters      <->     Pixels    <->       Tile

     WGS84 coordinates   Spherical Mercator  Pixels in pyramid  Tiles in pyramid
         lat/lon            XY in meters     XY pixels Z zoom      XYZ from TMS
        EPSG:4326           EPSG:3857
         .----.              ---------               --                TMS
        /      \     <->     |       |     <->     /----/    <->      Google
        \      /             |       |           /--------/          QuadTree
         -----               ---------         /------------/
       KML, public         WebMapService         Web Clients      TileMapService

    What is the coordinate extent of Earth in EPSG:3857?

      [-20037508.342789244, -20037508.342789244, 20037508.342789244, 20037508.342789244]
      Constant 20037508.342789244 comes from the circumference of the Earth in meters,
      which is 40 thousand kilometers, the coordinate origin is in the middle of extent.
      In fact you can calculate the constant as: 2 * math.pi * 6378137 / 2.0
      $ echo 180 85 | gdaltransform -s_srs EPSG:4326 -t_srs EPSG:3857
      Polar areas with abs(latitude) bigger then 85.05112878 are clipped off.

    What are zoom level constants (pixels/meter) for pyramid with EPSG:3857?

      whole region is on top of pyramid (zoom=0) covered by 256x256 pixels tile,
      every lower zoom level resolution is always divided by two
      initialResolution = 20037508.342789244 * 2 / 256 = 156543.03392804062

    What is the difference between TMS and Google Maps/QuadTree tile name convention?

      The tile raster itself is the same (equal extent, projection, pixel size),
      there is just different identification of the same raster tile.
      Tiles in TMS are counted from [0,0] in the bottom-left corner, id is XYZ.
      Google placed the origin [0,0] to the top-left corner, reference is XYZ.
      Microsoft is referencing tiles by a QuadTree name, defined on the website:
      http://msdn2.microsoft.com/en-us/library/bb259689.aspx

    The lat/lon coordinates are using WGS84 datum, yes?

      Yes, all lat/lon we are mentioning should use WGS84 Geodetic Datum.
      Well, the web clients like Google Maps are projecting those coordinates by
      Spherical Mercator, so in fact lat/lon coordinates on sphere are treated as if
      the were on the WGS84 ellipsoid.

      From MSDN documentation:
      To simplify the calculations, we use the spherical form of projection, not
      the ellipsoidal form. Since the projection is used only for map display,
      and not for displaying numeric coordinates, we don't need the extra precision
      of an ellipsoidal projection. The spherical projection causes approximately
      0.33 percent scale distortion in the Y direction, which is not visually
      noticeable.

    How do I create a raster in EPSG:3857 and convert coordinates with PROJ.4?

      You can use standard GIS tools like gdalwarp, cs2cs or gdaltransform.
      All of the tools supports -t_srs 'epsg:3857'.

      For other GIS programs check the exact definition of the projection:
      More info at http://spatialreference.org/ref/user/google-projection/
      The same projection is designated as EPSG:3857. WKT definition is in the
      official EPSG database.

      Proj4 Text:
        +proj=merc +a=6378137 +b=6378137 +lat_ts=0.0 +lon_0=0.0 +x_0=0.0 +y_0=0
        +k=1.0 +units=m +nadgrids=@null +no_defs

      Human readable WKT format of EPSG:3857:
         PROJCS["Google Maps Global Mercator",
             GEOGCS["WGS 84",
                 DATUM["WGS_1984",
                     SPHEROID["WGS 84",6378137,298.257223563,
                         AUTHORITY["EPSG","7030"]],
                     AUTHORITY["EPSG","6326"]],
                 PRIMEM["Greenwich",0],
                 UNIT["degree",0.0174532925199433],
                 AUTHORITY["EPSG","4326"]],
             PROJECTION["Mercator_1SP"],
             PARAMETER["central_meridian",0],
             PARAMETER["scale_factor",1],
             PARAMETER["false_easting",0],
             PARAMETER["false_northing",0],
             UNIT["metre",1,
                 AUTHORITY["EPSG","9001"]]]
    """

    def __init__(self, tileSize=256):
        "Initialize the TMS Global Mercator pyramid"
        self.tileSize = tileSize
        self.initialResolution = 2 * math.pi * 6378137 / self.tileSize
        # 156543.03392804062 for tileSize 256 pixels
        self.originShift = 2 * math.pi * 6378137 / 2.0
        # 20037508.342789244

    def LatLonToMeters(self, lat, lon):
        "Converts given lat/lon in WGS84 Datum to XY in Spherical Mercator EPSG:3857"
        mx = lon * self.originShift / 180.0
        my = math.log(math.tan((90 + lat) * math.pi / 360.0)) / (math.pi / 180.0)
        my = my * self.originShift / 180.0
        return mx, my

    def MetersToLatLon(self, mx, my):
        "Converts XY point from Spherical Mercator EPSG:3857 to lat/lon in WGS84 Datum"
        lon = (mx / self.originShift) * 180.0
        lat = (my / self.originShift) * 180.0
        lat = 180 / math.pi * (2 * math.atan(math.exp(lat * math.pi / 180.0)) - math.pi / 2.0)
        return lat, lon

    def PixelsToMeters(self, px, py, zoom):
        "Converts pixel coordinates in given zoom level of pyramid to EPSG:3857"
        res = self.Resolution(zoom)
        mx = px * res - self.originShift
        my = py * res - self.originShift
        return mx, my

    def MetersToPixels(self, mx, my, zoom):
        "Converts EPSG:3857 to pyramid pixel coordinates in given zoom level"
        res = self.Resolution(zoom)
        px = (mx + self.originShift) / res
        py = (my + self.originShift) / res
        return px, py

    def PixelsToTile(self, px, py):
        "Returns a tile covering region in given pixel coordinates"
        tx = int(math.ceil(px / float(self.tileSize)) - 1)
        ty = int(math.ceil(py / float(self.tileSize)) - 1)
        return tx, ty

    def PixelsToRaster(self, px, py, zoom):
        "Move the origin of pixel coordinates to top-left corner"
        mapSize = self.tileSize << zoom
        return px, mapSize - py

    def MetersToTile(self, mx, my, zoom):
        "Returns tile for given mercator coordinates"
        px, py = self.MetersToPixels(mx, my, zoom)
        return self.PixelsToTile(px, py)

    def TileBounds(self, tx, ty, zoom):
        "Returns bounds of the given tile in EPSG:3857 coordinates"
        minx, miny = self.PixelsToMeters(tx*self.tileSize, ty*self.tileSize, zoom)
        maxx, maxy = self.PixelsToMeters((tx+1)*self.tileSize, (ty+1)*self.tileSize, zoom)
        return (minx, miny, maxx, maxy)

    def TileLatLonBounds(self, tx, ty, zoom):
        "Returns bounds of the given tile in latitude/longitude using WGS84 datum"
        bounds = self.TileBounds(tx, ty, zoom)
        minLat, minLon = self.MetersToLatLon(bounds[0], bounds[1])
        maxLat, maxLon = self.MetersToLatLon(bounds[2], bounds[3])
        return (minLat, minLon, maxLat, maxLon)

    def Resolution(self, zoom):
        "Resolution (meters/pixel) for given zoom level (measured at Equator)"
        # return (2 * math.pi * 6378137) / (self.tileSize * 2**zoom)
        return self.initialResolution / (2**zoom)

    def ZoomForPixelSize(self, pixelSize):
        "Maximal scaledown zoom of the pyramid closest to the pixelSize."
        for i in range(MAXZOOMLEVEL):
            if pixelSize > self.Resolution(i):
                # Bug fix: the original tested `i != -1`, which is always
                # true, so a pixelSize coarser than zoom 0 returned -1 (an
                # invalid zoom).  Clamp to 0 instead -- we never scale up --
                # mirroring GlobalGeodetic.ZoomForPixelSize().
                return max(0, i - 1)
        # pixelSize finer than the deepest level: return the deepest zoom
        # instead of implicitly returning None.
        return MAXZOOMLEVEL - 1

    def GoogleTile(self, tx, ty, zoom):
        "Converts TMS tile coordinates to Google Tile coordinates"
        # coordinate origin is moved from bottom-left to top-left corner of the extent
        return tx, (2**zoom - 1) - ty

    def QuadTree(self, tx, ty, zoom):
        "Converts TMS tile coordinates to Microsoft QuadTree"
        quadKey = ""
        ty = (2**zoom - 1) - ty
        for i in range(zoom, 0, -1):
            digit = 0
            mask = 1 << (i-1)
            if (tx & mask) != 0:
                digit += 1
            if (ty & mask) != 0:
                digit += 2
            quadKey += str(digit)
        return quadKey
class GlobalGeodetic(object):
    r"""
    TMS Global Geodetic Profile
    ---------------------------

    Functions necessary for generation of global tiles in Plate Carre projection,
    EPSG:4326, "unprojected profile".

    Such tiles are compatible with Google Earth (as any other EPSG:4326 rasters)
    and you can overlay the tiles on top of OpenLayers base map.

    Pixel and tile coordinates are in TMS notation (origin [0,0] in bottom-left).

    What coordinate conversions do we need for TMS Global Geodetic tiles?

      Global Geodetic tiles are using geodetic coordinates (latitude,longitude)
      directly as planar coordinates XY (it is also called Unprojected or Plate
      Carre). We need only scaling to pixel pyramid and cutting to tiles.
      Pyramid has on top level two tiles, so it is not square but rectangle.
      Area [-180,-90,180,90] is scaled to 512x256 pixels.
      TMS has coordinate origin (for pixels and tiles) in bottom-left corner.
      Rasters are in EPSG:4326 and therefore are compatible with Google Earth.

         LatLon      <->      Pixels      <->     Tiles

     WGS84 coordinates   Pixels in pyramid  Tiles in pyramid
         lat/lon         XY pixels Z zoom      XYZ from TMS
        EPSG:4326
         .----.                ----
        /      \     <->    /--------/    <->      TMS
        \      /         /--------------/
         -----        /--------------------/
       WMS, KML    Web Clients, Google Earth  TileMapService
    """

    def __init__(self, tmscompatible, tileSize=256):
        self.tileSize = tileSize
        # NOTE(review): any non-None value (including False) selects the
        # TMS-compatible layout here -- confirm callers only pass None or a
        # truthy flag.
        if tmscompatible is not None:
            # Defaults the resolution factor to 0.703125 (2 tiles @ level 0)
            # Adheres to the OSGeo TMS spec
            # http://wiki.osgeo.org/wiki/Tile_Map_Service_Specification#global-geodetic
            self.resFact = 180.0 / self.tileSize
        else:
            # Defaults the resolution factor to 1.40625 (1 tile @ level 0)
            # Adheres to OpenLayers, MapProxy, etc default resolution for WMTS
            self.resFact = 360.0 / self.tileSize

    def LonLatToPixels(self, lon, lat, zoom):
        "Converts lon/lat to pixel coordinates in given zoom of the EPSG:4326 pyramid"
        res = self.resFact / 2**zoom
        px = (180 + lon) / res
        py = (90 + lat) / res
        return px, py

    def PixelsToTile(self, px, py):
        "Returns coordinates of the tile covering region in pixel coordinates"
        tx = int(math.ceil(px / float(self.tileSize)) - 1)
        ty = int(math.ceil(py / float(self.tileSize)) - 1)
        return tx, ty

    def LonLatToTile(self, lon, lat, zoom):
        "Returns the tile for zoom which covers given lon/lat coordinates"
        px, py = self.LonLatToPixels(lon, lat, zoom)
        return self.PixelsToTile(px, py)

    def Resolution(self, zoom):
        "Resolution (arc/pixel) for given zoom level (measured at Equator)"
        return self.resFact / 2**zoom

    def ZoomForPixelSize(self, pixelSize):
        "Maximal scaledown zoom of the pyramid closest to the pixelSize."
        for i in range(MAXZOOMLEVEL):
            if pixelSize > self.Resolution(i):
                if i != 0:
                    return i-1
                else:
                    return 0  # We don't want to scale up
        # Fix: pixelSize finer than the deepest level previously fell
        # through and returned None; return the deepest zoom instead.
        return MAXZOOMLEVEL - 1

    def TileBounds(self, tx, ty, zoom):
        "Returns bounds of the given tile"
        res = self.resFact / 2**zoom
        return (
            tx*self.tileSize*res - 180,
            ty*self.tileSize*res - 90,
            (tx+1)*self.tileSize*res - 180,
            (ty+1)*self.tileSize*res - 90
        )

    def TileLatLonBounds(self, tx, ty, zoom):
        "Returns bounds of the given tile in the SWNE form"
        b = self.TileBounds(tx, ty, zoom)
        return (b[1], b[0], b[3], b[2])
class Zoomify(object):
    """
    Tiles compatible with the Zoomify viewer
    ----------------------------------------
    """

    def __init__(self, width, height, tilesize=256, tileformat='jpg'):
        """Initialization of the Zoomify tile tree"""
        self.tilesize = tilesize
        self.tileformat = tileformat
        imagesize = (width, height)
        tiles = (math.ceil(width / tilesize), math.ceil(height / tilesize))
        # Size (in tiles) for each tier of pyramid.
        self.tierSizeInTiles = []
        self.tierSizeInTiles.append(tiles)
        # Image size in pixels for each pyramid tier.
        self.tierImageSize = []
        self.tierImageSize.append(imagesize)
        # Halve the image until it fits on a single tile.
        while (imagesize[0] > tilesize or imagesize[1] > tilesize):
            imagesize = (math.floor(imagesize[0] / 2), math.floor(imagesize[1] / 2))
            tiles = (math.ceil(imagesize[0] / tilesize), math.ceil(imagesize[1] / tilesize))
            self.tierSizeInTiles.append(tiles)
            self.tierImageSize.append(imagesize)
        self.tierSizeInTiles.reverse()
        self.tierImageSize.reverse()
        # Depth of the Zoomify pyramid, number of tiers (zoom levels)
        self.numberOfTiers = len(self.tierSizeInTiles)
        # Number of tiles up to the given tier of pyramid.
        # Bug fix: the original did `self.tileCountUpToTier = []` followed by
        # `self.tileCountUpToTier[0] = 0`, which raises IndexError on an
        # empty list; seed the list with the 0 entry instead.
        self.tileCountUpToTier = [0]
        for i in range(1, self.numberOfTiers+1):
            self.tileCountUpToTier.append(
                self.tierSizeInTiles[i-1][0] * self.tierSizeInTiles[i-1][1] +
                self.tileCountUpToTier[i-1]
            )

    def tilefilename(self, x, y, z):
        """Returns filename for tile with given coordinates"""
        tileIndex = x + y * self.tierSizeInTiles[z][0] + self.tileCountUpToTier[z]
        # Zoomify groups at most 256 tiles per TileGroup directory.
        return os.path.join("TileGroup%.0f" % math.floor(tileIndex / 256),
                            "%s-%s-%s.%s" % (z, x, y, self.tileformat))
class GDALError(Exception):
    """Raised for errors in GDAL-level operations (e.g. missing source or
    destination SRS during reprojection)."""
    pass
def exit_with_error(message, details=""):
# Message printing and exit code kept from the way it worked using the OptionParser (in case
# someone parses the error output)
sys.stderr.write("Usage: gdal2tiles.py [options] input_file [output]\n\n")
sys.stderr.write("gdal2tiles.py: error: %s\n" % message)
if details:
sys.stderr.write("\n\n%s\n" % details)
sys.exit(2)
def generate_kml(tx, ty, tz, tileext, tilesize, tileswne, options, children=None, **args):
    """
    Template for the KML. Returns filled string.

    When tx/ty/tz are None, a top-level document that only links the
    *children* tiles is produced; otherwise a per-tile KML document (with
    its own Region/GroundOverlay) is generated.  *tileswne* is a callback
    returning (south, west, north, east) for a tile.  NOTE: **args doubles
    as the template-value dict and is mutated here (keys such as 'tx',
    'title' and 'drawOrder' are added).
    """
    if not children:
        children = []
    args['tx'], args['ty'], args['tz'] = tx, ty, tz
    args['tileformat'] = tileext
    if 'tilesize' not in args:
        args['tilesize'] = tilesize
    if 'minlodpixels' not in args:
        args['minlodpixels'] = int(args['tilesize'] / 2)
    if 'maxlodpixels' not in args:
        args['maxlodpixels'] = int(args['tilesize'] * 8)
    if children == []:
        # Leaf tile: no upper limit on how far it may be over-zoomed.
        args['maxlodpixels'] = -1
    if tx is None:
        tilekml = False
        args['title'] = options.title
    else:
        tilekml = True
        args['title'] = "%d/%d/%d.kml" % (tz, tx, ty)
        args['south'], args['west'], args['north'], args['east'] = tileswne(tx, ty, tz)
    # Tiles of column 0 get a higher drawOrder than the rest of the zoom
    # level; the root document uses 0.
    if tx == 0:
        args['drawOrder'] = 2 * tz + 1
    elif tx is not None:
        args['drawOrder'] = 2 * tz
    else:
        args['drawOrder'] = 0
    url = options.url
    if not url:
        # Tile KMLs reference children relative to their own directory.
        if tilekml:
            url = "../../"
        else:
            url = ""
    s = """<?xml version="1.0" encoding="utf-8"?>
<kml xmlns="http://www.opengis.net/kml/2.2">
  <Document>
    <name>%(title)s</name>
    <description></description>
    <Style>
      <ListStyle id="hideChildren">
        <listItemType>checkHideChildren</listItemType>
      </ListStyle>
    </Style>""" % args
    if tilekml:
        s += """
      <Region>
        <LatLonAltBox>
          <north>%(north).14f</north>
          <south>%(south).14f</south>
          <east>%(east).14f</east>
          <west>%(west).14f</west>
        </LatLonAltBox>
        <Lod>
          <minLodPixels>%(minlodpixels)d</minLodPixels>
          <maxLodPixels>%(maxlodpixels)d</maxLodPixels>
        </Lod>
      </Region>
      <GroundOverlay>
        <drawOrder>%(drawOrder)d</drawOrder>
        <Icon>
          <href>%(ty)d.%(tileformat)s</href>
        </Icon>
        <LatLonBox>
          <north>%(north).14f</north>
          <south>%(south).14f</south>
          <east>%(east).14f</east>
          <west>%(west).14f</west>
        </LatLonBox>
      </GroundOverlay>
""" % args
    for cx, cy, cz in children:
        csouth, cwest, cnorth, ceast = tileswne(cx, cy, cz)
        s += """
      <NetworkLink>
        <name>%d/%d/%d.%s</name>
        <Region>
          <LatLonAltBox>
            <north>%.14f</north>
            <south>%.14f</south>
            <east>%.14f</east>
            <west>%.14f</west>
          </LatLonAltBox>
          <Lod>
            <minLodPixels>%d</minLodPixels>
            <maxLodPixels>-1</maxLodPixels>
          </Lod>
        </Region>
        <Link>
          <href>%s%d/%d/%d.kml</href>
          <viewRefreshMode>onRegion</viewRefreshMode>
          <viewFormat/>
        </Link>
      </NetworkLink>
""" % (cz, cx, cy, args['tileformat'], cnorth, csouth, ceast, cwest,
       args['minlodpixels'], url, cz, cx, cy)
    s += """    </Document>
</kml>
"""
    return s
def scale_query_to_tile(dsquery, dstile, tiledriver, options, tilefilename=''):
    """Scales down query dataset to the tile dataset.

    Depending on options.resampling this uses gdal.RegenerateOverview
    ('average'), PIL ('antialias' -- writes *tilefilename* directly) or
    gdal.ReprojectImage (remaining GDAL resampling algorithms, which write
    into the in-memory *dstile*).
    """
    querysize = dsquery.RasterXSize
    tilesize = dstile.RasterXSize
    tilebands = dstile.RasterCount
    if options.resampling == 'average':
        # Function: gdal.RegenerateOverview()
        for i in range(1, tilebands+1):
            # Black border around NODATA
            res = gdal.RegenerateOverview(dsquery.GetRasterBand(i), dstile.GetRasterBand(i),
                                          'average')
            if res != 0:
                exit_with_error("RegenerateOverview() failed on %s, error %d" % (
                    tilefilename, res))
    elif options.resampling == 'antialias':
        # Scaling by PIL (Python Imaging Library) - improved Lanczos
        array = numpy.zeros((querysize, querysize, tilebands), numpy.uint8)
        for i in range(tilebands):
            array[:, :, i] = gdalarray.BandReadAsArray(dsquery.GetRasterBand(i+1),
                                                       0, 0, querysize, querysize)
        im = Image.fromarray(array, 'RGBA')  # Always four bands
        im1 = im.resize((tilesize, tilesize), Image.ANTIALIAS)
        # If the tile already exists on disk, blend the new pixels over it
        # using the new image's alpha as the mask.
        if os.path.exists(tilefilename):
            im0 = Image.open(tilefilename)
            im1 = Image.composite(im1, im0, im1)
        im1.save(tilefilename, tiledriver)
    else:
        if options.resampling == 'near':
            gdal_resampling = gdal.GRA_NearestNeighbour
        elif options.resampling == 'bilinear':
            gdal_resampling = gdal.GRA_Bilinear
        elif options.resampling == 'cubic':
            gdal_resampling = gdal.GRA_Cubic
        elif options.resampling == 'cubicspline':
            gdal_resampling = gdal.GRA_CubicSpline
        elif options.resampling == 'lanczos':
            gdal_resampling = gdal.GRA_Lanczos
        # Other algorithms are implemented by gdal.ReprojectImage().
        # A fake geotransform maps the query raster onto the tile raster so
        # ReprojectImage performs the rescale.
        dsquery.SetGeoTransform((0.0, tilesize / float(querysize), 0.0, 0.0, 0.0,
                                 tilesize / float(querysize)))
        dstile.SetGeoTransform((0.0, 1.0, 0.0, 0.0, 0.0, 1.0))
        res = gdal.ReprojectImage(dsquery, dstile, None, None, gdal_resampling)
        if res != 0:
            exit_with_error("ReprojectImage() failed on %s, error %d" % (tilefilename, res))
def setup_no_data_values(input_dataset, options):
    """
    Extract the NODATA values from the dataset or use the passed arguments as override if any
    """
    nodata = []
    if options.srcnodata:
        # Values supplied on the command line override the dataset metadata.
        values = [float(v) for v in options.srcnodata.split(',')]
        if len(values) < input_dataset.RasterCount:
            # Cycle the provided values so every band gets one.
            nodata = (values * input_dataset.RasterCount)[:input_dataset.RasterCount]
        else:
            nodata = values
    else:
        for band_no in range(1, input_dataset.RasterCount + 1):
            band_nodata = input_dataset.GetRasterBand(band_no).GetNoDataValue()
            if band_nodata is not None:
                nodata.append(band_nodata)
    if options.verbose:
        print("NODATA: %s" % nodata)
    return nodata
def setup_input_srs(input_dataset, options):
    """
    Determines and returns the Input Spatial Reference System (SRS) as an osr object and as a
    WKT representation

    Uses in priority the one passed in the command line arguments. If None, tries to extract them
    from the input dataset
    """
    if options.s_srs:
        # A command-line SRS wins over whatever the dataset declares.
        srs = osr.SpatialReference()
        srs.SetFromUserInput(options.s_srs)
        return srs, srs.ExportToWkt()
    wkt = input_dataset.GetProjection()
    if not wkt and input_dataset.GetGCPCount() != 0:
        # No projection but GCPs present: use the GCP projection instead.
        wkt = input_dataset.GetGCPProjection()
    if not wkt:
        return None, wkt
    srs = osr.SpatialReference()
    srs.ImportFromWkt(wkt)
    return srs, wkt
def setup_output_srs(input_srs, options):
    """
    Setup the desired SRS (based on options)
    """
    # 'mercator' and 'geodetic' have fixed EPSG codes; the 'raster' profile
    # keeps whatever SRS the input has.
    epsg = {'mercator': 3857, 'geodetic': 4326}.get(options.profile)
    if epsg is None:
        return input_srs
    output_srs = osr.SpatialReference()
    output_srs.ImportFromEPSG(epsg)
    return output_srs
def has_georeference(dataset):
    """Return True when *dataset* carries either an affine geotransform
    different from GDAL's identity default or at least one GCP."""
    identity_gt = (0.0, 1.0, 0.0, 0.0, 0.0, 1.0)
    if dataset.GetGeoTransform() != identity_gt:
        return True
    return dataset.GetGCPCount() != 0
def reproject_dataset(from_dataset, from_srs, to_srs, options=None):
    """
    Returns the input dataset in the expected "destination" SRS.
    If the dataset is already in the correct SRS, returns it unmodified
    """
    if not from_srs or not to_srs:
        raise GDALError("from and to SRS must be defined to reproject the dataset")
    same_srs = from_srs.ExportToProj4() == to_srs.ExportToProj4()
    if same_srs and from_dataset.GetGCPCount() == 0:
        # Already in the target SRS and not GCP-based: nothing to do.
        return from_dataset
    to_dataset = gdal.AutoCreateWarpedVRT(from_dataset,
                                          from_srs.ExportToWkt(), to_srs.ExportToWkt())
    if options and options.verbose:
        print("Warping of the raster by AutoCreateWarpedVRT (result saved into 'tiles.vrt')")
        to_dataset.GetDriver().CreateCopy("tiles.vrt", to_dataset)
    return to_dataset
def add_gdal_warp_options_to_string(vrt_string, warp_options):
    """Insert <Option name="..."> children into the <GDALWarpOptions>
    element of a VRT document given as a string.

    Returns the updated XML, or the input unchanged when *warp_options* is
    empty or the document has no GDALWarpOptions element.
    """
    if not warp_options:
        return vrt_string
    root = ElementTree.fromstring(vrt_string)
    warp_node = root.find("GDALWarpOptions")
    if warp_node is None:
        return vrt_string
    for name, value in warp_options.items():
        option = ElementTree.Element("Option", {"name": name})
        option.text = value
        warp_node.insert(0, option)
    return ElementTree.tostring(root).decode()
def update_no_data_values(warped_vrt_dataset, nodata_values, options=None):
    """
    Takes an array of NODATA values and forces them on the WarpedVRT file dataset passed

    Serializes the dataset to a temporary VRT file, injects the warp
    options, then reopens the corrected dataset.  NOTE(review): when
    nodata_values is empty the function implicitly returns None -- callers
    appear expected to invoke it only with a non-empty list; confirm.
    """
    # TODO: gbataille - Seems that I forgot tests there
    if nodata_values != []:
        # Round-trip through a temporary VRT file so the XML can be edited.
        temp_file = gettempfilename('-gdal2tiles.vrt')
        warped_vrt_dataset.GetDriver().CreateCopy(temp_file, warped_vrt_dataset)
        with open(temp_file, 'r') as f:
            vrt_string = f.read()
        vrt_string = add_gdal_warp_options_to_string(
            vrt_string, {"INIT_DEST": "NO_DATA", "UNIFIED_SRC_NODATA": "YES"})
        # TODO: gbataille - check the need for this replacement. Seems to work without
        # # replace BandMapping tag for NODATA bands....
        # for i in range(len(nodata_values)):
        #     s = s.replace(
        #         '<BandMapping src="%i" dst="%i"/>' % ((i+1), (i+1)),
        #         """
        # <BandMapping src="%i" dst="%i">
        # <SrcNoDataReal>%i</SrcNoDataReal>
        # <SrcNoDataImag>0</SrcNoDataImag>
        # <DstNoDataReal>%i</DstNoDataReal>
        # <DstNoDataImag>0</DstNoDataImag>
        # </BandMapping>
        # """ % ((i+1), (i+1), nodata_values[i], nodata_values[i]))
        # save the corrected VRT
        with open(temp_file, 'w') as f:
            f.write(vrt_string)
        corrected_dataset = gdal.Open(temp_file)
        os.unlink(temp_file)
        # set NODATA_VALUE metadata
        corrected_dataset.SetMetadataItem(
            'NODATA_VALUES', ' '.join([str(i) for i in nodata_values]))
        if options and options.verbose:
            print("Modified warping result saved into 'tiles1.vrt'")
            # TODO: gbataille - test replacing that with a gdal write of the dataset (more
            # accurately what's used, even if should be the same
            with open("tiles1.vrt", "w") as f:
                f.write(vrt_string)
        return corrected_dataset
def add_alpha_band_to_string_vrt(vrt_string):
    """Append an alpha VRTRasterBand (plus the matching DstAlphaBand and
    INIT_DEST warp options) to a warped-VRT XML string.

    Equivalent to what ``gdalwarp -dstalpha`` produces (to be confirmed,
    per the original TODO).  Raises Exception when an Alpha band is
    already present.
    """
    root = ElementTree.fromstring(vrt_string)
    band_count = 0
    insert_pos = 0
    # Walk the top-level children: count existing bands and remember the
    # position right after the last VRTRasterBand element.
    for child in list(root):
        if child.tag == "VRTRasterBand":
            band_count += 1
            interp = child.find("./ColorInterp")
            if interp is not None and interp.text == "Alpha":
                raise Exception("Alpha band already present")
        elif band_count:
            # First non-band element after the band definitions.
            break
        insert_pos += 1
    alpha_band = ElementTree.Element(
        "VRTRasterBand",
        {'dataType': "Byte", "band": str(band_count + 1), "subClass": "VRTWarpedRasterBand"})
    interp_elem = ElementTree.SubElement(alpha_band, "ColorInterp")
    interp_elem.text = "Alpha"
    root.insert(insert_pos, alpha_band)
    warp_options = root.find(".//GDALWarpOptions")
    dst_alpha = ElementTree.SubElement(warp_options, "DstAlphaBand")
    dst_alpha.text = str(band_count + 1)
    # TODO: gbataille - this is a GDALWarpOptions. Why put it in a specific place?
    init_dest = ElementTree.SubElement(warp_options, "Option", {"name": "INIT_DEST"})
    init_dest.text = "0"
    return ElementTree.tostring(root).decode()
def update_alpha_value_for_non_alpha_inputs(warped_vrt_dataset, options=None):
    """
    Handles dataset with 1 or 3 bands, i.e. without alpha channel, in the case the nodata value has
    not been forced by options

    Adds an alpha band to the warped VRT by round-tripping it through a
    temporary VRT file; datasets with 2 or 4 bands (assumed to already
    carry alpha) are returned unchanged.
    """
    if warped_vrt_dataset.RasterCount in [1, 3]:
        tempfilename = gettempfilename('-gdal2tiles.vrt')
        warped_vrt_dataset.GetDriver().CreateCopy(tempfilename, warped_vrt_dataset)
        with open(tempfilename) as f:
            orig_data = f.read()
        alpha_data = add_alpha_band_to_string_vrt(orig_data)
        with open(tempfilename, 'w') as f:
            f.write(alpha_data)
        # Reopen so GDAL picks up the newly declared alpha band.
        warped_vrt_dataset = gdal.Open(tempfilename)
        os.unlink(tempfilename)
        if options and options.verbose:
            print("Modified -dstalpha warping result saved into 'tiles1.vrt'")
            # TODO: gbataille - test replacing that with a gdal write of the dataset (more
            # accurately what's used, even if should be the same
            with open("tiles1.vrt", "w") as f:
                f.write(alpha_data)
    return warped_vrt_dataset
def nb_data_bands(dataset):
    """
    Return the number of data (non-alpha) bands of a gdal dataset
    """
    mask_band = dataset.GetRasterBand(1).GetMaskBand()
    # An explicit alpha mask, or a 2/4-band layout (gray+alpha / RGBA),
    # means the last band is alpha rather than data.
    has_alpha = ((mask_band.GetMaskFlags() & gdal.GMF_ALPHA) or
                 dataset.RasterCount in (2, 4))
    return dataset.RasterCount - 1 if has_alpha else dataset.RasterCount
def gettempfilename(suffix):
    """Returns a temporary filename"""
    # tempfile.mktemp() crashes on some Wine versions (the one of Ubuntu
    # 12.04 particularly), so build the name by hand when the launching
    # binary (held in the '_' environment variable) looks like Wine.
    launcher = os.environ.get('_', '')
    if 'wine' in launcher:
        tmpdir = os.environ.get('TMP', '.')
        import random
        import time
        random.seed(time.time())
        random_part = 'file%d' % random.randint(0, 1000000000)
        return os.path.join(tmpdir, random_part + suffix)
    return tempfile.mktemp(suffix)
def create_base_tile(tile_job_info, tile_detail, options, queue=None):
    """Render one base (max-zoom) tile described by *tile_detail*.

    Reads the source window from the warped VRT named in *tile_job_info*,
    writes the pixel data (plus the alpha mask band) into an in-memory
    tile, scales it down if the query window is bigger than the tile,
    and saves the result as png/jpg.  Optionally emits a per-tile KML
    file and reports completion on *queue*.
    """
    gdal.AllRegister()

    dataBandsCount = tile_job_info.nb_data_bands
    output = tile_job_info.output_file_path
    tileext = tile_job_info.tile_extension
    tilesize = tile_job_info.tile_size
    # NOTE(review): this rebinding shadows the `options` parameter -- the
    # caller-supplied argument is never used; confirm that is intended.
    options = tile_job_info.options

    # One extra band on top of the data bands carries the alpha channel.
    tilebands = dataBandsCount + 1
    ds = gdal.Open(tile_job_info.src_file, gdal.GA_ReadOnly)
    mem_drv = gdal.GetDriverByName('MEM')
    out_drv = gdal.GetDriverByName(tile_job_info.tile_driver)
    alphaband = ds.GetRasterBand(1).GetMaskBand()

    # Unpack the precomputed tile coordinates and read/write windows.
    tx = tile_detail.tx
    ty = tile_detail.ty
    tz = tile_detail.tz
    rx = tile_detail.rx          # read window origin in the source raster
    ry = tile_detail.ry
    rxsize = tile_detail.rxsize  # read window size in source pixels
    rysize = tile_detail.rysize
    wx = tile_detail.wx          # write offset inside the tile/query buffer
    wy = tile_detail.wy
    wxsize = tile_detail.wxsize  # write window size in tile pixels
    wysize = tile_detail.wysize
    querysize = tile_detail.querysize

    # Tile dataset in memory
    tilefilename = os.path.join(
        output, str(tz), str(tx), "%s.%s" % (ty, tileext))
    dstile = mem_drv.Create('', tilesize, tilesize, tilebands)

    data = alpha = None

    if options.verbose:
        print("\tReadRaster Extent: ",
              (rx, ry, rxsize, rysize), (wx, wy, wxsize, wysize))

    # Query is in 'nearest neighbour' but can be bigger in then the tilesize
    # We scale down the query to the tilesize by supplied algorithm.

    # A zero-sized window means the tile lies entirely outside the raster.
    if rxsize != 0 and rysize != 0 and wxsize != 0 and wysize != 0:
        data = ds.ReadRaster(rx, ry, rxsize, rysize, wxsize, wysize,
                             band_list=list(range(1, dataBandsCount+1)))
        alpha = alphaband.ReadRaster(rx, ry, rxsize, rysize, wxsize, wysize)

    # The tile in memory is a transparent file by default. Write pixel values into it if
    # any
    if data:
        if tilesize == querysize:
            # Use the ReadRaster result directly in tiles ('nearest neighbour' query)
            dstile.WriteRaster(wx, wy, wxsize, wysize, data,
                               band_list=list(range(1, dataBandsCount+1)))
            dstile.WriteRaster(wx, wy, wxsize, wysize, alpha, band_list=[tilebands])

            # Note: For source drivers based on WaveLet compression (JPEG2000, ECW,
            # MrSID) the ReadRaster function returns high-quality raster (not ugly
            # nearest neighbour)
            # TODO: Use directly 'near' for WaveLet files
        else:
            # Big ReadRaster query in memory scaled to the tilesize - all but 'near'
            # algo
            dsquery = mem_drv.Create('', querysize, querysize, tilebands)
            # TODO: fill the null value in case a tile without alpha is produced (now
            # only png tiles are supported)
            dsquery.WriteRaster(wx, wy, wxsize, wysize, data,
                                band_list=list(range(1, dataBandsCount+1)))
            dsquery.WriteRaster(wx, wy, wxsize, wysize, alpha, band_list=[tilebands])

            scale_query_to_tile(dsquery, dstile, tile_job_info.tile_driver, options,
                                tilefilename=tilefilename)
            del dsquery

    # Force freeing the memory to make sure the C++ destructor is called and the memory as well as
    # the file locks are released
    del ds
    del data

    if options.resampling != 'antialias':
        # Write a copy of tile to png/jpg
        out_drv.CreateCopy(tilefilename, dstile, strict=0)

    del dstile

    # Create a KML file for this tile.
    if tile_job_info.kml:
        kmlfilename = os.path.join(output, str(tz), str(tx), '%d.kml' % ty)
        if not options.resume or not os.path.exists(kmlfilename):
            with open(kmlfilename, 'wb') as f:
                f.write(generate_kml(
                    tx, ty, tz, tile_job_info.tile_extension, tile_job_info.tile_size,
                    tile_job_info.tile_swne, tile_job_info.options
                ).encode('utf-8'))

    # Report progress to the multiprocessing consumer, if any.
    if queue:
        queue.put("tile %s %s %s" % (tx, ty, tz))
def create_overview_tiles(tile_job_info, output_folder, options):
    """Generation of the overview tiles (higher in the pyramid) based on existing tiles

    Walks the zoom levels from tmaxz-1 down to tminz; each overview tile
    is assembled from the (up to) four already-written tiles of the level
    below, scaled 2:1 into the tile size, then saved to disk (and KML,
    when enabled).
    """
    mem_driver = gdal.GetDriverByName('MEM')
    tile_driver = tile_job_info.tile_driver
    out_driver = gdal.GetDriverByName(tile_driver)
    # Data bands plus one alpha band.
    tilebands = tile_job_info.nb_data_bands + 1

    # Usage of existing tiles: from 4 underlying tiles generate one as overview.
    # First pass: count the total number of overview tiles for the progress bar.
    tcount = 0
    for tz in range(tile_job_info.tmaxz - 1, tile_job_info.tminz - 1, -1):
        tminx, tminy, tmaxx, tmaxy = tile_job_info.tminmax[tz]
        tcount += (1 + abs(tmaxx-tminx)) * (1 + abs(tmaxy-tminy))

    ti = 0

    if tcount == 0:
        return

    if not options.quiet:
        print("Generating Overview Tiles:")

    progress_bar = ProgressBar(tcount)
    progress_bar.start()

    # Second pass: actually build the tiles, top of the pyramid last.
    for tz in range(tile_job_info.tmaxz - 1, tile_job_info.tminz - 1, -1):
        tminx, tminy, tmaxx, tmaxy = tile_job_info.tminmax[tz]
        for ty in range(tmaxy, tminy - 1, -1):
            for tx in range(tminx, tmaxx + 1):

                ti += 1
                # ytile accounts for XYZ vs TMS row numbering.
                ytile = GDAL2Tiles.getYtile(ty, tz, options)
                tilefilename = os.path.join(output_folder,
                                            str(tz),
                                            str(tx),
                                            "%s.%s" % (ytile, tile_job_info.tile_extension))

                if options.verbose:
                    print(ti, '/', tcount, tilefilename)

                if options.resume and os.path.exists(tilefilename):
                    if options.verbose:
                        print("Tile generation skipped because of --resume")
                    else:
                        progress_bar.log_progress()
                    continue

                # Create directories for the tile
                if not os.path.exists(os.path.dirname(tilefilename)):
                    os.makedirs(os.path.dirname(tilefilename))

                # dsquery holds the 2x2 mosaic of child tiles; dstile is the
                # final, downscaled overview tile.
                dsquery = mem_driver.Create('', 2 * tile_job_info.tile_size,
                                            2 * tile_job_info.tile_size, tilebands)
                # TODO: fill the null value
                dstile = mem_driver.Create('', tile_job_info.tile_size, tile_job_info.tile_size,
                                           tilebands)

                # TODO: Implement more clever walking on the tiles with cache functionality
                # probably walk should start with reading of four tiles from top left corner
                # Hilbert curve

                children = []
                # Read the tiles and write them to query window
                for y in range(2 * ty, 2 * ty + 2):
                    for x in range(2 * tx, 2 * tx + 2):
                        minx, miny, maxx, maxy = tile_job_info.tminmax[tz + 1]
                        # Skip child coordinates that fall outside the lower level's range.
                        if x >= minx and x <= maxx and y >= miny and y <= maxy:
                            ytile2 = GDAL2Tiles.getYtile(y, tz+1, options)
                            dsquerytile = gdal.Open(
                                os.path.join(output_folder, str(tz + 1), str(x),
                                             "%s.%s" % (ytile2, tile_job_info.tile_extension)),
                                gdal.GA_ReadOnly)
                            # Place the child in the correct quadrant of the 2x2 mosaic.
                            if (ty == 0 and y == 1) or (ty != 0 and (y % (2 * ty)) != 0):
                                tileposy = 0
                            else:
                                tileposy = tile_job_info.tile_size
                            if tx:
                                tileposx = x % (2 * tx) * tile_job_info.tile_size
                            elif tx == 0 and x == 1:
                                tileposx = tile_job_info.tile_size
                            else:
                                tileposx = 0
                            dsquery.WriteRaster(
                                tileposx, tileposy, tile_job_info.tile_size,
                                tile_job_info.tile_size,
                                dsquerytile.ReadRaster(0, 0,
                                                       tile_job_info.tile_size,
                                                       tile_job_info.tile_size),
                                band_list=list(range(1, tilebands + 1)))
                            children.append([x, y, tz + 1])

                # Downscale the 2x mosaic into the overview tile.
                scale_query_to_tile(dsquery, dstile, tile_driver, options,
                                    tilefilename=tilefilename)
                # Write a copy of tile to png/jpg
                if options.resampling != 'antialias':
                    # Write a copy of tile to png/jpg
                    out_driver.CreateCopy(tilefilename, dstile, strict=0)

                if options.verbose:
                    print("\tbuild from zoom", tz + 1,
                          " tiles:", (2 * tx, 2 * ty), (2 * tx + 1, 2 * ty),
                          (2 * tx, 2 * ty + 1), (2 * tx + 1, 2 * ty + 1))

                # Create a KML file for this tile.
                if tile_job_info.kml:
                    with open(os.path.join(
                        output_folder,
                        '%d/%d/%d.kml' % (tz, tx, ty)
                    ), 'wb') as f:
                        f.write(generate_kml(
                            tx, ty, tz, tile_job_info.tile_extension, tile_job_info.tile_size,
                            get_tile_swne(tile_job_info, options), options, children
                        ).encode('utf-8'))

                if not options.verbose and not options.quiet:
                    progress_bar.log_progress()
def optparse_init():
    """Prepare the option parser for input (argv)

    Returns an ``optparse.OptionParser`` configured with all gdal2tiles
    command-line options, their help texts and default values.
    """
    from optparse import OptionParser, OptionGroup

    usage = "Usage: %prog [options] input_file [output]"
    p = OptionParser(usage, version="%prog " + __version__)
    p.add_option("-p", "--profile", dest='profile',
                 type='choice', choices=profile_list,
                 help=("Tile cutting profile (%s) - default 'mercator' "
                       "(Google Maps compatible)" % ",".join(profile_list)))
    p.add_option("-r", "--resampling", dest="resampling",
                 type='choice', choices=resampling_list,
                 help="Resampling method (%s) - default 'average'" % ",".join(resampling_list))
    p.add_option("-s", "--s_srs", dest="s_srs", metavar="SRS",
                 help="The spatial reference system used for the source input data")
    p.add_option("-z", "--zoom", dest="zoom",
                 help="Zoom levels to render (format:'2-5' or '10').")
    p.add_option("-e", "--resume", dest="resume", action="store_true",
                 help="Resume mode. Generate only missing files.")
    p.add_option("-a", "--srcnodata", dest="srcnodata", metavar="NODATA",
                 help="NODATA transparency value to assign to the input data")
    p.add_option("-d", "--tmscompatible", dest="tmscompatible", action="store_true",
                 help=("When using the geodetic profile, specifies the base resolution "
                       "as 0.703125 or 2 tiles at zoom level 0."))
    p.add_option("-x", "--xyz",
                 action='store_true', dest='xyz',
                 help="Use XYZ tile numbering instead of TMS")
    p.add_option("-v", "--verbose",
                 action="store_true", dest="verbose",
                 help="Print status messages to stdout")
    p.add_option("-q", "--quiet",
                 action="store_true", dest="quiet",
                 help="Disable messages and status to stdout")
    p.add_option("--processes",
                 dest="nb_processes",
                 type='int',
                 help="Number of processes to use for tiling")

    # KML options
    g = OptionGroup(p, "KML (Google Earth) options",
                    "Options for generated Google Earth SuperOverlay metadata")
    g.add_option("-k", "--force-kml", dest='kml', action="store_true",
                 help=("Generate KML for Google Earth - default for 'geodetic' profile and "
                       "'raster' in EPSG:4326. For a dataset with different projection use "
                       "with caution!"))
    g.add_option("-n", "--no-kml", dest='kml', action="store_false",
                 help="Avoid automatic generation of KML files for EPSG:4326")
    g.add_option("-u", "--url", dest='url',
                 help="URL address where the generated tiles are going to be published")
    p.add_option_group(g)

    # HTML options
    g = OptionGroup(p, "Web viewer options",
                    "Options for generated HTML viewers a la Google Maps")
    g.add_option("-w", "--webviewer", dest='webviewer', type='choice', choices=webviewer_list,
                 help="Web viewer to generate (%s) - default 'all'" % ",".join(webviewer_list))
    g.add_option("-t", "--title", dest='title',
                 help="Title of the map")
    g.add_option("-c", "--copyright", dest='copyright',
                 help="Copyright for the map")
    g.add_option("-g", "--googlekey", dest='googlekey',
                 help="Google Maps API key from http://code.google.com/apis/maps/signup.html")
    g.add_option("-b", "--bingkey", dest='bingkey',
                 help="Bing Maps API key from https://www.bingmapsportal.com/")
    p.add_option_group(g)

    # BUGFIX: "--processes" stores into dest="nb_processes", so its default
    # must be set under that name -- the previous `processes=1` default was
    # attached to a dest no option uses and never reached
    # options.nb_processes.
    p.set_defaults(verbose=False, profile="mercator", kml=False, url='',
                   webviewer='all', copyright='', resampling='average', resume=False,
                   googlekey='INSERT_YOUR_KEY_HERE', bingkey='INSERT_YOUR_KEY_HERE',
                   nb_processes=1)

    return p
def process_args(argv):
    """Parse *argv* and return the validated (input_file, output_folder, options) triple."""
    parser = optparse_init()
    options, args = parser.parse_args(args=argv)

    # Args should be either an input file OR an input file and an output folder
    if not args:
        exit_with_error("You need to specify at least an input file as argument to the script")
    if len(args) > 2:
        exit_with_error("Processing of several input files is not supported.",
                        "Please first use a tool like gdal_vrtmerge.py or gdal_merge.py on the "
                        "files: gdal_vrtmerge.py -o merged.vrt %s" % " ".join(args))

    input_file = args[0]
    if not os.path.isfile(input_file):
        exit_with_error("The provided input file %s does not exist or is not a file" % input_file)

    # Default the output folder to the input file's basename when omitted.
    output_folder = args[1] if len(args) == 2 else os.path.basename(input_file)

    options = options_post_processing(options, input_file, output_folder)
    return input_file, output_folder, options
def options_post_processing(options, input_file, output_folder):
    """Normalize and validate parsed options; returns the adjusted options object."""
    if not options.title:
        options.title = os.path.basename(input_file)

    # Ensure the publish URL ends with '/' and then append the output folder name.
    if options.url and not options.url.endswith('/'):
        options.url += '/'
    if options.url:
        out_path = output_folder[:-1] if output_folder.endswith("/") else output_folder
        options.url += os.path.basename(out_path) + '/'

    # Supported options
    if options.resampling == 'average':
        # 'average' needs gdal.RegenerateOverview; accessing it raises on old GDAL.
        try:
            gdal.RegenerateOverview
        except Exception:
            exit_with_error("'average' resampling algorithm is not available.",
                            "Please use -r 'near' argument or upgrade to newer version of GDAL.")
    elif options.resampling == 'antialias':
        # 'antialias' needs numpy (and PIL); the bare name raises if the import failed.
        try:
            numpy  # noqa  # pylint:disable=W0104
        except Exception:
            exit_with_error("'antialias' resampling algorithm is not available.",
                            "Install PIL (Python Imaging Library) and numpy.")

    # Detect non-ascii characters in the input file name.
    full_ascii = True
    try:
        os.path.basename(input_file).encode('ascii')
    except UnicodeEncodeError:
        full_ascii = False

    # LC_CTYPE check
    if not full_ascii and 'UTF-8' not in os.environ.get("LC_CTYPE", ""):
        if not options.quiet:
            print("\nWARNING: "
                  "You are running gdal2tiles.py with a LC_CTYPE environment variable that is "
                  "not UTF-8 compatible, and your input file contains non-ascii characters. "
                  "The generated sample googlemaps, openlayers or "
                  "leaflet files might contain some invalid characters as a result\n")

    # Output the results
    if options.verbose:
        print("Options:", options)
        print("Input:", input_file)
        print("Output:", output_folder)
        print("Cache: %s MB" % (gdal.GetCacheMax() / 1024 / 1024))
        print('')

    return options
class TileDetail(object):
    """Value object describing one base tile: its coordinates plus the
    ReadRaster/WriteRaster windows precomputed by generate_base_tiles()."""

    tx = 0
    ty = 0
    tz = 0
    rx = 0
    ry = 0
    rxsize = 0
    rysize = 0
    wx = 0
    wy = 0
    wxsize = 0
    wysize = 0
    querysize = 0

    def __init__(self, **kwargs):
        # Only keywords matching a declared attribute are stored; anything
        # else is silently ignored.
        for name, value in kwargs.items():
            if hasattr(self, name):
                setattr(self, name, value)

    def _describe(self):
        # Shared formatter for all textual representations.
        return "TileDetail %s\n%s\n%s\n" % (self.tx, self.ty, self.tz)

    def __unicode__(self):
        return self._describe()

    def __str__(self):
        return self._describe()

    def __repr__(self):
        return self._describe()
class TileJobInfo(object):
    """
    Plain object to hold tile job configuration for a dataset
    """

    src_file = ""
    nb_data_bands = 0
    output_file_path = ""
    tile_extension = ""
    tile_size = 0
    tile_driver = None
    kml = False
    tminmax = []
    tminz = 0
    tmaxz = 0
    in_srs_wkt = 0
    out_geo_trans = []
    ominy = 0
    is_epsg_4326 = False
    options = None

    def __init__(self, **kwargs):
        # Only keywords matching a declared attribute are stored; anything
        # else is silently ignored.
        for name, value in kwargs.items():
            if hasattr(self, name):
                setattr(self, name, value)

    def _describe(self):
        # Shared formatter for all textual representations.
        return "TileJobInfo %s\n" % (self.src_file)

    def __unicode__(self):
        return self._describe()

    def __str__(self):
        return self._describe()

    def __repr__(self):
        return self._describe()
class Gdal2TilesError(Exception):
    """Base exception for errors raised by the gdal2tiles tiling pipeline."""
    pass
class GDAL2Tiles(object):
def __init__(self, input_file, output_folder, options):
"""Constructor function - initialization"""
self.out_drv = None
self.mem_drv = None
self.warped_input_dataset = None
self.out_srs = None
self.nativezoom = None
self.tminmax = None
self.tsize = None
self.mercator = None
self.geodetic = None
self.alphaband = None
self.dataBandsCount = None
self.out_gt = None
self.tileswne = None
self.swne = None
self.ominx = None
self.omaxx = None
self.omaxy = None
self.ominy = None
self.input_file = None
self.output_folder = None
# Tile format
self.tilesize = 256
self.tiledriver = 'PNG'
self.tileext = 'png'
self.tmp_dir = tempfile.mkdtemp()
self.tmp_vrt_filename = os.path.join(self.tmp_dir, str(uuid4()) + '.vrt')
# Should we read bigger window of the input raster and scale it down?
# Note: Modified later by open_input()
# Not for 'near' resampling
# Not for Wavelet based drivers (JPEG2000, ECW, MrSID)
# Not for 'raster' profile
self.scaledquery = True
# How big should be query window be for scaling down
# Later on reset according the chosen resampling algorightm
self.querysize = 4 * self.tilesize
# Should we use Read on the input file for generating overview tiles?
# Note: Modified later by open_input()
# Otherwise the overview tiles are generated from existing underlying tiles
self.overviewquery = False
self.input_file = input_file
self.output_folder = output_folder
self.options = options
if self.options.resampling == 'near':
self.querysize = self.tilesize
elif self.options.resampling == 'bilinear':
self.querysize = self.tilesize * 2
# User specified zoom levels
self.tminz = None
self.tmaxz = None
if self.options.zoom:
minmax = self.options.zoom.split('-', 1)
minmax.extend([''])
zoom_min, zoom_max = minmax[:2]
self.tminz = int(zoom_min)
if zoom_max:
self.tmaxz = int(zoom_max)
else:
self.tmaxz = int(zoom_min)
# KML generation
self.kml = self.options.kml
# -------------------------------------------------------------------------
    def open_input(self):
        """Initialization of the input raster, reprojection if necessary

        Opens and validates the input file, warps it to the output SRS
        when needed, snapshots it to a temporary VRT for the workers, and
        precomputes per-zoom tile ranges (tminmax), zoom limits and the
        tile-SWNE function for KML.
        """
        gdal.AllRegister()

        self.out_drv = gdal.GetDriverByName(self.tiledriver)
        self.mem_drv = gdal.GetDriverByName('MEM')

        if not self.out_drv:
            # NOTE(review): the driver name is passed as a second Exception
            # argument instead of %-formatting, so the message renders as a
            # tuple -- confirm this is intended.
            raise Exception("The '%s' driver was not found, is it available in this GDAL build?",
                            self.tiledriver)
        if not self.mem_drv:
            raise Exception("The 'MEM' driver was not found, is it available in this GDAL build?")

        # Open the input file
        if self.input_file:
            input_dataset = gdal.Open(self.input_file, gdal.GA_ReadOnly)
        else:
            raise Exception("No input file was specified")

        if self.options.verbose:
            print("Input file:",
                  "( %sP x %sL - %s bands)" % (input_dataset.RasterXSize,
                                               input_dataset.RasterYSize,
                                               input_dataset.RasterCount))

        if not input_dataset:
            # Note: GDAL prints the ERROR message too
            exit_with_error("It is not possible to open the input file '%s'." % self.input_file)

        # Read metadata from the input file
        if input_dataset.RasterCount == 0:
            exit_with_error("Input file '%s' has no raster band" % self.input_file)

        if input_dataset.GetRasterBand(1).GetRasterColorTable():
            exit_with_error(
                "Please convert this file to RGB/RGBA and run gdal2tiles on the result.",
                "From paletted file you can create RGBA file (temp.vrt) by:\n"
                "gdal_translate -of vrt -expand rgba %s temp.vrt\n"
                "then run:\n"
                "gdal2tiles temp.vrt" % self.input_file
            )

        in_nodata = setup_no_data_values(input_dataset, self.options)

        if self.options.verbose:
            print("Preprocessed file:",
                  "( %sP x %sL - %s bands)" % (input_dataset.RasterXSize,
                                               input_dataset.RasterYSize,
                                               input_dataset.RasterCount))

        in_srs, self.in_srs_wkt = setup_input_srs(input_dataset, self.options)

        self.out_srs = setup_output_srs(in_srs, self.options)

        # If input and output reference systems are different, we reproject the input dataset into
        # the output reference system for easier manipulation
        self.warped_input_dataset = None

        if self.options.profile in ('mercator', 'geodetic'):

            if not in_srs:
                exit_with_error(
                    "Input file has unknown SRS.",
                    "Use --s_srs ESPG:xyz (or similar) to provide source reference system.")

            if not has_georeference(input_dataset):
                exit_with_error(
                    "There is no georeference - neither affine transformation (worldfile) "
                    "nor GCPs. You can generate only 'raster' profile tiles.",
                    "Either gdal2tiles with parameter -p 'raster' or use another GIS "
                    "software for georeference e.g. gdal_transform -gcp / -a_ullr / -a_srs"
                )

            # Reproject when the SRS differs or the file is georeferenced by GCPs.
            if ((in_srs.ExportToProj4() != self.out_srs.ExportToProj4()) or
                    (input_dataset.GetGCPCount() != 0)):
                self.warped_input_dataset = reproject_dataset(
                    input_dataset, in_srs, self.out_srs)

                # Carry transparency over: either via NODATA values or by
                # synthesizing an alpha band.
                if in_nodata:
                    self.warped_input_dataset = update_no_data_values(
                        self.warped_input_dataset, in_nodata, options=self.options)
                else:
                    self.warped_input_dataset = update_alpha_value_for_non_alpha_inputs(
                        self.warped_input_dataset, options=self.options)

            if self.warped_input_dataset and self.options.verbose:
                print("Projected file:", "tiles.vrt", "( %sP x %sL - %s bands)" % (
                    self.warped_input_dataset.RasterXSize,
                    self.warped_input_dataset.RasterYSize,
                    self.warped_input_dataset.RasterCount))

        if not self.warped_input_dataset:
            self.warped_input_dataset = input_dataset

        # Snapshot the (possibly warped) dataset to a VRT the tile workers open.
        self.warped_input_dataset.GetDriver().CreateCopy(self.tmp_vrt_filename,
                                                         self.warped_input_dataset)

        # Get alpha band (either directly or from NODATA value)
        self.alphaband = self.warped_input_dataset.GetRasterBand(1).GetMaskBand()
        self.dataBandsCount = nb_data_bands(self.warped_input_dataset)

        # KML test
        self.isepsg4326 = False
        srs4326 = osr.SpatialReference()
        srs4326.ImportFromEPSG(4326)
        if self.out_srs and srs4326.ExportToProj4() == self.out_srs.ExportToProj4():
            self.kml = True
            self.isepsg4326 = True
            if self.options.verbose:
                print("KML autotest OK!")

        # Read the georeference
        self.out_gt = self.warped_input_dataset.GetGeoTransform()

        # Test the size of the pixel

        # Report error in case rotation/skew is in geotransform (possible only in 'raster' profile)
        if (self.out_gt[2], self.out_gt[4]) != (0, 0):
            exit_with_error("Georeference of the raster contains rotation or skew. "
                            "Such raster is not supported. Please use gdalwarp first.")

        # Here we expect: pixel is square, no rotation on the raster

        # Output Bounds - coordinates in the output SRS
        self.ominx = self.out_gt[0]
        self.omaxx = self.out_gt[0] + self.warped_input_dataset.RasterXSize * self.out_gt[1]
        self.omaxy = self.out_gt[3]
        # ominy uses out_gt[1] too, relying on the square-pixel expectation above.
        self.ominy = self.out_gt[3] - self.warped_input_dataset.RasterYSize * self.out_gt[1]
        # Note: maybe round(x, 14) to avoid the gdal_translate behaviour, when 0 becomes -1e-15

        if self.options.verbose:
            print("Bounds (output srs):", round(self.ominx, 13), self.ominy, self.omaxx, self.omaxy)

        # Calculating ranges for tiles in different zoom levels
        if self.options.profile == 'mercator':

            self.mercator = GlobalMercator()

            # Function which generates SWNE in LatLong for given tile
            self.tileswne = self.mercator.TileLatLonBounds

            # Generate table with min max tile coordinates for all zoomlevels
            self.tminmax = list(range(0, 32))
            for tz in range(0, 32):
                tminx, tminy = self.mercator.MetersToTile(self.ominx, self.ominy, tz)
                tmaxx, tmaxy = self.mercator.MetersToTile(self.omaxx, self.omaxy, tz)
                # crop tiles extending world limits (+-180,+-90)
                tminx, tminy = max(0, tminx), max(0, tminy)
                tmaxx, tmaxy = min(2**tz-1, tmaxx), min(2**tz-1, tmaxy)
                self.tminmax[tz] = (tminx, tminy, tmaxx, tmaxy)

            # TODO: Maps crossing 180E (Alaska?)

            # Get the minimal zoom level (map covers area equivalent to one tile)
            if self.tminz is None:
                self.tminz = self.mercator.ZoomForPixelSize(
                    self.out_gt[1] *
                    max(self.warped_input_dataset.RasterXSize,
                        self.warped_input_dataset.RasterYSize) /
                    float(self.tilesize))

            # Get the maximal zoom level
            # (closest possible zoom level up on the resolution of raster)
            if self.tmaxz is None:
                self.tmaxz = self.mercator.ZoomForPixelSize(self.out_gt[1])

            if self.options.verbose:
                print("Bounds (latlong):",
                      self.mercator.MetersToLatLon(self.ominx, self.ominy),
                      self.mercator.MetersToLatLon(self.omaxx, self.omaxy))
                print('MinZoomLevel:', self.tminz)
                print("MaxZoomLevel:",
                      self.tmaxz,
                      "(",
                      self.mercator.Resolution(self.tmaxz),
                      ")")

        if self.options.profile == 'geodetic':

            self.geodetic = GlobalGeodetic(self.options.tmscompatible)

            # Function which generates SWNE in LatLong for given tile
            self.tileswne = self.geodetic.TileLatLonBounds

            # Generate table with min max tile coordinates for all zoomlevels
            self.tminmax = list(range(0, 32))
            for tz in range(0, 32):
                tminx, tminy = self.geodetic.LonLatToTile(self.ominx, self.ominy, tz)
                tmaxx, tmaxy = self.geodetic.LonLatToTile(self.omaxx, self.omaxy, tz)
                # crop tiles extending world limits (+-180,+-90)
                tminx, tminy = max(0, tminx), max(0, tminy)
                # geodetic has twice as many columns as rows at each zoom.
                tmaxx, tmaxy = min(2**(tz+1)-1, tmaxx), min(2**tz-1, tmaxy)
                self.tminmax[tz] = (tminx, tminy, tmaxx, tmaxy)

            # TODO: Maps crossing 180E (Alaska?)

            # Get the maximal zoom level
            # (closest possible zoom level up on the resolution of raster)
            if self.tminz is None:
                self.tminz = self.geodetic.ZoomForPixelSize(
                    self.out_gt[1] *
                    max(self.warped_input_dataset.RasterXSize,
                        self.warped_input_dataset.RasterYSize) /
                    float(self.tilesize))

            # Get the maximal zoom level
            # (closest possible zoom level up on the resolution of raster)
            if self.tmaxz is None:
                self.tmaxz = self.geodetic.ZoomForPixelSize(self.out_gt[1])

            if self.options.verbose:
                print("Bounds (latlong):", self.ominx, self.ominy, self.omaxx, self.omaxy)

        if self.options.profile == 'raster':

            def log2(x):
                return math.log10(x) / math.log10(2)

            # Smallest zoom at which the whole raster fits the tile grid.
            self.nativezoom = int(
                max(math.ceil(log2(self.warped_input_dataset.RasterXSize/float(self.tilesize))),
                    math.ceil(log2(self.warped_input_dataset.RasterYSize/float(self.tilesize)))))

            if self.options.verbose:
                print("Native zoom of the raster:", self.nativezoom)

            # Get the minimal zoom level (whole raster in one tile)
            if self.tminz is None:
                self.tminz = 0

            # Get the maximal zoom level (native resolution of the raster)
            if self.tmaxz is None:
                self.tmaxz = self.nativezoom

            # Generate table with min max tile coordinates for all zoomlevels
            self.tminmax = list(range(0, self.tmaxz+1))
            self.tsize = list(range(0, self.tmaxz+1))
            for tz in range(0, self.tmaxz+1):
                tsize = 2.0**(self.nativezoom-tz)*self.tilesize
                tminx, tminy = 0, 0
                tmaxx = int(math.ceil(self.warped_input_dataset.RasterXSize / tsize)) - 1
                tmaxy = int(math.ceil(self.warped_input_dataset.RasterYSize / tsize)) - 1
                self.tsize[tz] = math.ceil(tsize)
                self.tminmax[tz] = (tminx, tminy, tmaxx, tmaxy)

            # Function which generates SWNE in LatLong for given tile
            if self.kml and self.in_srs_wkt:
                ct = osr.CoordinateTransformation(in_srs, srs4326)

                def rastertileswne(x, y, z):
                    pixelsizex = (2**(self.tmaxz-z) * self.out_gt[1])  # X-pixel size in level
                    west = self.out_gt[0] + x*self.tilesize*pixelsizex
                    east = west + self.tilesize*pixelsizex
                    south = self.ominy + y*self.tilesize*pixelsizex
                    north = south + self.tilesize*pixelsizex
                    if not self.isepsg4326:
                        # Transformation to EPSG:4326 (WGS84 datum)
                        west, south = ct.TransformPoint(west, south)[:2]
                        east, north = ct.TransformPoint(east, north)[:2]
                    return south, west, north, east

                self.tileswne = rastertileswne
            else:
                self.tileswne = lambda x, y, z: (0, 0, 0, 0)  # noqa
    def generate_metadata(self):
        """
        Generation of main metadata files and HTML viewers (metadata related to particular
        tiles are generated during the tile processing).

        Writes the requested viewer HTML files, tilemapresource.xml and
        (when KML is enabled) the root doc.kml into the output folder.
        Existing files are kept when --resume is active.  Also sets
        self.swne to the clamped lat/long bounds used by the templates.
        """

        if not os.path.exists(self.output_folder):
            os.makedirs(self.output_folder)

        if self.options.profile == 'mercator':

            # Clamp bounds to the Web Mercator valid latitude range.
            south, west = self.mercator.MetersToLatLon(self.ominx, self.ominy)
            north, east = self.mercator.MetersToLatLon(self.omaxx, self.omaxy)
            south, west = max(-85.05112878, south), max(-180.0, west)
            north, east = min(85.05112878, north), min(180.0, east)
            self.swne = (south, west, north, east)

            # Generate googlemaps.html
            # (the profile re-check is redundant inside this branch but harmless)
            if self.options.webviewer in ('all', 'google') and self.options.profile == 'mercator':
                if (not self.options.resume or not
                        os.path.exists(os.path.join(self.output_folder, 'googlemaps.html'))):
                    with open(os.path.join(self.output_folder, 'googlemaps.html'), 'wb') as f:
                        f.write(self.generate_googlemaps().encode('utf-8'))

            # Generate openlayers.html
            if self.options.webviewer in ('all', 'openlayers'):
                if (not self.options.resume or not
                        os.path.exists(os.path.join(self.output_folder, 'openlayers.html'))):
                    with open(os.path.join(self.output_folder, 'openlayers.html'), 'wb') as f:
                        f.write(self.generate_openlayers().encode('utf-8'))

            # Generate leaflet.html
            if self.options.webviewer in ('all', 'leaflet'):
                if (not self.options.resume or not
                        os.path.exists(os.path.join(self.output_folder, 'leaflet.html'))):
                    with open(os.path.join(self.output_folder, 'leaflet.html'), 'wb') as f:
                        f.write(self.generate_leaflet().encode('utf-8'))

        elif self.options.profile == 'geodetic':

            west, south = self.ominx, self.ominy
            east, north = self.omaxx, self.omaxy
            south, west = max(-90.0, south), max(-180.0, west)
            north, east = min(90.0, north), min(180.0, east)
            self.swne = (south, west, north, east)

            # Generate openlayers.html
            if self.options.webviewer in ('all', 'openlayers'):
                if (not self.options.resume or not
                        os.path.exists(os.path.join(self.output_folder, 'openlayers.html'))):
                    with open(os.path.join(self.output_folder, 'openlayers.html'), 'wb') as f:
                        f.write(self.generate_openlayers().encode('utf-8'))

        elif self.options.profile == 'raster':

            west, south = self.ominx, self.ominy
            east, north = self.omaxx, self.omaxy

            self.swne = (south, west, north, east)

            # Generate openlayers.html
            if self.options.webviewer in ('all', 'openlayers'):
                if (not self.options.resume or not
                        os.path.exists(os.path.join(self.output_folder, 'openlayers.html'))):
                    with open(os.path.join(self.output_folder, 'openlayers.html'), 'wb') as f:
                        f.write(self.generate_openlayers().encode('utf-8'))

        # Generate tilemapresource.xml.
        if not self.options.resume or not os.path.exists(os.path.join(self.output_folder, 'tilemapresource.xml')):
            with open(os.path.join(self.output_folder, 'tilemapresource.xml'), 'wb') as f:
                f.write(self.generate_tilemapresource().encode('utf-8'))

        if self.kml:
            # TODO: Maybe problem for not automatically generated tminz
            # The root KML should contain links to all tiles in the tminz level
            children = []
            xmin, ymin, xmax, ymax = self.tminmax[self.tminz]
            for x in range(xmin, xmax+1):
                for y in range(ymin, ymax+1):
                    children.append([x, y, self.tminz])
            # Generate Root KML
            # (nested self.kml re-check is redundant but kept as-is)
            if self.kml:
                if (not self.options.resume or not
                        os.path.exists(os.path.join(self.output_folder, 'doc.kml'))):
                    with open(os.path.join(self.output_folder, 'doc.kml'), 'wb') as f:
                        f.write(generate_kml(
                            None, None, None, self.tileext, self.tilesize, self.tileswne,
                            self.options, children
                        ).encode('utf-8'))
    def generate_base_tiles(self):
        """
        Generation of the base tiles (the lowest in the pyramid) directly from the input raster

        Does not render anything itself: it computes the read/write window
        for every max-zoom tile and returns (TileJobInfo, [TileDetail])
        for create_base_tile() workers to consume.
        """

        if not self.options.quiet:
            print("Generating Base Tiles:")

        if self.options.verbose:
            print('')
            print("Tiles generated from the max zoom level:")
            print("----------------------------------------")
            print('')

        # Set the bounds
        tminx, tminy, tmaxx, tmaxy = self.tminmax[self.tmaxz]

        ds = self.warped_input_dataset
        # Data bands plus one alpha band.
        tilebands = self.dataBandsCount + 1
        querysize = self.querysize

        if self.options.verbose:
            print("dataBandsCount: ", self.dataBandsCount)
            print("tilebands: ", tilebands)

        tcount = (1+abs(tmaxx-tminx)) * (1+abs(tmaxy-tminy))
        ti = 0

        tile_details = []

        tz = self.tmaxz
        for ty in range(tmaxy, tminy-1, -1):
            for tx in range(tminx, tmaxx+1):

                ti += 1
                # ytile accounts for XYZ vs TMS row numbering.
                ytile = GDAL2Tiles.getYtile(ty, tz, self.options)
                tilefilename = os.path.join(
                    self.output_folder, str(tz), str(tx), "%s.%s" % (ytile, self.tileext))
                if self.options.verbose:
                    print(ti, '/', tcount, tilefilename)

                if self.options.resume and os.path.exists(tilefilename):
                    if self.options.verbose:
                        print("Tile generation skipped because of --resume")
                    continue

                # Create directories for the tile
                if not os.path.exists(os.path.dirname(tilefilename)):
                    os.makedirs(os.path.dirname(tilefilename))

                if self.options.profile == 'mercator':
                    # Tile bounds in EPSG:3857
                    b = self.mercator.TileBounds(tx, ty, tz)
                elif self.options.profile == 'geodetic':
                    b = self.geodetic.TileBounds(tx, ty, tz)

                # Don't scale up by nearest neighbour, better change the querysize
                # to the native resolution (and return smaller query tile) for scaling

                if self.options.profile in ('mercator', 'geodetic'):
                    rb, wb = self.geo_query(ds, b[0], b[3], b[2], b[1])

                    # Pixel size in the raster covering query geo extent
                    nativesize = wb[0] + wb[2]
                    if self.options.verbose:
                        print("\tNative Extent (querysize", nativesize, "): ", rb, wb)

                    # Tile bounds in raster coordinates for ReadRaster query
                    rb, wb = self.geo_query(ds, b[0], b[3], b[2], b[1], querysize=querysize)

                    rx, ry, rxsize, rysize = rb
                    wx, wy, wxsize, wysize = wb

                else:  # 'raster' profile:

                    tsize = int(self.tsize[tz])  # tilesize in raster coordinates for actual zoom
                    xsize = self.warped_input_dataset.RasterXSize  # size of the raster in pixels
                    ysize = self.warped_input_dataset.RasterYSize
                    # No oversampling above the native resolution.
                    if tz >= self.nativezoom:
                        querysize = self.tilesize

                    rx = (tx) * tsize
                    rxsize = 0
                    # Edge tiles may cover only a partial raster width/height.
                    if tx == tmaxx:
                        rxsize = xsize % tsize
                    if rxsize == 0:
                        rxsize = tsize

                    rysize = 0
                    if ty == tmaxy:
                        rysize = ysize % tsize
                    if rysize == 0:
                        rysize = tsize
                    # TMS rows count from the bottom, raster rows from the top.
                    ry = ysize - (ty * tsize) - rysize

                    wx, wy = 0, 0
                    wxsize = int(rxsize/float(tsize) * self.tilesize)
                    wysize = int(rysize/float(tsize) * self.tilesize)
                    if wysize != self.tilesize:
                        wy = self.tilesize - wysize

                # Read the source raster if anything is going inside the tile as per the computed
                # geo_query
                tile_details.append(
                    TileDetail(
                        tx=tx, ty=ytile, tz=tz, rx=rx, ry=ry, rxsize=rxsize, rysize=rysize, wx=wx,
                        wy=wy, wxsize=wxsize, wysize=wysize, querysize=querysize,
                    )
                )

        conf = TileJobInfo(
            src_file=self.tmp_vrt_filename,
            nb_data_bands=self.dataBandsCount,
            output_file_path=self.output_folder,
            tile_extension=self.tileext,
            tile_driver=self.tiledriver,
            tile_size=self.tilesize,
            kml=self.kml,
            tminmax=self.tminmax,
            tminz=self.tminz,
            tmaxz=self.tmaxz,
            in_srs_wkt=self.in_srs_wkt,
            out_geo_trans=self.out_gt,
            ominy=self.ominy,
            is_epsg_4326=self.isepsg4326,
            options=self.options,
        )

        return conf, tile_details
def geo_query(self, ds, ulx, uly, lrx, lry, querysize=0):
    """
    Convert a cartographic query window into ReadRaster() parameters.

    Returns two tuples: (rx, ry, rxsize, rysize) -- the query window in
    raster pixel coordinates -- and (wx, wy, wxsize, wysize) -- where the
    data should be placed inside the target buffer (non-zero wx/wy occur
    for border tiles that only partially overlap the raster).  When
    querysize is 0 the extent is returned in the native resolution of
    dataset ds.
    """
    gt = ds.GetGeoTransform()
    # Geo -> pixel; the small epsilon guards against floating point jitter
    # right on a pixel boundary.
    rx = int((ulx - gt[0]) / gt[1] + 0.001)
    ry = int((uly - gt[3]) / gt[5] + 0.001)
    rxsize = int((lrx - ulx) / gt[1] + 0.5)
    rysize = int((lry - uly) / gt[5] + 0.5)

    # Target buffer size: native resolution unless a querysize is requested.
    wxsize, wysize = (querysize, querysize) if querysize else (rxsize, rysize)

    # Clamp the window to the raster extent, shifting the write offset by
    # the proportion of the window that falls outside (border tiles).
    wx = 0
    if rx < 0:
        shift = -rx
        wx = int(wxsize * (float(shift) / rxsize))
        wxsize = wxsize - wx
        rxsize = rxsize - int(rxsize * (float(shift) / rxsize))
        rx = 0
    if rx + rxsize > ds.RasterXSize:
        wxsize = int(wxsize * (float(ds.RasterXSize - rx) / rxsize))
        rxsize = ds.RasterXSize - rx

    wy = 0
    if ry < 0:
        shift = -ry
        wy = int(wysize * (float(shift) / rysize))
        wysize = wysize - wy
        rysize = rysize - int(rysize * (float(shift) / rysize))
        ry = 0
    if ry + rysize > ds.RasterYSize:
        wysize = int(wysize * (float(ds.RasterYSize - ry) / rysize))
        rysize = ds.RasterYSize - ry

    return (rx, ry, rxsize, rysize), (wx, wy, wxsize, wysize)
def generate_tilemapresource(self):
    """
    Build the contents of tilemapresource.xml (TMS resource description).

    Returns the filled template string.  Substituted values: title, north,
    south, east, west, srs, publishurl, tilesize, tileformat, profile.
    """
    # Collect the %-template substitution values from options / computed state.
    args = {}
    args['title'] = self.options.title
    # self.swne is ordered (south, west, north, east).
    args['south'], args['west'], args['north'], args['east'] = self.swne
    args['tilesize'] = self.tilesize
    args['tileformat'] = self.tileext
    args['publishurl'] = self.options.url
    args['profile'] = self.options.profile
    # SRS advertised in the resource: fixed EPSG codes for the web profiles,
    # otherwise fall back to the user-supplied / computed SRS (or empty).
    if self.options.profile == 'mercator':
        args['srs'] = "EPSG:3857"
    elif self.options.profile == 'geodetic':
        args['srs'] = "EPSG:4326"
    elif self.options.s_srs:
        args['srs'] = self.options.s_srs
    elif self.out_srs:
        args['srs'] = self.out_srs.ExportToWkt()
    else:
        args['srs'] = ""
    s = """<?xml version="1.0" encoding="utf-8"?>
<TileMap version="1.0.0" tilemapservice="http://tms.osgeo.org/1.0.0">
<Title>%(title)s</Title>
<Abstract></Abstract>
<SRS>%(srs)s</SRS>
<BoundingBox minx="%(west).14f" miny="%(south).14f" maxx="%(east).14f" maxy="%(north).14f"/>
<Origin x="%(west).14f" y="%(south).14f"/>
<TileFormat width="%(tilesize)d" height="%(tilesize)d" mime-type="image/%(tileformat)s" extension="%(tileformat)s"/>
<TileSets profile="%(profile)s">
""" % args # noqa
    # One <TileSet> element per generated zoom level; the units-per-pixel
    # value depends on the profile (raster uses the native geo-transform).
    for z in range(self.tminz, self.tmaxz+1):
        if self.options.profile == 'raster':
            s += """ <TileSet href="%s%d" units-per-pixel="%.14f" order="%d"/>\n""" % (
                args['publishurl'], z, (2**(self.nativezoom-z) * self.out_gt[1]), z)
        elif self.options.profile == 'mercator':
            s += """ <TileSet href="%s%d" units-per-pixel="%.14f" order="%d"/>\n""" % (
                args['publishurl'], z, 156543.0339/2**z, z)
        elif self.options.profile == 'geodetic':
            s += """ <TileSet href="%s%d" units-per-pixel="%.14f" order="%d"/>\n""" % (
                args['publishurl'], z, 0.703125/2**z, z)
    s += """ </TileSets>
</TileMap>
"""
    return s
def generate_googlemaps(self):
    """
    Build the contents of googlemaps.html: a Google Maps v2 page overlaying
    the generated tiles (used with the 'mercator' profile).

    Returns the filled template string.  Substituted values: title,
    googlemapskey, north, south, east, west, minzoom, maxzoom, tilesize,
    tileformat, publishurl, copyright.
    """
    # Collect the %-template substitution values.
    args = {}
    args['title'] = self.options.title
    args['googlemapskey'] = self.options.googlekey
    # self.swne is ordered (south, west, north, east).
    args['south'], args['west'], args['north'], args['east'] = self.swne
    args['minzoom'] = self.tminz
    args['maxzoom'] = self.tmaxz
    args['tilesize'] = self.tilesize
    args['tileformat'] = self.tileext
    args['publishurl'] = self.options.url
    args['copyright'] = self.options.copyright
    # Main page template.  Literal '%' characters in the CSS are escaped as
    # '%%' for the %-formatting; the raw string preserves the 'v\:*' CSS hack.
    s = r"""<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
<html xmlns="http://www.w3.org/1999/xhtml" xmlns:v="urn:schemas-microsoft-com:vml">
<head>
<title>%(title)s</title>
<meta http-equiv="content-type" content="text/html; charset=utf-8"/>
<meta http-equiv='imagetoolbar' content='no'/>
<style type="text/css"> v\:* {behavior:url(#default#VML);}
html, body { overflow: hidden; padding: 0; height: 100%%; width: 100%%; font-family: 'Lucida Grande',Geneva,Arial,Verdana,sans-serif; }
body { margin: 10px; background: #fff; }
h1 { margin: 0; padding: 6px; border:0; font-size: 20pt; }
#header { height: 43px; padding: 0; background-color: #eee; border: 1px solid #888; }
#subheader { height: 12px; text-align: right; font-size: 10px; color: #555;}
#map { height: 95%%; border: 1px solid #888; }
</style>
<script src='http://maps.google.com/maps?file=api&v=2&key=%(googlemapskey)s'></script>
<script>
//<![CDATA[
/*
* Constants for given map
* TODO: read it from tilemapresource.xml
*/
var mapBounds = new GLatLngBounds(new GLatLng(%(south)s, %(west)s), new GLatLng(%(north)s, %(east)s));
var mapMinZoom = %(minzoom)s;
var mapMaxZoom = %(maxzoom)s;
var opacity = 0.75;
var map;
var hybridOverlay;
/*
* Create a Custom Opacity GControl
* http://www.maptiler.org/google-maps-overlay-opacity-control/
*/
var CTransparencyLENGTH = 58;
// maximum width that the knob can move (slide width minus knob width)
function CTransparencyControl( overlay ) {
this.overlay = overlay;
this.opacity = overlay.getTileLayer().getOpacity();
}
CTransparencyControl.prototype = new GControl();
// This function positions the slider to match the specified opacity
CTransparencyControl.prototype.setSlider = function(pos) {
var left = Math.round((CTransparencyLENGTH*pos));
this.slide.left = left;
this.knob.style.left = left+"px";
this.knob.style.top = "0px";
}
// This function reads the slider and sets the overlay opacity level
CTransparencyControl.prototype.setOpacity = function() {
// set the global variable
opacity = this.slide.left/CTransparencyLENGTH;
this.map.clearOverlays();
this.map.addOverlay(this.overlay, { zPriority: 0 });
if (this.map.getCurrentMapType() == G_HYBRID_MAP) {
this.map.addOverlay(hybridOverlay);
}
}
// This gets called by the API when addControl(new CTransparencyControl())
CTransparencyControl.prototype.initialize = function(map) {
var that=this;
this.map = map;
// Is this MSIE, if so we need to use AlphaImageLoader
var agent = navigator.userAgent.toLowerCase();
if ((agent.indexOf("msie") > -1) && (agent.indexOf("opera") < 1)){this.ie = true} else {this.ie = false}
// create the background graphic as a <div> containing an image
var container = document.createElement("div");
container.style.width="70px";
container.style.height="21px";
// Handle transparent PNG files in MSIE
if (this.ie) {
var loader = "filter:progid:DXImageTransform.Microsoft.AlphaImageLoader(src='http://www.maptiler.org/img/opacity-slider.png', sizingMethod='crop');";
container.innerHTML = '<div style="height:21px; width:70px; ' +loader+ '" ></div>';
} else {
container.innerHTML = '<div style="height:21px; width:70px; background-image: url(http://www.maptiler.org/img/opacity-slider.png)" ></div>';
}
// create the knob as a GDraggableObject
// Handle transparent PNG files in MSIE
if (this.ie) {
var loader = "progid:DXImageTransform.Microsoft.AlphaImageLoader(src='http://www.maptiler.org/img/opacity-slider.png', sizingMethod='crop');";
this.knob = document.createElement("div");
this.knob.style.height="21px";
this.knob.style.width="13px";
this.knob.style.overflow="hidden";
this.knob_img = document.createElement("div");
this.knob_img.style.height="21px";
this.knob_img.style.width="83px";
this.knob_img.style.filter=loader;
this.knob_img.style.position="relative";
this.knob_img.style.left="-70px";
this.knob.appendChild(this.knob_img);
} else {
this.knob = document.createElement("div");
this.knob.style.height="21px";
this.knob.style.width="13px";
this.knob.style.backgroundImage="url(http://www.maptiler.org/img/opacity-slider.png)";
this.knob.style.backgroundPosition="-70px 0px";
}
container.appendChild(this.knob);
this.slide=new GDraggableObject(this.knob, {container:container});
this.slide.setDraggableCursor('pointer');
this.slide.setDraggingCursor('pointer');
this.container = container;
// attach the control to the map
map.getContainer().appendChild(container);
// init slider
this.setSlider(this.opacity);
// Listen for the slider being moved and set the opacity
GEvent.addListener(this.slide, "dragend", function() {that.setOpacity()});
//GEvent.addListener(this.container, "click", function( x, y ) { alert(x, y) });
return container;
}
// Set the default position for the control
CTransparencyControl.prototype.getDefaultPosition = function() {
return new GControlPosition(G_ANCHOR_TOP_RIGHT, new GSize(7, 47));
}
/*
* Full-screen Window Resize
*/
function getWindowHeight() {
if (self.innerHeight) return self.innerHeight;
if (document.documentElement && document.documentElement.clientHeight)
return document.documentElement.clientHeight;
if (document.body) return document.body.clientHeight;
return 0;
}
function getWindowWidth() {
if (self.innerWidth) return self.innerWidth;
if (document.documentElement && document.documentElement.clientWidth)
return document.documentElement.clientWidth;
if (document.body) return document.body.clientWidth;
return 0;
}
function resize() {
var map = document.getElementById("map");
var header = document.getElementById("header");
var subheader = document.getElementById("subheader");
map.style.height = (getWindowHeight()-80) + "px";
map.style.width = (getWindowWidth()-20) + "px";
header.style.width = (getWindowWidth()-20) + "px";
subheader.style.width = (getWindowWidth()-20) + "px";
// map.checkResize();
}
/*
* Main load function:
*/
function load() {
if (GBrowserIsCompatible()) {
// Bug in the Google Maps: Copyright for Overlay is not correctly displayed
var gcr = GMapType.prototype.getCopyrights;
GMapType.prototype.getCopyrights = function(bounds,zoom) {
return ["%(copyright)s"].concat(gcr.call(this,bounds,zoom));
}
map = new GMap2( document.getElementById("map"), { backgroundColor: '#fff' } );
map.addMapType(G_PHYSICAL_MAP);
map.setMapType(G_PHYSICAL_MAP);
map.setCenter( mapBounds.getCenter(), map.getBoundsZoomLevel( mapBounds ));
hybridOverlay = new GTileLayerOverlay( G_HYBRID_MAP.getTileLayers()[1] );
GEvent.addListener(map, "maptypechanged", function() {
if (map.getCurrentMapType() == G_HYBRID_MAP) {
map.addOverlay(hybridOverlay);
} else {
map.removeOverlay(hybridOverlay);
}
} );
var tilelayer = new GTileLayer(GCopyrightCollection(''), mapMinZoom, mapMaxZoom);
var mercator = new GMercatorProjection(mapMaxZoom+1);
tilelayer.getTileUrl = function(tile,zoom) {
if ((zoom < mapMinZoom) || (zoom > mapMaxZoom)) {
return "http://www.maptiler.org/img/none.png";
}
var ymax = 1 << zoom;
var y = ymax - tile.y -1;
var tileBounds = new GLatLngBounds(
mercator.fromPixelToLatLng( new GPoint( (tile.x)*256, (tile.y+1)*256 ) , zoom ),
mercator.fromPixelToLatLng( new GPoint( (tile.x+1)*256, (tile.y)*256 ) , zoom )
);
if (mapBounds.intersects(tileBounds)) {
return zoom+"/"+tile.x+"/"+y+".png";
} else {
return "http://www.maptiler.org/img/none.png";
}
}
// IE 7-: support for PNG alpha channel
// Unfortunately, the opacity for whole overlay is then not changeable, either or...
tilelayer.isPng = function() { return true;};
tilelayer.getOpacity = function() { return opacity; }
overlay = new GTileLayerOverlay( tilelayer );
map.addOverlay(overlay);
map.addControl(new GLargeMapControl());
map.addControl(new GHierarchicalMapTypeControl());
map.addControl(new CTransparencyControl( overlay ));
""" % args # noqa
    # The Google Earth plugin wiring is only emitted when KML output is enabled.
    if self.kml:
        s += """
map.addMapType(G_SATELLITE_3D_MAP);
map.getEarthInstance(getEarthInstanceCB);
"""
    s += """
map.enableContinuousZoom();
map.enableScrollWheelZoom();
map.setMapType(G_HYBRID_MAP);
}
resize();
}
"""
    if self.kml:
        s += """
function getEarthInstanceCB(object) {
var ge = object;
if (ge) {
var url = document.location.toString();
url = url.substr(0,url.lastIndexOf('/'))+'/doc.kml';
var link = ge.createLink("");
if ("%(publishurl)s") { link.setHref("%(publishurl)s/doc.kml") }
else { link.setHref(url) };
var networkLink = ge.createNetworkLink("");
networkLink.setName("TMS Map Overlay");
networkLink.setFlyToView(true);
networkLink.setLink(link);
ge.getFeatures().appendChild(networkLink);
} else {
// alert("You should open a KML in Google Earth");
// add div with the link to generated KML... - maybe JavaScript redirect to the URL of KML?
}
}
""" % args # noqa
    # Page footer: title header, attribution note and the map container.
    s += """
onresize=function(){ resize(); };
//]]>
</script>
</head>
<body onload="load()">
<div id="header"><h1>%(title)s</h1></div>
<div id="subheader">Generated by <a href="http://www.klokan.cz/projects/gdal2tiles/">GDAL2Tiles</a>, Copyright © 2008 <a href="http://www.klokan.cz/">Klokan Petr Pridal</a>, <a href="http://www.gdal.org/">GDAL</a> & <a href="http://www.osgeo.org/">OSGeo</a> <a href="http://code.google.com/soc/">GSoC</a>
<!-- PLEASE, LET THIS NOTE ABOUT AUTHOR AND PROJECT SOMEWHERE ON YOUR WEBSITE, OR AT LEAST IN THE COMMENT IN HTML. THANK YOU -->
</div>
<div id="map"></div>
</body>
</html>
""" % args # noqa
    return s
def generate_leaflet(self):
    """
    Build the contents of leaflet.html: a Leaflet page overlaying the
    generated tiles (used with the 'mercator' profile).

    Returns the filled template string.  Substituted values: title,
    htmltitle, north, south, east, west, minzoom, maxzoom, beginzoom,
    tilesize, tileformat, publishurl, copyright.
    """
    args = {}
    # Double quotes are escaped because the value is embedded inside a
    # JavaScript string literal below.
    args['title'] = self.options.title.replace('"', '\\"')
    args['htmltitle'] = self.options.title
    # self.swne is ordered (south, west, north, east).
    args['south'], args['west'], args['north'], args['east'] = self.swne
    # NOTE(review): the names are swapped -- 'centerlon' holds the mean
    # latitude and 'centerlat' the mean longitude.  Leaflet's setView()
    # expects [lat, lng], so the substitution order in the template below
    # is still correct despite the misleading names.
    args['centerlon'] = (args['north'] + args['south']) / 2.
    args['centerlat'] = (args['west'] + args['east']) / 2.
    args['minzoom'] = self.tminz
    args['maxzoom'] = self.tmaxz
    # The page initially opens at the deepest generated zoom level.
    args['beginzoom'] = self.tmaxz
    args['tilesize'] = self.tilesize  # not used
    args['tileformat'] = self.tileext
    args['publishurl'] = self.options.url  # not used
    args['copyright'] = self.options.copyright.replace('"', '\\"')
    s = """<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<meta name='viewport' content='width=device-width, initial-scale=1.0, maximum-scale=1.0, user-scalable=no' />
<title>%(htmltitle)s</title>
<!-- Leaflet -->
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/leaflet/1.3.4/leaflet.css" />
<script src="https://cdnjs.cloudflare.com/ajax/libs/leaflet/1.3.4/leaflet.js"></script>
<!-- Ajax -->
<script src="http://ajax.googleapis.com/ajax/libs/jquery/1.11.0/jquery.min.js"></script>
<script src="http://ajax.googleapis.com/ajax/libs/jqueryui/1.10.4/jquery-ui.min.js"></script>
<style>
body { margin:0; padding:0; }
body, table, tr, td, th, div, h1, h2, input { font-family: "Calibri", "Trebuchet MS", "Ubuntu", Serif; font-size: 11pt; }
#map { position:absolute; top:0; bottom:0; width:100%%; } /* full size */
.ctl {
padding: 2px 10px 2px 10px;
background: white;
background: rgba(255,255,255,0.9);
box-shadow: 0 0 15px rgba(0,0,0,0.2);
border-radius: 5px;
text-align: right;
}
.title {
font-size: 18pt;
font-weight: bold;
}
.src {
font-size: 10pt;
}
</style>
</head>
<body>
<div id="map"></div>
<script>
/* **** Leaflet **** */
var map = L.map('map').setView([%(centerlon)s, %(centerlat)s], %(beginzoom)s);
// Base layers
// .. OpenStreetMap
var osm = L.tileLayer('https://{s}.tile.openstreetmap.org/{z}/{x}/{y}.png', {maxZoom: 20, attribution: '© <a href="http://osm.org/copyright">OpenStreetMap</a> contributors'});
// .. Thaichote by GISTDA
var theos = L.tileLayer('http://go-tiles1.gistda.or.th/mapproxy/wmts/thaichote/GLOBAL_WEBMERCATOR/{z}/{x}/{y}.png', {maxZoom: 20, attribution: '© <a href = "http://www.gistda.or.th">GISTDA</a>'});
// .. Google Hybrid
var ghyb = L.tileLayer('https://mt1.google.com/vt/lyrs=y&x={x}&y={y}&z={z}', {maxZoom: 20, attribution: '© <a href = "#">Google</a>'});
// .. OSM Toner
//var toner = L.tileLayer('http://{s}.tile.stamen.com/toner/{z}/{x}/{y}.png', {maxZoom: 20, attribution: 'Map tiles by <a href="http://stamen.com">Stamen Design</a>, under <a href="http://creativecommons.org/licenses/by/3.0">CC BY 3.0</a>. Data by <a href="http://openstreetmap.org">OpenStreetMap</a>, under <a href="http://www.openstreetmap.org/copyright">ODbL</a>.'});
// .. White background
var white = L.tileLayer("data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAQAAAAEAAQMAAABmvDolAAAAA1BMVEX///+nxBvIAAAAH0lEQVQYGe3BAQ0AAADCIPunfg43YAAAAAAAAAAA5wIhAAAB9aK9BAAAAABJRU5ErkJggg==");
// Overlay layers (TMS) and XYZ
// Use TMS = tms: ture OR Use XYZ = tms: false
var lyr = L.tileLayer('./{z}/{x}/{y}.%(tileformat)s', {maxZoom: 22, tms: false, opacity: 0.9, attribution: "%(copyright)s"}).addTo(map);
// Map
/*
var map = L.map('map', {
center: [%(centerlon)s, %(centerlat)s],
zoom: %(beginzoom)s,
minZoom: %(minzoom)s,
maxZoom: %(maxzoom)s,
layers: [osm]
});
*/
var basemaps = {"OpenStreetMap": osm, "Thaichote by GISTDA": theos, "Google Hybrid": ghyb, "Without background": white}
var overlaymaps = {"Layer": lyr}
// Title
var title = L.control();
title.onAdd = function(map) {
this._div = L.DomUtil.create('div', 'ctl title');
this.update();
return this._div;
};
title.update = function(props) {
this._div.innerHTML = "%(title)s";
};
title.addTo(map);
// Note
var src = 'Generated by <a href="http://www.klokan.cz/projects/gdal2tiles/">GDAL2Tiles</a>, Copyright © 2018 <a href="http://www.klokan.cz/">Klokan Petr Pridal</a>, <a href="http://www.gdal.org/">GDAL</a> & <a href="http://www.osgeo.org/">OSGeo</a> <a href="http://code.google.com/soc/">GSoC</a> & <a href="http://www.cgistln.nu.ac.th/">GISTNU</a>';
var title = L.control({position: 'bottomleft'});
title.onAdd = function(map) {
this._div = L.DomUtil.create('div', 'ctl src');
this.update();
return this._div;
};
title.update = function(props) {
this._div.innerHTML = src;
};
title.addTo(map);
// Add base layers
L.control.layers(basemaps, overlaymaps, {collapsed: false}).addTo(map);
// Fit to overlay bounds (SW and NE points with (lat, lon))
//map.fitBounds([[%(south)s, %(east)s], [%(north)s, %(west)s]]);
</script>
</body>
</html>
""" % args # noqa
    return s
def generate_openlayers(self):
    """
    Build the contents of openlayers.html: an OpenLayers 2 page overlaying
    the generated tiles on the available Spherical Mercator base layers
    (or a WMS / plain-raster base, depending on the profile).

    Returns the filled template string.  Substituted values: title, bingkey,
    north, south, east, west, minzoom, maxzoom, tilesize, tileformat,
    publishurl, copyright, tmsoffset (and for 'raster': rasterzoomlevels,
    rastermaxresolution).
    """
    # Collect the %-template substitution values.
    args = {}
    args['title'] = self.options.title
    args['bingkey'] = self.options.bingkey
    # self.swne is ordered (south, west, north, east).
    args['south'], args['west'], args['north'], args['east'] = self.swne
    args['minzoom'] = self.tminz
    args['maxzoom'] = self.tmaxz
    args['tilesize'] = self.tilesize
    args['tileformat'] = self.tileext
    args['publishurl'] = self.options.url
    args['copyright'] = self.options.copyright
    # TMS-compatible geodetic layouts are shifted one zoom level down.
    if self.options.tmscompatible:
        args['tmsoffset'] = "-1"
    else:
        args['tmsoffset'] = ""
    if self.options.profile == 'raster':
        args['rasterzoomlevels'] = self.tmaxz+1
        args['rastermaxresolution'] = 2**(self.nativezoom) * self.out_gt[1]
    # Fix: the original template was missing the closing '>' on the <html>
    # element, producing malformed markup in every generated page.
    s = r"""<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
<title>%(title)s</title>
<meta http-equiv='imagetoolbar' content='no'/>
<style type="text/css"> v\:* {behavior:url(#default#VML);}
html, body { overflow: hidden; padding: 0; height: 100%%; width: 100%%; font-family: 'Lucida Grande',Geneva,Arial,Verdana,sans-serif; }
body { margin: 10px; background: #fff; }
h1 { margin: 0; padding: 6px; border:0; font-size: 20pt; }
#header { height: 43px; padding: 0; background-color: #eee; border: 1px solid #888; }
#subheader { height: 12px; text-align: right; font-size: 10px; color: #555;}
#map { height: 95%%; border: 1px solid #888; }
.olImageLoadError { display: none; }
.olControlLayerSwitcher .layersDiv { border-radius: 10px 0 0 10px; }
</style>""" % args # noqa
    if self.options.profile == 'mercator':
        s += """
<!--<script src='http://maps.google.com/maps/api/js?sensor=false&v=3.7'></script>-->
""" % args
    s += """
<script src="http://www.openlayers.org/api/2.12/OpenLayers.js"></script>
<script>
var map;
var mapBounds = new OpenLayers.Bounds( %(west)s, %(south)s, %(east)s, %(north)s);
var mapMinZoom = %(minzoom)s;
var mapMaxZoom = %(maxzoom)s;
var emptyTileURL = "http://www.maptiler.org/img/none.png";
OpenLayers.IMAGE_RELOAD_ATTEMPTS = 3;
function init(){""" % args
    if self.options.profile == 'mercator':
        # Spherical Mercator: Google/OSM base layers plus the tile overlay.
        s += """
var options = {
div: "map",
controls: [],
projection: "EPSG:3857",
displayProjection: new OpenLayers.Projection("EPSG:4326"),
numZoomLevels: 22
};
map = new OpenLayers.Map(options);
// Create Google Mercator layers
/*
var gmap = new OpenLayers.Layer.Google("Google Streets",
{
type: google.maps.MapTypeId.ROADMAP,
sphericalMercator: true
});
var gsat = new OpenLayers.Layer.Google("Google Satellite",
{
type: google.maps.MapTypeId.SATELLITE,
sphericalMercator: true
});
var ghyb = new OpenLayers.Layer.Google("Google Hybrid",
{
type: google.maps.MapTypeId.HYBRID,
sphericalMercator: true
});
var gter = new OpenLayers.Layer.Google("Google Terrain",
{
type: google.maps.MapTypeId.TERRAIN,
sphericalMercator: true
});
// Create Bing layers
var broad = new OpenLayers.Layer.Bing({
name: "Bing Roads",
key: "%(bingkey)s",
type: "Road",
sphericalMercator: true
});
var baer = new OpenLayers.Layer.Bing({
name: "Bing Aerial",
key: "%(bingkey)s",
type: "Aerial",
sphericalMercator: true
});
var bhyb = new OpenLayers.Layer.Bing({
name: "Bing Hybrid",
key: "%(bingkey)s",
type: "AerialWithLabels",
sphericalMercator: true
});
*/
// Create Google Map XYZ layer
var gmap = new OpenLayers.Layer.XYZ("Google Streets",
"https://mt1.google.com/vt/lyrs=r&x=${x}&y=${y}&z=${z}", {
transitionEffect: 'resize'
});
var ghyb = new OpenLayers.Layer.XYZ("Google Hybrid",
"https://mt1.google.com/vt/lyrs=y&x=${x}&y=${y}&z=${z}", {
transitionEffect: 'resize'
});
// Create OSM layer
var osm = new OpenLayers.Layer.OSM("OpenStreetMap");
""" % args # noqa
        if self.options.xyz:
            s += """
// create TMS Overlay layer
var tmsoverlay = new OpenLayers.Layer.XYZ("XYZ Overlay",
"${z}/${x}/${y}.png", {
transitionEffect: 'resize',
isBaseLayer: false
});
""" % args # noqa
        else:
            s += """
// create TMS Overlay layer
var tmsoverlay = new OpenLayers.Layer.TMS("TMS Overlay", "",
{
serviceVersion: '.',
layername: '.',
alpha: true,
type: '%(tileformat)s',
isBaseLayer: false,
getURL: getURL
});
""" % args # noqa
        s += """
if (OpenLayers.Util.alphaHack() == false) {
tmsoverlay.setOpacity(0.7);
}
map.addLayers([gmap, ghyb, osm, tmsoverlay]);
var switcherControl = new OpenLayers.Control.LayerSwitcher();
map.addControl(switcherControl);
switcherControl.maximizeControl();
map.zoomToExtent(mapBounds.transform(map.displayProjection, map.projection));
""" % args # noqa
    elif self.options.profile == 'geodetic':
        # Geodetic: a WMS base layer plus the TMS overlay.
        s += """
var options = {
div: "map",
controls: [],
projection: "EPSG:4326"
};
map = new OpenLayers.Map(options);
var wms = new OpenLayers.Layer.WMS("VMap0",
"http://tilecache.osgeo.org/wms-c/Basic.py?",
{
layers: 'basic',
format: 'image/png'
}
);
var tmsoverlay = new OpenLayers.Layer.TMS("TMS Overlay", "",
{
serviceVersion: '.',
layername: '.',
alpha: true,
type: '%(tileformat)s',
isBaseLayer: false,
getURL: getURL
});
if (OpenLayers.Util.alphaHack() == false) {
tmsoverlay.setOpacity(0.7);
}
map.addLayers([wms,tmsoverlay]);
var switcherControl = new OpenLayers.Control.LayerSwitcher();
map.addControl(switcherControl);
switcherControl.maximizeControl();
map.zoomToExtent(mapBounds);
""" % args # noqa
    elif self.options.profile == 'raster':
        # Raster: the tile pyramid is the only layer, in pixel coordinates.
        s += """
var options = {
div: "map",
controls: [],
maxExtent: new OpenLayers.Bounds(%(west)s, %(south)s, %(east)s, %(north)s),
maxResolution: %(rastermaxresolution)f,
numZoomLevels: %(rasterzoomlevels)d
};
map = new OpenLayers.Map(options);
var layer = new OpenLayers.Layer.TMS("TMS Layer", "",
{
serviceVersion: '.',
layername: '.',
alpha: true,
type: '%(tileformat)s',
getURL: getURL
});
map.addLayer(layer);
map.zoomToExtent(mapBounds);
""" % args # noqa
    s += """
map.addControls([new OpenLayers.Control.PanZoomBar(),
new OpenLayers.Control.Navigation(),
new OpenLayers.Control.MousePosition(),
new OpenLayers.Control.ArgParser(),
new OpenLayers.Control.Attribution()]);
}
""" % args
    # Per-profile getURL implementation for the TMS layers above.
    # NOTE(review): getURL is only emitted when options.xyz is None; if the
    # option could ever be False (rather than None) the TMS overlay above
    # would reference an undefined getURL -- confirm the option's default.
    if self.options.profile == 'mercator' and self.options.xyz is None:
        s += """
function getURL(bounds) {
bounds = this.adjustBounds(bounds);
var res = this.getServerResolution();
var x = Math.round((bounds.left - this.tileOrigin.lon) / (res * this.tileSize.w));
var y = Math.round((bounds.bottom - this.tileOrigin.lat) / (res * this.tileSize.h));
var z = this.getServerZoom();
if (this.map.baseLayer.CLASS_NAME === 'OpenLayers.Layer.Bing') {
z+=1;
}
var path = this.serviceVersion + "/" + this.layername + "/" + z + "/" + x + "/" + y + "." + this.type;
var url = this.url;
if (OpenLayers.Util.isArray(url)) {
url = this.selectUrl(path, url);
}
if (mapBounds.intersectsBounds(bounds) && (z >= mapMinZoom) && (z <= mapMaxZoom)) {
return url + path;
} else {
return emptyTileURL;
}
}
""" % args # noqa
    elif self.options.profile == 'geodetic':
        s += """
function getURL(bounds) {
bounds = this.adjustBounds(bounds);
var res = this.getServerResolution();
var x = Math.round((bounds.left - this.tileOrigin.lon) / (res * this.tileSize.w));
var y = Math.round((bounds.bottom - this.tileOrigin.lat) / (res * this.tileSize.h));
var z = this.getServerZoom()%(tmsoffset)s;
var path = this.serviceVersion + "/" + this.layername + "/" + z + "/" + x + "/" + y + "." + this.type;
var url = this.url;
if (OpenLayers.Util.isArray(url)) {
url = this.selectUrl(path, url);
}
if (mapBounds.intersectsBounds(bounds) && (z >= mapMinZoom) && (z <= mapMaxZoom)) {
return url + path;
} else {
return emptyTileURL;
}
}
""" % args # noqa
    elif self.options.profile == 'raster':
        s += """
function getURL(bounds) {
bounds = this.adjustBounds(bounds);
var res = this.getServerResolution();
var x = Math.round((bounds.left - this.tileOrigin.lon) / (res * this.tileSize.w));
var y = Math.round((bounds.bottom - this.tileOrigin.lat) / (res * this.tileSize.h));
var z = this.getServerZoom();
var path = this.serviceVersion + "/" + this.layername + "/" + z + "/" + x + "/" + y + "." + this.type;
var url = this.url;
if (OpenLayers.Util.isArray(url)) {
url = this.selectUrl(path, url);
}
if (mapBounds.intersectsBounds(bounds) && (z >= mapMinZoom) && (z <= mapMaxZoom)) {
return url + path;
} else {
return emptyTileURL;
}
}
""" % args # noqa
    # Page footer: resize helpers, title header and attribution note.
    s += """
function getWindowHeight() {
if (self.innerHeight) return self.innerHeight;
if (document.documentElement && document.documentElement.clientHeight)
return document.documentElement.clientHeight;
if (document.body) return document.body.clientHeight;
return 0;
}
function getWindowWidth() {
if (self.innerWidth) return self.innerWidth;
if (document.documentElement && document.documentElement.clientWidth)
return document.documentElement.clientWidth;
if (document.body) return document.body.clientWidth;
return 0;
}
function resize() {
var map = document.getElementById("map");
var header = document.getElementById("header");
var subheader = document.getElementById("subheader");
map.style.height = (getWindowHeight()-80) + "px";
map.style.width = (getWindowWidth()-20) + "px";
header.style.width = (getWindowWidth()-20) + "px";
subheader.style.width = (getWindowWidth()-20) + "px";
if (map.updateSize) { map.updateSize(); };
}
onresize=function(){ resize(); };
</script>
</head>
<body onload="init()">
<div id="header"><h1>%(title)s</h1></div>
<div id="subheader">Generated by <a href="http://www.klokan.cz/projects/gdal2tiles/">GDAL2Tiles</a>, Copyright © 2008 <a href="http://www.klokan.cz/">Klokan Petr Pridal</a>, <a href="http://www.gdal.org/">GDAL</a> & <a href="http://www.osgeo.org/">OSGeo</a> <a href="http://code.google.com/soc/">GSoC</a>
<!-- PLEASE, LET THIS NOTE ABOUT AUTHOR AND PROJECT SOMEWHERE ON YOUR WEBSITE, OR AT LEAST IN THE COMMENT IN HTML. THANK YOU -->
</div>
<div id="map"></div>
<script type="text/javascript" >resize()</script>
</body>
</html>""" % args # noqa
    return s
@staticmethod
def getYtile(ty, tz, options):
"""
Calculates the y-tile number based on whether XYZ or TMS (default) system is used
:param ty: The y-tile number
:return: The transformed tile number
"""
if options.xyz:
return (2**tz - 1) - ty # Convert from TMS to XYZ numbering system
else:
return ty
def worker_tile_details(input_file, output_folder, options, send_pipe=None):
    """
    Compute the tile job configuration and the per-tile work list.

    Opens the input, generates the metadata files and the base-tile details.
    The (tile_job_info, tile_details) pair is returned and, when send_pipe
    is given (multiprocessing), also sent through the pipe.

    Raises whatever exception the underlying GDAL2Tiles steps raise, after
    printing a short diagnostic.
    """
    try:
        gdal2tiles = GDAL2Tiles(input_file, output_folder, options)
        gdal2tiles.open_input()
        gdal2tiles.generate_metadata()
        tile_job_info, tile_details = gdal2tiles.generate_base_tiles()
        return_data = (tile_job_info, tile_details)
        if send_pipe:
            send_pipe.send(return_data)
        return return_data
    except Exception as e:
        print("worker_tile_details failed ", str(e))
        # Fix: previously the exception was swallowed and the function fell
        # through to an implicit 'return None', which made callers crash
        # later with a confusing TypeError when unpacking the result (or
        # left the parent process waiting on a pipe that never delivers).
        # Re-raise so the real error surfaces with its traceback.
        raise
def progress_printer_thread(queue, nb_jobs):
    """Drain one notification per job from queue, advancing a progress bar."""
    bar = ProgressBar(nb_jobs)
    bar.start()
    done = 0
    while done < nb_jobs:
        queue.get()
        bar.log_progress()
        queue.task_done()
        done += 1
class ProgressBar(object):
    """
    Minimal textual progress bar written to stdout.

    Progress is reported in STEP-percent increments: a dot for each step,
    the percentage itself at every multiple of ten, and a newline at 100.
    """

    def __init__(self, total_items):
        # Number of work items corresponding to 100%.
        self.total_items = total_items
        self.nb_items_done = 0
        # Percentage already rendered, advanced in STEP increments.
        self.current_progress = 0
        self.STEP = 2.5

    def start(self):
        """Print the leading '0' marker."""
        sys.stdout.write("0")

    def log_progress(self, nb_items=1):
        """Record nb_items completed items and render any newly passed steps."""
        self.nb_items_done += nb_items
        percent = float(self.nb_items_done) / self.total_items * 100
        # Emit one marker per full STEP the progress has advanced past.
        while self.current_progress + self.STEP <= percent:
            self.current_progress += self.STEP
            if self.current_progress % 10 == 0:
                sys.stdout.write(str(int(self.current_progress)))
                if self.current_progress == 100:
                    sys.stdout.write("\n")
            else:
                sys.stdout.write(".")
        sys.stdout.flush()
def get_tile_swne(tile_job_info, options):
    """
    Return a callable (x, y, z) -> (south, west, north, east) giving tile
    bounds in WGS84 degrees for the configured profile.  A dummy returning
    zeros is used when the bounds cannot be computed (raster profile
    without KML/SRS information, or an unknown profile).
    """
    if options.profile == 'mercator':
        return GlobalMercator().TileLatLonBounds
    if options.profile == 'geodetic':
        return GlobalGeodetic(options.tmscompatible).TileLatLonBounds
    if options.profile == 'raster' and tile_job_info.kml and tile_job_info.in_srs_wkt:
        # Transform raster-profile tile corners from the output SRS to WGS84.
        srs4326 = osr.SpatialReference()
        srs4326.ImportFromEPSG(4326)
        in_srs = osr.SpatialReference()
        in_srs.ImportFromWkt(tile_job_info.in_srs_wkt)
        ct = osr.CoordinateTransformation(in_srs, srs4326)

        def rastertileswne(x, y, z):
            # Tile edges in the output SRS, derived from the geo-transform;
            # pixel size is scaled up for zoom levels below the maximum.
            pixelsizex = (2 ** (tile_job_info.tmaxz - z) * tile_job_info.out_geo_trans[1])
            west = tile_job_info.out_geo_trans[0] + x * tile_job_info.tilesize * pixelsizex
            east = west + tile_job_info.tilesize * pixelsizex
            south = tile_job_info.ominy + y * tile_job_info.tilesize * pixelsizex
            north = south + tile_job_info.tilesize * pixelsizex
            if not tile_job_info.is_epsg_4326:
                # Transformation to EPSG:4326 (WGS84 datum)
                west, south = ct.TransformPoint(west, south)[:2]
                east, north = ct.TransformPoint(east, north)[:2]
            return south, west, north, east

        return rastertileswne
    return lambda x, y, z: (0, 0, 0, 0)  # noqa
def single_threaded_tiling(input_file, output_folder, options):
    """
    Tile input_file into output_folder in a single process.

    Kept separate from the multiprocessing path for platforms that do not
    support multiprocessing.
    """
    if options.verbose:
        print("Begin tiles details calc")
    conf, tile_details = worker_tile_details(input_file, output_folder, options)
    if options.verbose:
        print("Tiles details calc complete.")
    # Progress output is suppressed in both verbose and quiet modes.
    show_progress = not options.verbose and not options.quiet
    progress_bar = None
    if show_progress:
        progress_bar = ProgressBar(len(tile_details))
        progress_bar.start()
    for detail in tile_details:
        create_base_tile(conf, detail, options)
        if progress_bar is not None:
            progress_bar.log_progress()
    create_overview_tiles(conf, output_folder, options)
    # Remove the temporary directory holding the intermediate VRT.
    shutil.rmtree(os.path.dirname(conf.src_file))
def multi_threaded_tiling(input_file, output_folder, options):
    """Generate base and overview tiles using a pool of worker processes."""
    nb_processes = options.nb_processes or 1
    (conf_receiver, conf_sender) = Pipe(False)

    if options.verbose:
        print("Begin tiles details calc")
    # Tile-detail computation runs in a child process and streams its result
    # back over the pipe.
    p = Process(target=worker_tile_details,
                args=[input_file, output_folder, options],
                kwargs={"send_pipe": conf_sender})
    p.start()
    # Make sure to consume the queue before joining. If the payload is too big, it won't be put in
    # one go in the queue and therefore the sending process will never finish, waiting for space in
    # the queue to send data
    conf, tile_details = conf_receiver.recv()
    p.join()
    if options.verbose:
        print("Tiles details calc complete.")

    # Have to create the Queue through a multiprocessing.Manager to get a Queue Proxy,
    # otherwise you can't pass it as a param in the method invoked by the pool...
    manager = Manager()
    queue = manager.Queue()
    pool = Pool(processes=nb_processes)
    # TODO: gbataille - check the confs for which each element is an array... one useless level?
    # TODO: gbataille - assign an ID to each job for print in verbose mode "ReadRaster Extent ..."
    # TODO: gbataille - check memory footprint and time on big image. are they opened x times

    for tile_detail in tile_details:
        pool.apply_async(create_base_tile, (conf, tile_detail, options), {"queue": queue})

    if not options.verbose and not options.quiet:
        # Workers push progress notifications onto `queue`; this process
        # drains them and prints the progress bar.
        p = Process(target=progress_printer_thread, args=[queue, len(tile_details)])
        p.start()

    pool.close()
    pool.join()  # Jobs finished
    if not options.verbose and not options.quiet:
        p.join()  # Traces done

    create_overview_tiles(conf, output_folder, options)
    # Remove the temporary directory that holds the intermediate source file.
    shutil.rmtree(os.path.dirname(conf.src_file))
def main():
    # TODO: gbataille - use mkdtemp to work in a temp directory
    # TODO: gbataille - debug intermediate tiles.vrt not produced anymore?
    # TODO: gbataille - Refactor generate overview tiles to not depend on self variables
    argv = gdal.GeneralCmdLineProcessor(sys.argv)
    input_file, output_folder, options = process_args(argv[1:])
    workers = options.nb_processes or 1
    # Dispatch to the single- or multi-process implementation.
    tiling = single_threaded_tiling if workers == 1 else multi_threaded_tiling
    tiling(input_file, output_folder, options)
# Script entry point: only run when executed directly, not when imported.
if __name__ == '__main__':
    # start_time = time.time()
    main()
    # print("--- %s seconds ---" % (time.time() - start_time))
|
example.py | from __future__ import print_function
from treestream import RedisTreeWriter
from threading import Thread, Lock
from time import sleep
from random import random, randrange
from treestream import (
RedisTreeReader,
RedisTreeWriter,
)
# Serializes print() calls issued from multiple threads.
print_lock = Lock()


def start_thread(func):
    """Run *func* on a daemon thread and return the started thread."""
    thread = Thread(target=func)
    # Thread.setDaemon() is deprecated since Python 3.10; assign the
    # `daemon` attribute instead (must be set before start()).
    thread.daemon = True
    thread.start()
    return thread


def tprint(*args):
    """Thread-safe print: serialize output through the shared lock."""
    with print_lock:
        print(*args)
class MyReader(RedisTreeReader):
    """Tree reader that logs every event it receives, tagged with its name."""

    def __init__(self, reader_name, **kwargs):
        super(MyReader, self).__init__(**kwargs)
        # Label used to prefix every printed event.
        self.reader_name = reader_name

    def handle_update(self, timestamp, tree_path, old_value, value):
        # A node's value was created or changed.
        tprint(self.reader_name, tree_path, '->', value)

    def handle_delete(self, timestamp, tree_path, old_value):
        # A single node was removed.
        tprint(self.reader_name, tree_path, '-> None')

    def handle_delete_subtree(self, timestamp, tree_path, deleted_subtree):
        # Intentionally silent.  NOTE(review): presumably per-node deletions
        # are already reported via handle_delete — confirm against treestream.
        pass

    def handle_sync_lost(self):
        tprint(self.reader_name, 'sync-lost')

    def handle_synced(self, tree):
        # On (re)sync, announce it and dump a snapshot of the tree.
        tprint(self.reader_name, 'synced')
        with print_lock:
            tree.pretty_print(depth=4)
def writer1():
    """Start a writer that updates a random leaf node every ~100 ms."""
    writer = RedisTreeWriter()
    start_thread(writer.run)
    writer.delete(())  # clear the tree

    def update_forever():
        while True:
            path = [str(randrange(3)) for _ in range(3)]
            writer.update(path, str(randrange(1000, 10000)))
            sleep(random() * 0.1)

    start_thread(update_forever)
    return writer
def writer2():
    """Start a writer that deletes a random subtree every ~5 s."""
    writer = RedisTreeWriter()
    start_thread(writer.run)

    def delete_forever():
        while True:
            prefix = [str(randrange(3)) for _ in range(randrange(4))]
            tprint('DELETING', prefix)
            writer.delete(prefix)
            sleep(random() * 5.0)

    start_thread(delete_forever)
    return writer
def _make_reader(name):
    # Shared helper: the two reader factories were duplicates except for the
    # name, so the common construction/start logic lives here.
    reader = MyReader(name)
    start_thread(reader.run)
    return reader


def reader1():
    """Start a reader named 'reader1' and return it."""
    return _make_reader('reader1')


def reader2():
    """Start a reader named 'reader2' and return it."""
    return _make_reader('reader2')
def main():
    """Run two writers and two readers for ~10 s, starting reader2 late."""
    writer_a = writer1()
    writer_b = writer2()
    first_reader = reader1()

    sleep(5.0)
    tprint('reader2 starting')
    second_reader = reader2()  # start later, so that we get a sync first
    sleep(5.0)

    for participant in (writer_a, writer_b, first_reader, second_reader):
        participant.stop()
# Script entry point: run the demo only when executed directly.
if __name__ == '__main__':
    main()
|
collector.py | from typing import List
from kubemon.collector.commands import ConnectedMonitorsCommand, InstancesCommand, NotExistCommand, StartCommand, StopCommand
from ..dataclasses import Client
from ..config import DATA_PATH, DEFAULT_CLI_PORT
from ..utils import save_csv, receive, send_to
from addict import Dict
from datetime import datetime
from os.path import join as join_path
import socket
import threading
from ..log import create_logger
def start_thread(func, args=tuple()):
"""
Function to create and start a thread
Args:
func (function): Target function
args (tuple): Target function arguments
"""
threading.Thread(target=func, args=args).start()
LOGGER = create_logger(__name__)
class Collector(threading.Thread):
    """Server that collects monitoring data and relays CLI commands.

    A TCP socket accepts monitor connections (one listener thread each);
    a UDP socket receives CLI commands and answers them.
    """

    def __init__(self, address: str, port: int, cli_port=DEFAULT_CLI_PORT):
        threading.Thread.__init__(self)
        self.__address = address       # address both sockets bind to
        self.__port = port             # TCP port for monitor connections
        self.__cli_port = cli_port     # UDP port for CLI commands
        self.__instances = list()      # connected Client objects
        self.dir_name = None           # output sub-directory for CSV dumps
        self.is_running = False        # toggled by the start/stop commands
        self.name = self.__class__.__name__
        self.mutex = threading.Lock()  # guards self.__instances

    @property
    def address(self):
        return self.__address

    @property
    def port(self):
        return self.__port

    @property
    def cli_port(self):
        return self.__cli_port

    @property
    def connected_instances(self):
        return len(self.__instances)

    @property
    def daemons(self) -> List[str]:
        """Unique host addresses among the connected monitors."""
        unique = []
        for client in self.__instances:
            if client.address[0] not in unique:
                unique.append(client.address[0])
        return unique

    def __accept_connections(self, sockfd: socket.socket) -> None:
        """Accept monitor connections forever, one listener thread each."""
        LOGGER.debug("Started function __accept_connections")
        while True:
            client, address = sockfd.accept()

            # Receiving the monitor name
            name, _ = receive(client, buffer_size=512)
            LOGGER.debug(f"Received name={name}")

            client = Client(name, client, client.getsockname())

            with self.mutex:
                self.__instances.append(client)

            LOGGER.info(f"{name} connected. Address={address[0]}:{address[1]}")
            start_thread(self.__listen_monitors, (client,))
            print(self.daemons)

    def __listen_cli(self, cli: socket.socket) -> None:
        """
        Function to receive and redirect commands from a CLI to monitors.
        Currently it is based on UDP sockets.

        Args:
            cli (socket.socket): client socket

        Returns:
            None
        """
        while True:
            data, addr = receive(cli)
            LOGGER.info(f"Received command '{data}' from {addr[0]}:{addr[1]}")

            if data:
                print(data)
                data = data.split()
                cmd = data[0].lower()  # Command

                if cmd == "start":
                    # Optional second word names the output directory.
                    if len(data) == 2:
                        self.dir_name = data[1]
                        LOGGER.debug(f"dir_name setted to {self.dir_name}")
                    command = StartCommand(self.daemons, self.dir_name, self.address)
                    self.is_running = True
                elif cmd == "instances":
                    command = InstancesCommand(self.__instances)
                elif cmd == "monitors":
                    command = ConnectedMonitorsCommand(self.__instances)
                elif cmd == "stop":
                    self.is_running = False
                    command = StopCommand(self.__instances, self.daemons, self.is_running)
                else:
                    command = NotExistCommand()

                message = command.execute()
                send_to(cli, message, address=addr)
                LOGGER.debug(f"Sending '{message}' to {addr[0]}:{addr[1]}")

    def __setup_socket(self, address: str, port: int, socktype: socket.SocketKind) -> socket.socket:
        """
        Setup a server socket

        Args:
            address (str): Address for binding
            port (int): Port for binding
        """
        sockfd = socket.socket(socket.AF_INET, socktype)
        try:
            sockfd.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
            sockfd.bind((address, port))
        except OSError:
            # BUGFIX: this previously called self.error(), a method that does
            # not exist, so a bind failure raised AttributeError instead of
            # being reported. Also narrowed the bare `except:` to OSError.
            LOGGER.error(f"Error while trying to bind socket to port {port}")
            sockfd.close()
            exit(1)
        return sockfd

    def __start_cli(self) -> None:
        """ Wrapper function to setup CLI. """
        # Setup socket
        sockfd = self.__setup_socket(self.address, self.cli_port, socket.SOCK_DGRAM)
        LOGGER.info(f'Started collector CLI at {self.address}:{self.cli_port}')

        # Start listening for commands
        self.__listen_cli(sockfd)

    def __start_collector(self) -> None:
        """ Wrapper function to setup the collector. """
        # Setup socket
        sockfd = self.__setup_socket(self.address, self.port, socket.SOCK_STREAM)
        sockfd.listen()
        LOGGER.info(f"Started collector at {self.address}:{self.port}")

        # Start accepting incoming connections from monitors
        self.__accept_connections(sockfd)

    def run(self) -> None:
        """ Start the collector """
        start_thread(self.__start_cli)
        self.__start_collector()
        # NOTE: __start_collector loops forever, so this line is unreachable
        # in normal operation.
        LOGGER.debug("Call from function start")

    def __listen_monitors(self, client: Client) -> None:
        """ Listen for monitors.

        Args:
            client (socket.socket): Monitor socket
            address (tuple): Monitor address

        Returns: None
        """
        LOGGER.info(f"Creating new thread for client {client.name}@{client.address[0]}:{client.address[1]}")
        while True:
            try:
                data, _ = receive(client.socket_obj, buffer_size=2048)

                if data is not None:
                    LOGGER.info(f"Successfully received data from {client.name}@{client.address[0]}:{client.address[1]}")
                else:
                    LOGGER.info(f"Received nothing from {client.name}")

                if isinstance(data, dict):
                    data = Dict(data)
                    data.data.update({'timestamp': datetime.now()})
                    dir_name = data.source
                    if self.dir_name:
                        dir_name = join_path(self.dir_name, data.source.split("_")[0])
                    save_csv(data.data, data.source, dir_name=dir_name)
                    LOGGER.debug(f"Saving data to {str(DATA_PATH)}/{self.dir_name}/{data.source}")

                msg = f"OK - {datetime.now()}"
                send_to(client.socket_obj, msg)
                LOGGER.debug(f"Sending '{msg}' to client {client.name}")
            except Exception:
                # Any failure unregisters the client; narrowed from a bare
                # `except:` so SystemExit/KeyboardInterrupt still propagate.
                addr, port = client.address
                LOGGER.info(f"Unregistered {client.name} {addr}:{port}")
                LOGGER.error('What happened??', exc_info=1)
                with self.mutex:
                    self.__instances.remove(client)
                # Killing thread (raises SystemExit in this thread only)
                exit(1)
|
run-tests.py | #!/usr/bin/env python3
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
from argparse import ArgumentParser
import os
import re
from shutil import rmtree, which
import subprocess
import sys
import tempfile
from threading import Thread, Lock
import time
import uuid
import queue as Queue
from multiprocessing import Manager
python_test_goals = ["test_sqlflow"]
def print_red(text):
    """Print *text* to stdout wrapped in ANSI red escape codes."""
    red, reset = '\033[31m', '\033[0m'
    print(red + text + reset)
# Populated in the __main__ guard with a Manager-backed dict mapping
# (python_exec, test_name) -> list of skipped-test output lines.
SKIPPED_TESTS = None
# Aggregated log of every test's output, created next to this script.
LOG_FILE = os.path.join(os.path.abspath(os.path.dirname(__file__)), "unit-tests.log")
# Serializes failure reporting so interleaved thread output stays readable.
FAILURE_REPORTING_LOCK = Lock()
LOGGER = logging.getLogger()
def run_individual_python_test(target_dir, test_name, python_exec, params):
    """Run one test module under *python_exec* and log the outcome.

    On any failure the whole process is terminated with os._exit so parallel
    worker threads abort the run immediately.
    """
    env = dict(os.environ)
    env.update({
        'PYTHONPATH': params["SQLFLOW_PYTHONPATH"],
        'SQLFLOW_LIB': params["SQLFLOW_LIB"]
    })

    # Create a unique temp directory under 'target/' for each run. The TMPDIR variable is
    # recognized by the tempfile module to override the default system temp directory.
    tmp_dir = os.path.join(target_dir, str(uuid.uuid4()))
    while os.path.isdir(tmp_dir):
        tmp_dir = os.path.join(target_dir, str(uuid.uuid4()))
    os.mkdir(tmp_dir)
    env["TMPDIR"] = tmp_dir

    metastore_dir = os.path.join(tmp_dir, str(uuid.uuid4()))
    while os.path.isdir(metastore_dir):
        metastore_dir = os.path.join(metastore_dir, str(uuid.uuid4()))
    os.mkdir(metastore_dir)

    LOGGER.info("Starting test(%s): %s", python_exec, test_name)
    start_time = time.time()
    try:
        # Combined stdout+stderr of the child process.
        per_test_output = tempfile.TemporaryFile()
        # TODO: Needs to run this test on conda virtual env
        retcode = subprocess.Popen(
            [python_exec, f"{params['SQLFLOW_PYTHONPATH']}/tests/{test_name}.py"],
            stderr=per_test_output, stdout=per_test_output, env=env).wait()
        rmtree(tmp_dir, ignore_errors=True)
    except:
        LOGGER.exception("Got exception while running %s with %s", test_name, python_exec)
        # Here, we use os._exit() instead of sys.exit() in order to force Python to exit even if
        # this code is invoked from a thread other than the main thread.
        os._exit(1)

    duration = time.time() - start_time
    # Exit on the first failure.
    if retcode != 0:
        try:
            with FAILURE_REPORTING_LOCK:
                with open(LOG_FILE, 'ab') as log_file:
                    per_test_output.seek(0)
                    log_file.writelines(per_test_output)
                per_test_output.seek(0)
                for line in per_test_output:
                    decoded_line = line.decode("utf-8", "replace")
                    if not re.match('[0-9]+', decoded_line):
                        print(decoded_line, end='')
                per_test_output.close()
        except:
            LOGGER.exception("Got an exception while trying to print failed test output")
        finally:
            print_red("\nHad test failures in %s with %s; see logs." % (test_name, python_exec))
            # Here, we use os._exit() instead of sys.exit() in order to force Python to exit even if
            # this code is invoked from a thread other than the main thread.
            os._exit(-1)
    else:
        skipped_counts = 0
        try:
            per_test_output.seek(0)
            # Here expects skipped test output from unittest when verbosity level is
            # 2 (or --verbose option is enabled).
            decoded_lines = map(lambda line: line.decode("utf-8", "replace"), iter(per_test_output))
            skipped_tests = list(filter(
                lambda line: re.search(r'test_.* \(.*\) ... (skip|SKIP)', line),
                decoded_lines))
            skipped_counts = len(skipped_tests)
            if skipped_counts > 0:
                key = (python_exec, test_name)
                assert SKIPPED_TESTS is not None
                SKIPPED_TESTS[key] = skipped_tests
            per_test_output.close()
        except:
            import traceback
            print_red("\nGot an exception while trying to store "
                      "skipped test output:\n%s" % traceback.format_exc())
            # Here, we use os._exit() instead of sys.exit() in order to force Python to exit even if
            # this code is invoked from a thread other than the main thread.
            os._exit(-1)

        if skipped_counts != 0:
            LOGGER.info(
                "Finished test(%s): %s (%is) ... %s tests were skipped", python_exec, test_name,
                duration, skipped_counts)
        else:
            LOGGER.info(
                "Finished test(%s): %s (%is)", python_exec, test_name, duration)
def get_default_python_executables():
    """Return the Python executables found on PATH, preferring python3.7."""
    execs = [exe for exe in ("python3.7", "pypy3") if which(exe)]
    if "python3.7" not in execs:
        fallback = which("python3")
        if not fallback:
            LOGGER.error("No python3 executable found. Exiting!")
            os._exit(1)
        execs.insert(0, fallback)
    return execs
def parse_opts():
    """Parse and validate command-line options for the test runner."""
    parser = ArgumentParser(prog="run-tests")
    parser.add_argument(
        "--package", type=str, required=True,
        help="A compiled jar assembly of `spark-sql-flow-plugin`"
    )
    parser.add_argument(
        "--module", type=str, required=True,
        help="A absolute path of the test module"
    )
    parser.add_argument(
        "--python-executables", type=str, default=','.join(get_default_python_executables()),
        help="A comma-separated list of Python executables to test against (default: %(default)s)"
    )
    parser.add_argument(
        "-p", "--parallelism", type=int, default=4,
        help="The number of suites to test in parallel (default %(default)d)"
    )
    parser.add_argument(
        "--verbose", action="store_true",
        help="Enable additional debug logging"
    )

    group = parser.add_argument_group("Developer Options")
    group.add_argument(
        "--testnames", type=str,
        default=None,
        help=(
            "A comma-separated list of specific modules, classes and functions of doctest "
            "or unittest to test.")
    )

    args, unknown = parser.parse_known_args()
    # Reject anything argparse did not recognize rather than ignoring it.
    if unknown:
        parser.error("Unsupported arguments: %s" % ' '.join(unknown))
    if args.parallelism < 1:
        parser.error("Parallelism cannot be less than 1")
    return args
def main():
    """Entry point: build the task queue and fan tests out to worker threads."""
    opts = parse_opts()
    params = {
        "SQLFLOW_LIB": opts.package,
        "SQLFLOW_PYTHONPATH": opts.module
    }
    if opts.verbose:
        log_level = logging.DEBUG
    else:
        log_level = logging.INFO
    logging.basicConfig(stream=sys.stdout, level=log_level, format="%(message)s")
    LOGGER.info("Running Python tests. Output is in %s", LOG_FILE)

    if os.path.exists(LOG_FILE):
        os.remove(LOG_FILE)

    python_execs = opts.python_executables.split(',')
    LOGGER.info("Will test against the following Python executables: %s", python_execs)

    if opts.testnames is not None:
        testnames_to_test = opts.testnames.split(',')
        LOGGER.info("Will test the following Python tests: %s", testnames_to_test)

    task_queue = Queue.PriorityQueue()
    for python_exec in python_execs:
        python_implementation = subprocess.check_output(
            [python_exec, "-c", "import platform; print(platform.python_implementation())"],
            universal_newlines=True).strip()
        LOGGER.info("%s python_implementation is %s", python_exec, python_implementation)
        LOGGER.info("%s version is: %s", python_exec, subprocess.check_output(
            [python_exec, "--version"], stderr=subprocess.STDOUT, universal_newlines=True).strip())
        if opts.testnames is None:
            # Default goals get low priority (100); explicit names run first (0).
            for test_goal in python_test_goals:
                task_queue.put((100, (python_exec, test_goal)))
        else:
            for test_goal in testnames_to_test:
                task_queue.put((0, (python_exec, test_goal)))

    # Create the target directory before starting tasks to avoid races.
    target_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), 'target'))
    if not os.path.isdir(target_dir):
        os.mkdir(target_dir)

    def process_queue(task_queue):
        # Worker: drain the queue until empty, one test at a time.
        while True:
            try:
                (priority, (python_exec, test_goal)) = task_queue.get_nowait()
            except Queue.Empty:
                break
            try:
                run_individual_python_test(target_dir, test_goal, python_exec, params)
            finally:
                task_queue.task_done()

    start_time = time.time()
    for _ in range(opts.parallelism):
        worker = Thread(target=process_queue, args=(task_queue,))
        worker.daemon = True
        worker.start()
    try:
        task_queue.join()
    except (KeyboardInterrupt, SystemExit):
        print_red("Exiting due to interrupt")
        sys.exit(-1)
    total_duration = time.time() - start_time
    LOGGER.info("Tests passed in %i seconds", total_duration)

    # Report any skipped tests recorded by the workers.
    for key, lines in sorted(SKIPPED_TESTS.items()):
        python_exec, test_name = key
        LOGGER.info("\nSkipped tests in %s with %s:" % (test_name, python_exec))
        for line in lines:
            LOGGER.info(" %s" % line.rstrip())
if __name__ == "__main__":
    # The Manager-backed dict lets worker threads record skipped tests; it is
    # created under the guard so importing this module spawns no processes.
    SKIPPED_TESTS = Manager().dict()
    main()
|
settings.py | import threading
import tkinter as tk
import webbrowser
from datetime import date
from pathlib import Path
from tkinter import messagebox
import api.remarkable_client
import utils.config as cfg
from api.remarkable_client import RemarkableClient
from model.item_manager import ItemManager
class Settings(object):
    """Settings pane: authentication, general paths, backup and UI scaling.

    Builds all widgets into *root* (a tkinter container) and wires the
    buttons to the reMarkable client and item manager.
    """

    def __init__(self, root):
        self.rm_client = RemarkableClient()
        self.item_manager = ItemManager()

        # UI scaling factor; defaults to the inverse of Tk's own scaling.
        scaling = cfg.get("scaling", 1 / root.tk.call('tk', 'scaling'))
        title_font = "Helvetica %d bold" % int(14 * scaling)

        # Fixed grid geometry for the settings form.
        root.grid_columnconfigure(4, minsize=180)
        root.grid_rowconfigure(1, minsize=50)
        root.grid_rowconfigure(2, minsize=30)
        root.grid_rowconfigure(3, minsize=30)
        root.grid_rowconfigure(4, minsize=30)
        root.grid_rowconfigure(6, minsize=50)
        root.grid_rowconfigure(7, minsize=30)
        root.grid_rowconfigure(8, minsize=30)
        root.grid_rowconfigure(9, minsize=50)
        root.grid_rowconfigure(13, minsize=50)
        root.grid_rowconfigure(14, minsize=50)

        # gaps between columns
        label = tk.Label(root, text="   ")
        label.grid(row=1, column=1)
        label = tk.Label(root, text="   ")
        label.grid(row=1, column=3)
        label = tk.Label(root, text="   ")
        label.grid(row=1, column=5)

        # Authentication
        label = tk.Label(root, text="Authentication", font=title_font)
        label.grid(row=1, column=2, sticky="W")

        self.onetime_code_link = "https://my.remarkable.com/connect/remarkable"
        self.label_onetime_code = tk.Label(root, justify="left", anchor="w",
                fg="blue", cursor="hand2", text="\nDownload one-time code from \n" + self.onetime_code_link)
        self.label_onetime_code.grid(row=2, column=7, sticky="SW")
        self.label_onetime_code.bind("<Button-1>", lambda e: webbrowser.open_new(self.onetime_code_link))

        label = tk.Label(root, justify="left", anchor="w", text="Status: ")
        label.grid(row=2, column=2, sticky="W")

        self.label_auth_status = tk.Label(root, text="Unknown")
        self.label_auth_status.grid(row=2, column=4, sticky="W")

        label = tk.Label(root, justify="left", anchor="w", text="One-time code:")
        label.grid(row=3, column=2, sticky="W")

        self.entry_onetime_code_text = tk.StringVar()
        self.entry_onetime_code = tk.Entry(root, textvariable=self.entry_onetime_code_text)
        self.entry_onetime_code.grid(row=3, column=4, sticky="W")

        self.btn_sign_in = tk.Button(root, text="Sign In", command=self.btn_sign_in_click, width=17)
        self.btn_sign_in.grid(row=4, column=4, sticky="W")

        # General
        label = tk.Label(root, text="General", font=title_font)
        label.grid(row=6, column=2, sticky="W")

        label = tk.Label(root, text="Templates path:")
        label.grid(row=7, column=2, sticky="W")

        self.entry_templates_text = tk.StringVar()
        self.entry_templates_text.set(cfg.get("general.templates", default=""))
        self.entry_templates = tk.Entry(root, textvariable=self.entry_templates_text)
        self.entry_templates.grid(row=7, column=4, sticky="W")

        label = tk.Label(root, justify="left", anchor="w", text="A local folder that contains all template PNG files. \nYou can copy the template files from your tablet: \n'/usr/share/remarkable'")
        label.grid(row=7, column=7, sticky="W")

        label = tk.Label(root, text="Backup root path:")
        label.grid(row=8, column=2, sticky="W")

        self.backup_root_text = tk.StringVar()
        backup_root_default = Path.joinpath(Path.home(), "Backup","Remarkable")
        backup_root = cfg.get("general.backuproot", default=str(backup_root_default))
        self.backup_root_text.set(backup_root)
        self.entry_backup_root = tk.Entry(root, textvariable=self.backup_root_text)
        self.entry_backup_root.grid(row=8, column=4, sticky="W")

        label = tk.Label(root, justify="left", anchor="w", text="A local folder that will be used as the root folder for backups.")
        label.grid(row=8, column=7, sticky="W")

        self.btn_save = tk.Button(root, text="Save", command=self.btn_save_click, width=17)
        self.btn_save.grid(row=9, column=4, sticky="W")

        # Backup
        label = tk.Label(root, text="Backup", font=title_font)
        label.grid(row=10, column=2, sticky="W")

        label = tk.Label(root, text="Backup path:")
        label.grid(row=11, column=2, sticky="W")

        self.backup_folder_text = tk.StringVar()
        backup_folder = str(date.today().strftime("%Y-%m-%d"))
        self.backup_folder_text.set(backup_folder)
        self.entry_backup_folder = tk.Entry(root, textvariable=self.backup_folder_text)
        self.entry_backup_folder.grid(row=11, column=4, sticky="W")

        self.label_backup_progress = tk.Label(root)
        self.label_backup_progress.grid(row=11, column=6)

        label = tk.Label(root, justify="left", anchor="w", text="Copy currently downloaded and annotated PDF files \ninto the given directory. Note that those files can not \nbe restored on the tablet.")
        label.grid(row=11, column=7, sticky="W")

        # NOTE(review): this attribute assignment shadows the bound method
        # btn_create_backup on the instance; it works only because command=
        # was resolved before the assignment — confirm intentional.
        self.btn_create_backup = tk.Button(root, text="Create backup", command=self.btn_create_backup, width=17)
        self.btn_create_backup.grid(row=12, column=4, sticky="W")

        # UI
        label = tk.Label(root, text="UI", font=title_font)
        label.grid(row=13, column=2, sticky="W")

        label = tk.Label(root, text="Scaling")
        label.grid(row=14, column=2, sticky="W")

        self.scaling_text = tk.StringVar()
        self.scaling_text.set(str(cfg.get("scaling",1/root.tk.call('tk', 'scaling'))*100)+"%")
        self.entry_scaling = tk.Entry(root, textvariable=self.scaling_text)
        self.entry_scaling.grid(row=14,column=4, sticky="W")

        label = tk.Label(root, justify="left", anchor="w", text="UI scaling will be applied after a restart.")
        label.grid(row=14, column=7, sticky="W")

        # NOTE(review): same method/attribute shadowing as btn_create_backup.
        self.btn_apply_scaling = tk.Button(root, text="Save scaling", command=self.btn_apply_scaling, width=17)
        self.btn_apply_scaling.grid(row=15, column=4, sticky="W")

        # Subscribe to sign in event. Outer logic (i.e. main) can try to
        # sign in automatically...
        self.rm_client.listen_sign_in_event(self)

    #
    # EVENT HANDLER
    #
    def sign_in_event_handler(self, event, config):
        """Enable/disable widgets according to the sign-in *event*.

        All controls are first reset to the signed-out state, then the
        success branch re-enables the signed-in controls.
        """
        self.btn_sign_in.config(state = "normal")
        self.entry_onetime_code.config(state="normal")
        self.btn_create_backup.config(state="disabled")
        self.btn_save.config(state="disabled")
        self.entry_backup_root.config(state="disabled")
        self.entry_backup_folder.config(state="disabled")
        self.entry_templates.config(state="disabled")

        if event == api.remarkable_client.EVENT_SUCCESS:
            self.btn_sign_in.config(state="disabled")
            self.entry_onetime_code.config(state="disabled")
            self.btn_create_backup.config(state="normal")
            self.btn_save.config(state="normal")
            self.entry_backup_root.config(state="normal")
            self.entry_backup_folder.config(state="normal")
            self.entry_templates.config(state="normal")
            self.label_auth_status.config(text="Successfully signed in", fg="green")
        elif event == api.remarkable_client.EVENT_USER_TOKEN_FAILED:
            self.label_auth_status.config(text="Could not renew user token\n(please try again).", fg="red")
            self.entry_onetime_code.config(state="disabled")
        elif event == api.remarkable_client.EVENT_ONETIMECODE_NEEDED:
            self.label_auth_status.config(text="Enter one-time code.", fg="red")
        else:
            self.label_auth_status.config(text="Could not sign in.", fg="red")

    def btn_sign_in_click(self):
        # Attempt sign-in with the code the user typed.
        onetime_code = self.entry_onetime_code_text.get()
        self.rm_client.sign_in(onetime_code)

    def btn_save_click(self):
        # Persist the "general" settings section.
        general = {
            "templates": self.entry_templates_text.get(),
            "backuproot": self.backup_root_text.get()
        }
        cfg.save({"general": general})

    def btn_create_backup(self):
        """Confirm with the user, then write a backup on a worker thread."""
        message = "If your explorer is not synchronized, some files are not included in the backup. Should we continue?"
        result = messagebox.askquestion("Info", message, icon='warning')
        if result != "yes":
            return

        backup_root = self.backup_root_text.get()
        backup_folder = self.backup_folder_text.get()
        backup_path = Path.joinpath(Path(backup_root), backup_folder)
        self.label_backup_progress.config(text="Writing backup '%s'" % backup_path)

        def run():
            # NOTE(review): this worker thread updates Tk widgets directly;
            # Tkinter is generally not thread-safe — confirm this is OK here.
            self.item_manager.create_backup(backup_path)
            self.label_backup_progress.config(text="")
            messagebox.showinfo("Info", "Successfully created backup '%s'" % backup_path)
        threading.Thread(target=run).start()

    def btn_apply_scaling(self):
        """Parse the scaling entry ('150%' or '1.5') and persist it."""
        scaling_text = self.scaling_text.get().strip()
        try:
            if len(scaling_text) >= 2 and scaling_text[-1] == "%":
                scaling = float(scaling_text[0:-1]) / 100
            else:
                scaling = float(scaling_text)
        except ValueError:
            # Silently ignore unparsable input; the entry keeps its text.
            return
        print("scaling: ", scaling)
        cfg.save({"scaling": scaling})
|
test_functools.py | import abc
import builtins
import collections
import collections.abc
import copy
from itertools import permutations
import pickle
from random import choice
import sys
from test import support
import threading
import time
import typing
import unittest
import unittest.mock
from weakref import proxy
import contextlib
import functools
# Fresh module copies so the pure-Python and C implementations of functools
# (and decimal) can be tested independently of what `import functools` binds.
py_functools = support.import_fresh_module('functools', blocked=['_functools'])
c_functools = support.import_fresh_module('functools', fresh=['_functools'])
decimal = support.import_fresh_module('decimal', fresh=['_decimal'])
@contextlib.contextmanager
def replaced_module(name, replacement):
    """Temporarily install *replacement* as sys.modules[name]."""
    saved = sys.modules[name]
    sys.modules[name] = replacement
    try:
        yield
    finally:
        # Always restore the original module, even if the body raised.
        sys.modules[name] = saved
def capture(*args, **kwargs):
    """Return the positional and keyword arguments received, as a pair."""
    return args, kwargs
def signature(part):
    """Return (func, args, keywords, __dict__) identifying a partial object."""
    return part.func, part.args, part.keywords, part.__dict__
class MyTuple(tuple):
    # Plain tuple subclass used as a helper by the tests in this module.
    pass

class BadTuple(tuple):
    # tuple subclass whose __add__ deliberately returns a plain list
    # instead of a tuple.
    def __add__(self, other):
        return list(self) + list(other)

class MyDict(dict):
    # Plain dict subclass counterpart of MyTuple.
    pass
class TestPartial:
def test_basic_examples(self):
p = self.partial(capture, 1, 2, a=10, b=20)
self.assertTrue(callable(p))
self.assertEqual(p(3, 4, b=30, c=40),
((1, 2, 3, 4), dict(a=10, b=30, c=40)))
p = self.partial(map, lambda x: x*10)
self.assertEqual(list(p([1,2,3,4])), [10, 20, 30, 40])
def test_attributes(self):
p = self.partial(capture, 1, 2, a=10, b=20)
# attributes should be readable
self.assertEqual(p.func, capture)
self.assertEqual(p.args, (1, 2))
self.assertEqual(p.keywords, dict(a=10, b=20))
def test_argument_checking(self):
self.assertRaises(TypeError, self.partial) # need at least a func arg
try:
self.partial(2)()
except TypeError:
pass
else:
self.fail('First arg not checked for callability')
def test_protection_of_callers_dict_argument(self):
# a caller's dictionary should not be altered by partial
def func(a=10, b=20):
return a
d = {'a':3}
p = self.partial(func, a=5)
self.assertEqual(p(**d), 3)
self.assertEqual(d, {'a':3})
p(b=7)
self.assertEqual(d, {'a':3})
def test_kwargs_copy(self):
# Issue #29532: Altering a kwarg dictionary passed to a constructor
# should not affect a partial object after creation
d = {'a': 3}
p = self.partial(capture, **d)
self.assertEqual(p(), ((), {'a': 3}))
d['a'] = 5
self.assertEqual(p(), ((), {'a': 3}))
def test_arg_combinations(self):
# exercise special code paths for zero args in either partial
# object or the caller
p = self.partial(capture)
self.assertEqual(p(), ((), {}))
self.assertEqual(p(1,2), ((1,2), {}))
p = self.partial(capture, 1, 2)
self.assertEqual(p(), ((1,2), {}))
self.assertEqual(p(3,4), ((1,2,3,4), {}))
def test_kw_combinations(self):
# exercise special code paths for no keyword args in
# either the partial object or the caller
p = self.partial(capture)
self.assertEqual(p.keywords, {})
self.assertEqual(p(), ((), {}))
self.assertEqual(p(a=1), ((), {'a':1}))
p = self.partial(capture, a=1)
self.assertEqual(p.keywords, {'a':1})
self.assertEqual(p(), ((), {'a':1}))
self.assertEqual(p(b=2), ((), {'a':1, 'b':2}))
# keyword args in the call override those in the partial object
self.assertEqual(p(a=3, b=2), ((), {'a':3, 'b':2}))
def test_positional(self):
# make sure positional arguments are captured correctly
for args in [(), (0,), (0,1), (0,1,2), (0,1,2,3)]:
p = self.partial(capture, *args)
expected = args + ('x',)
got, empty = p('x')
self.assertTrue(expected == got and empty == {})
def test_keyword(self):
# make sure keyword arguments are captured correctly
for a in ['a', 0, None, 3.5]:
p = self.partial(capture, a=a)
expected = {'a':a,'x':None}
empty, got = p(x=None)
self.assertTrue(expected == got and empty == ())
def test_no_side_effects(self):
# make sure there are no side effects that affect subsequent calls
p = self.partial(capture, 0, a=1)
args1, kw1 = p(1, b=2)
self.assertTrue(args1 == (0,1) and kw1 == {'a':1,'b':2})
args2, kw2 = p()
self.assertTrue(args2 == (0,) and kw2 == {'a':1})
def test_error_propagation(self):
def f(x, y):
x / y
self.assertRaises(ZeroDivisionError, self.partial(f, 1, 0))
self.assertRaises(ZeroDivisionError, self.partial(f, 1), 0)
self.assertRaises(ZeroDivisionError, self.partial(f), 1, 0)
self.assertRaises(ZeroDivisionError, self.partial(f, y=0), 1)
def test_weakref(self):
f = self.partial(int, base=16)
p = proxy(f)
self.assertEqual(f.func, p.func)
f = None
self.assertRaises(ReferenceError, getattr, p, 'func')
def test_with_bound_and_unbound_methods(self):
data = list(map(str, range(10)))
join = self.partial(str.join, '')
self.assertEqual(join(data), '0123456789')
join = self.partial(''.join)
self.assertEqual(join(data), '0123456789')
def test_nested_optimization(self):
partial = self.partial
inner = partial(signature, 'asdf')
nested = partial(inner, bar=True)
flat = partial(signature, 'asdf', bar=True)
self.assertEqual(signature(nested), signature(flat))
def test_nested_partial_with_attribute(self):
# see issue 25137
partial = self.partial
def foo(bar):
return bar
p = partial(foo, 'first')
p2 = partial(p, 'second')
p2.new_attr = 'spam'
self.assertEqual(p2.new_attr, 'spam')
def test_repr(self):
args = (object(), object())
args_repr = ', '.join(repr(a) for a in args)
kwargs = {'a': object(), 'b': object()}
kwargs_reprs = ['a={a!r}, b={b!r}'.format_map(kwargs),
'b={b!r}, a={a!r}'.format_map(kwargs)]
if self.partial in (c_functools.partial, py_functools.partial):
name = 'functools.partial'
else:
name = self.partial.__name__
f = self.partial(capture)
self.assertEqual(f'{name}({capture!r})', repr(f))
f = self.partial(capture, *args)
self.assertEqual(f'{name}({capture!r}, {args_repr})', repr(f))
f = self.partial(capture, **kwargs)
self.assertIn(repr(f),
[f'{name}({capture!r}, {kwargs_repr})'
for kwargs_repr in kwargs_reprs])
f = self.partial(capture, *args, **kwargs)
self.assertIn(repr(f),
[f'{name}({capture!r}, {args_repr}, {kwargs_repr})'
for kwargs_repr in kwargs_reprs])
def test_recursive_repr(self):
    # A partial that references itself (through func, args or keywords)
    # must render an elided '...' instead of recursing forever.
    if self.partial in (c_functools.partial, py_functools.partial):
        name = 'functools.partial'
    else:
        name = self.partial.__name__

    # Self-reference through func.
    f = self.partial(capture)
    f.__setstate__((f, (), {}, {}))
    try:
        self.assertEqual(repr(f), '%s(...)' % (name,))
    finally:
        # Break the reference cycle so teardown is clean.
        f.__setstate__((capture, (), {}, {}))

    # Self-reference through a positional argument.
    f = self.partial(capture)
    f.__setstate__((capture, (f,), {}, {}))
    try:
        self.assertEqual(repr(f), '%s(%r, ...)' % (name, capture,))
    finally:
        f.__setstate__((capture, (), {}, {}))

    # Self-reference through a keyword argument.
    f = self.partial(capture)
    f.__setstate__((capture, (), {'a': f}, {}))
    try:
        self.assertEqual(repr(f), '%s(%r, a=...)' % (name, capture,))
    finally:
        f.__setstate__((capture, (), {}, {}))
def test_pickle(self):
    # Round-tripping through every pickle protocol preserves the
    # partial's func, args, keywords and instance attributes.
    with self.AllowPickle():
        f = self.partial(signature, ['asdf'], bar=[True])
        f.attr = []
        for proto in range(pickle.HIGHEST_PROTOCOL + 1):
            f_copy = pickle.loads(pickle.dumps(f, proto))
            self.assertEqual(signature(f_copy), signature(f))
def test_copy(self):
    """copy.copy is shallow: the copied partial shares its contents."""
    original = self.partial(signature, ['asdf'], bar=[True])
    original.attr = []
    clone = copy.copy(original)
    self.assertEqual(signature(clone), signature(original))
    # A shallow copy must share (not duplicate) each referenced object.
    for attribute in ('attr', 'args', 'keywords'):
        self.assertIs(getattr(clone, attribute), getattr(original, attribute))
def test_deepcopy(self):
    """copy.deepcopy clones the partial and everything it references."""
    original = self.partial(signature, ['asdf'], bar=[True])
    original.attr = []
    clone = copy.deepcopy(original)
    self.assertEqual(signature(clone), signature(original))
    # Deep copy must duplicate the containers AND their mutable contents.
    self.assertIsNot(clone.attr, original.attr)
    self.assertIsNot(clone.args, original.args)
    self.assertIsNot(clone.args[0], original.args[0])
    self.assertIsNot(clone.keywords, original.keywords)
    self.assertIsNot(clone.keywords['bar'], original.keywords['bar'])
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_setstate(self):
    # __setstate__ replaces (func, args, keywords, __dict__) wholesale;
    # a None keywords or __dict__ slot is normalised to an empty dict.
    f = self.partial(signature)
    f.__setstate__((capture, (1,), dict(a=10), dict(attr=[])))
    self.assertEqual(signature(f),
                     (capture, (1,), dict(a=10), dict(attr=[])))
    self.assertEqual(f(2, b=20), ((1, 2), {'a': 10, 'b': 20}))

    # None __dict__ -> empty instance dict.
    f.__setstate__((capture, (1,), dict(a=10), None))
    self.assertEqual(signature(f), (capture, (1,), dict(a=10), {}))
    self.assertEqual(f(2, b=20), ((1, 2), {'a': 10, 'b': 20}))

    # None keywords -> no frozen keyword arguments.
    f.__setstate__((capture, (1,), None, None))
    #self.assertEqual(signature(f), (capture, (1,), {}, {}))
    self.assertEqual(f(2, b=20), ((1, 2), {'b': 20}))
    self.assertEqual(f(2), ((1, 2), {}))
    self.assertEqual(f(), ((1,), {}))

    # Empty args tuple -> nothing frozen at all.
    f.__setstate__((capture, (), {}, None))
    self.assertEqual(signature(f), (capture, (), {}, {}))
    self.assertEqual(f(2, b=20), ((2,), {'b': 20}))
    self.assertEqual(f(2), ((2,), {}))
    self.assertEqual(f(), ((), {}))
def test_setstate_errors(self):
    """__setstate__ rejects malformed state values with TypeError."""
    f = self.partial(signature)
    bad_states = (
        (capture, (), {}),            # too few items
        (capture, (), {}, {}, None),  # too many items
        [capture, (), {}, None],      # a list, not a tuple
        (None, (), {}, None),         # func is not callable
        (capture, None, {}, None),    # args is not a tuple
        (capture, [], {}, None),
        (capture, (), [], None),      # keywords is not a dict
    )
    for state in bad_states:
        self.assertRaises(TypeError, f.__setstate__, state)
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_setstate_subclasses(self):
    # State supplied as tuple/dict *subclasses* must be converted to
    # plain tuple/dict so subclass behaviour (e.g. BadTuple's
    # overridden concatenation) cannot leak into the partial.
    f = self.partial(signature)
    f.__setstate__((capture, MyTuple((1,)), MyDict(a=10), None))
    s = signature(f)
    self.assertEqual(s, (capture, (1,), dict(a=10), {}))
    self.assertIs(type(s[1]), tuple)
    self.assertIs(type(s[2]), dict)
    r = f()
    self.assertEqual(r, ((1,), {'a': 10}))
    self.assertIs(type(r[0]), tuple)
    self.assertIs(type(r[1]), dict)

    # BadTuple would raise if used for argument concatenation; the
    # conversion to a plain tuple must happen before any call.
    f.__setstate__((capture, BadTuple((1,)), {}, None))
    s = signature(f)
    self.assertEqual(s, (capture, (1,), {}, {}))
    self.assertIs(type(s[1]), tuple)
    r = f(2)
    self.assertEqual(r, ((1, 2), {}))
    self.assertIs(type(r[0]), tuple)
@unittest.skipIf(sys.platform == "win32", "TODO: RUSTPYTHON, thread 'main' has overflowed its stack on Windows")
def test_recursive_pickle(self):
    # Pickling a self-referential partial must either fail with
    # RecursionError (cycle through func) or faithfully reproduce the
    # cycle (cycle through args or keywords).
    with self.AllowPickle():
        # Cycle through func: unserialisable, expect RecursionError.
        f = self.partial(capture)
        f.__setstate__((f, (), {}, {}))
        try:
            for proto in range(pickle.HIGHEST_PROTOCOL + 1):
                with self.assertRaises(RecursionError):
                    pickle.dumps(f, proto)
        finally:
            # Break the cycle so teardown is clean.
            f.__setstate__((capture, (), {}, {}))

        # Cycle through a positional argument: round-trips as a cycle.
        f = self.partial(capture)
        f.__setstate__((capture, (f,), {}, {}))
        try:
            for proto in range(pickle.HIGHEST_PROTOCOL + 1):
                f_copy = pickle.loads(pickle.dumps(f, proto))
                try:
                    self.assertIs(f_copy.args[0], f_copy)
                finally:
                    f_copy.__setstate__((capture, (), {}, {}))
        finally:
            f.__setstate__((capture, (), {}, {}))

        # Cycle through a keyword argument: round-trips as a cycle.
        f = self.partial(capture)
        f.__setstate__((capture, (), {'a': f}, {}))
        try:
            for proto in range(pickle.HIGHEST_PROTOCOL + 1):
                f_copy = pickle.loads(pickle.dumps(f, proto))
                try:
                    self.assertIs(f_copy.keywords['a'], f_copy)
                finally:
                    f_copy.__setstate__((capture, (), {}, {}))
        finally:
            f.__setstate__((capture, (), {}, {}))
# Issue 6083: Reference counting bug
def test_setstate_refcount(self):
    # __setstate__ must not mismanage references when handed a
    # sequence imposter whose __getitem__ manufactures fresh objects
    # on every access (regression test for bpo-6083).
    class BadSequence:
        def __len__(self):
            return 4
        def __getitem__(self, key):
            if key == 0:
                return max
            elif key == 1:
                # A large throwaway tuple makes refcount errors visible.
                return tuple(range(1000000))
            elif key in (2, 3):
                return {}
            raise IndexError

    f = self.partial(object)
    # State must be a real tuple; a sequence imposter is rejected.
    self.assertRaises(TypeError, f.__setstate__, BadSequence())
@unittest.skipUnless(c_functools, 'requires the C _functools module')
class TestPartialC(TestPartial, unittest.TestCase):
    """Run the shared TestPartial suite against the C implementation."""
    if c_functools:
        partial = c_functools.partial

    class AllowPickle:
        # The C partial pickles as plain 'functools.partial'; this is a
        # no-op counterpart to TestPartialPy.AllowPickle.
        def __enter__(self):
            return self
        def __exit__(self, type, value, tb):
            return False

    # TODO: RUSTPYTHON
    @unittest.expectedFailure
    def test_pickle(self):
        super().test_pickle()

    # TODO: RUSTPYTHON
    @unittest.expectedFailure
    def test_recursive_pickle(self):
        super().test_recursive_pickle()

    # TODO: RUSTPYTHON
    @unittest.expectedFailure
    def test_attributes_unwritable(self):
        # attributes should not be writable
        p = self.partial(capture, 1, 2, a=10, b=20)
        self.assertRaises(AttributeError, setattr, p, 'func', map)
        self.assertRaises(AttributeError, setattr, p, 'args', (1, 2))
        self.assertRaises(AttributeError, setattr, p, 'keywords', dict(a=1, b=2))

        p = self.partial(hex)
        try:
            del p.__dict__
        except TypeError:
            pass
        else:
            self.fail('partial object allowed __dict__ to be deleted')

    def test_manually_adding_non_string_keyword(self):
        p = self.partial(capture)
        # Adding a non-string/unicode keyword to partial kwargs
        p.keywords[1234] = 'value'
        # repr() must tolerate the bogus key...
        r = repr(p)
        self.assertIn('1234', r)
        self.assertIn("'value'", r)
        # ...but calling the partial must still reject it.
        with self.assertRaises(TypeError):
            p()

    def test_keystr_replaces_value(self):
        p = self.partial(capture)

        class MutatesYourDict(object):
            def __str__(self):
                # Mutate the keywords dict *while* repr() iterates it.
                p.keywords[self] = ['sth2']
                return 'astr'

        # Replacing the value during key formatting should keep the original
        # value alive (at least long enough).
        p.keywords[MutatesYourDict()] = ['sth']
        r = repr(p)
        self.assertIn('astr', r)
        self.assertIn("['sth']", r)
class TestPartialPy(TestPartial, unittest.TestCase):
    """Run the shared TestPartial suite against the pure-Python implementation."""
    partial = py_functools.partial

    class AllowPickle:
        # Pickling the pure-Python partial needs the module registered
        # as "functools" to be py_functools for the duration of the
        # test — presumably replaced_module swaps sys.modules entries;
        # verify against its definition earlier in this file.
        def __init__(self):
            self._cm = replaced_module("functools", py_functools)
        def __enter__(self):
            return self._cm.__enter__()
        def __exit__(self, type, value, tb):
            return self._cm.__exit__(type, value, tb)
# Trivial subclasses used to verify that partial behaves correctly
# when subclassed (see TestPartialCSubclass / TestPartialPySubclass).
if c_functools:
    class CPartialSubclass(c_functools.partial):
        pass

class PyPartialSubclass(py_functools.partial):
    pass
@unittest.skipUnless(c_functools, 'requires the C _functools module')
class TestPartialCSubclass(TestPartialC):
    """Run the C-implementation suite against a subclass of partial."""
    if c_functools:
        partial = CPartialSubclass

    # TODO: RUSTPYTHON
    def test_pickle(self):
        # Call the base implementation directly, bypassing the
        # expectedFailure override that TestPartialC installs.
        TestPartial.test_pickle(self)

    # TODO: RUSTPYTHON
    def test_recursive_pickle(self):
        TestPartial.test_recursive_pickle(self)

    # partial subclasses are not optimized for nested calls
    test_nested_optimization = None
class TestPartialPySubclass(TestPartialPy):
    """Run the pure-Python suite against a subclass of partial."""
    partial = PyPartialSubclass
class TestPartialMethod(unittest.TestCase):
    """Tests for functools.partialmethod."""

    class A(object):
        # One partialmethod per argument-freezing pattern.
        nothing = functools.partialmethod(capture)
        positional = functools.partialmethod(capture, 1)
        keywords = functools.partialmethod(capture, a=2)
        both = functools.partialmethod(capture, 3, b=4)
        # Keywords named 'self'/'func' must not collide with
        # partialmethod's own parameters.
        spec_keywords = functools.partialmethod(capture, self=1, func=2)

        # partialmethod wrapping another partialmethod.
        nested = functools.partialmethod(positional, 5)

        # partialmethod wrapping a plain functools.partial.
        over_partial = functools.partialmethod(functools.partial(capture, c=6), 7)

        # partialmethod over static/class method descriptors.
        static = functools.partialmethod(staticmethod(capture), 8)
        cls = functools.partialmethod(classmethod(capture), d=9)

    a = A()

    # TODO: RUSTPYTHON
    @unittest.expectedFailure
    def test_arg_combinations(self):
        # The bound instance is always prepended, then frozen
        # positionals, then call-time positionals; keywords merge.
        self.assertEqual(self.a.nothing(), ((self.a,), {}))
        self.assertEqual(self.a.nothing(5), ((self.a, 5), {}))
        self.assertEqual(self.a.nothing(c=6), ((self.a,), {'c': 6}))
        self.assertEqual(self.a.nothing(5, c=6), ((self.a, 5), {'c': 6}))

        self.assertEqual(self.a.positional(), ((self.a, 1), {}))
        self.assertEqual(self.a.positional(5), ((self.a, 1, 5), {}))
        self.assertEqual(self.a.positional(c=6), ((self.a, 1), {'c': 6}))
        self.assertEqual(self.a.positional(5, c=6), ((self.a, 1, 5), {'c': 6}))

        self.assertEqual(self.a.keywords(), ((self.a,), {'a': 2}))
        self.assertEqual(self.a.keywords(5), ((self.a, 5), {'a': 2}))
        self.assertEqual(self.a.keywords(c=6), ((self.a,), {'a': 2, 'c': 6}))
        self.assertEqual(self.a.keywords(5, c=6), ((self.a, 5), {'a': 2, 'c': 6}))

        self.assertEqual(self.a.both(), ((self.a, 3), {'b': 4}))
        self.assertEqual(self.a.both(5), ((self.a, 3, 5), {'b': 4}))
        self.assertEqual(self.a.both(c=6), ((self.a, 3), {'b': 4, 'c': 6}))
        self.assertEqual(self.a.both(5, c=6), ((self.a, 3, 5), {'b': 4, 'c': 6}))

        # Explicit (unbound) invocation through the class.
        self.assertEqual(self.A.both(self.a, 5, c=6), ((self.a, 3, 5), {'b': 4, 'c': 6}))

        self.assertEqual(self.a.spec_keywords(), ((self.a,), {'self': 1, 'func': 2}))

    def test_nested(self):
        self.assertEqual(self.a.nested(), ((self.a, 1, 5), {}))
        self.assertEqual(self.a.nested(6), ((self.a, 1, 5, 6), {}))
        self.assertEqual(self.a.nested(d=7), ((self.a, 1, 5), {'d': 7}))
        self.assertEqual(self.a.nested(6, d=7), ((self.a, 1, 5, 6), {'d': 7}))

        self.assertEqual(self.A.nested(self.a, 6, d=7), ((self.a, 1, 5, 6), {'d': 7}))

    def test_over_partial(self):
        self.assertEqual(self.a.over_partial(), ((self.a, 7), {'c': 6}))
        self.assertEqual(self.a.over_partial(5), ((self.a, 7, 5), {'c': 6}))
        self.assertEqual(self.a.over_partial(d=8), ((self.a, 7), {'c': 6, 'd': 8}))
        self.assertEqual(self.a.over_partial(5, d=8), ((self.a, 7, 5), {'c': 6, 'd': 8}))

        self.assertEqual(self.A.over_partial(self.a, 5, d=8), ((self.a, 7, 5), {'c': 6, 'd': 8}))

    def test_bound_method_introspection(self):
        obj = self.a
        self.assertIs(obj.both.__self__, obj)
        self.assertIs(obj.nested.__self__, obj)
        self.assertIs(obj.over_partial.__self__, obj)
        # classmethod-based partialmethods bind to the class.
        self.assertIs(obj.cls.__self__, self.A)
        self.assertIs(self.A.cls.__self__, self.A)

    def test_unbound_method_retrieval(self):
        # Accessed through the class, no binding happens.
        obj = self.A
        self.assertFalse(hasattr(obj.both, "__self__"))
        self.assertFalse(hasattr(obj.nested, "__self__"))
        self.assertFalse(hasattr(obj.over_partial, "__self__"))
        self.assertFalse(hasattr(obj.static, "__self__"))
        self.assertFalse(hasattr(self.a.static, "__self__"))

    def test_descriptors(self):
        # static/cls behave the same whether reached via class or instance.
        for obj in [self.A, self.a]:
            with self.subTest(obj=obj):
                self.assertEqual(obj.static(), ((8,), {}))
                self.assertEqual(obj.static(5), ((8, 5), {}))
                self.assertEqual(obj.static(d=8), ((8,), {'d': 8}))
                self.assertEqual(obj.static(5, d=8), ((8, 5), {'d': 8}))

                self.assertEqual(obj.cls(), ((self.A,), {'d': 9}))
                self.assertEqual(obj.cls(5), ((self.A, 5), {'d': 9}))
                self.assertEqual(obj.cls(c=8), ((self.A,), {'c': 8, 'd': 9}))
                self.assertEqual(obj.cls(5, c=8), ((self.A, 5), {'c': 8, 'd': 9}))

    def test_overriding_keywords(self):
        # Call-time keywords win over frozen keywords.
        self.assertEqual(self.a.keywords(a=3), ((self.a,), {'a': 3}))
        self.assertEqual(self.A.keywords(self.a, a=3), ((self.a,), {'a': 3}))

    def test_invalid_args(self):
        with self.assertRaises(TypeError):
            class B(object):
                method = functools.partialmethod(None, 1)
        with self.assertRaises(TypeError):
            class B:
                method = functools.partialmethod()
        # Passing the callable as keyword 'func' is deprecated.
        with self.assertWarns(DeprecationWarning):
            class B:
                method = functools.partialmethod(func=capture, a=1)
        b = B()
        self.assertEqual(b.method(2, x=3), ((b, 2), {'a': 1, 'x': 3}))

    def test_repr(self):
        self.assertEqual(repr(vars(self.A)['both']),
                         'functools.partialmethod({}, 3, b=4)'.format(capture))

    def test_abstract(self):
        class Abstract(abc.ABCMeta):
            @abc.abstractmethod
            def add(self, x, y):
                pass
            add5 = functools.partialmethod(add, 5)

        # Abstractness propagates from the wrapped method...
        self.assertTrue(Abstract.add.__isabstractmethod__)
        self.assertTrue(Abstract.add5.__isabstractmethod__)

        # ...but ordinary partialmethods are not abstract.
        for func in [self.A.static, self.A.cls, self.A.over_partial, self.A.nested, self.A.both]:
            self.assertFalse(getattr(func, '__isabstractmethod__', False))

    def test_positional_only(self):
        # partial over a function with positional-only parameters.
        def f(a, b, /):
            return a + b
        p = functools.partial(f, 1)
        self.assertEqual(p(2), f(1, 2))
class TestUpdateWrapper(unittest.TestCase):
    """Tests for functools.update_wrapper."""

    def check_wrapper(self, wrapper, wrapped,
                      assigned=functools.WRAPPER_ASSIGNMENTS,
                      updated=functools.WRAPPER_UPDATES):
        # Check attributes were assigned
        for name in assigned:
            self.assertIs(getattr(wrapper, name), getattr(wrapped, name))
        # Check attributes were updated
        for name in updated:
            wrapper_attr = getattr(wrapper, name)
            wrapped_attr = getattr(wrapped, name)
            for key in wrapped_attr:
                if name == "__dict__" and key == "__wrapped__":
                    # __wrapped__ is overwritten by the update code
                    continue
                self.assertIs(wrapped_attr[key], wrapper_attr[key])
        # Check __wrapped__
        self.assertIs(wrapper.__wrapped__, wrapped)

    def _default_update(self):
        # Build a (wrapper, wrapped) pair updated with default settings.
        def f(a: 'This is a new annotation'):
            """This is a test"""
            pass
        f.attr = 'This is also a test'
        # A pre-existing __wrapped__ must be replaced, not copied over.
        f.__wrapped__ = "This is a bald faced lie"
        def wrapper(b: 'This is the prior annotation'):
            pass
        functools.update_wrapper(wrapper, f)
        return wrapper, f

    def test_default_update(self):
        wrapper, f = self._default_update()
        self.check_wrapper(wrapper, f)
        self.assertIs(wrapper.__wrapped__, f)
        self.assertEqual(wrapper.__name__, 'f')
        self.assertEqual(wrapper.__qualname__, f.__qualname__)
        self.assertEqual(wrapper.attr, 'This is also a test')
        # Annotations are replaced wholesale, not merged.
        self.assertEqual(wrapper.__annotations__['a'], 'This is a new annotation')
        self.assertNotIn('b', wrapper.__annotations__)

    @unittest.skipIf(sys.flags.optimize >= 2,
                     "Docstrings are omitted with -O2 and above")
    def test_default_update_doc(self):
        wrapper, f = self._default_update()
        self.assertEqual(wrapper.__doc__, 'This is a test')

    def test_no_update(self):
        # With empty assigned/updated tuples nothing is copied except
        # the __wrapped__ back-reference.
        def f():
            """This is a test"""
            pass
        f.attr = 'This is also a test'
        def wrapper():
            pass
        functools.update_wrapper(wrapper, f, (), ())
        self.check_wrapper(wrapper, f, (), ())
        self.assertEqual(wrapper.__name__, 'wrapper')
        self.assertNotEqual(wrapper.__qualname__, f.__qualname__)
        self.assertEqual(wrapper.__doc__, None)
        self.assertEqual(wrapper.__annotations__, {})
        self.assertFalse(hasattr(wrapper, 'attr'))

    def test_selective_update(self):
        # Custom assigned/updated tuples control exactly what is copied.
        def f():
            pass
        f.attr = 'This is a different test'
        f.dict_attr = dict(a=1, b=2, c=3)
        def wrapper():
            pass
        wrapper.dict_attr = {}
        assign = ('attr',)
        update = ('dict_attr',)
        functools.update_wrapper(wrapper, f, assign, update)
        self.check_wrapper(wrapper, f, assign, update)
        self.assertEqual(wrapper.__name__, 'wrapper')
        self.assertNotEqual(wrapper.__qualname__, f.__qualname__)
        self.assertEqual(wrapper.__doc__, None)
        self.assertEqual(wrapper.attr, 'This is a different test')
        self.assertEqual(wrapper.dict_attr, f.dict_attr)

    def test_missing_attributes(self):
        def f():
            pass
        def wrapper():
            pass
        wrapper.dict_attr = {}
        assign = ('attr',)
        update = ('dict_attr',)
        # Missing attributes on wrapped object are ignored
        functools.update_wrapper(wrapper, f, assign, update)
        self.assertNotIn('attr', wrapper.__dict__)
        self.assertEqual(wrapper.dict_attr, {})
        # Wrapper must have expected attributes for updating
        del wrapper.dict_attr
        with self.assertRaises(AttributeError):
            functools.update_wrapper(wrapper, f, assign, update)
        wrapper.dict_attr = 1
        with self.assertRaises(AttributeError):
            functools.update_wrapper(wrapper, f, assign, update)

    @support.requires_docstrings
    @unittest.skipIf(sys.flags.optimize >= 2,
                     "Docstrings are omitted with -O2 and above")
    def test_builtin_update(self):
        # Test for bug #1576241
        def wrapper():
            pass
        functools.update_wrapper(wrapper, max)
        self.assertEqual(wrapper.__name__, 'max')
        self.assertTrue(wrapper.__doc__.startswith('max('))
        self.assertEqual(wrapper.__annotations__, {})
class TestWraps(TestUpdateWrapper):
    """Re-run the update_wrapper tests through the @functools.wraps decorator."""

    def _default_update(self):
        def f():
            """This is a test"""
            pass
        f.attr = 'This is also a test'
        f.__wrapped__ = "This is still a bald faced lie"
        @functools.wraps(f)
        def wrapper():
            pass
        return wrapper, f

    def test_default_update(self):
        wrapper, f = self._default_update()
        self.check_wrapper(wrapper, f)
        self.assertEqual(wrapper.__name__, 'f')
        self.assertEqual(wrapper.__qualname__, f.__qualname__)
        self.assertEqual(wrapper.attr, 'This is also a test')

    @unittest.skipIf(sys.flags.optimize >= 2,
                     "Docstrings are omitted with -O2 and above")
    def test_default_update_doc(self):
        wrapper, _ = self._default_update()
        self.assertEqual(wrapper.__doc__, 'This is a test')

    def test_no_update(self):
        def f():
            """This is a test"""
            pass
        f.attr = 'This is also a test'
        @functools.wraps(f, (), ())
        def wrapper():
            pass
        self.check_wrapper(wrapper, f, (), ())
        self.assertEqual(wrapper.__name__, 'wrapper')
        self.assertNotEqual(wrapper.__qualname__, f.__qualname__)
        self.assertEqual(wrapper.__doc__, None)
        self.assertFalse(hasattr(wrapper, 'attr'))

    def test_selective_update(self):
        def f():
            pass
        f.attr = 'This is a different test'
        f.dict_attr = dict(a=1, b=2, c=3)
        def add_dict_attr(f):
            f.dict_attr = {}
            return f
        assign = ('attr',)
        update = ('dict_attr',)
        @functools.wraps(f, assign, update)
        @add_dict_attr
        def wrapper():
            pass
        self.check_wrapper(wrapper, f, assign, update)
        self.assertEqual(wrapper.__name__, 'wrapper')
        self.assertNotEqual(wrapper.__qualname__, f.__qualname__)
        self.assertEqual(wrapper.__doc__, None)
        self.assertEqual(wrapper.attr, 'This is a different test')
        self.assertEqual(wrapper.dict_attr, f.dict_attr)
class TestReduce:
    """Shared tests for reduce(); 'reduce' is bound in TestReduceC/TestReducePy."""

    def test_reduce(self):
        class Squares:
            # Lazily-computed sequence of squares; exercises reduce()
            # over an old-style __getitem__ sequence protocol.
            def __init__(self, max):
                self.max = max
                self.sofar = []

            def __len__(self):
                return len(self.sofar)

            def __getitem__(self, i):
                if not 0 <= i < self.max: raise IndexError
                n = len(self.sofar)
                while n <= i:
                    self.sofar.append(n*n)
                    n += 1
                return self.sofar[i]

        def add(x, y):
            return x + y

        self.assertEqual(self.reduce(add, ['a', 'b', 'c'], ''), 'abc')
        self.assertEqual(
            self.reduce(add, [['a', 'c'], [], ['d', 'w']], []),
            ['a','c','d','w']
        )
        self.assertEqual(self.reduce(lambda x, y: x*y, range(2,8), 1), 5040)
        self.assertEqual(
            self.reduce(lambda x, y: x*y, range(2,21), 1),
            2432902008176640000
        )
        self.assertEqual(self.reduce(add, Squares(10)), 285)
        self.assertEqual(self.reduce(add, Squares(10), 0), 285)
        self.assertEqual(self.reduce(add, Squares(0), 0), 0)
        # Bad call signatures.
        self.assertRaises(TypeError, self.reduce)
        self.assertRaises(TypeError, self.reduce, 42, 42)
        self.assertRaises(TypeError, self.reduce, 42, 42, 42)
        self.assertEqual(self.reduce(42, "1"), "1") # func is never called with one item
        self.assertEqual(self.reduce(42, "", "1"), "1") # func is never called with one item
        self.assertRaises(TypeError, self.reduce, 42, (42, 42))
        self.assertRaises(TypeError, self.reduce, add, []) # arg 2 must not be empty sequence with no initial value
        self.assertRaises(TypeError, self.reduce, add, "")
        self.assertRaises(TypeError, self.reduce, add, ())
        self.assertRaises(TypeError, self.reduce, add, object())

        class TestFailingIter:
            def __iter__(self):
                raise RuntimeError
        # Errors from obtaining the iterator propagate.
        self.assertRaises(RuntimeError, self.reduce, add, TestFailingIter())

        # An empty iterable with an initial value returns the initial value.
        self.assertEqual(self.reduce(add, [], None), None)
        self.assertEqual(self.reduce(add, [], 42), 42)

        class BadSeq:
            def __getitem__(self, index):
                raise ValueError
        # Errors raised during iteration propagate.
        self.assertRaises(ValueError, self.reduce, 42, BadSeq())

    # Test reduce()'s use of iterators.
    def test_iterator_usage(self):
        class SequenceClass:
            # Minimal __getitem__-protocol sequence of 0..n-1.
            def __init__(self, n):
                self.n = n
            def __getitem__(self, i):
                if 0 <= i < self.n:
                    return i
                else:
                    raise IndexError

        from operator import add
        self.assertEqual(self.reduce(add, SequenceClass(5)), 10)
        self.assertEqual(self.reduce(add, SequenceClass(5), 42), 52)
        self.assertRaises(TypeError, self.reduce, add, SequenceClass(0))
        self.assertEqual(self.reduce(add, SequenceClass(0), 42), 42)
        self.assertEqual(self.reduce(add, SequenceClass(1)), 0)
        self.assertEqual(self.reduce(add, SequenceClass(1), 42), 42)

        d = {"one": 1, "two": 2, "three": 3}
        # reduce over a dict iterates its keys in insertion order.
        self.assertEqual(self.reduce(add, d), "".join(d.keys()))
@unittest.skipUnless(c_functools, 'requires the C _functools module')
class TestReduceC(TestReduce, unittest.TestCase):
    """Run the reduce tests against the C implementation."""
    if c_functools:
        reduce = c_functools.reduce
class TestReducePy(TestReduce, unittest.TestCase):
    """Run the reduce tests against the pure-Python implementation."""
    # staticmethod stops the function from being bound as a method.
    reduce = staticmethod(py_functools.reduce)
class TestCmpToKey:
    """Shared tests for cmp_to_key(); bound in TestCmpToKeyC/TestCmpToKeyPy."""

    def test_cmp_to_key(self):
        def cmp1(x, y):
            return (x > y) - (x < y)
        key = self.cmp_to_key(cmp1)
        self.assertEqual(key(3), key(3))
        self.assertGreater(key(3), key(1))
        self.assertGreaterEqual(key(3), key(3))

        # A comparison function may compare across types.
        def cmp2(x, y):
            return int(x) - int(y)
        key = self.cmp_to_key(cmp2)
        self.assertEqual(key(4.0), key('4'))
        self.assertLess(key(2), key('35'))
        self.assertLessEqual(key(2), key('35'))
        self.assertNotEqual(key(2), key('35'))

    def test_cmp_to_key_arguments(self):
        def cmp1(x, y):
            return (x > y) - (x < y)
        # Both the factory and the resulting K class take exactly one
        # argument, accepted positionally or by keyword.
        key = self.cmp_to_key(mycmp=cmp1)
        self.assertEqual(key(obj=3), key(obj=3))
        self.assertGreater(key(obj=3), key(obj=1))
        with self.assertRaises((TypeError, AttributeError)):
            key(3) > 1    # rhs is not a K object
        with self.assertRaises((TypeError, AttributeError)):
            1 < key(3)    # lhs is not a K object
        with self.assertRaises(TypeError):
            key = self.cmp_to_key()             # too few args
        with self.assertRaises(TypeError):
            key = self.cmp_to_key(cmp1, None)   # too many args
        key = self.cmp_to_key(cmp1)
        with self.assertRaises(TypeError):
            key()             # too few args
        with self.assertRaises(TypeError):
            key(None, None)   # too many args

    def test_bad_cmp(self):
        # Exceptions raised by the comparison function propagate.
        def cmp1(x, y):
            raise ZeroDivisionError
        key = self.cmp_to_key(cmp1)
        with self.assertRaises(ZeroDivisionError):
            key(3) > key(1)

        class BadCmp:
            def __lt__(self, other):
                raise ZeroDivisionError
        def cmp1(x, y):
            return BadCmp()
        with self.assertRaises(ZeroDivisionError):
            key(3) > key(1)

    def test_obj_field(self):
        # The wrapped value is exposed as .obj.
        def cmp1(x, y):
            return (x > y) - (x < y)
        key = self.cmp_to_key(mycmp=cmp1)
        self.assertEqual(key(50).obj, 50)

    def test_sort_int(self):
        def mycmp(x, y):
            return y - x
        self.assertEqual(sorted(range(5), key=self.cmp_to_key(mycmp)),
                         [4, 3, 2, 1, 0])

    def test_sort_int_str(self):
        def mycmp(x, y):
            x, y = int(x), int(y)
            return (x > y) - (x < y)
        values = [5, '3', 7, 2, '0', '1', 4, '10', 1]
        values = sorted(values, key=self.cmp_to_key(mycmp))
        self.assertEqual([int(value) for value in values],
                         [0, 1, 1, 2, 3, 4, 5, 7, 10])

    def test_hash(self):
        # K objects are deliberately unhashable.
        def mycmp(x, y):
            return y - x
        key = self.cmp_to_key(mycmp)
        k = key(10)
        self.assertRaises(TypeError, hash, k)
        self.assertNotIsInstance(k, collections.abc.Hashable)
@unittest.skipUnless(c_functools, 'requires the C _functools module')
class TestCmpToKeyC(TestCmpToKey, unittest.TestCase):
    """Run the cmp_to_key tests against the C implementation.

    Every test is currently an expected failure under RustPython.
    """
    if c_functools:
        cmp_to_key = c_functools.cmp_to_key

    # TODO: RUSTPYTHON
    @unittest.expectedFailure
    def test_bad_cmp(self):
        super().test_bad_cmp()

    # TODO: RUSTPYTHON
    @unittest.expectedFailure
    def test_cmp_to_key(self):
        super().test_cmp_to_key()

    # TODO: RUSTPYTHON
    @unittest.expectedFailure
    def test_cmp_to_key_arguments(self):
        super().test_cmp_to_key_arguments()

    # TODO: RUSTPYTHON
    @unittest.expectedFailure
    def test_hash(self):
        super().test_hash()

    # TODO: RUSTPYTHON
    @unittest.expectedFailure
    def test_obj_field(self):
        super().test_obj_field()

    # TODO: RUSTPYTHON
    @unittest.expectedFailure
    def test_sort_int(self):
        super().test_sort_int()

    # TODO: RUSTPYTHON
    @unittest.expectedFailure
    def test_sort_int_str(self):
        super().test_sort_int_str()
class TestCmpToKeyPy(TestCmpToKey, unittest.TestCase):
    """Run the cmp_to_key tests against the pure-Python implementation."""
    cmp_to_key = staticmethod(py_functools.cmp_to_key)
class TestTotalOrdering(unittest.TestCase):
    """Tests for the functools.total_ordering class decorator."""

    def test_total_ordering_lt(self):
        # __eq__ + __lt__ are enough to derive the other three.
        @functools.total_ordering
        class A:
            def __init__(self, value):
                self.value = value
            def __lt__(self, other):
                return self.value < other.value
            def __eq__(self, other):
                return self.value == other.value
        self.assertTrue(A(1) < A(2))
        self.assertTrue(A(2) > A(1))
        self.assertTrue(A(1) <= A(2))
        self.assertTrue(A(2) >= A(1))
        self.assertTrue(A(2) <= A(2))
        self.assertTrue(A(2) >= A(2))
        self.assertFalse(A(1) > A(2))

    def test_total_ordering_le(self):
        # __eq__ + __le__ variant.
        @functools.total_ordering
        class A:
            def __init__(self, value):
                self.value = value
            def __le__(self, other):
                return self.value <= other.value
            def __eq__(self, other):
                return self.value == other.value
        self.assertTrue(A(1) < A(2))
        self.assertTrue(A(2) > A(1))
        self.assertTrue(A(1) <= A(2))
        self.assertTrue(A(2) >= A(1))
        self.assertTrue(A(2) <= A(2))
        self.assertTrue(A(2) >= A(2))
        self.assertFalse(A(1) >= A(2))

    def test_total_ordering_gt(self):
        # __eq__ + __gt__ variant.
        @functools.total_ordering
        class A:
            def __init__(self, value):
                self.value = value
            def __gt__(self, other):
                return self.value > other.value
            def __eq__(self, other):
                return self.value == other.value
        self.assertTrue(A(1) < A(2))
        self.assertTrue(A(2) > A(1))
        self.assertTrue(A(1) <= A(2))
        self.assertTrue(A(2) >= A(1))
        self.assertTrue(A(2) <= A(2))
        self.assertTrue(A(2) >= A(2))
        self.assertFalse(A(2) < A(1))

    def test_total_ordering_ge(self):
        # __eq__ + __ge__ variant.
        @functools.total_ordering
        class A:
            def __init__(self, value):
                self.value = value
            def __ge__(self, other):
                return self.value >= other.value
            def __eq__(self, other):
                return self.value == other.value
        self.assertTrue(A(1) < A(2))
        self.assertTrue(A(2) > A(1))
        self.assertTrue(A(1) <= A(2))
        self.assertTrue(A(2) >= A(1))
        self.assertTrue(A(2) <= A(2))
        self.assertTrue(A(2) >= A(2))
        self.assertFalse(A(2) <= A(1))

    def test_total_ordering_no_overwrite(self):
        # new methods should not overwrite existing
        @functools.total_ordering
        class A(int):
            pass
        self.assertTrue(A(1) < A(2))
        self.assertTrue(A(2) > A(1))
        self.assertTrue(A(1) <= A(2))
        self.assertTrue(A(2) >= A(1))
        self.assertTrue(A(2) <= A(2))
        self.assertTrue(A(2) >= A(2))

    def test_no_operations_defined(self):
        # Decorating a class with no ordering operation is an error.
        with self.assertRaises(ValueError):
            @functools.total_ordering
            class A:
                pass

    def test_type_error_when_not_implemented(self):
        # bug 10042; ensure stack overflow does not occur
        # when decorated types return NotImplemented
        @functools.total_ordering
        class ImplementsLessThan:
            def __init__(self, value):
                self.value = value
            def __eq__(self, other):
                if isinstance(other, ImplementsLessThan):
                    return self.value == other.value
                return False
            def __lt__(self, other):
                if isinstance(other, ImplementsLessThan):
                    return self.value < other.value
                return NotImplemented

        @functools.total_ordering
        class ImplementsGreaterThan:
            def __init__(self, value):
                self.value = value
            def __eq__(self, other):
                if isinstance(other, ImplementsGreaterThan):
                    return self.value == other.value
                return False
            def __gt__(self, other):
                if isinstance(other, ImplementsGreaterThan):
                    return self.value > other.value
                return NotImplemented

        @functools.total_ordering
        class ImplementsLessThanEqualTo:
            def __init__(self, value):
                self.value = value
            def __eq__(self, other):
                if isinstance(other, ImplementsLessThanEqualTo):
                    return self.value == other.value
                return False
            def __le__(self, other):
                if isinstance(other, ImplementsLessThanEqualTo):
                    return self.value <= other.value
                return NotImplemented

        @functools.total_ordering
        class ImplementsGreaterThanEqualTo:
            def __init__(self, value):
                self.value = value
            def __eq__(self, other):
                if isinstance(other, ImplementsGreaterThanEqualTo):
                    return self.value == other.value
                return False
            def __ge__(self, other):
                if isinstance(other, ImplementsGreaterThanEqualTo):
                    return self.value >= other.value
                return NotImplemented

        @functools.total_ordering
        class ComparatorNotImplemented:
            def __init__(self, value):
                self.value = value
            def __eq__(self, other):
                if isinstance(other, ComparatorNotImplemented):
                    return self.value == other.value
                return False
            def __lt__(self, other):
                return NotImplemented

        # Each mixed-type comparison must fall back to TypeError rather
        # than recurse between the derived comparison methods.
        with self.subTest("LT < 1"), self.assertRaises(TypeError):
            ImplementsLessThan(-1) < 1

        with self.subTest("LT < LE"), self.assertRaises(TypeError):
            ImplementsLessThan(0) < ImplementsLessThanEqualTo(0)

        with self.subTest("LT < GT"), self.assertRaises(TypeError):
            ImplementsLessThan(1) < ImplementsGreaterThan(1)

        with self.subTest("LE <= LT"), self.assertRaises(TypeError):
            ImplementsLessThanEqualTo(2) <= ImplementsLessThan(2)

        with self.subTest("LE <= GE"), self.assertRaises(TypeError):
            ImplementsLessThanEqualTo(3) <= ImplementsGreaterThanEqualTo(3)

        with self.subTest("GT > GE"), self.assertRaises(TypeError):
            ImplementsGreaterThan(4) > ImplementsGreaterThanEqualTo(4)

        with self.subTest("GT > LT"), self.assertRaises(TypeError):
            ImplementsGreaterThan(5) > ImplementsLessThan(5)

        with self.subTest("GE >= GT"), self.assertRaises(TypeError):
            ImplementsGreaterThanEqualTo(6) >= ImplementsGreaterThan(6)

        with self.subTest("GE >= LE"), self.assertRaises(TypeError):
            ImplementsGreaterThanEqualTo(7) >= ImplementsLessThanEqualTo(7)

        # Equality alone is not enough to derive an ordering.
        with self.subTest("GE when equal"):
            a = ComparatorNotImplemented(8)
            b = ComparatorNotImplemented(8)
            self.assertEqual(a, b)
            with self.assertRaises(TypeError):
                a >= b

        with self.subTest("LE when equal"):
            a = ComparatorNotImplemented(9)
            b = ComparatorNotImplemented(9)
            self.assertEqual(a, b)
            with self.assertRaises(TypeError):
                a <= b

    def test_pickle(self):
        # The synthesized comparison methods must be picklable, which
        # requires the module-level Orderable_LT class defined below.
        for proto in range(pickle.HIGHEST_PROTOCOL + 1):
            for name in '__lt__', '__gt__', '__le__', '__ge__':
                with self.subTest(method=name, proto=proto):
                    method = getattr(Orderable_LT, name)
                    method_copy = pickle.loads(pickle.dumps(method, proto))
                    self.assertIs(method_copy, method)
# Module-level (hence picklable) total_ordering example used by
# TestTotalOrdering.test_pickle; the remaining comparison methods are
# derived from __lt__ and __eq__.
@functools.total_ordering
class Orderable_LT:
    def __init__(self, value):
        self.value = value
    def __lt__(self, other):
        return self.value < other.value
    def __eq__(self, other):
        return self.value == other.value
class TestLRU:
def test_lru(self):
    # Core lru_cache behaviour: cache_info bookkeeping, cache_clear,
    # __wrapped__ bypass, and the maxsize=0 and maxsize=1/2 edge cases.
    def orig(x, y):
        return 3 * x + y
    f = self.module.lru_cache(maxsize=20)(orig)
    hits, misses, maxsize, currsize = f.cache_info()
    self.assertEqual(maxsize, 20)
    self.assertEqual(currsize, 0)
    self.assertEqual(hits, 0)
    self.assertEqual(misses, 0)

    # 25 possible inputs hammered 1000 times: mostly hits, cache full.
    domain = range(5)
    for i in range(1000):
        x, y = choice(domain), choice(domain)
        actual = f(x, y)
        expected = orig(x, y)
        self.assertEqual(actual, expected)
    hits, misses, maxsize, currsize = f.cache_info()
    self.assertTrue(hits > misses)
    self.assertEqual(hits + misses, 1000)
    self.assertEqual(currsize, 20)

    f.cache_clear()   # test clearing
    hits, misses, maxsize, currsize = f.cache_info()
    self.assertEqual(hits, 0)
    self.assertEqual(misses, 0)
    self.assertEqual(currsize, 0)
    f(x, y)
    hits, misses, maxsize, currsize = f.cache_info()
    self.assertEqual(hits, 0)
    self.assertEqual(misses, 1)
    self.assertEqual(currsize, 1)

    # Test bypassing the cache
    self.assertIs(f.__wrapped__, orig)
    f.__wrapped__(x, y)
    hits, misses, maxsize, currsize = f.cache_info()
    self.assertEqual(hits, 0)
    self.assertEqual(misses, 1)
    self.assertEqual(currsize, 1)

    # test size zero (which means "never-cache")
    @self.module.lru_cache(0)
    def f():
        nonlocal f_cnt
        f_cnt += 1
        return 20
    self.assertEqual(f.cache_info().maxsize, 0)
    f_cnt = 0
    for i in range(5):
        self.assertEqual(f(), 20)
    self.assertEqual(f_cnt, 5)
    hits, misses, maxsize, currsize = f.cache_info()
    self.assertEqual(hits, 0)
    self.assertEqual(misses, 5)
    self.assertEqual(currsize, 0)

    # test size one
    @self.module.lru_cache(1)
    def f():
        nonlocal f_cnt
        f_cnt += 1
        return 20
    self.assertEqual(f.cache_info().maxsize, 1)
    f_cnt = 0
    for i in range(5):
        self.assertEqual(f(), 20)
    self.assertEqual(f_cnt, 1)
    hits, misses, maxsize, currsize = f.cache_info()
    self.assertEqual(hits, 4)
    self.assertEqual(misses, 1)
    self.assertEqual(currsize, 1)

    # test size two
    @self.module.lru_cache(2)
    def f(x):
        nonlocal f_cnt
        f_cnt += 1
        return x*10
    self.assertEqual(f.cache_info().maxsize, 2)
    f_cnt = 0
    for x in 7, 9, 7, 9, 7, 9, 8, 8, 8, 9, 9, 9, 8, 8, 8, 7:
        #    *  *              *                          *
        self.assertEqual(f(x), x*10)
    self.assertEqual(f_cnt, 4)
    hits, misses, maxsize, currsize = f.cache_info()
    self.assertEqual(hits, 12)
    self.assertEqual(misses, 4)
    self.assertEqual(currsize, 2)
def test_lru_no_args(self):
    # lru_cache may be used bare (without parentheses) as a decorator,
    # in which case maxsize defaults to 128.
    @self.module.lru_cache
    def square(x):
        return x ** 2
    self.assertEqual(list(map(square, [10, 20, 10])),
                     [100, 400, 100])
    self.assertEqual(square.cache_info().hits, 1)
    self.assertEqual(square.cache_info().misses, 2)
    self.assertEqual(square.cache_info().maxsize, 128)
    self.assertEqual(square.cache_info().currsize, 2)
def test_lru_bug_35780(self):
    # C version of the lru_cache was not checking to see if
    # the user function call has already modified the cache
    # (this arises in recursive calls and in multi-threading).
    # This cause the cache to have orphan links not referenced
    # by the cache dictionary.
    once = True                 # Modified by f(x) below
    @self.module.lru_cache(maxsize=10)
    def f(x):
        nonlocal once
        rv = f'.{x}.'
        if x == 20 and once:
            once = False
            # Re-entrant call mutates the cache mid-invocation.
            rv = f(x)
        return rv

    # Fill the cache
    for x in range(15):
        self.assertEqual(f(x), f'.{x}.')
    self.assertEqual(f.cache_info().currsize, 10)

    # Make a recursive call and make sure the cache remains full
    self.assertEqual(f(20), '.20.')
    self.assertEqual(f.cache_info().currsize, 10)
    def test_lru_bug_36650(self):
        # C version of lru_cache was treating a call with an empty **kwargs
        # dictionary as being distinct from a call with no keywords at all.
        # This did not result in an incorrect answer, but it did trigger
        # an unexpected cache miss.
        @self.module.lru_cache()
        def f(x):
            pass
        f(0)
        f(0, **{})
        # Both calls must map to the same cache key: the second is a hit.
        self.assertEqual(f.cache_info().hits, 1)
    def test_lru_hash_only_once(self):
        # To protect against weird reentrancy bugs and to improve
        # efficiency when faced with slow __hash__ methods, the
        # LRU cache guarantees that it will only call __hash__
        # only once per use as an argument to the cached function.
        @self.module.lru_cache(maxsize=1)
        def f(x, y):
            return x * 3 + y
        # Simulate the integer 5
        mock_int = unittest.mock.Mock()
        mock_int.__mul__ = unittest.mock.Mock(return_value=15)
        mock_int.__hash__ = unittest.mock.Mock(return_value=999)
        # cache_info() tuples below are (hits, misses, maxsize, currsize).
        # Add to cache: One use as an argument gives one call
        self.assertEqual(f(mock_int, 1), 16)
        self.assertEqual(mock_int.__hash__.call_count, 1)
        self.assertEqual(f.cache_info(), (0, 1, 1, 1))
        # Cache hit: One use as an argument gives one additional call
        self.assertEqual(f(mock_int, 1), 16)
        self.assertEqual(mock_int.__hash__.call_count, 2)
        self.assertEqual(f.cache_info(), (1, 1, 1, 1))
        # Cache eviction: No use as an argument gives no additional call
        self.assertEqual(f(6, 2), 20)
        self.assertEqual(mock_int.__hash__.call_count, 2)
        self.assertEqual(f.cache_info(), (1, 2, 1, 1))
        # Cache miss: One use as an argument gives one additional call
        self.assertEqual(f(mock_int, 1), 16)
        self.assertEqual(mock_int.__hash__.call_count, 3)
        self.assertEqual(f.cache_info(), (1, 3, 1, 1))
    def test_lru_reentrancy_with_len(self):
        # Test to make sure the LRU cache code isn't thrown-off by
        # caching the built-in len() function. Since len() can be
        # cached, we shouldn't use it inside the lru code itself.
        old_len = builtins.len
        try:
            builtins.len = self.module.lru_cache(4)(len)
            for i in [0, 0, 1, 2, 3, 3, 4, 5, 6, 1, 7, 2, 1]:
                self.assertEqual(len('abcdefghijklmn'[:i]), i)
        finally:
            # Always restore the real len(), even if an assertion failed.
            builtins.len = old_len
def test_lru_star_arg_handling(self):
# Test regression that arose in ea064ff3c10f
@functools.lru_cache()
def f(*args):
return args
self.assertEqual(f(1, 2), (1, 2))
self.assertEqual(f((1, 2)), ((1, 2),))
def test_lru_type_error(self):
# Regression test for issue #28653.
# lru_cache was leaking when one of the arguments
# wasn't cacheable.
@functools.lru_cache(maxsize=None)
def infinite_cache(o):
pass
@functools.lru_cache(maxsize=10)
def limited_cache(o):
pass
with self.assertRaises(TypeError):
infinite_cache([])
with self.assertRaises(TypeError):
limited_cache([])
    def test_lru_with_maxsize_none(self):
        # maxsize=None gives an unbounded cache: every distinct n is kept,
        # and cache_clear() resets all counters.
        @self.module.lru_cache(maxsize=None)
        def fib(n):
            if n < 2:
                return n
            return fib(n-1) + fib(n-2)
        self.assertEqual([fib(n) for n in range(16)],
            [0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233, 377, 610])
        self.assertEqual(fib.cache_info(),
            self.module._CacheInfo(hits=28, misses=16, maxsize=None, currsize=16))
        fib.cache_clear()
        self.assertEqual(fib.cache_info(),
            self.module._CacheInfo(hits=0, misses=0, maxsize=None, currsize=0))
    def test_lru_with_maxsize_negative(self):
        # A negative maxsize is reported as 0 and nothing is ever cached:
        # 2 x 150 calls are all misses and currsize stays at 0.
        @self.module.lru_cache(maxsize=-10)
        def eq(n):
            return n
        for i in (0, 1):
            self.assertEqual([eq(n) for n in range(150)], list(range(150)))
        self.assertEqual(eq.cache_info(),
            self.module._CacheInfo(hits=0, misses=300, maxsize=0, currsize=0))
    def test_lru_with_exceptions(self):
        # Verify that user_function exceptions get passed through without
        # creating a hard-to-read chained exception.
        # http://bugs.python.org/issue13177
        for maxsize in (None, 128):
            # maxsize passed positionally is also a supported calling form.
            @self.module.lru_cache(maxsize)
            def func(i):
                return 'abc'[i]
            self.assertEqual(func(0), 'a')
            with self.assertRaises(IndexError) as cm:
                func(15)
            self.assertIsNone(cm.exception.__context__)
            # Verify that the previous exception did not result in a cached entry
            with self.assertRaises(IndexError):
                func(15)
    def test_lru_with_types(self):
        for maxsize in (None, 128):
            # typed=True caches 3 and 3.0 under separate keys, so the float
            # calls are misses even after the equal-valued int calls.
            @self.module.lru_cache(maxsize=maxsize, typed=True)
            def square(x):
                return x * x
            self.assertEqual(square(3), 9)
            self.assertEqual(type(square(3)), type(9))
            self.assertEqual(square(3.0), 9.0)
            self.assertEqual(type(square(3.0)), type(9.0))
            self.assertEqual(square(x=3), 9)
            self.assertEqual(type(square(x=3)), type(9))
            self.assertEqual(square(x=3.0), 9.0)
            self.assertEqual(type(square(x=3.0)), type(9.0))
            self.assertEqual(square.cache_info().hits, 4)
            self.assertEqual(square.cache_info().misses, 4)
    def test_lru_with_keyword_args(self):
        # Keyword-form calls f(n=x) are cacheable too; the hit/miss counts
        # match the positional fibonacci test.
        @self.module.lru_cache()
        def fib(n):
            if n < 2:
                return n
            return fib(n=n-1) + fib(n=n-2)
        self.assertEqual(
            [fib(n=number) for number in range(16)],
            [0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233, 377, 610]
        )
        self.assertEqual(fib.cache_info(),
            self.module._CacheInfo(hits=28, misses=16, maxsize=128, currsize=16))
        fib.cache_clear()
        self.assertEqual(fib.cache_info(),
            self.module._CacheInfo(hits=0, misses=0, maxsize=128, currsize=0))
    def test_lru_with_keyword_args_maxsize_none(self):
        # Same as test_lru_with_keyword_args but with an unbounded cache.
        @self.module.lru_cache(maxsize=None)
        def fib(n):
            if n < 2:
                return n
            return fib(n=n-1) + fib(n=n-2)
        self.assertEqual([fib(n=number) for number in range(16)],
            [0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233, 377, 610])
        self.assertEqual(fib.cache_info(),
            self.module._CacheInfo(hits=28, misses=16, maxsize=None, currsize=16))
        fib.cache_clear()
        self.assertEqual(fib.cache_info(),
            self.module._CacheInfo(hits=0, misses=0, maxsize=None, currsize=0))
    def test_kwargs_order(self):
        # PEP 468: Preserving Keyword Argument Order
        # f(a=1, b=2) and f(b=2, a=1) must be distinct cache entries
        # (2 misses, currsize 2) and preserve their own ordering.
        @self.module.lru_cache(maxsize=10)
        def f(**kwargs):
            return list(kwargs.items())
        self.assertEqual(f(a=1, b=2), [('a', 1), ('b', 2)])
        self.assertEqual(f(b=2, a=1), [('b', 2), ('a', 1)])
        self.assertEqual(f.cache_info(),
            self.module._CacheInfo(hits=0, misses=2, maxsize=10, currsize=2))
    def test_lru_cache_decoration(self):
        # The wrapper must copy every WRAPPER_ASSIGNMENTS attribute
        # (__name__, __doc__, annotations, ...) from the wrapped function.
        def f(zomg: 'zomg_annotation'):
            """f doc string"""
            return 42
        g = self.module.lru_cache()(f)
        for attr in self.module.WRAPPER_ASSIGNMENTS:
            self.assertEqual(getattr(g, attr), getattr(f, attr))
    # TODO: RUSTPYTHON
    @unittest.expectedFailure
    def test_lru_cache_threaded(self):
        # Hammer one shared cache from n threads (m calls each) and check
        # the hit/miss accounting; then race cache_clear() against callers.
        n, m = 5, 11
        def orig(x, y):
            return 3 * x + y
        f = self.module.lru_cache(maxsize=n*m)(orig)
        hits, misses, maxsize, currsize = f.cache_info()
        self.assertEqual(currsize, 0)
        # All workers block on `start` so the cache is hit concurrently.
        start = threading.Event()
        def full(k):
            start.wait(10)
            for _ in range(m):
                self.assertEqual(f(k, 0), orig(k, 0))
        def clear():
            start.wait(10)
            for _ in range(2*m):
                f.cache_clear()
        orig_si = sys.getswitchinterval()
        # A tiny switch interval forces frequent thread switches to shake
        # out races; restored in the finally block below.
        support.setswitchinterval(1e-6)
        try:
            # create n threads in order to fill cache
            threads = [threading.Thread(target=full, args=[k])
                       for k in range(n)]
            with support.start_threads(threads):
                start.set()
            hits, misses, maxsize, currsize = f.cache_info()
            if self.module is py_functools:
                # XXX: Why can be not equal?
                self.assertLessEqual(misses, n)
                self.assertLessEqual(hits, m*n - misses)
            else:
                self.assertEqual(misses, n)
                self.assertEqual(hits, m*n - misses)
            self.assertEqual(currsize, n)
            # create n threads in order to fill cache and 1 to clear it
            threads = [threading.Thread(target=clear)]
            threads += [threading.Thread(target=full, args=[k])
                        for k in range(n)]
            start.clear()
            with support.start_threads(threads):
                start.set()
        finally:
            sys.setswitchinterval(orig_si)
    def test_lru_cache_threaded2(self):
        # Simultaneous call with the same arguments
        # Barriers let the main thread release all n workers into f(i)
        # at once, one round per value of i.
        n, m = 5, 7
        start = threading.Barrier(n+1)
        pause = threading.Barrier(n+1)
        stop = threading.Barrier(n+1)
        @self.module.lru_cache(maxsize=m*n)
        def f(x):
            pause.wait(10)
            return 3 * x
        self.assertEqual(f.cache_info(), (0, 0, m*n, 0))
        def test():
            for i in range(m):
                start.wait(10)
                self.assertEqual(f(i), 3 * i)
                stop.wait(10)
        threads = [threading.Thread(target=test) for k in range(n)]
        with support.start_threads(threads):
            for i in range(m):
                start.wait(10)
                stop.reset()
                pause.wait(10)
                start.reset()
                stop.wait(10)
                pause.reset()
                # Per round, all n simultaneous calls miss (the result is
                # only cached after f returns) but only one entry is stored.
                self.assertEqual(f.cache_info(), (0, (i+1)*n, m*n, i+1))
    def test_lru_cache_threaded3(self):
        # Concurrent calls, some with equal arguments, must each observe
        # the correct result for their own argument.
        @self.module.lru_cache(maxsize=2)
        def f(x):
            time.sleep(.01)
            return 3 * x
        def test(i, x):
            with self.subTest(thread=i):
                self.assertEqual(f(x), 3 * x, i)
        threads = [threading.Thread(target=test, args=(i, v))
                   for i, v in enumerate([1, 2, 2, 3, 2])]
        with support.start_threads(threads):
            pass
    def test_need_for_rlock(self):
        # This will deadlock on an LRU cache that uses a regular lock
        @self.module.lru_cache(maxsize=10)
        def test_func(x):
            'Used to demonstrate a reentrant lru_cache call within a single thread'
            return x
        class DoubleEq:
            'Demonstrate a reentrant lru_cache call within a single thread'
            def __init__(self, x):
                self.x = x
            def __hash__(self):
                return self.x
            def __eq__(self, other):
                # Key comparison re-enters the cached function, so the
                # cache's internal lock must be reentrant.
                if self.x == 2:
                    test_func(DoubleEq(1))
                return self.x == other.x
        test_func(DoubleEq(1)) # Load the cache
        test_func(DoubleEq(2)) # Load the cache
        self.assertEqual(test_func(DoubleEq(2)), # Trigger a re-entrant __eq__ call
                         DoubleEq(2)) # Verify the correct return value
    def test_lru_method(self):
        # The cache lives on the function object, so all instances share
        # it; `self` is passed as an ordinary first argument to the cached
        # function (X subclasses int, so a == b share entries, c does not).
        class X(int):
            f_cnt = 0
            @self.module.lru_cache(2)
            def f(self, x):
                self.f_cnt += 1
                return x*10+self
        a = X(5)
        b = X(5)
        c = X(7)
        self.assertEqual(X.f.cache_info(), (0, 0, 2, 0))
        for x in 1, 2, 2, 3, 1, 1, 1, 2, 3, 3:
            self.assertEqual(a.f(x), x*10 + 5)
        self.assertEqual((a.f_cnt, b.f_cnt, c.f_cnt), (6, 0, 0))
        self.assertEqual(X.f.cache_info(), (4, 6, 2, 2))
        for x in 1, 2, 1, 1, 1, 1, 3, 2, 2, 2:
            self.assertEqual(b.f(x), x*10 + 5)
        self.assertEqual((a.f_cnt, b.f_cnt, c.f_cnt), (6, 4, 0))
        self.assertEqual(X.f.cache_info(), (10, 10, 2, 2))
        for x in 2, 1, 1, 1, 1, 2, 1, 3, 2, 1:
            self.assertEqual(c.f(x), x*10 + 7)
        self.assertEqual((a.f_cnt, b.f_cnt, c.f_cnt), (6, 4, 5))
        self.assertEqual(X.f.cache_info(), (15, 15, 2, 2))
        self.assertEqual(a.f.cache_info(), X.f.cache_info())
        self.assertEqual(b.f.cache_info(), X.f.cache_info())
        self.assertEqual(c.f.cache_info(), X.f.cache_info())
    def test_pickle(self):
        # Cached wrappers pickle by reference, so a dump/load round trip
        # must return the very same object at every protocol.
        cls = self.__class__
        for f in cls.cached_func[0], cls.cached_meth, cls.cached_staticmeth:
            for proto in range(pickle.HIGHEST_PROTOCOL + 1):
                with self.subTest(proto=proto, func=f):
                    f_copy = pickle.loads(pickle.dumps(f, proto))
                    self.assertIs(f_copy, f)
    def test_copy(self):
        # copy.copy must treat cached wrappers (including one wrapping a
        # partial) as atomic and return the same object.
        cls = self.__class__
        def orig(x, y):
            return 3 * x + y
        part = self.module.partial(orig, 2)
        funcs = (cls.cached_func[0], cls.cached_meth, cls.cached_staticmeth,
                 self.module.lru_cache(2)(part))
        for f in funcs:
            with self.subTest(func=f):
                f_copy = copy.copy(f)
                self.assertIs(f_copy, f)
    def test_deepcopy(self):
        # copy.deepcopy must likewise treat cached wrappers as atomic and
        # return the same object rather than a recursive copy.
        cls = self.__class__
        def orig(x, y):
            return 3 * x + y
        part = self.module.partial(orig, 2)
        funcs = (cls.cached_func[0], cls.cached_meth, cls.cached_staticmeth,
                 self.module.lru_cache(2)(part))
        for f in funcs:
            with self.subTest(func=f):
                f_copy = copy.deepcopy(f)
                self.assertIs(f_copy, f)
@py_functools.lru_cache()
def py_cached_func(x, y):
    # Module-level function cached with the pure-Python lru_cache; used by
    # TestLRUPy for the pickle/copy/deepcopy round-trip tests.
    return 3 * x + y
@c_functools.lru_cache()
def c_cached_func(x, y):
    # Module-level function cached with the C-accelerated lru_cache; used
    # by TestLRUC for the pickle/copy/deepcopy round-trip tests.
    return 3 * x + y
class TestLRUPy(TestLRU, unittest.TestCase):
    # Runs the shared TestLRU suite against the pure-Python implementation.
    module = py_functools
    cached_func = py_cached_func,  # one-element tuple
    @module.lru_cache()
    def cached_meth(self, x, y):
        return 3 * x + y
    @staticmethod
    @module.lru_cache()
    def cached_staticmeth(x, y):
        return 3 * x + y
class TestLRUC(TestLRU, unittest.TestCase):
    # Runs the shared TestLRU suite against the C-accelerated implementation.
    module = c_functools
    cached_func = c_cached_func,  # one-element tuple
    @module.lru_cache()
    def cached_meth(self, x, y):
        return 3 * x + y
    @staticmethod
    @module.lru_cache()
    def cached_staticmeth(x, y):
        return 3 * x + y
class TestSingleDispatch(unittest.TestCase):
def test_simple_overloads(self):
@functools.singledispatch
def g(obj):
return "base"
def g_int(i):
return "integer"
g.register(int, g_int)
self.assertEqual(g("str"), "base")
self.assertEqual(g(1), "integer")
self.assertEqual(g([1,2,3]), "base")
    def test_mro(self):
        # Dispatch follows the class MRO: C and D have no registration of
        # their own, so each picks up its closest registered ancestor
        # (D's MRO is D, C, B, A, so B wins for D).
        @functools.singledispatch
        def g(obj):
            return "base"
        class A:
            pass
        class C(A):
            pass
        class B(A):
            pass
        class D(C, B):
            pass
        def g_A(a):
            return "A"
        def g_B(b):
            return "B"
        g.register(A, g_A)
        g.register(B, g_B)
        self.assertEqual(g(A()), "A")
        self.assertEqual(g(B()), "B")
        self.assertEqual(g(C()), "A")
        self.assertEqual(g(D()), "B")
    def test_register_decorator(self):
        @functools.singledispatch
        def g(obj):
            return "base"
        # register(type) works as a decorator and returns the function
        # unchanged, so g_int is the registered implementation itself.
        @g.register(int)
        def g_int(i):
            return "int %s" % (i,)
        self.assertEqual(g(""), "base")
        self.assertEqual(g(12), "int 12")
        self.assertIs(g.dispatch(int), g_int)
        self.assertIs(g.dispatch(object), g.dispatch(str))
        # Note: in the assert above this is not g.
        # @singledispatch returns the wrapper.
    def test_wrapping_attributes(self):
        # The singledispatch wrapper must keep the wrapped function's
        # __name__ and __doc__.
        @functools.singledispatch
        def g(obj):
            "Simple test"
            return "Test"
        self.assertEqual(g.__name__, "g")
        # Docstrings are stripped under -OO, so only assert when present.
        if sys.flags.optimize < 2:
            self.assertEqual(g.__doc__, "Simple test")
    @unittest.skipUnless(decimal, 'requires _decimal')
    @support.cpython_only
    def test_c_classes(self):
        # Registration works on C-implemented exception classes; a later,
        # more specific registration (Subnormal) overrides the base one
        # for Subnormal instances only.
        @functools.singledispatch
        def g(obj):
            return "base"
        @g.register(decimal.DecimalException)
        def _(obj):
            return obj.args
        subn = decimal.Subnormal("Exponent < Emin")
        rnd = decimal.Rounded("Number got rounded")
        self.assertEqual(g(subn), ("Exponent < Emin",))
        self.assertEqual(g(rnd), ("Number got rounded",))
        @g.register(decimal.Subnormal)
        def _(obj):
            return "Too small to care."
        self.assertEqual(g(subn), "Too small to care.")
        self.assertEqual(g(rnd), ("Number got rounded",))
def test_compose_mro(self):
# None of the examples in this test depend on haystack ordering.
c = collections.abc
mro = functools._compose_mro
bases = [c.Sequence, c.MutableMapping, c.Mapping, c.Set]
for haystack in permutations(bases):
m = mro(dict, haystack)
self.assertEqual(m, [dict, c.MutableMapping, c.Mapping,
c.Collection, c.Sized, c.Iterable,
c.Container, object])
bases = [c.Container, c.Mapping, c.MutableMapping, collections.OrderedDict]
for haystack in permutations(bases):
m = mro(collections.ChainMap, haystack)
self.assertEqual(m, [collections.ChainMap, c.MutableMapping, c.Mapping,
c.Collection, c.Sized, c.Iterable,
c.Container, object])
# If there's a generic function with implementations registered for
# both Sized and Container, passing a defaultdict to it results in an
# ambiguous dispatch which will cause a RuntimeError (see
# test_mro_conflicts).
bases = [c.Container, c.Sized, str]
for haystack in permutations(bases):
m = mro(collections.defaultdict, [c.Sized, c.Container, str])
self.assertEqual(m, [collections.defaultdict, dict, c.Sized,
c.Container, object])
# MutableSequence below is registered directly on D. In other words, it
# precedes MutableMapping which means single dispatch will always
# choose MutableSequence here.
class D(collections.defaultdict):
pass
c.MutableSequence.register(D)
bases = [c.MutableSequence, c.MutableMapping]
for haystack in permutations(bases):
m = mro(D, bases)
self.assertEqual(m, [D, c.MutableSequence, c.Sequence, c.Reversible,
collections.defaultdict, dict, c.MutableMapping, c.Mapping,
c.Collection, c.Sized, c.Iterable, c.Container,
object])
# Container and Callable are registered on different base classes and
# a generic function supporting both should always pick the Callable
# implementation if a C instance is passed.
class C(collections.defaultdict):
def __call__(self):
pass
bases = [c.Sized, c.Callable, c.Container, c.Mapping]
for haystack in permutations(bases):
m = mro(C, haystack)
self.assertEqual(m, [C, c.Callable, collections.defaultdict, dict, c.Mapping,
c.Collection, c.Sized, c.Iterable,
c.Container, object])
    def test_register_abc(self):
        # Register handlers for progressively more specific ABCs and then
        # concrete types, checking after each registration that every
        # sample value dispatches to its most specific implementation.
        c = collections.abc
        d = {"a": "b"}
        l = [1, 2, 3]
        s = {object(), None}
        f = frozenset(s)
        t = (1, 2, 3)
        @functools.singledispatch
        def g(obj):
            return "base"
        self.assertEqual(g(d), "base")
        self.assertEqual(g(l), "base")
        self.assertEqual(g(s), "base")
        self.assertEqual(g(f), "base")
        self.assertEqual(g(t), "base")
        g.register(c.Sized, lambda obj: "sized")
        self.assertEqual(g(d), "sized")
        self.assertEqual(g(l), "sized")
        self.assertEqual(g(s), "sized")
        self.assertEqual(g(f), "sized")
        self.assertEqual(g(t), "sized")
        g.register(c.MutableMapping, lambda obj: "mutablemapping")
        self.assertEqual(g(d), "mutablemapping")
        self.assertEqual(g(l), "sized")
        self.assertEqual(g(s), "sized")
        self.assertEqual(g(f), "sized")
        self.assertEqual(g(t), "sized")
        g.register(collections.ChainMap, lambda obj: "chainmap")
        self.assertEqual(g(d), "mutablemapping") # irrelevant ABCs registered
        self.assertEqual(g(l), "sized")
        self.assertEqual(g(s), "sized")
        self.assertEqual(g(f), "sized")
        self.assertEqual(g(t), "sized")
        g.register(c.MutableSequence, lambda obj: "mutablesequence")
        self.assertEqual(g(d), "mutablemapping")
        self.assertEqual(g(l), "mutablesequence")
        self.assertEqual(g(s), "sized")
        self.assertEqual(g(f), "sized")
        self.assertEqual(g(t), "sized")
        g.register(c.MutableSet, lambda obj: "mutableset")
        self.assertEqual(g(d), "mutablemapping")
        self.assertEqual(g(l), "mutablesequence")
        self.assertEqual(g(s), "mutableset")
        self.assertEqual(g(f), "sized")
        self.assertEqual(g(t), "sized")
        g.register(c.Mapping, lambda obj: "mapping")
        self.assertEqual(g(d), "mutablemapping") # not specific enough
        self.assertEqual(g(l), "mutablesequence")
        self.assertEqual(g(s), "mutableset")
        self.assertEqual(g(f), "sized")
        self.assertEqual(g(t), "sized")
        g.register(c.Sequence, lambda obj: "sequence")
        self.assertEqual(g(d), "mutablemapping")
        self.assertEqual(g(l), "mutablesequence")
        self.assertEqual(g(s), "mutableset")
        self.assertEqual(g(f), "sized")
        self.assertEqual(g(t), "sequence")
        g.register(c.Set, lambda obj: "set")
        self.assertEqual(g(d), "mutablemapping")
        self.assertEqual(g(l), "mutablesequence")
        self.assertEqual(g(s), "mutableset")
        self.assertEqual(g(f), "set")
        self.assertEqual(g(t), "sequence")
        g.register(dict, lambda obj: "dict")
        self.assertEqual(g(d), "dict")
        self.assertEqual(g(l), "mutablesequence")
        self.assertEqual(g(s), "mutableset")
        self.assertEqual(g(f), "set")
        self.assertEqual(g(t), "sequence")
        g.register(list, lambda obj: "list")
        self.assertEqual(g(d), "dict")
        self.assertEqual(g(l), "list")
        self.assertEqual(g(s), "mutableset")
        self.assertEqual(g(f), "set")
        self.assertEqual(g(t), "sequence")
        g.register(set, lambda obj: "concrete-set")
        self.assertEqual(g(d), "dict")
        self.assertEqual(g(l), "list")
        self.assertEqual(g(s), "concrete-set")
        self.assertEqual(g(f), "set")
        self.assertEqual(g(t), "sequence")
        g.register(frozenset, lambda obj: "frozen-set")
        self.assertEqual(g(d), "dict")
        self.assertEqual(g(l), "list")
        self.assertEqual(g(s), "concrete-set")
        self.assertEqual(g(f), "frozen-set")
        self.assertEqual(g(t), "sequence")
        g.register(tuple, lambda obj: "tuple")
        self.assertEqual(g(d), "dict")
        self.assertEqual(g(l), "list")
        self.assertEqual(g(s), "concrete-set")
        self.assertEqual(g(f), "frozen-set")
        self.assertEqual(g(t), "tuple")
    def test_c3_abc(self):
        # _c3_mro merges implicit ABCs (inferred from special methods or
        # registered) into the class's real MRO next to the classes that
        # imply them, independently of the order the ABCs are supplied in.
        c = collections.abc
        mro = functools._c3_mro
        class A(object):
            pass
        class B(A):
            def __len__(self):
                return 0 # implies Sized
        @c.Container.register
        class C(object):
            pass
        class D(object):
            pass # unrelated
        class X(D, C, B):
            def __call__(self):
                pass # implies Callable
        expected = [X, c.Callable, D, C, c.Container, B, c.Sized, A, object]
        for abcs in permutations([c.Sized, c.Callable, c.Container]):
            self.assertEqual(mro(X, abcs=abcs), expected)
        # unrelated ABCs don't appear in the resulting MRO
        many_abcs = [c.Mapping, c.Sized, c.Callable, c.Container, c.Iterable]
        self.assertEqual(mro(X, abcs=many_abcs), expected)
    def test_false_meta(self):
        # see issue23572
        # A __len__ on the metaclass must not make instances of A look
        # Sized; dispatch must still find the registration on A itself.
        class MetaA(type):
            def __len__(self):
                return 0
        class A(metaclass=MetaA):
            pass
        class AA(A):
            pass
        @functools.singledispatch
        def fun(a):
            return 'base A'
        @fun.register(A)
        def _(a):
            return 'fun A'
        aa = AA()
        self.assertEqual(fun(aa), 'fun A')
    def test_mro_conflicts(self):
        # ABC registrations interact with the class MRO: entries explicit
        # in __mro__ win over registered ones, and two unrelated implicit
        # ABCs with registered handlers raise an "Ambiguous dispatch"
        # RuntimeError (in either wording order).
        c = collections.abc
        @functools.singledispatch
        def g(arg):
            return "base"
        class O(c.Sized):
            def __len__(self):
                return 0
        o = O()
        self.assertEqual(g(o), "base")
        g.register(c.Iterable, lambda arg: "iterable")
        g.register(c.Container, lambda arg: "container")
        g.register(c.Sized, lambda arg: "sized")
        g.register(c.Set, lambda arg: "set")
        self.assertEqual(g(o), "sized")
        c.Iterable.register(O)
        self.assertEqual(g(o), "sized") # because it's explicitly in __mro__
        c.Container.register(O)
        self.assertEqual(g(o), "sized") # see above: Sized is in __mro__
        c.Set.register(O)
        self.assertEqual(g(o), "set") # because c.Set is a subclass of
                                      # c.Sized and c.Container
        class P:
            pass
        p = P()
        self.assertEqual(g(p), "base")
        c.Iterable.register(P)
        self.assertEqual(g(p), "iterable")
        c.Container.register(P)
        with self.assertRaises(RuntimeError) as re_one:
            g(p)
        self.assertIn(
            str(re_one.exception),
            (("Ambiguous dispatch: <class 'collections.abc.Container'> "
              "or <class 'collections.abc.Iterable'>"),
             ("Ambiguous dispatch: <class 'collections.abc.Iterable'> "
              "or <class 'collections.abc.Container'>")),
        )
        class Q(c.Sized):
            def __len__(self):
                return 0
        q = Q()
        self.assertEqual(g(q), "sized")
        c.Iterable.register(Q)
        self.assertEqual(g(q), "sized") # because it's explicitly in __mro__
        c.Set.register(Q)
        self.assertEqual(g(q), "set") # because c.Set is a subclass of
                                      # c.Sized and c.Iterable
        @functools.singledispatch
        def h(arg):
            return "base"
        @h.register(c.Sized)
        def _(arg):
            return "sized"
        @h.register(c.Container)
        def _(arg):
            return "container"
        # Even though Sized and Container are explicit bases of MutableMapping,
        # this ABC is implicitly registered on defaultdict which makes all of
        # MutableMapping's bases implicit as well from defaultdict's
        # perspective.
        with self.assertRaises(RuntimeError) as re_two:
            h(collections.defaultdict(lambda: 0))
        self.assertIn(
            str(re_two.exception),
            (("Ambiguous dispatch: <class 'collections.abc.Container'> "
              "or <class 'collections.abc.Sized'>"),
             ("Ambiguous dispatch: <class 'collections.abc.Sized'> "
              "or <class 'collections.abc.Container'>")),
        )
        class R(collections.defaultdict):
            pass
        c.MutableSequence.register(R)
        @functools.singledispatch
        def i(arg):
            return "base"
        @i.register(c.MutableMapping)
        def _(arg):
            return "mapping"
        @i.register(c.MutableSequence)
        def _(arg):
            return "sequence"
        r = R()
        self.assertEqual(i(r), "sequence")
        class S:
            pass
        class T(S, c.Sized):
            def __len__(self):
                return 0
        t = T()
        self.assertEqual(h(t), "sized")
        c.Container.register(T)
        self.assertEqual(h(t), "sized") # because it's explicitly in the MRO
        class U:
            def __len__(self):
                return 0
        u = U()
        self.assertEqual(h(u), "sized") # implicit Sized subclass inferred
                                        # from the existence of __len__()
        c.Container.register(U)
        # There is no preference for registered versus inferred ABCs.
        with self.assertRaises(RuntimeError) as re_three:
            h(u)
        self.assertIn(
            str(re_three.exception),
            (("Ambiguous dispatch: <class 'collections.abc.Container'> "
              "or <class 'collections.abc.Sized'>"),
             ("Ambiguous dispatch: <class 'collections.abc.Sized'> "
              "or <class 'collections.abc.Container'>")),
        )
        class V(c.Sized, S):
            def __len__(self):
                return 0
        @functools.singledispatch
        def j(arg):
            return "base"
        @j.register(S)
        def _(arg):
            return "s"
        @j.register(c.Container)
        def _(arg):
            return "container"
        v = V()
        self.assertEqual(j(v), "s")
        c.Container.register(V)
        self.assertEqual(j(v), "container") # because it ends up right after
                                            # Sized in the MRO
    def test_cache_invalidation(self):
        # Swap in a TracingDict as singledispatch's weak-key dispatch
        # cache so the test can observe exactly when cache entries are
        # read, written, and invalidated by register() and by ABC
        # registration.
        from collections import UserDict
        import weakref
        class TracingDict(UserDict):
            def __init__(self, *args, **kwargs):
                super(TracingDict, self).__init__(*args, **kwargs)
                self.set_ops = []
                self.get_ops = []
            def __getitem__(self, key):
                result = self.data[key]
                self.get_ops.append(key)
                return result
            def __setitem__(self, key, value):
                self.set_ops.append(key)
                self.data[key] = value
            def clear(self):
                self.data.clear()
        td = TracingDict()
        with support.swap_attr(weakref, "WeakKeyDictionary", lambda: td):
            c = collections.abc
            @functools.singledispatch
            def g(arg):
                return "base"
            d = {}
            l = []
            self.assertEqual(len(td), 0)
            self.assertEqual(g(d), "base")
            self.assertEqual(len(td), 1)
            self.assertEqual(td.get_ops, [])
            self.assertEqual(td.set_ops, [dict])
            self.assertEqual(td.data[dict], g.registry[object])
            self.assertEqual(g(l), "base")
            self.assertEqual(len(td), 2)
            self.assertEqual(td.get_ops, [])
            self.assertEqual(td.set_ops, [dict, list])
            self.assertEqual(td.data[dict], g.registry[object])
            self.assertEqual(td.data[list], g.registry[object])
            self.assertEqual(td.data[dict], td.data[list])
            self.assertEqual(g(l), "base")
            self.assertEqual(g(d), "base")
            self.assertEqual(td.get_ops, [list, dict])
            self.assertEqual(td.set_ops, [dict, list])
            # register() must drop every cached entry.
            g.register(list, lambda arg: "list")
            self.assertEqual(td.get_ops, [list, dict])
            self.assertEqual(len(td), 0)
            self.assertEqual(g(d), "base")
            self.assertEqual(len(td), 1)
            self.assertEqual(td.get_ops, [list, dict])
            self.assertEqual(td.set_ops, [dict, list, dict])
            self.assertEqual(td.data[dict],
                             functools._find_impl(dict, g.registry))
            self.assertEqual(g(l), "list")
            self.assertEqual(len(td), 2)
            self.assertEqual(td.get_ops, [list, dict])
            self.assertEqual(td.set_ops, [dict, list, dict, list])
            self.assertEqual(td.data[list],
                             functools._find_impl(list, g.registry))
            class X:
                pass
            c.MutableMapping.register(X)   # Will not invalidate the cache,
                                           # not using ABCs yet.
            self.assertEqual(g(d), "base")
            self.assertEqual(g(l), "list")
            self.assertEqual(td.get_ops, [list, dict, dict, list])
            self.assertEqual(td.set_ops, [dict, list, dict, list])
            g.register(c.Sized, lambda arg: "sized")
            self.assertEqual(len(td), 0)
            self.assertEqual(g(d), "sized")
            self.assertEqual(len(td), 1)
            self.assertEqual(td.get_ops, [list, dict, dict, list])
            self.assertEqual(td.set_ops, [dict, list, dict, list, dict])
            self.assertEqual(g(l), "list")
            self.assertEqual(len(td), 2)
            self.assertEqual(td.get_ops, [list, dict, dict, list])
            self.assertEqual(td.set_ops, [dict, list, dict, list, dict, list])
            self.assertEqual(g(l), "list")
            self.assertEqual(g(d), "sized")
            self.assertEqual(td.get_ops, [list, dict, dict, list, list, dict])
            self.assertEqual(td.set_ops, [dict, list, dict, list, dict, list])
            g.dispatch(list)
            g.dispatch(dict)
            self.assertEqual(td.get_ops, [list, dict, dict, list, list, dict,
                                          list, dict])
            self.assertEqual(td.set_ops, [dict, list, dict, list, dict, list])
            c.MutableSet.register(X)       # Will invalidate the cache.
            self.assertEqual(len(td), 2)   # Stale cache.
            self.assertEqual(g(l), "list")
            self.assertEqual(len(td), 1)
            g.register(c.MutableMapping, lambda arg: "mutablemapping")
            self.assertEqual(len(td), 0)
            self.assertEqual(g(d), "mutablemapping")
            self.assertEqual(len(td), 1)
            self.assertEqual(g(l), "list")
            self.assertEqual(len(td), 2)
            g.register(dict, lambda arg: "dict")
            self.assertEqual(g(d), "dict")
            self.assertEqual(g(l), "list")
            g._clear_cache()
            self.assertEqual(len(td), 0)
    def test_annotations(self):
        @functools.singledispatch
        def i(arg):
            return "base"
        # Plain @register infers the dispatch type from the argument
        # annotation, including a string (forward-reference) annotation.
        @i.register
        def _(arg: collections.abc.Mapping):
            return "mapping"
        @i.register
        def _(arg: "collections.abc.Sequence"):
            return "sequence"
        self.assertEqual(i(None), "base")
        self.assertEqual(i({"a": 1}), "mapping")
        self.assertEqual(i([1, 2, 3]), "sequence")
        self.assertEqual(i((1, 2, 3)), "sequence")
        self.assertEqual(i("str"), "sequence")
        # Registering classes as callables doesn't work with annotations,
        # you need to pass the type explicitly.
        @i.register(str)
        class _:
            def __init__(self, arg):
                self.arg = arg
            def __eq__(self, other):
                return self.arg == other
        self.assertEqual(i("str"), "str")
    def test_method_register(self):
        # singledispatchmethod dispatches on the argument after self; each
        # call writes self.arg, and fresh instances must start without it.
        class A:
            @functools.singledispatchmethod
            def t(self, arg):
                self.arg = "base"
            @t.register(int)
            def _(self, arg):
                self.arg = "int"
            @t.register(str)
            def _(self, arg):
                self.arg = "str"
        a = A()
        a.t(0)
        self.assertEqual(a.arg, "int")
        aa = A()
        self.assertFalse(hasattr(aa, 'arg'))
        a.t('')
        self.assertEqual(a.arg, "str")
        aa = A()
        self.assertFalse(hasattr(aa, 'arg'))
        a.t(0.0)
        self.assertEqual(a.arg, "base")
        aa = A()
        self.assertFalse(hasattr(aa, 'arg'))
    def test_staticmethod_register(self):
        # singledispatchmethod stacks on top of @staticmethod; dispatch is
        # on the first (only) argument.
        class A:
            @functools.singledispatchmethod
            @staticmethod
            def t(arg):
                return arg
            @t.register(int)
            @staticmethod
            def _(arg):
                return isinstance(arg, int)
            @t.register(str)
            @staticmethod
            def _(arg):
                return isinstance(arg, str)
        a = A()
        self.assertTrue(A.t(0))
        self.assertTrue(A.t(''))
        self.assertEqual(A.t(0.0), 0.0)
    def test_classmethod_register(self):
        # singledispatchmethod stacks on top of @classmethod; dispatch is
        # on the argument after cls.
        class A:
            def __init__(self, arg):
                self.arg = arg
            @functools.singledispatchmethod
            @classmethod
            def t(cls, arg):
                return cls("base")
            @t.register(int)
            @classmethod
            def _(cls, arg):
                return cls("int")
            @t.register(str)
            @classmethod
            def _(cls, arg):
                return cls("str")
        self.assertEqual(A.t(0).arg, "int")
        self.assertEqual(A.t('').arg, "str")
        self.assertEqual(A.t(0.0).arg, "base")
    def test_callable_register(self):
        # register() can also be invoked through the class attribute
        # (A.t.register) after the class body has finished executing.
        class A:
            def __init__(self, arg):
                self.arg = arg
            @functools.singledispatchmethod
            @classmethod
            def t(cls, arg):
                return cls("base")
        @A.t.register(int)
        @classmethod
        def _(cls, arg):
            return cls("int")
        @A.t.register(str)
        @classmethod
        def _(cls, arg):
            return cls("str")
        self.assertEqual(A.t(0).arg, "int")
        self.assertEqual(A.t('').arg, "str")
        self.assertEqual(A.t(0.0).arg, "base")
    def test_abstractmethod_register(self):
        # singledispatchmethod must propagate __isabstractmethod__ from a
        # wrapped abstractmethod.
        # NOTE(review): `Abstract` subclasses abc.ABCMeta directly rather
        # than using `metaclass=abc.ABCMeta`; the assertion only inspects
        # __isabstractmethod__, which works either way — confirm intent.
        class Abstract(abc.ABCMeta):
            @functools.singledispatchmethod
            @abc.abstractmethod
            def add(self, x, y):
                pass
        self.assertTrue(Abstract.add.__isabstractmethod__)
    def test_type_ann_register(self):
        # Plain @t.register on methods infers the dispatch type from the
        # annotation of the argument after self.
        class A:
            @functools.singledispatchmethod
            def t(self, arg):
                return "base"
            @t.register
            def _(self, arg: int):
                return "int"
            @t.register
            def _(self, arg: str):
                return "str"
        a = A()
        self.assertEqual(a.t(0), "int")
        self.assertEqual(a.t(''), "str")
        self.assertEqual(a.t(0.0), "base")
    def test_invalid_registrations(self):
        # register() must reject non-types, unannotated functions, and
        # typing generics, each with an informative TypeError message.
        msg_prefix = "Invalid first argument to `register()`: "
        msg_suffix = (
            ". Use either `@register(some_class)` or plain `@register` on an "
            "annotated function."
        )
        @functools.singledispatch
        def i(arg):
            return "base"
        with self.assertRaises(TypeError) as exc:
            @i.register(42)
            def _(arg):
                return "I annotated with a non-type"
        self.assertTrue(str(exc.exception).startswith(msg_prefix + "42"))
        self.assertTrue(str(exc.exception).endswith(msg_suffix))
        with self.assertRaises(TypeError) as exc:
            @i.register
            def _(arg):
                return "I forgot to annotate"
        self.assertTrue(str(exc.exception).startswith(msg_prefix +
            "<function TestSingleDispatch.test_invalid_registrations.<locals>._"
        ))
        self.assertTrue(str(exc.exception).endswith(msg_suffix))
        with self.assertRaises(TypeError) as exc:
            @i.register
            def _(arg: typing.Iterable[str]):
                # At runtime, dispatching on generics is impossible.
                # When registering implementations with singledispatch, avoid
                # types from `typing`. Instead, annotate with regular types
                # or ABCs.
                return "I annotated with a generic collection"
        self.assertTrue(str(exc.exception).startswith(
            "Invalid annotation for 'arg'."
        ))
        self.assertTrue(str(exc.exception).endswith(
            'typing.Iterable[str] is not a class.'
        ))
def test_invalid_positional_argument(self):
@functools.singledispatch
def f(*args):
pass
msg = 'f requires at least 1 positional argument'
with self.assertRaisesRegex(TypeError, msg):
f()
class CachedCostItem:
    # Fixture for TestCachedProperty: the first access to `cost` bumps
    # _cost from 1 to 2 and caches the result.
    _cost = 1
    def __init__(self):
        self.lock = py_functools.RLock()
    @py_functools.cached_property
    def cost(self):
        """The cost of the item."""
        with self.lock:
            self._cost += 1
        return self._cost
class OptionallyCachedCostItem:
    # Fixture: get_cost() recomputes on every call, while cached_cost
    # wraps the same function under a different attribute name and caches
    # its first result.
    _cost = 1
    def get_cost(self):
        """The cost of the item."""
        self._cost += 1
        return self._cost
    cached_cost = py_functools.cached_property(get_cost)
class CachedCostItemWait:
    # Fixture for the threaded test: computing `cost` blocks on `event`
    # so several threads can pile up inside the property at once.
    def __init__(self, event):
        self._cost = 1
        self.lock = py_functools.RLock()
        self.event = event
    @py_functools.cached_property
    def cost(self):
        self.event.wait(1)
        with self.lock:
            self._cost += 1
        return self._cost
class CachedCostItemWithSlots:
    # Fixture: __slots__ leaves the instance without a __dict__, so
    # cached_property has nowhere to store the value and must raise.
    # NOTE(review): ('_cost') is a plain string, not a one-tuple; __slots__
    # accepts a single string, so this still declares one slot named _cost.
    __slots__ = ('_cost')
    def __init__(self):
        self._cost = 1
    @py_functools.cached_property
    def cost(self):
        raise RuntimeError('never called, slots not supported')
class TestCachedProperty(unittest.TestCase):
def test_cached(self):
item = CachedCostItem()
self.assertEqual(item.cost, 2)
self.assertEqual(item.cost, 2) # not 3
    def test_cached_attribute_name_differs_from_func_name(self):
        # cached_cost caches its first result (3) while the underlying
        # get_cost keeps incrementing on every direct call.
        item = OptionallyCachedCostItem()
        self.assertEqual(item.get_cost(), 2)
        self.assertEqual(item.cached_cost, 3)
        self.assertEqual(item.get_cost(), 4)
        self.assertEqual(item.cached_cost, 3)
    # TODO: RUSTPYTHON
    @unittest.expectedFailure
    def test_threaded(self):
        # Several threads access the uncached property concurrently; the
        # final value of 2 shows the computation ran only once.
        go = threading.Event()
        item = CachedCostItemWait(go)
        num_threads = 3
        orig_si = sys.getswitchinterval()
        # Force frequent thread switches to shake out races; restored below.
        sys.setswitchinterval(1e-6)
        try:
            threads = [
                threading.Thread(target=lambda: item.cost)
                for k in range(num_threads)
            ]
            with support.start_threads(threads):
                go.set()
        finally:
            sys.setswitchinterval(orig_si)
        self.assertEqual(item.cost, 2)
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_object_with_slots(self):
item = CachedCostItemWithSlots()
with self.assertRaisesRegex(
TypeError,
"No '__dict__' attribute on 'CachedCostItemWithSlots' instance to cache 'cost' property.",
):
item.cost
def test_immutable_dict(self):
class MyMeta(type):
@py_functools.cached_property
def prop(self):
return True
class MyClass(metaclass=MyMeta):
pass
with self.assertRaisesRegex(
TypeError,
"The '__dict__' attribute on 'MyMeta' instance does not support item assignment for caching 'prop' property.",
):
MyClass.prop
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_reuse_different_names(self):
"""Disallow this case because decorated function a would not be cached."""
with self.assertRaises(RuntimeError) as ctx:
class ReusedCachedProperty:
@py_functools.cached_property
def a(self):
pass
b = a
self.assertEqual(
str(ctx.exception.__context__),
str(TypeError("Cannot assign the same cached_property to two different names ('a' and 'b')."))
)
def test_reuse_same_name(self):
"""Reusing a cached_property on different classes under the same name is OK."""
counter = 0
@py_functools.cached_property
def _cp(_self):
nonlocal counter
counter += 1
return counter
class A:
cp = _cp
class B:
cp = _cp
a = A()
b = B()
self.assertEqual(a.cp, 1)
self.assertEqual(b.cp, 2)
self.assertEqual(a.cp, 1)
def test_set_name_not_called(self):
cp = py_functools.cached_property(lambda s: None)
class Foo:
pass
Foo.cp = cp
with self.assertRaisesRegex(
TypeError,
"Cannot use cached_property instance without calling __set_name__ on it.",
):
Foo().cp
def test_access_from_class(self):
self.assertIsInstance(CachedCostItem.cost, py_functools.cached_property)
def test_doc(self):
self.assertEqual(CachedCostItem.cost.__doc__, "The cost of the item.")
# Run the tests when this module is executed directly.
if __name__ == '__main__':
    unittest.main()
|
multiprocess_sgskip.py | """
============
Multiprocess
============
Demo of using multiprocessing for generating data in one process and
plotting in another.
Written by Robert Cimrman
"""
import multiprocessing as mp
import time
import matplotlib.pyplot as plt
import numpy as np
# Fixing random state for reproducibility
np.random.seed(19680801)
###############################################################################
#
# Processing Class
# ================
#
# This class plots data it receives from a pipe.
#
class ProcessPlotter(object):
    """Child-process side of the demo: receives (x, y) points over a pipe
    and plots them incrementally.

    ``__call__`` is used as the process target; a GUI timer polls the pipe
    once per second via ``call_back``.
    """

    def __init__(self):
        self.x = []
        self.y = []

    def terminate(self):
        """Close every open figure, ending plt.show() in this process."""
        plt.close('all')

    def call_back(self):
        """Timer callback: drain the pipe and redraw.

        Returns False (stopping the timer) once the None sentinel arrives.
        """
        while self.pipe.poll():
            command = self.pipe.recv()
            if command is None:
                # Sentinel from the parent: shut the plot down.
                self.terminate()
                return False
            self.x.append(command[0])
            self.y.append(command[1])
            self.ax.plot(self.x, self.y, 'ro')
        self.fig.canvas.draw()
        # True keeps the timer alive for the next poll.
        return True

    def __call__(self, pipe):
        print('starting plotter...')
        self.pipe = pipe
        self.fig, self.ax = plt.subplots()
        timer = self.fig.canvas.new_timer(interval=1000)
        timer.add_callback(self.call_back)
        timer.start()
        print('...done')
        plt.show()
###############################################################################
#
# Plotting class
# ==============
#
# This class uses multiprocessing to spawn a process to run code from the
# class above. When initialized, it creates a pipe and an instance of
# ``ProcessPlotter`` which will be run in a separate process.
#
# When run from the command line, the parent process sends data to the spawned
# process which is then plotted via the callback function specified in
# ``ProcessPlotter.__call__``.
#
class NBPlot(object):
    """Parent-side handle: spawns the plotting process and feeds it data."""

    def __init__(self):
        self.plot_pipe, plotter_pipe = mp.Pipe()
        self.plotter = ProcessPlotter()
        # Daemonized so the plot process dies with the parent.
        self.plot_process = mp.Process(
            target=self.plotter, args=(plotter_pipe,), daemon=True)
        self.plot_process.start()

    def plot(self, finished=False):
        """Send one random (x, y) point, or the None sentinel when finished."""
        if finished:
            self.plot_pipe.send(None)
        else:
            self.plot_pipe.send(np.random.random(2))
def main():
    """Send ten random points at half-second intervals, then the sentinel."""
    pl = NBPlot()
    for _ in range(10):
        pl.plot()
        time.sleep(0.5)
    pl.plot(finished=True)
if __name__ == '__main__':
    # The MacOSX GUI backend cannot survive fork(); use forkserver there.
    if plt.get_backend() == "MacOSX":
        mp.set_start_method("forkserver")
    main()
|
runtests.py | #!/usr/bin/env python2.7
#
# Copyright 2015 The Rust Project Developers. See the COPYRIGHT
# file at the top-level directory of this distribution and at
# http://rust-lang.org/COPYRIGHT.
#
# Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
# http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
# <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
# option. This file may not be copied, modified, or distributed
# except according to those terms.
"""
Testing dec2flt
===============
These are *really* extensive tests. Expect them to run for hours. Due to the
nature of the problem (the input is a string of arbitrary length), exhaustive
testing is not really possible. Instead, there are exhaustive tests for some
classes of inputs for which that is feasible and a bunch of deterministic and
random non-exhaustive tests for covering everything else.
The actual tests (generating decimal strings and feeding them to dec2flt) is
performed by a set of stand-alone Rust programs. This script compiles, runs,
and supervises them. The programs report the strings they generate and the
floating point numbers they converted those strings to, and this script
checks that the results are correct.
You can run specific tests rather than all of them by giving their names
(without .rs extension) as command line parameters.
Verification
------------
The tricky part is not generating those inputs but verifying the outputs.
Comparing with the result of Python's float() does not cut it because
(and this is apparently undocumented) although Python includes a version of
David Gay's code including the decimal-to-float part, it doesn't actually use
it for float() (only for round()) instead relying on the system scanf() which
is not necessarily completely accurate.
Instead, we take the input and compute the true value with bignum arithmetic
(as a fraction, using the ``fractions`` module).
Given an input string and the corresponding float computed via Rust, simply
decode the float into f * 2^k (for integers f, k) and the ULP.
We can now easily compute the error and check if it is within 0.5 ULP as it
should be. Zero and infinites are handled similarly:
- If the approximation is 0.0, the exact value should be *less or equal*
half the smallest denormal float: the smallest denormal floating point
number has an odd mantissa (00...001) and thus half of that is rounded
to 00...00, i.e., zero.
- If the approximation is Inf, the exact value should be *greater or equal*
to the largest finite float + 0.5 ULP: the largest finite float has an odd
mantissa (11...11), so that plus half an ULP is rounded up to the nearest
even number, which overflows.
Implementation details
----------------------
This directory contains a set of single-file Rust programs that perform
tests with a particular class of inputs. Each is compiled and run without
parameters, outputs (f64, f32, decimal) pairs to verify externally, and
in any case either exits gracefully or with a panic.
If a test binary writes *anything at all* to stderr or exits with an
exit code that's not 0, the test fails.
The output on stdout is treated as (f64, f32, decimal) record, encoded thusly:
- First, the bits of the f64 encoded as an ASCII hex string.
- Second, the bits of the f32 encoded as an ASCII hex string.
- Then the corresponding string input, in ASCII
- The record is terminated with a newline.
Incomplete records are an error. Not-a-Number bit patterns are invalid too.
The tests run serially but the validation for a single test is parallelized
with ``multiprocessing``. Each test is launched as a subprocess.
One thread supervises it: Accepts and enqueues records to validate, observe
stderr, and waits for the process to exit. A set of worker processes perform
the validation work for the outputs enqueued there. Another thread listens
for progress updates from the workers.
Known issues
------------
Some errors (e.g., NaN outputs) aren't handled very gracefully.
Also, if there is an exception or the process is interrupted (at least on
Windows) the worker processes are leaked and stick around forever.
They're only a few megabytes each, but still, this script should not be run
if you aren't prepared to manually kill a lot of orphaned processes.
"""
from __future__ import print_function
import sys
import os
import time
import struct
from fractions import Fraction
from collections import namedtuple
from subprocess import Popen, check_call, PIPE
from glob import glob
import multiprocessing
import threading
import ctypes
import binascii
try: # Python 3
import queue as Queue
except ImportError: # Python 2
import Queue
NUM_WORKERS = 2        # validation worker processes spawned per test
UPDATE_EVERY_N = 50000 # progress-message cadence, in records
# Unique sentinel singletons for the three special float decodings.
INF = namedtuple('INF', '')()
NEG_INF = namedtuple('NEG_INF', '')()
ZERO = namedtuple('ZERO', '')()
MAILBOX = None # The queue for reporting errors to the main process.
STDOUT_LOCK = threading.Lock()  # serializes msg() output across threads
test_name = None      # name of the currently running test, used by msg()
child_processes = []  # every spawned worker, for bookkeeping
exit_status = 0       # becomes 101 once any validation error is reported
def msg(*args):
    """Thread-safely print a progress line tagged with the running test."""
    with STDOUT_LOCK:
        print("[%s]" % test_name, *args)
        sys.stdout.flush()
def write_errors():
    """Drain MAILBOX, appending each error tuple to errors.txt until the
    None sentinel arrives.

    Sets the module-wide exit_status to 101 on the first error and
    announces (once) that further errors go to the file.
    """
    global exit_status
    have_seen_error = False
    # Bug fix: the original closed the file only on the sentinel path, so
    # an exception (e.g. from MAILBOX.get()) leaked the handle.  The
    # context manager closes it on every exit path.
    with open("errors.txt", 'w') as f:
        while True:
            args = MAILBOX.get()
            if args is None:
                break
            print(*args, file=f)
            f.flush()
            if not have_seen_error:
                have_seen_error = True
                msg("Something is broken:", *args)
                msg("Future errors logged to errors.txt")
                exit_status = 101
def projectdir():
    """Absolute path of the project root, three levels above this file."""
    here = os.path.realpath(__file__)
    return os.path.dirname(os.path.dirname(os.path.dirname(here)))

def targetdir():
    """Cargo's target directory inside the project root."""
    return os.path.join(projectdir(), 'target')

def releasedir():
    """The release build directory under target/."""
    return os.path.join(targetdir(), 'release')
def rustc_lexical():
    """Build the test binaries in release mode with the
    comprehensive_float_test feature enabled.

    Raises CalledProcessError if cargo fails.
    """
    prev_cwd = os.getcwd()
    os.chdir(projectdir())
    try:
        features = '--features=' + ','.join(['comprehensive_float_test'])
        check_call(['cargo', 'build', '--release', features])
    finally:
        # Bug fix: the original only restored the working directory on
        # success; a cargo failure left the process chdir'd elsewhere.
        os.chdir(prev_cwd)
def run(test):
    """Supervise one test binary: launch it, fan its output records out to
    worker processes for validation, and wait for everything to drain."""
    global test_name
    test_name = test
    # NOTE(review): time.clock() exists only on Python 2 (see the
    # python2.7 shebang); a Python 3 port needs time.perf_counter().
    t0 = time.clock()
    msg("setting up supervisor")
    exe = os.path.join(releasedir(), test)
    proc = Popen(exe, bufsize=1<<20, stdin=PIPE, stdout=PIPE, stderr=PIPE)
    # Shared flag the workers poll to learn that no more input can arrive.
    done = multiprocessing.Value(ctypes.c_bool)
    # Deliberately small queue: applies backpressure to the reader loop.
    queue = multiprocessing.Queue(maxsize=5)#(maxsize=1024)
    workers = []
    for n in range(NUM_WORKERS):
        worker = multiprocessing.Process(name='Worker-' + str(n + 1),
                                         target=init_worker,
                                         args=[test, MAILBOX, queue, done])
        workers.append(worker)
        child_processes.append(worker)
    for worker in workers:
        worker.start()
    msg("running test")
    interact(proc, queue)
    # Signal completion, then wait for the workers to finish the backlog.
    with done.get_lock():
        done.value = True
    for worker in workers:
        worker.join()
    msg("python is done")
    assert queue.empty(), "did not validate everything"
    dt = time.clock() - t0
    msg("took", round(dt, 3), "seconds")
def interact(proc, queue):
    """Pump records from the Rust binary's stdout into the work queue.

    Reads line-by-line while the process is alive, then drains whatever
    communicate() buffered after exit.  Any stderr output is reported
    (non-empty stderr means the test failed, per the module docstring).
    """
    n = 0
    while proc.poll() is None:
        line = proc.stdout.readline()
        if not line:
            continue
        # Incomplete records are an error by the output protocol.
        assert line.endswith(b'\n'), "incomplete line: " + repr(line)
        queue.put(line)
        n += 1
        if n % UPDATE_EVERY_N == 0:
            msg("got", str(n // 1000) + "k", "records")
    msg("rust is done. exit code:", proc.returncode)
    # Collect anything still buffered after the process exited.
    rest, stderr = proc.communicate()
    if stderr:
        msg("rust stderr output:", stderr)
    for line in rest.split(b'\n'):
        if not line:
            continue
        queue.put(line)
def main():
    """Discover the *.rs test programs, build them, then run each one
    under a supervisor while a daemon thread logs validation errors."""
    global MAILBOX
    # Underscore-prefixed .rs files are helpers, not tests.
    tests = [os.path.splitext(f)[0] for f in glob('*.rs')
             if not f.startswith('_')]
    # Positional command-line arguments select a subset of tests.
    whitelist = [i for i in sys.argv[1:] if not i.startswith('-')]
    if whitelist:
        tests = [test for test in tests if test in whitelist]
    if not tests:
        print("Error: No tests to run")
        sys.exit(1)
    # Compile first for quicker feedback
    rustc_lexical()
    # Set up mailbox once for all tests
    MAILBOX = multiprocessing.Queue()
    mailman = threading.Thread(target=write_errors)
    mailman.daemon = True
    mailman.start()
    for test in tests:
        if whitelist and test not in whitelist:
            continue
        run(test)
    # None is the sentinel that lets the error-writer thread exit.
    MAILBOX.put(None)
    mailman.join()
# ---- Worker thread code ----
# Exact powers of two (and half-ULP sizes) precomputed as Fractions for
# every exponent a binary32/64 can take, to avoid recomputing per record.
POW2 = { e: Fraction(2) ** e for e in range(-1100, 1100) }
HALF_ULP = { e: (Fraction(2) ** e)/2 for e in range(-1100, 1100) }
DONE_FLAG = None  # per-worker handle to the shared "supervisor done" flag
def send_error_to_supervisor(*args):
    """Forward an error-report tuple to the supervisor's MAILBOX queue."""
    MAILBOX.put(args)
def init_worker(test, mailbox, queue, done):
    """Worker-process entry point.

    Each worker has its own copy of this module, so the shared handles
    passed from the supervisor are installed as module globals before the
    validation loop starts.
    """
    global test_name, MAILBOX, DONE_FLAG
    test_name = test
    MAILBOX = mailbox
    DONE_FLAG = done
    do_work(queue)
def is_done():
    """Safely read the shared flag the supervisor sets once the Rust
    process has exited and every record has been enqueued."""
    with DONE_FLAG.get_lock():
        return DONE_FLAG.value
def do_work(queue):
    """Validation loop: pull records off the queue until it stays empty
    *and* the supervisor has signalled completion."""
    while True:
        try:
            line = queue.get(timeout=0.01)
        except Queue.Empty:
            # Exit only when no more work can arrive AND none is queued;
            # otherwise keep polling.
            if queue.empty() and is_done():
                return
            else:
                continue
        # Record format: "<f64 hex bits> <f32 hex bits> <decimal string>".
        bin64, bin32, text = line.rstrip().split()
        validate(bin64, bin32, text.decode('utf-8'))
def decode_binary64(x):
    """
    Turn a IEEE 754 binary64 into (mantissa, exponent), except 0.0 and
    infinity (positive and negative), which return ZERO, INF, and NEG_INF
    respectively.

    The pair is normalised so the value equals mantissa * 2**exponent
    exactly, with the sign carried on the mantissa.  NaN inputs are
    rejected via the assertion below.
    """
    x = binascii.unhexlify(x)
    assert len(x) == 8, repr(x)
    [bits] = struct.unpack(b'>Q', x)
    if bits == 0:
        return ZERO
    # Field layout: 1 sign bit, 11 exponent bits, 52 mantissa bits.
    exponent = (bits >> 52) & 0x7FF
    negative = bits >> 63
    low_bits = bits & 0xFFFFFFFFFFFFF
    if exponent == 0:
        # Subnormal: no implicit leading 1; effective exponent is e_min,
        # achieved by bumping the biased exponent to 1 before unbiasing.
        mantissa = low_bits
        exponent += 1
        if mantissa == 0:
            return ZERO
    elif exponent == 0x7FF:
        # All-ones exponent: infinity (mantissa 0) or NaN (rejected).
        assert low_bits == 0, "NaN"
        if negative:
            return NEG_INF
        else:
            return INF
    else:
        # Normal number: restore the implicit leading 1.
        mantissa = low_bits | (1 << 52)
    # Remove the bias (1023) and account for the 52 fractional bits.
    exponent -= 1023 + 52
    if negative:
        mantissa = -mantissa
    return (mantissa, exponent)
def decode_binary32(x):
    """
    Turn a IEEE 754 binary32 into (mantissa, exponent), except 0.0 and
    infinity (positive and negative), which return ZERO, INF, and NEG_INF
    respectively.

    Mirrors decode_binary64 with the binary32 field widths: 1 sign bit,
    8 exponent bits, 23 mantissa bits, bias 127.
    """
    x = binascii.unhexlify(x)
    assert len(x) == 4, repr(x)
    [bits] = struct.unpack(b'>I', x)
    if bits == 0:
        return ZERO
    exponent = (bits >> 23) & 0xFF
    negative = bits >> 31
    low_bits = bits & 0x7FFFFF
    if exponent == 0:
        # Subnormal: no implicit leading 1; effective exponent is e_min.
        mantissa = low_bits
        exponent += 1
        if mantissa == 0:
            return ZERO
    elif exponent == 0xFF:
        # NOTE(review): unlike decode_binary64, NaN bit patterns
        # (low_bits != 0) are NOT rejected here and would decode as
        # infinities — confirm whether that is intended.
        if negative:
            return NEG_INF
        else:
            return INF
    else:
        # Normal number: restore the implicit leading 1.
        mantissa = low_bits | (1 << 23)
    # Remove the bias (127) and account for the 23 fractional bits.
    exponent -= 127 + 23
    if negative:
        mantissa = -mantissa
    return (mantissa, exponent)
MIN_SUBNORMAL_DOUBLE = Fraction(2) ** -1074
MIN_SUBNORMAL_SINGLE = Fraction(2) ** -149  # 2**-126 * 2**-23: smallest positive binary32 subnormal
MAX_DOUBLE = (2 - Fraction(2) ** -52) * (2 ** 1023)
MAX_SINGLE = (2 - Fraction(2) ** -23) * (2 ** 127)
# Exponent of one ULP at the top of each format's range.
MAX_ULP_DOUBLE = 1023 - 52
MAX_ULP_SINGLE = 127 - 23
# Round-to-nearest-even cutoffs (see the module docstring): an exact value
# at or below the zero cutoff correctly rounds to 0.0; at or above the inf
# cutoff it overflows to infinity.
DOUBLE_ZERO_CUTOFF = MIN_SUBNORMAL_DOUBLE / 2
DOUBLE_INF_CUTOFF = MAX_DOUBLE + 2 ** (MAX_ULP_DOUBLE - 1)
SINGLE_ZERO_CUTOFF = MIN_SUBNORMAL_SINGLE / 2
SINGLE_INF_CUTOFF = MAX_SINGLE + 2 ** (MAX_ULP_SINGLE - 1)
def validate(bin64, bin32, text):
    """Check one record: decode both float encodings of *text* and verify
    each is the correctly rounded value of the exact decimal."""
    try:
        double = decode_binary64(bin64)
    except AssertionError:
        # Dump the offending record (likely a NaN) before propagating.
        print(bin64, bin32, text)
        raise
    single = decode_binary32(bin32)
    # The exact decimal value as an arbitrary-precision rational.
    real = Fraction(text)
    if double is ZERO:
        if real > DOUBLE_ZERO_CUTOFF:
            record_special_error(text, "f64 zero")
    elif double is INF:
        if real < DOUBLE_INF_CUTOFF:
            record_special_error(text, "f64 inf")
    elif double is NEG_INF:
        if -real < DOUBLE_INF_CUTOFF:
            record_special_error(text, "f64 -inf")
    elif len(double) == 2:
        # Finite, non-zero: compare against the half-ULP bound.
        sig, k = double
        validate_normal(text, real, sig, k, "f64")
    else:
        assert 0, "didn't handle binary64"
    if single is ZERO:
        if real > SINGLE_ZERO_CUTOFF:
            record_special_error(text, "f32 zero")
    elif single is INF:
        if real < SINGLE_INF_CUTOFF:
            record_special_error(text, "f32 inf")
    elif single is NEG_INF:
        if -real < SINGLE_INF_CUTOFF:
            record_special_error(text, "f32 -inf")
    elif len(single) == 2:
        sig, k = single
        validate_normal(text, real, sig, k, "f32")
    else:
        assert 0, "didn't handle binary32"
def record_special_error(text, descr):
    """Report an input wrongly rounded to a special value (zero/inf)."""
    send_error_to_supervisor(text.strip(), "wrongly rounded to", descr)
def validate_normal(text, real, sig, k, kind):
    """Check a finite decoded float sig * 2**k against the exact value;
    correct rounding permits an error of at most half an ULP."""
    approx = sig * POW2[k]
    error = abs(approx - real)
    if error > HALF_ULP[k]:
        record_normal_error(text, error, k, kind)
def record_normal_error(text, error, k, kind):
    """Report an input rounded more than 0.5 ULP away from the exact value.

    *error* is the absolute error as a Fraction, *k* the exponent of the
    decoded float, *kind* "f32" or "f64".
    """
    one_ulp = HALF_ULP[k + 1]
    assert one_ulp == 2 * HALF_ULP[k]
    relative_error = error / one_ulp
    text = text.strip()
    try:
        err_repr = float(relative_error)
    except (ValueError, OverflowError):
        # Bug fix: the original read `err_repr` before assignment here,
        # raising UnboundLocalError instead of producing the fallback.
        # Also, Fraction-to-float conversion fails with OverflowError for
        # huge ratios, so catch that alongside ValueError.
        err_repr = str(relative_error).replace('/', ' / ')
    send_error_to_supervisor(err_repr, "ULP error on", text, "(" + kind + ")")
# Entry point: build the Rust test programs and supervise each test run.
if __name__ == '__main__':
    main()
|
simplest.py | import multiprocessing as mp
import time
def worker(x):
    """Print the argument and the worker's process name, then simulate
    two seconds of work."""
    name = mp.current_process().name
    print("x = {} in {}".format(x, name))
    time.sleep(2)
# Demo: run worker(42) in a separate process and wait for it to finish.
if __name__ == '__main__':
    # create the process
    p = mp.Process(target=worker, args=(42,))
    # start the process:
    p.start()
    # wait until process completes:
    p.join()
    print("Worker did its job as separate Process!")
|
druid.py | try:
from pydruid.db import connect
enabled = True
except ImportError:
enabled = False
from redash.query_runner import BaseQueryRunner, register, JobTimeoutException
from redash.query_runner import TYPE_STRING, TYPE_INTEGER, TYPE_BOOLEAN, TYPE_FLOAT
from redash.utils import enum, json_dumps, json_loads
from six.moves import urllib
from base64 import b64encode
import random
import sqlite3
import re
import threading
#import logging
from redash.worker import get_job_logger
# Druid cursor description type codes -> redash column types.
TYPES_MAP = {1: TYPE_STRING, 2: TYPE_INTEGER, 3: TYPE_BOOLEAN}
# Python runtime type names -> redash column types (native-JSON results).
PYTHON_TYPES_MAP = {"str": TYPE_STRING, "int": TYPE_INTEGER, "bool": TYPE_BOOLEAN, "float": TYPE_FLOAT}
# redash column types -> SQLite column affinities (for persisted tables).
SQLITE_TYPES_MAP = {TYPE_STRING: "TEXT", TYPE_INTEGER: "INTEGER", TYPE_FLOAT: "NUMERIC"}
QueryMode = enum(
    'QueryMode',
    DRUID_SQL='DruidSql',   # basic: SQL sent to Druid, with or without a context
    DRUID_JSON='DruidJson', # basic: native JSON-format query sent to Druid
    SQLITE='Sqlite',        # basic: query served by the local SQLite database
    CUSTOM='Custom'         # composite multi-step query
)
QUERY_MODE_SQLITE_PREFIX = "SQLITE:"  # prefix selecting SQLITE mode
def get_logger():
    """Module-level logger bound to the redash job context."""
    return get_job_logger(__name__)
# Table-name substitution helper.
def ReplaceTableName(querystr, old_name, new_name):
    '''
    Replace *old_name* with *new_name* wherever it appears as a table token
    (case-insensitive), i.e. preceded by whitespace or ")" and followed by
    whitespace, "(", ")", "." or end-of-string.  A plain str.replace would
    also hit substrings of longer identifiers; in the substitution only the
    name (group 2) is swapped, the delimiters (groups 1 and 3) are kept.
    '''
    # Raw strings so the \s escapes reach the regex engine untouched
    # (non-raw "\s" is a DeprecationWarning/SyntaxWarning on modern Python).
    pattern = r"(\s|\))(" + old_name + r")(\s|\(|\)|\.|$)"
    return re.sub(pattern, lambda x: x.group(1) + new_name + x.group(3), querystr, flags=re.I)
# Detect whether a statement is a CREATE TABLE SQL statement.
CREATE_TABLE_SQL_REG = re.compile(r"(\s*CREATE\s+(TEMPORARY\s+)*TABLE\s+(IF\s+NOT\s+EXISTS\s+)?)", flags=re.I)
def IsCreateTableSql(querystr):
    """Return True if *querystr* contains a CREATE [TEMPORARY] TABLE
    [IF NOT EXISTS] statement (case-insensitive)."""
    # bool(search) replaces the original if/else over findall: same
    # truthiness, single match attempt.
    return bool(CREATE_TABLE_SQL_REG.search(querystr))
# Extract the table name(s) from CREATE TABLE statements.
TABLE_NAME_TO_CREATE_REG = re.compile(r"\s*CREATE\s+(TEMPORARY\s+)*TABLE\s+(IF\s+NOT\s+EXISTS\s+)?(\w+)[\s\(]", flags=re.I)
REG_MATCH_TABLE_NAME_INDEX = 2  # index of the name group in each findall tuple
def GetTableNameToCreate(querystr):
    """Return the concatenation of every table name found in CREATE TABLE
    statements within *querystr*, or None when there is none."""
    matches = TABLE_NAME_TO_CREATE_REG.findall(querystr)
    if not matches:
        return None
    # join over the name group replaces the original quadratic string
    # concatenation; also uses the declared index constant instead of a
    # magic literal 2.
    return ''.join(groups[REG_MATCH_TABLE_NAME_INDEX] for groups in matches)
# Forbidden SQL actions: DATABASE, ALTER, RENAME.
FORBIDDEN_SQL_REG = re.compile(r"(\s+(DATABASE)\s+)|((\s+|:{1}|^)(ALTER|RENAME)\s+)", flags=re.I)
def CheckForbiddenSql(querystr):
    """Return the forbidden keyword found in *querystr* (DATABASE, ALTER
    or RENAME, as matched), or None when the statement is allowed."""
    matches = FORBIDDEN_SQL_REG.findall(querystr)
    if not matches:
        return None
    # Exactly one of group 1 (DATABASE) / group 4 (ALTER|RENAME) is
    # non-empty for a given match, so concatenation yields the keyword.
    return "%s%s" % (matches[0][1], matches[0][4])
# Strips /* ... */ comments (non-greedy; [\S\s] lets the match span lines).
COMMENT_REG = re.compile("(/\*([\S\s]*?)\*/)")
class CustomException(Exception):
    """Error raised by the composite-query machinery.

    ``read()`` mirrors ``urllib``'s ``HTTPError.read()`` so callers can
    treat both error kinds uniformly.
    """

    def __init__(self, info):
        # Bug fix: chain to Exception.__init__ so .args is populated and
        # the exception pickles correctly (the original skipped it).
        super(CustomException, self).__init__(info)
        self.info = info

    def __str__(self):
        return self.info

    def read(self):
        return self.info
class Result(object):
    # Plain attribute holder: _run_query_threading stores its json_data
    # and error outcome on an instance of this class.
    def __init__(self):
        pass
class Druid(BaseQueryRunner):
    noop_query = "SELECT 1"            # connectivity-test query
    sqlite_dbpath = "druid_sqlite.db"  # local scratch db for composite queries
    # The bare string below documents the metadata comment redash prepends
    # to every query; it is parsed into `metadata` by remove_comments().
    '''
{"Username": "13436361@qq.com", "Query ID": "7", "Queue": "queries", "Enqueue Time": 1597988440.3357646,
"Job ID": "20529e79-80da-4de7-bfe0-bea63a35c9e8", "Query Hash": "add2ea64feea932bc1a12a20cdb29bc5", "Scheduled": false}
    '''
    metadata = {}  # parsed redash query metadata for log tagging
@classmethod
def configuration_schema(cls):
    """JSON schema describing this runner's connection settings UI."""
    properties = {
        "host": {"type": "string", "default": "localhost"},
        "port": {"type": "number", "default": 8082},
        "scheme": {"type": "string", "default": "http"},
        "user": {"type": "string"},
        "password": {"type": "string"},
    }
    return {
        "type": "object",
        "properties": properties,
        "order": ["scheme", "host", "port", "user", "password"],
        "required": ["host"],
        "secret": ["password"],
    }
@classmethod
def enabled(cls):
    # The runner is available only if the pydruid import succeeded.
    return enabled
def get_logger(self):
    """Per-instance logger; delegates to the redash job logger."""
    return get_job_logger(__name__)
def _log_debug(self, message):
    # All _log_* helpers tag messages with the query id/hash parsed from
    # the redash metadata comment (see remove_comments()).
    get_logger().debug("###druid### [query_id=%s] [query_hash=%s], %s",
                       self.metadata.get("Query ID", "unknown"),
                       self.metadata.get("Query Hash", "unknown"),
                       message,
                       )

def _log_info(self, message):
    # Info-level variant of _log_debug.
    get_logger().info("###druid### [query_id=%s] [query_hash=%s], %s",
                      self.metadata.get("Query ID", "unknown"),
                      self.metadata.get("Query Hash", "unknown"),
                      message,
                      )

def _log_warning(self, message):
    # Warning-level variant of _log_debug.
    get_logger().warning("###druid### [query_id=%s] [query_hash=%s], %s",
                         self.metadata.get("Query ID", "unknown"),
                         self.metadata.get("Query Hash", "unknown"),
                         message,
                         )

def _log_error(self, message):
    # Error-level variant of _log_debug.
    get_logger().error("###druid### [query_id=%s] [query_hash=%s], %s",
                       self.metadata.get("Query ID", "unknown"),
                       self.metadata.get("Query Hash", "unknown"),
                       message,
                       )
def run_query(self, query, user):
    """redash entry point: execute the query and serialize the result.

    Returns (json_string, error); the string is empty when no data came
    back. Errors are logged before returning.
    """
    json_data, error = self.run_query_obj_result(query, user, sqlite_query_param={})
    if error is not None:
        self._log_error(error)
    json_str = json_dumps(json_data) if json_data is not None else ""
    return json_str, error
def run_query_obj_result(self, query, user, sqlite_query_param):
    '''
    Dispatch the query text to the right backend and return (data, error).

    Example invocation via Postman:
    POST http://10.15.101.10:5000/api/queries/7/results
    body format:
    {
        "id": "7",
        "parameters": {
            "start_time_bc": "2020-02-01T00:00:00",
            "end_time_bc": "2020-03-01T00:00:00",
            "start_time_tb": "2019-02-01T00:00:00",
            "end_time_tb": "2019-03-01T00:00:00",
            "start_time_hb": "2020-01-01T00:00:00",
            "end_time_hb": "2020-02-01T00:00:00"
        },
        "max_age": -1  // 0 forces a refresh; -1 accepts any cached result;
                       // a positive value accepts caches at most that many seconds old
    }
    The returned data has this shape:
    {
        "columns":
        [
            {"name": "daytime", "friendly_name": "daytime", "type": "string"},
            {"name": "TOUR_DEST", "friendly_name": "TOUR_DEST", "type": "string"},
            {"name": "orders", "friendly_name": "orders", "type": "integer"},
            {"name": "cpo", "friendly_name": "cpo", "type": "integer"}
        ],
        "rows":
        [
            {"daytime": "2020-01-02T00:00:00.000Z", "TOUR_DEST": "", "orders": 1.0, "cpo": 0.0},
            {"daytime": "2020-01-22T00:00:00.000Z", "TOUR_DEST": "\u8d35\u5dde", "orders": 1.9999999675783329, "cpo": 297.29051284564537}
        ]
    }
    '''
    querystr = self.remove_comments(query)
    query_mode, query_obj = self.get_query_mode(querystr)
    self._log_info("query=#####%s#####, mode=%s" % (querystr, query_mode))
    if query_mode == QueryMode.DRUID_SQL:
        # get_query_mode returns a parsed object only when the JSON wrapper
        # carried an explicit "context" alongside the "sql" text.
        if query_obj is not None:
            querystr = query_obj["sql"]
            context = query_obj["context"]
        else:
            context = {}
        json_data, error = self.run_sql_query(querystr, context, user)
    elif query_mode == QueryMode.DRUID_JSON:
        json_data, error = self.run_native_query(querystr, user)
    elif query_mode == QueryMode.SQLITE:
        json_data, error = self.run_sqlite_query(querystr, sqlite_query_param)
    else:
        json_data, error = self.run_custom_query(querystr, user)
    return json_data, error
def _run_query_threading(self, query, user, sqlite_query_param, result):
    """Thread target: run run_query_obj_result and store the outcome on
    the shared *result* object (threads cannot return values directly).

    Any exception is captured as result.error rather than propagating,
    so a failing sub-query does not kill the worker thread silently.
    """
    result.json_data = None
    result.error = None
    try:
        result.json_data, result.error = self.run_query_obj_result(query, user, sqlite_query_param)
    except Exception as e:
        result.error = str(e)
    # The original ended with a no-op `finally: pass`, removed here.
def remove_comments(self, querystr):
    '''
    Strip comments from the query text and capture redash metadata.

    redash's _annotate_query prepends a comment that is now JSON, e.g.:
    /* {"Username": "13436361@qq.com", "Query ID": "7", "Queue": "queries",
        "Enqueue Time": 1597988440.3357646, "Job ID": "...",
        "Query Hash": "...", "Scheduled": false} */
    (it was previously a plain "key: value" comment).  When present, the
    comment body is parsed into self.metadata before removal.  Any other
    /* ... */ comments are then replaced by spaces and the result trimmed.
    '''
    if querystr[0:2] == "/*":
        index = querystr.find("*/")
        comment = querystr[2:index]
        self.metadata = json_loads(comment)
        querystr = querystr[index+2:]
    querystr = COMMENT_REG.sub(" ", querystr)
    querystr = querystr.strip()
    return querystr
def get_query_mode(self, querystr):
    """Classify the comment-stripped query text into a QueryMode.

    Returns (mode, parsed_query); parsed_query is non-None only for a
    JSON wrapper carrying both "sql" and "context" keys.
    """
    first_char = querystr[0]
    if first_char == "{":
        query_obj = json_loads(querystr)
        # JSON with both "sql" and "context" is SQL plus a query context;
        # any other JSON object is treated as a native Druid query.
        has_context = query_obj.get("context") != None
        has_sql = query_obj.get("sql") != None
        if has_context and has_sql:
            return QueryMode.DRUID_SQL, query_obj
        return QueryMode.DRUID_JSON, None
    if first_char == "X":
        return QueryMode.CUSTOM, None
    if querystr.startswith(QUERY_MODE_SQLITE_PREFIX):
        return QueryMode.SQLITE, None
    return QueryMode.DRUID_SQL, None
def run_sql_query(self, query, context, user):
    """Execute a SQL query against Druid's /druid/v2/sql endpoint.

    Returns ({"columns": [...], "rows": [...]}, None).  An empty result
    set yields empty lists rather than an error.  Exceptions from
    execute() propagate, with the connection always closed by the
    finally block.
    """
    connection = connect(
        host=self.configuration["host"],
        port=self.configuration["port"],
        path="/druid/v2/sql/",
        scheme=(self.configuration.get("scheme") or "http"),
        user=(self.configuration.get("user") or None),
        password=(self.configuration.get("password") or None),
        context=context,
    )
    cursor = connection.cursor()
    try:
        cursor.execute(query)
        if cursor.description is not None:
            columns = self.fetch_columns(
                [(i[0], TYPES_MAP.get(i[1], None)) for i in cursor.description]
            )
            # One dict per row, keyed by column name.
            rows = [
                dict(zip((column["name"] for column in columns), row)) for row in cursor
            ]
            data = {"columns": columns, "rows": rows}
            error = None
        else:
            # No result set at all: still not an error.
            data = {"columns": [], "rows": []}
            error = None
    finally:
        connection.close()
    return data, error
def run_native_query(self, querystr, user):
    """POST a native JSON query to Druid's /druid/v2 endpoint.

    Modeled on pydruid's _prepare_url_headers_and_body / _stream_query.
    Returns (flattened_result, None); HTTP errors re-raise after reading
    the response body.
    """
    host = self.configuration["host"]
    port = self.configuration["port"]
    username = (self.configuration.get("user") or None)
    password = (self.configuration.get("password") or None)
    url = "http://{}:{}/druid/v2/?pretty".format(host, port)
    headers = {"Content-Type": "application/json"}
    if (username is not None) and (password is not None):
        # Hand-built Basic auth header, as pydruid does.
        authstring = "{}:{}".format(username, password)
        b64string = b64encode(authstring.encode()).decode()
        headers["Authorization"] = "Basic {}".format(b64string)
    error = None
    try:
        b = querystr.encode('utf-8')
        req = urllib.request.Request(url, b, headers, method="POST")
        res = urllib.request.urlopen(url=req, cafile=None)
        data = res.read().decode("utf-8")
        res.close()
    except urllib.error.HTTPError as e:
        # NOTE(review): both assignments below are dead — the bare raise
        # re-raises immediately and neither local is used afterwards.
        error = e.read()
        json_str = None
        raise
    except (KeyboardInterrupt, InterruptException, JobTimeoutException):
        # NOTE(review): InterruptException is not among this module's
        # visible imports; if it is not defined elsewhere, evaluating this
        # clause would itself raise NameError — verify.
        raise
    else:
        raw_json_data = json_loads(data)
        final_json_data = self.post_process_native_result(raw_json_data)
    return final_json_data, error
def post_process_native_result(self, raw_json_data):
    '''
    Flatten Druid's native query response into redash's result shape:
    {
        "columns":
        [
            {"name": "daytime", "friendly_name": "daytime", "type": "string"},
            ...
        ],
        "rows":
        [
            {"daytime": "2020-01-02T00:00:00.000Z", ...},
            ...
        ]
    }
    Entries whose "result" field is a list are expanded into one row per
    list element, with the entry's other keys (e.g. the timestamp) copied
    into every expanded row.  Column metadata is inferred from the value
    types of the first row.
    '''
    columns = []
    rows = []
    final_json_data = {"columns" : columns, "rows" : rows}
    for obj1 in iter(raw_json_data):
        if not "result" in obj1.keys():
            # No nested result: the entry itself is a row.
            rows.append(obj1)
            continue
        result = obj1["result"]
        if type(result).__name__ !="list":
            # Scalar result: keep the entry as-is.
            rows.append(obj1)
            continue
        # Keys shared by every row expanded from this entry.
        row_common = {}
        for (k,v) in obj1.items():
            if k != "result":
                row_common[k] = v
        for obj2 in iter(result):
            row = row_common.copy();
            for (k,v) in obj2.items():
                row[k] = v
            rows.append(row)
    if len(rows) > 0:
        # Infer column names and redash types from the first row.
        row = rows[0]
        for (column_name, column_value) in row.items():
            columns.append(
                {"name": column_name, "friendly_name": column_name, "type": PYTHON_TYPES_MAP[type(column_value).__name__]}
            )
    return final_json_data
def run_custom_query(self, querystr, user):
'''
例子1,子查询是个sql:
X{
"store_to_db": false,
"tables": [
{
"table_name": "tablea",
"datetime_column": "daytime",
"query": {
"context": {"useApproximateCountDistinct": false},
"sql": "SELECT DATE_TRUNC('day', __time) as daytime,PV_SRC_GEO_LOCATION,sum(AD_CLICK_COUNT) as click, sum(AD_CLICK_COUNT*KW_AVG_COST) as cost FROM travels_demo where EVENT_TYPE='被展现' group by PV_SRC_GEO_LOCATION,DATE_TRUNC('day', __time) order by daytime"
},
"nodata_procs": [
"SQLITE:CREATE TABLE tablea (daytime DATETIME, PV_SRC_GEO_LOCATION TEXT, click INTEGER, cost NUMERIC)",
"SQLITE:INSERT INTO tablea VALUES('2020-01-01T00:00:00.000Z', 'CHINA', 252, 848.74)"
]
},
{
"table_name": "tableb",
"datetime_column": "daytime",
"query": "SQLITE:SELECT * FROM tablea"
}
],
"main_query": "SQLITE:SELECT daytime, PV_SRC_GEO_LOCATION, click, cost FROM tableb",
"final_sql": "SELECT daytime, PV_SRC_GEO_LOCATION, click, cost FROM tableb",
"persist_table_name": "some_long_name_table_1",
"persist_datetime_column": "daytime",
"sub_queries":[
{
"name": "exdata1",
"query":"SQLITE:SELECT daytime, click, cost FROM tablea",
"persist_table_name": "some_long_name_table_2",
"persist_datetime_column": "daytime"
}
]
}
例子2,子查询是个json:
X{
"tables": [
{
"table_name": "tablea",
"datetime_column": "daytime",
"query":
{
"aggregations": [
{
"type": "doubleSum",
"name": "showCount",
"fieldName": "AD_SHOW_COUNT"
},
{
"type": "doubleSum",
"name": "realcost",
"fieldName": null,
"expression": "(AD_CLICK_COUNT * KW_AVG_COST)"
},
{
"type": "doubleSum",
"name": "a1",
"fieldName": "AD_CLICK_COUNT"
}
],
"postAggregations": [
{
"type": "expression",
"name": "click_per_cost",
"expression": "(realcost / a1)",
"ordering": null
}
],
"filter": {
"type": "selector",
"dimension": "EVENT_TYPE",
"value": "数据报告"
},
"dataSource": "travels_demo",
"dimension": "KEYWORD",
"granularity": "day",
"intervals": [
"1900-01-09T00:00:00.000Z/2992-01-10T00:00:00.000Z"
],
"metric": "realcost",
"queryType": "topN",
"threshold": 30
}
}
],
"main_query": "SQLITE:SELECT * FROM tablea"
}
'''
error = None
json_data = None
#解析
querystr = querystr[1:] #去掉X
try:
input_obj = json_loads(querystr)
except:
error = "Incorrect Json format."
if error is not None:
raise CustomException(error)
#threading: 是否使用多线程进行查询
use_threading = input_obj.get("threading")
if use_threading is None:
use_threading = True
#store_to_db: 查询结果是否保存为sqlite的表,如果是,后续还得指定persist_table_name
# 不需要可以不填,默认是False
store_to_db = input_obj.get("store_to_db")
if store_to_db is None:
store_to_db = False
#tables: 一系列辅助查询的过渡表,顺序执行,后续的表可以以来前面的表
# 不需要可以不填
tables = input_obj.get("tables")
if (tables is not None) and (type(tables).__name__ !="list"):
raise CustomException("Incorrect Json data: tables must be a list.")
#main_query: 主查询,查询结果存放在query_result["data"]中
# 不需要可以不填
main_query = input_obj.get("main_query")
if main_query is not None:
if type(main_query).__name__ =="str":
pass
elif type(main_query).__name__ =="dict":
main_query = json_dumps(main_query)
else:
raise CustomException("Incorrect Json data: main_query must be a string or json format.")
#final_sql: 兼容,也是主查询,但只能从SQLITE中查结果;在有main_query的情况下,此项无效
# 不需要可以不填
final_sqlite_query = input_obj.get("final_sql")
if (final_sqlite_query is not None) and (type(final_sqlite_query).__name__ !="str"):
raise CustomException("Incorrect Json data: final_sql must be a string.")
#persist_table_name: store_to_db为true的情况下,保存主查询数据的表名
#persist_datetime_column: 查询结果中的时间项
# 不需要可以不填
persist_table_name = None
persist_datetime_column = None
if store_to_db and (main_query is not None or final_sqlite_query is not None):
persist_table_name = input_obj.get("persist_table_name")
if persist_table_name is None or type(persist_table_name).__name__ !="str":
raise CustomException("Incorrect Json data: persist_table_name for main query must be a string.")
persist_datetime_column = input_obj.get("persist_datetime_column")
if persist_datetime_column is not None and type(persist_datetime_column).__name__ !="str":
raise CustomException("Incorrect Json data: persist_datetime_column for main query must be a string.")
#sub_queries: 子查询,查询结果存放在query_result["data_ex"]中
# 不需要可以不填
sub_queries = input_obj.get("sub_queries")
if (sub_queries is not None) and (type(sub_queries).__name__ !="list"):
raise CustomException("Incorrect Json data: sub_queries must be a string.")
#对tables中的临时表名的随机化
table_name_map = {}
#创建sqlite
sqlite_connection = sqlite3.connect(self.sqlite_dbpath)
sqlite_cursor = sqlite_connection.cursor()
sqlite_query_param = {"table_name_map": table_name_map, "can_create_table": False}
try:
#一、依次处理单个表
if tables is not None:
for table_cofig in tables:
#json配置
name = table_cofig.get("table_name")
if (name is None) or (type(name).__name__ !="str"):
raise CustomException("Incorrect Json data: table_name can't be none and must be a string.")
self._log_info("Processing Table[%s]" % name)
datetime_column = table_cofig.get("datetime_column")
if (datetime_column is not None) and (type(datetime_column).__name__ !="str"):
raise CustomException("Incorrect Json data in table %s: datetime_column must be a string." % name)
table_query = table_cofig.get("query")
if table_query is None:
raise CustomException("Incorrect Json data in table %s: query must exist." % name)
if type(table_query).__name__ =="str":
pass
elif type(table_query).__name__ =="dict":
table_query = json_dumps(table_query)
else:
raise CustomException("Incorrect Json data in table %s: query must be a string or json format." % name)
nodata_procs = table_cofig.get("nodata_procs")
if (nodata_procs is not None) and (type(nodata_procs).__name__ !="list"):
raise CustomException("Incorrect Json data in table %s: nodata_procs must be a list." % name)
#查询
query_data, query_error = self.run_query_obj_result(table_query, user, sqlite_query_param)
if query_error is not None:
raise CustomException(query_error)
if (query_data is None) or query_data.get("columns") is None:
raise CustomException("Incorrect query data for table %s." % name)
#存储
rand_num = random.randint(100000,999999)
table_name = name + str(rand_num)
table_name_map[name] = table_name
if len(query_data["columns"]) > 0:
self.store_data_to_sqlite(sqlite_connection, sqlite_cursor, query_data, table_name, datetime_column, drop_before_create = False)
#查询返回无数据的处理
elif nodata_procs is not None:
self._log_info("Using nodata_procs to build table: %s." % name)
sqlite_query_param["can_create_table"] = True
for proc in nodata_procs:
if type(proc).__name__ !="str":
raise CustomException("Incorrect Json data in table %s: nodata_procs must be a string list." % name)
t = GetTableNameToCreate(proc)
if t is not None and t != name:
raise CustomException("[nodata_procs]Invalid table name(%s) to create in table %s." % (t, name))
query_data, query_error = self.run_query_obj_result(proc, user, sqlite_query_param)
if query_error is not None:
raise CustomException(query_error)
sqlite_query_param["can_create_table"] = False
else:
pass
#二、执行主查询
if main_query is not None:
self._log_info("Processing Main Query:#####%s#####" % main_query)
json_data, error = self.run_query_obj_result(main_query, user, sqlite_query_param)
if error is not None:
raise CustomException(error)
if (json_data is None) or json_data.get("columns") is None:
raise CustomException("Incorrect query_data for main query.")
elif final_sqlite_query is not None:
for (k,v) in table_name_map.items():
final_sqlite_query = ReplaceTableName(final_sqlite_query, k, v)
self._log_info("Processing Final SQL:#####%s#####" % final_sqlite_query)
sqlite_cursor.execute(final_sqlite_query)
if sqlite_cursor.description is not None:
columns = self.fetch_columns([(i[0], None) for i in sqlite_cursor.description])
rows = [
dict(zip((column["name"] for column in columns), row))
for row in sqlite_cursor
]
error = None
#columns里的type全是null
columns = []
if len(rows) > 0:
row = rows[0]
for (column_name, column_value) in row.items():
columns.append(
{"name": column_name, "friendly_name": column_name, "type": PYTHON_TYPES_MAP[type(column_value).__name__]}
)
json_data = {"columns": columns, "rows": rows}
else:
#error = "Query completed but it returned no data."
#json_data = None
error = None
json_data = {"columns": [], "rows": []}
else:
json_data = {"columns": [], "rows": []}
error = None
#存储
if store_to_db and error is None and len(json_data["columns"]) > 0:
self.store_data_to_sqlite(sqlite_connection, sqlite_cursor, json_data, persist_table_name, persist_datetime_column, drop_before_create = True)
json_data = {"columns": [], "rows": []}
#三、执行子查询
if sub_queries is not None:
json_data["data_ex"] = []
if use_threading:
threads = []
for query_config in sub_queries:
#json配置
name = query_config.get("name")
if (name is None) or (type(name).__name__ !="str"):
raise CustomException("Incorrect Json data in sub_queries: name must be exist and must be a string.")
sub_query = query_config.get("query")
if sub_query is None:
raise CustomException("Incorrect Json data in sub_query %s: query must be exist." % name)
if type(sub_query).__name__ =="str":
pass
elif type(sub_query).__name__ =="dict":
sub_query = json_dumps(sub_query)
else:
raise CustomException("Incorrect Json data in sub_query %s: query must be a string or json format." % name)
sub_persist_table_name = None
sub_persist_datetime_column = None
if store_to_db:
sub_persist_table_name = query_config.get("persist_table_name")
if sub_persist_table_name is None or type(sub_persist_table_name).__name__ !="str":
raise CustomException("Incorrect Json data in sub_query %s: persist_table_name must be a string." % name)
sub_persist_datetime_column = query_config.get("persist_datetime_column")
if sub_persist_datetime_column is not None and type(sub_persist_datetime_column).__name__ !="str":
raise CustomException("Incorrect Json data in sub_query %s: persist_datetime_column must be a string." % name)
if use_threading:
r = Result()
r.config = query_config
t = threading.Thread(target=self._run_query_threading, args=(sub_query, user, sqlite_query_param, r))
threads.append({"t": t, "r": r})
t.start()
else:
#查询
self._log_info("Processing Sub Query:#####%s#####" % sub_query)
query_data, query_error = self.run_query_obj_result(sub_query, user, sqlite_query_param)
if query_error is not None:
raise CustomException(query_error)
if (query_data is None) or query_data.get("columns") is None:
raise CustomException("Incorrect query data for sub query %s." % name)
#存储
if store_to_db:
if query_error is None and len(query_data["columns"]) > 0:
self.store_data_to_sqlite(sqlite_connection, sqlite_cursor, query_data, sub_persist_table_name, sub_persist_datetime_column, drop_before_create = True)
else:
json_data["data_ex"].append({"name": name, "data": query_data})
if use_threading:
for itor in threads:
itor["t"].join()
for itor in threads:
r = itor["r"]
query_data = r.json_data
query_error = r.error
if query_error is not None:
raise CustomException(query_error)
if (query_data is None) or query_data.get("columns") is None:
name = r.config["name"]
raise CustomException("Incorrect query data for sub query %s." % name)
for itor in threads:
r = itor["r"]
query_data = r.json_data
query_error = r.error
if store_to_db:
if query_error is None and len(query_data["columns"]) > 0:
sub_persist_table_name = r.config["persist_table_name"]
sub_persist_datetime_column = r.config.get("persist_datetime_column")
self.store_data_to_sqlite(sqlite_connection, sqlite_cursor, query_data, sub_persist_table_name, sub_persist_datetime_column, drop_before_create = True)
else:
name = r.config["name"]
json_data["data_ex"].append({"name": name, "data": query_data})
except CustomException as e:
error = e.read()
#sqlite_connection.cancel()
except JobTimeoutException:
error = "Query exceeded Redash query execution time limit."
#sqlite_connection.cancel()
except Exception as e:
error = str(e)
#sqlite_connection.cancel()
finally:
#删除所有数据表
for (k,v) in table_name_map.items():
drop_table_sql = "DROP TABLE IF EXISTS " + v + ";"
self._log_info(drop_table_sql)
sqlite_cursor.execute(drop_table_sql)
sqlite_connection.commit()
sqlite_connection.close()
if error is not None:
raise CustomException(error)
return json_data, error
def store_data_to_sqlite(self, sqlite_connection, sqlite_cursor, query_data, table_name, datetime_column, drop_before_create = False):
    """Persist a query-result dict into a SQLite table.

    query_data must look like {"columns": [{"name": ..., "type": ...}, ...],
    "rows": [{column_name: value, ...}, ...]}. The column named by
    datetime_column (if any) is created with DATETIME affinity; other column
    types are mapped through SQLITE_TYPES_MAP and default to TEXT.
    Commits at the end so cursors opened afterwards can see the data.

    NOTE(review): table_name and column names are still interpolated into the
    SQL text (identifiers cannot be bound parameters); they are assumed to
    come from trusted configuration — confirm upstream validation.
    """
    # Drop any previous incarnation of the table when replacing persisted data.
    if drop_before_create:
        drop_table_sql = "DROP TABLE IF EXISTS " + table_name + ";"
        self._log_info(drop_table_sql)
        sqlite_cursor.execute(drop_table_sql)
        sqlite_connection.commit()
    # Build and run the CREATE TABLE statement.
    column_defs = []
    for column in query_data["columns"]:
        if datetime_column is not None and column["name"] == datetime_column:
            type_str = "DATETIME"
        else:
            type_str = SQLITE_TYPES_MAP.get(column["type"])
            if type_str is None:
                type_str = "TEXT"
        column_defs.append(column["name"] + " " + type_str)
    create_table_sql = "CREATE TABLE " + table_name + "(" + ", ".join(column_defs) + ");"
    self._log_info(create_table_sql)
    sqlite_cursor.execute(create_table_sql)
    # Insert the rows with a parameterized statement.
    # Bug fix: the previous implementation spliced values directly into the
    # SQL text and wrapped "string" values in double quotes without escaping,
    # which broke on values containing quotes and allowed SQL injection.
    if len(query_data["rows"]) > 0:
        column_names = [column["name"] for column in query_data["columns"]]
        insert_sql = "INSERT INTO " + table_name + " VALUES(" + ", ".join(["?"] * len(column_names)) + ");"
        self._log_info(insert_sql)
        sqlite_cursor.executemany(
            insert_sql,
            ([row[column_name] for column_name in column_names] for row in query_data["rows"])
        )
    # Commit: otherwise other cursors opened next may not see the new data.
    sqlite_connection.commit()
def run_sqlite_query(self, querystr, sqlite_query_param):
    """Execute *querystr* against the local SQLite staging database.

    sqlite_query_param keys:
      "table_name_map": {logical temp-table name -> randomized physical name}
          substitutions to apply to the query text before execution.
      "can_create_table": when falsy, CREATE TABLE statements are rejected.

    Returns (json_data, error). Any failure is raised as CustomException, so
    the error element of the returned tuple is always None.
    """
    # Strip the routing prefix and map logical temp-table names to the
    # randomized physical names created earlier in the pipeline.
    querystr = querystr.replace(QUERY_MODE_SQLITE_PREFIX, '')
    table_name_map = sqlite_query_param.get("table_name_map")
    if table_name_map is not None:
        for (k, v) in table_name_map.items():
            querystr = ReplaceTableName(querystr, k, v)
    self._log_info(querystr)
    # Permission checks before touching the database.
    if not sqlite_query_param.get("can_create_table"):
        table_name = GetTableNameToCreate(querystr)
        if table_name is not None:
            raise CustomException("No permission to create table %s!" % table_name)
    forbidden_part = CheckForbiddenSql(querystr)
    if forbidden_part is not None:
        raise CustomException("No permission to %s " % forbidden_part)
    error = None
    json_data = None
    sqlite_connection = sqlite3.connect(self.sqlite_dbpath)
    sqlite_cursor = sqlite_connection.cursor()
    try:
        sqlite_cursor.execute(querystr)
        sqlite_connection.commit()
        if sqlite_cursor.description is not None:
            columns = self.fetch_columns([(i[0], None) for i in sqlite_cursor.description])
            rows = [
                dict(zip((column["name"] for column in columns), row))
                for row in sqlite_cursor
            ]
            # Rebuild column metadata from the first row's Python types,
            # because sqlite's cursor description carries no type info.
            columns = []
            if len(rows) > 0:
                row = rows[0]
                for (column_name, column_value) in row.items():
                    columns.append(
                        {"name": column_name, "friendly_name": column_name, "type": PYTHON_TYPES_MAP[type(column_value).__name__]}
                    )
            else:
                self._log_warning("run_sqlite_query, NO DATA IN rows")
            json_data = {"columns": columns, "rows": rows}
        else:
            # Statement produced no result set (DDL/DML): return empty data.
            json_data = {"columns": [], "rows": []}
    except Exception as e:
        error = str(e)
    finally:
        sqlite_connection.close()
    if error is not None:
        raise CustomException(error)
    return json_data, error
def get_schema(self, get_stats=False):
    """Return the data source schema as a list of
    {"name": "<schema>.<table>", "columns": [...]} dicts, built from
    INFORMATION_SCHEMA.COLUMNS."""
    query = """
SELECT TABLE_SCHEMA,
TABLE_NAME,
COLUMN_NAME
FROM INFORMATION_SCHEMA.COLUMNS
WHERE TABLE_SCHEMA <> 'INFORMATION_SCHEMA'
"""
    results, error = self.run_query(query, None)
    if error is not None:
        raise Exception("Failed getting schema.")
    # Group column names under their fully-qualified table name.
    schema = {}
    for row in json_loads(results)["rows"]:
        table_name = "{}.{}".format(row["TABLE_SCHEMA"], row["TABLE_NAME"])
        entry = schema.setdefault(table_name, {"name": table_name, "columns": []})
        entry["columns"].append(row["COLUMN_NAME"])
    return list(schema.values())
# Register this query runner with the global runner registry so it becomes
# selectable as a data source type.
register(Druid)
|
testTPSScript.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
from chain.const import CONFIG, IP_CONFIG
from chain.hibechain import HIBEChain
# from chain.singlechain import SingleChain
from chain.gethnode import GethNode
from chain.iplist import IPList
from chain.conf import load_config_file
from typing import Optional
from random import randint
from datetime import datetime
import threading
import time
# Load the host/IP inventory and the chain topology (chain ids plus
# (node_count, threshold) pairs) from the configuration files.
ip_list = IPList(IP_CONFIG)
id_list, thresh_list = load_config_file(CONFIG)
print(id_list)
print(thresh_list)
# Total number of nodes across all chains: sum the n of each (n, t) entry.
node_count = sum(n for (n, t) in thresh_list)
print('-----')
print('node_count:', node_count)
print('-----')
# -------------- clear containers -----------------------
ip_list.stop_all_containers()
# ip_list.remove_all_containers()
# -------------------------------------------------------
def get_pf(pf: Optional[list], node: GethNode, pf_list: Optional[list], index: int) -> None:
    """Ask *node* for a transaction proof derived from *pf* and store the
    result in pf_list[index]; on RuntimeError back off briefly and store None."""
    proof = None
    try:
        proof = node.get_transaction_proof_by_proof(pf)
    except RuntimeError as err:
        time.sleep(0.2)
        print(err)
    pf_list[index] = proof
start_time = time.time()
# Build the hierarchical (HIBE) chain topology and connect all nodes.
hibe = HIBEChain(id_list, thresh_list, ip_list)
hibe.construct_hibe_chain()
connect_time = time.time()
# Heuristic wait proportional to the largest top-level chain so addPeer
# calls can settle before we probe connectivity.
waiting_time = max([chain.node_count for chain in hibe.structured_chains[0]]) // 5
print('another %d seconds waiting for addPeer' % waiting_time)
time.sleep(waiting_time)
if not hibe.is_connected():
    # raise RuntimeError('connection is not ready')
    print('connection is not ready')
else:
    print('connected')
# Assign node numbers, levels and chain ids now that the topology is up.
hibe.set_number()
hibe.set_level()
hibe.set_id()
end_time = time.time()
print("connect time %.3fs" % (connect_time-start_time))
set_up_time = end_time - start_time
print("set up time %.3fs" % set_up_time)
# Handles: the root chain, the terminal (bottom) layer, and the leaf
# chains that parent the terminal chains.
root_chain = hibe.get_chain('')
root = root_chain.get_node_by_index(1)
terminal_chains = hibe.structured_chains[-1]
terminal_nodes = [terminal_chain.get_node_by_index(1) for terminal_chain in terminal_chains]
leaf_chains = {hibe.get_chain(terminal_chain.get_parent_chain_id()) for terminal_chain in terminal_chains}
leaf_chains = list(leaf_chains)
leaf_nodes = [leaf_chain.get_node_by_index(1) for leaf_chain in leaf_chains]
time.sleep(hibe.max_level*5)
# ----------------test tps ----------------------
# Start mining on every non-terminal chain, one thread per node; the small
# sleep staggers thread creation.
threads = []
for chain in hibe.structured_chains[:-1]:
    for node in chain:
        t = threading.Thread(target=node.start_miner)
        time.sleep(0.02)
        threads.append(t)
        t.start()
for t in threads:
    t.join()
time.sleep(hibe.max_level)
print('number of terminal nodes:', len(terminal_nodes))
# send transactions
print('sending transactions...')
threads = []
for terminal_node in terminal_nodes:
    t = threading.Thread(target=terminal_node.send_transaction3, args=(5721, 1, 0, 1, 10))
    t.start()
    threads.append(t)
for t in threads:
    t.join()
# Allow time for transactions to propagate up the hierarchy.
time.sleep(hibe.max_level * 15)
'''
for index, terminal_node in terminal_nodes:
block_index = 0
while leaf_nodes[index].get_block_transaction_count(block_index) == 0:
block_index += 1
time.sleep(0.2)
print('-----leaf chain block index is %s-----' % block_index)
tx_hash = leaf_nodes[index].get_transaction_by_block_number_and_index(block_index, 1)
pf = leaf_nodes[0].get_transaction_proof_by_hash(tx_hash)
current_chain = leaf_chains[0]
current_node = leaf_nodes[0]
current_pf = pf
leaf_chains[0].get_log(leaf_nodes[0].node_index)
time.sleep(0.5)
t0 = leaf_chains[0].search_log(leaf_nodes[0].node_index, block_index-1)
t1 = leaf_chains[0].search_log(leaf_nodes[0].node_index, block_index)
tps = []
'''
'''
block_index = 1
tx_hash = leaf_node.get_transaction_by_block_number_and_index(block_index, 1)
while not tx_hash:
block_index += 1
print('waiting tx hash')
time.sleep(0.05)
tx_hash = leaf_node.get_transaction_by_block_number_and_index(block_index, 1)
pf = leaf_node.get_transaction_proof_by_hash(tx_hash)
current_chain = leaf_chain
current_node = leaf_node
current_pf = pf
block_index = 1
leaf_chain.get_log(leaf_node.node_index)
time.sleep(0.5)
leaf_chain.search_log(leaf_node.node_index, block_index)
timestamp_leaf = int(leaf_node.get_block_by_index(1)['timestamp'], 16)
for i in range(hibe.max_level-1):
current_chain = hibe.get_chain(current_chain.get_parent_chain_id())
# threads = []
# pf_list = [None] * current_chain.node_count
# while sum([True if pf else False for pf in pf_list]) < current_chain.threshold:
# # if sum([True if pf else False for pf in pf_list]) <= current_chain.threshold // 3:
# # time.sleep(10)
# # elif sum([True if pf else False for pf in pf_list]) <= current_chain.threshold // 2:
# # time.sleep(5)
# for index, node in enumerate(current_chain.nodes):
# if not pf_list[index]:
# t = threading.Thread(target=get_pf, args=(current_pf, node, pf_list, index))
# t.start()
# threads.append(t)
# for t in threads:
# t.join()
# for i in range(current_chain.node_count):
# if pf_list[i]:
# current_pf = pf_list[i]
# break
while True:
try:
search_index = randint(1, current_chain.node_count)
print(search_index)
current_node = current_chain.get_node_by_index(search_index)
tmp_pf = current_node.get_transaction_proof_by_proof(current_pf)
if tmp_pf:
block_index = int(tmp_pf[-1], 16)
print('block index is %d.' % block_index)
current_chain.get_log(current_node.node_index)
time.sleep(0.5)
current_chain.search_log(current_node.node_index, block_index)
timestamp_current = current_node.get_block_by_index(block_index)['timestamp']
tmp_pf = tmp_pf[:-1]
break
except RuntimeError as e:
time.sleep(15)
print(e)
current_pf = tmp_pf
end_time = time.time()
timestamp_root = int(root.get_block_by_index(block_index)['timestamp'], 16)
latency = (datetime.fromtimestamp(timestamp_root) - datetime.fromtimestamp(timestamp_leaf)).seconds
# hibe.destruct_hibe_chain()
print(current_pf)
print(current_chain.chain_id)
search_time = end_time - sent_time
print('search time: ', search_time)
print('latency:', latency)
with open('elapsed_time.txt', 'a') as log:
log.write(time.asctime())
log.write('\n'.join(id_list))
log.write('\n')
log.write(', '.join(map(str, thresh_list)))
log.write('\n')
log.write('set up time: %.6f\n' % set_up_time)
log.write('search time: %.6f\n' % search_time)
log.write('latency: %d\n\n' % latency)
'''
# ----------------test latency end --------------------
# ----------------remove all containers ---------------
#
# ip_list.stop_all_containers()
# time.sleep(0.2)
# ip_list.remove_all_containers()
|
test_named_mutex.py |
import time
from winmutex import WinMutex
from unittest import TestCase
from threading import Thread
class TestNamedMutex (TestCase):
    """Checks that a named WinMutex serializes threads: each thread's
    REPEAT_COUNT appends must appear as one contiguous, homogeneous run."""
    def test_named_mutex1 (self):
        REPEAT_COUNT = 3
        THREAD_COUNT = 3
        stack = list()
        def example (value):
            # All threads open the same named mutex, so the appends below
            # happen as an uninterrupted run for each holder.
            mutex = WinMutex(name="example")
            with mutex:
                for n in range(REPEAT_COUNT):
                    stack.append(value)
                    time.sleep(1)
            mutex.close()
        threads = list()
        for n in range(THREAD_COUNT):
            thread = Thread(target=example, args=(n,))
            threads.append(thread)
        for thread in threads:
            thread.start()
        for thread in threads:
            thread.join()
        # Verify each run of REPEAT_COUNT entries is homogeneous.
        # Bug fix: the step and the inner range end previously hard-coded 3
        # instead of REPEAT_COUNT, so the check silently broke if
        # REPEAT_COUNT was changed.
        for index in range(0, REPEAT_COUNT * THREAD_COUNT, REPEAT_COUNT):
            for ind in range(index, index + REPEAT_COUNT):
                self.assertEqual(stack[index], stack[ind])
|
qemu.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import cPickle
import logging
import os
import threading
import random
import select
import signal
import socket
import subprocess
import sys
import time
import shlex
#from tashi.rpycservices.rpyctypes import *
from tashi.rpycservices.rpyctypes import InstanceState, Host
from tashi.util import scrubString, boolean
from tashi import version, stringPartition
from vmcontrolinterface import VmControlInterface
def controlConsole(child, port):
    """This exposes a TCP port that connects to a particular child's monitor -- used for debugging"""
    # Accepts a single TCP client and shuttles bytes in both directions
    # between the client socket and the child's qemu monitor fd via select().
    #print "controlConsole"
    listenSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    listenSocket.bind(("0.0.0.0", port))
    #print "bound"
    try:
        try:
            listenSocket.listen(5)
            ls = listenSocket.fileno()
            #input = child.monitorFd
            # The monitor fd is used for both reading and writing.
            output = child.monitorFd
            #print "listen"
            # Block until a client connects.
            select.select([ls], [], [])
            (s, __clientAddr) = listenSocket.accept()
            while s:
                # Once the monitor side hits EOF (output == -1), only poll
                # the client socket.
                if (output != -1):
                    (rl, __wl, __el) = select.select([s, output], [], [])
                else:
                    (rl, __wl, __el) = select.select([s], [], [])
                if (len(rl) > 0):
                    if (rl[0] == s):
                        #print "from s"
                        buf = s.recv(4096)
                        if (buf == ""):
                            # Client disconnected: tear down and leave the loop.
                            s.close()
                            listenSocket.close()
                            s = None
                            continue
                        if (output != -1):
                            os.write(child.monitorFd, buf)
                    elif (rl[0] == output):
                        #print "from output"
                        buf = os.read(output, 4096)
                        #print "read complete"
                        if (buf == ""):
                            # EOF on the monitor pty: stop reading that side.
                            output = -1
                        else:
                            s.send(buf)
        except:
            # NOTE(review): if accept() itself raised, `s` is unbound here and
            # this cleanup raises NameError — confirm before relying on it.
            s.close()
            listenSocket.close()
    finally:
        #print "Thread exiting"
        pass
class Qemu(VmControlInterface):
"""This class implements the VmControlInterface for Qemu/KVM"""
def __init__(self, config, dfs, nm):
    """Read the Qemu configuration section, rediscover VMs already running
    on this host, and start the polling (and optional stats) threads."""
    VmControlInterface.__init__(self, config, dfs, nm)
    # Paths and timing knobs, all overridable via the "Qemu" config section.
    self.QEMU_BIN = self.config.get("Qemu", "qemuBin", default = "/usr/bin/kvm")
    self.INFO_DIR = self.config.get("Qemu", "infoDir", default = "/var/tmp/VmControlQemu/")
    self.POLL_DELAY = float(self.config.get("Qemu", "pollDelay", default = 1))
    self.migrationRetries = int(self.config.get("Qemu", "migrationRetries", default = 10))
    self.monitorTimeout = float(self.config.get("Qemu", "monitorTimeout", default = 60))
    self.migrateTimeout = float(self.config.get("Qemu", "migrateTimeout", default = 300))
    self.useMigrateArgument = boolean(self.config.get("Qemu", "useMigrateArgument", default = False))
    self.statsInterval = float(self.config.get("Qemu", "statsInterval", default = 0))
    # Memory (MB) held back from what is advertised to the scheduler.
    reservedMem = self.config.get("Qemu", "reservedMem", default = 512)
    reservedMem = int(reservedMem)
    self.reservedMem = reservedMem
    self.log = logging.getLogger(__file__)
    self.ifPrefix = "tashi"
    # keep a handle to my NM service
    self.service = None
    # vmId -> child record for every VM this host controls.
    self.controlledVMs = {}
    self.hostname = socket.gethostname()
    # Port pools, each guarded by its own lock.
    self.usedPorts = []
    self.usedPortsLock = threading.Lock()
    self.vncPorts = []
    self.vncPortLock = threading.Lock()
    self.consolePort = 10000
    self.consolePortLock = threading.Lock()
    maxParallelMigrations = self.config.get("Qemu", "maxParallelMigrations")
    maxParallelMigrations = int(maxParallelMigrations)
    if maxParallelMigrations < 1:
        maxParallelMigrations = 1
    # Bounds the number of simultaneous outbound migrations.
    self.migrationSemaphore = threading.Semaphore(maxParallelMigrations)
    self.stats = {}
    self.suspendHandler = self.config.get("Qemu", "suspendHandler", default = "gzip")
    self.resumeHandler = self.config.get("Qemu", "resumeHandler", default = "zcat")
    self.scratchVg = self.config.get("Qemu", "scratchVg")
    self.scratchDir = self.config.get("Qemu", "scratchDir", default = "/tmp")
    try:
        os.mkdir(self.INFO_DIR)
    except:
        # Directory probably exists already; other errors surface later.
        pass
    # Pick up VMs left over from a previous daemon run, then start pollers.
    self.__scanInfoDir()
    threading.Thread(target=self.__pollVMsLoop).start()
    if (self.statsInterval > 0):
        threading.Thread(target=self.statsThread).start()
class anonClass:
    """Ad-hoc record type: promotes its keyword arguments to instance attributes."""
    def __init__(self, **attrs):
        for key, value in attrs.items():
            setattr(self, key, value)
def __dereferenceLink(self, spec):
    """Return the canonical path for *spec* with any symlinks resolved."""
    return os.path.realpath(spec)
def __getHostPids(self):
    """Utility function to get a list of system PIDs that match the QEMU_BIN specified (/proc/nnn/exe)"""
    real_bin = self.__dereferenceLink(self.QEMU_BIN)
    matching = []
    for entry in os.listdir("/proc"):
        try:
            exe_target = os.readlink("/proc/%s/exe" % (entry))
            if real_bin in exe_target:
                matching.append(int(entry))
        except Exception:
            # Non-process entries (e.g. /proc/self) and processes we cannot
            # inspect are simply skipped.
            pass
    return matching
# extern
def getInstances(self):
    """Will return a dict of instances by vmId to the caller"""
    snapshot = {}
    for vm_id, child in self.controlledVMs.items():
        snapshot[vm_id] = child.instance
    return snapshot
def __matchHostPids(self):
    """This is run in a separate polling thread and it must do things that are thread safe"""
    # Reconcile the set of controlled VMs against the qemu processes
    # actually alive on the host; reap any that have exited.
    # NOTE(review): assumes keys() snapshots the ids so deleting from
    # self.controlledVMs inside the loop is safe (true on Python 2, where
    # keys() returns a list) — confirm if ever ported to Python 3.
    vmIds = self.controlledVMs.keys()
    pids = self.__getHostPids()
    for vmId in vmIds:
        child = self.controlledVMs[vmId]
        # check to see if the child was just started.
        # Only try to check on it if startup was more
        # than 5 seconds in the past
        if "startTime" in child.__dict__:
            if child.startTime + 5 < time.time():
                del child.startTime
            else:
                self.log.info("Not processing vmId %d because it is newly started" % (vmId))
                continue
        instance = child.instance
        name = instance.name
        if vmId not in pids:
            # VM is no longer running, but is still
            # considered controlled
            # remove info file
            os.unlink(self.INFO_DIR + "/%d"%(vmId))
            # XXXstroucki python should handle
            # locking here (?)
            del self.controlledVMs[vmId]
            # remove any stats (appropriate?)
            try:
                del self.stats[vmId]
            except:
                pass
            # Return the VNC port to the pool.
            if (child.vncPort >= 0):
                self.vncPortLock.acquire()
                self.vncPorts.remove(child.vncPort)
                self.vncPortLock.release()
            self.log.info("Removing vmId %d because it is no longer running" % (vmId))
            # if the VM was started from this process,
            # wait on it
            if (child.OSchild):
                try:
                    (_pid, status) = os.waitpid(vmId, 0)
                    self.log.info("vmId %s exited with status %s" % (vmId, status))
                except:
                    self.log.exception("waitpid failed for vmId %s" % (vmId))
            # recover the child's stderr and monitor
            # output if possible
            if (child.errorBit):
                if (child.OSchild):
                    f = open("/tmp/%d.err" % (vmId), "w")
                    f.write(child.stderr.read())
                    f.close()
                f = open("/tmp/%d.pty" % (vmId), "w")
                for i in child.monitorHistory:
                    f.write(i)
                f.close()
            # remove scratch storage
            try:
                if self.scratchVg is not None:
                    scratchName = "lv%s" % name
                    self.log.info("Removing any scratch for %s" % (name))
                    cmd = "/sbin/lvremove --quiet -f %s/%s" % (self.scratchVg, scratchName)
                    __result = subprocess.Popen(cmd.split(), executable=cmd.split()[0], stdout=subprocess.PIPE, stderr=open(os.devnull, "w"), close_fds=True).wait()
            except:
                self.log.warning("Problem cleaning scratch volumes")
                pass
            # let the NM know
            try:
                # XXXstroucki: we don't want to treat
                # the source VM of a migration exiting
                # as an actual
                # exit, but the NM should probably know.
                self.nm.vmStateChange(vmId, None, InstanceState.Exited)
            except Exception:
                self.log.exception("vmStateChange failed for VM %s" % (name))
        else:
            # VM is still running
            try:
                if (child.migratingOut):
                    self.nm.vmStateChange(vmId, None, InstanceState.MigrateTrans)
                elif (instance.state == InstanceState.Orphaned) or \
                    (instance.state == InstanceState.Activating):
                    self.nm.vmStateChange(vmId, None, InstanceState.Running)
            except:
                self.log.exception("vmStateChange failed for VM %s" % (name))
# called once on startup
def __scanInfoDir(self):
    """This is not thread-safe and must only be used during class initialization"""
    # Rediscover VMs left running by a previous daemon instance: every file
    # in INFO_DIR is named after a vmId and holds its pickled state.
    controlledVMs = {}
    controlledVMs.update(map(lambda x: (int(x), self.anonClass(OSchild=False, errorBit=False, migratingOut=False)), os.listdir(self.INFO_DIR + "/")))
    if (len(controlledVMs) == 0):
        self.log.info("No VM information found in %s" % (self.INFO_DIR))
    for vmId in controlledVMs:
        try:
            child = self.__loadChildInfo(vmId)
            self.vncPortLock.acquire()
            if (child.vncPort >= 0):
                self.vncPorts.append(child.vncPort)
            self.vncPortLock.release()
            # Reattach to the existing monitor pty of the running VM.
            child.monitorFd = os.open(child.ptyFile, os.O_RDWR | os.O_NOCTTY)
            child.monitor = os.fdopen(child.monitorFd)
            #XXXstroucki ensure instance has vmId
            child.instance.vmId = vmId
            self.controlledVMs[vmId] = child
        except Exception:
            self.log.exception("Failed to load VM info for %d", vmId)
        else:
            self.log.info("Loaded VM info for %d", vmId)
# service thread
def __pollVMsLoop(self):
    """Infinite loop that checks for dead VMs"""
    # As of 2011-12-30, nm is None when this is called, and
    # is set later by the NM. Things further down require
    # access to the NM, so wait until it is set.
    # Moved into __pollVMsLoop since putting it in this thread
    # will allow the init to complete and nm to be actually
    # set.
    while self.nm is None:
        self.log.info("Waiting for NM initialization")
        time.sleep(2)
    while True:
        try:
            time.sleep(self.POLL_DELAY)
            self.__matchHostPids()
        # Deliberately broad: this service thread must survive any error
        # raised while reaping VMs.
        except:
            # Bug fix: the log message previously read "poolVMsLoop".
            self.log.exception("Exception in pollVMsLoop")
def __waitForExit(self, vmId):
    """This waits until an element is removed from the dictionary -- the polling thread must detect an exit"""
    # Busy-wait at the same cadence as the poller; __matchHostPids removes
    # the entry once the qemu process is gone.
    while vmId in self.controlledVMs:
        time.sleep(self.POLL_DELAY)
def __getChildFromPid(self, pid):
    """Do a simple dictionary lookup, but raise a unique exception if the key doesn't exist"""
    child = self.controlledVMs.get(pid, None)
    if (not child):
        # Use the Exception(...) call form: it behaves identically on
        # Python 2 but, unlike the old "raise Exception, msg" statement,
        # is also valid Python 3 syntax.
        raise Exception("Uncontrolled vmId %d" % (pid))
    return child
def __consumeAvailable(self, child):
    """Consume characters one-by-one until they stop coming"""
    monitorFd = child.monitorFd
    buf = ""
    try:
        # Non-blocking poll (timeout 0.0): keep reading while data is ready.
        (rlist, __wlist, __xlist) = select.select([monitorFd], [], [], 0.0)
        while (len(rlist) > 0):
            c = os.read(monitorFd, 1)
            if (c == ""):
                # Zero-byte read means EOF: the monitor went away.
                self.log.error("Early termination on monitor for vmId %d" % (child.pid))
                child.errorBit = True
                raise EOFError
            buf = buf + c
            (rlist, __wlist, __xlist) = select.select([monitorFd], [], [], 0.0)
    finally:
        # Everything read (even on error) is kept for post-mortem debugging.
        child.monitorHistory.append(buf)
    return buf
def __consumeUntil(self, child, needle, timeout = -1):
    """Consume characters one-by-one until something specific comes up"""
    if (timeout == -1):
        timeout = self.monitorTimeout
    monitorFd = child.monitorFd
    # Pad with len(needle) spaces so the tail comparison below is valid
    # before that many characters have been read; the padding is stripped
    # from the return value and from the recorded history.
    buf = " " * len(needle)
    try:
        while (buf[-(len(needle)):] != needle):
            #print "[BUF]: %s" % (buf)
            #print "[NEE]: %s" % (needle)
            (rlist, __wlist, __xlist) = select.select([monitorFd], [], [], timeout)
            if (len(rlist) == 0):
                # Nothing arrived within the timeout: flag the child as bad.
                self.log.error("Timeout getting results from monitor on FD %s for vmId %d" % (monitorFd, child.pid))
                child.errorBit = True
                raise EOFError
            c = os.read(monitorFd, 1)
            if (c == ""):
                # Zero-byte read means EOF on the monitor.
                self.log.error("Early termination on monitor FD %s for vmId %d" % (monitorFd, child.pid))
                child.errorBit = True
                raise EOFError
            buf = buf + c
    finally:
        child.monitorHistory.append(buf[len(needle):])
    return buf[len(needle):]
def __enterCommand(self, child, command, expectPrompt = True, timeout = -1):
    """Enter a command on the qemu monitor"""
    # Drain any pending output first so the response captured below belongs
    # to this command only.
    res = self.__consumeAvailable(child)
    os.write(child.monitorFd, command + "\n")
    if (expectPrompt):
        # XXXstroucki: receiving a vm can take a long time
        # Skip the echoed command, then return everything up to the next
        # "(qemu) " prompt as the command's output.
        self.__consumeUntil(child, command, timeout=timeout)
        res = self.__consumeUntil(child, "(qemu) ", timeout=timeout)
    return res
def __loadChildInfo(self, vmId):
    """Load the pickled (instance, pid, ptyFile) state for *vmId* from
    INFO_DIR and return it as an anonClass child record."""
    child = self.anonClass(pid=vmId)
    info = open(self.INFO_DIR + "/%d"%(child.pid), "r")
    (instance, pid, ptyFile) = cPickle.load(info)
    info.close()
    # Sanity check: the file's recorded pid must match its file name.
    if (pid != child.pid):
        raise Exception, "PID mismatch"
    child.instance = instance
    child.pid = pid
    child.ptyFile = ptyFile
    # Backfill attributes that may be missing from info files written by
    # older versions of this module.
    if ('monitorHistory' not in child.__dict__):
        child.monitorHistory = []
    if ('OSchild' not in child.__dict__):
        child.OSchild = False
    if ('errorBit' not in child.__dict__):
        child.errorBit = False
    if ('migratingOut' not in child.__dict__):
        child.migratingOut = False
    if ('vncPort' not in child.__dict__):
        child.vncPort = -1
    return child
def __saveChildInfo(self, child):
    """Pickle (instance, pid, ptyFile) for *child* into INFO_DIR/<pid>."""
    # XXXstroucki: if the disk INFO_DIR is on is full,
    # we may not be able to store our data. This can lead
    # to VMs remaining running that the NM doesn't know about
    # Can we do anything, or should be a task external to Tashi?
    state = (child.instance, child.pid, child.ptyFile)
    with open(self.INFO_DIR + "/%d" % (child.pid), "w") as info:
        cPickle.dump(state, info)
# extern
def getHostInfo(self, service):
    """Build a Host record describing this machine (name, memory, cores,
    version) and remember the NM service handle for later callbacks."""
    host = Host()
    host.name = self.hostname
    self.service = service
    host.id = self.service.id
    # Linux specific: the first line of /proc/meminfo is "MemTotal: <n> kB".
    # Bug fix: the file object was previously left open (leaked descriptor).
    meminfo = open("/proc/meminfo","r")
    try:
        memoryStr = meminfo.readline().strip().split()
    finally:
        meminfo.close()
    if (memoryStr[2] == "kB"):
        # XXXstroucki should have parameter for reserved mem
        host.memory = (int(memoryStr[1])/1024) - self.reservedMem
    else:
        self.log.warning('Unable to determine amount of physical memory - reporting 0')
        host.memory = 0
    host.cores = os.sysconf("SC_NPROCESSORS_ONLN")
    host.up = True
    host.decayed = False
    host.version = version
    return host
def __stripSpace(self, s):
    """Remove all whitespace (spaces, tabs, newlines) from *s*."""
    return "".join(s.split())
def __startVm(self, instance, source):
    """Universal function to start a VM -- used by instantiateVM, resumeVM, and prepReceiveVM

    Builds the qemu command line from the instance description and its
    hints, forks, and execs qemu in the child with stderr captured
    through a pipe (so __getPtyInfo can find the monitor pty).

    instance: instance description; hints drive cpu/clock/disk/nic setup.
    source: qemu -incoming specification, or None for a fresh boot.
    Returns (vmId, cmd): vmId is the qemu pid, cmd the argv list used.
    """
    # Capture __startVm Hints
    # CPU hints
    cpuModel = instance.hints.get("cpumodel")
    cpuString = ""
    if cpuModel:
        # clean off whitespace
        cpuModel = self.__stripSpace(cpuModel)
        cpuString = "-cpu " + cpuModel
    # Clock hints
    clockString = instance.hints.get("clock", "dynticks")
    # clean off whitespace
    clockString = self.__stripSpace(clockString)
    # Disk hints
    # XXXstroucki: insert commentary on jcipar's performance
    # measurements
    # virtio is recommended, but linux will name devices
    # vdX instead of sdX. This adds a trap for someone who
    # converts a physical machine or other virtualization
    # layer's image to run under Tashi.
    diskInterface = instance.hints.get("diskInterface", "ide")
    # clean off whitespace
    diskInterface = self.__stripSpace(diskInterface)
    diskString = ""
    # One -drive clause per configured disk; non-persistent disks run
    # in snapshot mode with the host cache disabled.
    for index in range(0, len(instance.disks)):
        disk = instance.disks[index]
        uri = scrubString(disk.uri)
        imageLocal = self.dfs.getLocalHandle("images/" + uri)
        imageLocal = self.__dereferenceLink(imageLocal)
        thisDiskList = [ "file=%s" % imageLocal ]
        thisDiskList.append("if=%s" % diskInterface)
        thisDiskList.append("index=%d" % index)
        if (index == 0 and diskInterface == "virtio"):
            thisDiskList.append("boot=on")
        if (disk.persistent):
            snapshot = "off"
            migrate = "off"
        else:
            snapshot = "on"
            migrate = "on"
            thisDiskList.append("cache=off")
        thisDiskList.append("snapshot=%s" % snapshot)
        if (self.useMigrateArgument):
            thisDiskList.append("migrate=%s" % migrate)
        diskString = diskString + "-drive " + ",".join(thisDiskList) + " "
    # scratch disk
    scratchSize = instance.hints.get("scratchSpace", "0")
    scratchSize = int(scratchSize)
    scratchName = None
    try:
        if scratchSize > 0:
            if self.scratchVg is None:
                raise Exception, "No scratch volume group defined"
            # create scratch disk
            # XXXstroucki: needs to be cleaned somewhere
            # XXXstroucki: clean user provided instance name
            scratchName = "lv%s" % instance.name
            # XXXstroucki hold lock
            # XXXstroucki check for capacity
            cmd = "/sbin/lvcreate --quiet -n%s -L %dG %s" % (scratchName, scratchSize, self.scratchVg)
            # XXXstroucki check result
            __result = subprocess.Popen(cmd.split(), executable=cmd.split()[0], stdout=subprocess.PIPE).wait()
            # scratch disk takes the next index after the real disks
            index += 1
            thisDiskList = [ "file=/dev/%s/%s" % (self.scratchVg, scratchName) ]
            thisDiskList.append("if=%s" % diskInterface)
            thisDiskList.append("index=%d" % index)
            thisDiskList.append("cache=off")
            # XXXstroucki force scratch disk to be
            # persistent
            # NOTE(review): with the "True or" short-circuit the else
            # branch is dead; disk (last loop variable) is never read.
            if (True or disk.persistent):
                snapshot = "off"
                migrate = "off"
            else:
                snapshot = "on"
                migrate = "on"
            thisDiskList.append("snapshot=%s" % snapshot)
            if (self.useMigrateArgument):
                thisDiskList.append("migrate=%s" % migrate)
            diskString = "%s-drive %s " % (diskString, ",".join(thisDiskList))
    except:
        self.log.exception('caught exception in scratch disk formation')
        raise
    # Nic hints
    nicModel = instance.hints.get("nicModel", "virtio")
    # clean off whitespace
    nicModel = self.__stripSpace(nicModel)
    nicString = ""
    nicNetworks = {}
    for i in range(0, len(instance.nics)):
        # Don't allow more than one interface per vlan
        nic = instance.nics[i]
        if nicNetworks.has_key(nic.network):
            continue
        nicNetworks[nic.network] = True
        nicString = nicString + "-net nic,macaddr=%s,model=%s,vlan=%d -net tap,ifname=%s%d.%d,vlan=%d,script=/etc/qemu-ifup.%d " % (nic.mac, nicModel, nic.network, self.ifPrefix, instance.id, i, nic.network, nic.network)
    # ACPI
    if (boolean(instance.hints.get("noAcpi", False))):
        noAcpiString = "-no-acpi"
    else:
        noAcpiString = ""
    # Construct the qemu command
    strCmd = "%s %s %s -clock %s %s %s -m %d -smp %d -serial null -vnc none -monitor pty -balloon virtio" % (self.QEMU_BIN, noAcpiString, cpuString, clockString, diskString, nicString, instance.memory, instance.cores)
    if (source):
        strCmd = '%s -incoming "%s"' % (strCmd, source)
    # XXXstroucki perhaps we're doing it backwards
    cmd = shlex.split(strCmd)
    self.log.info("Executing command: %s" % (strCmd))
    (pipe_r, pipe_w) = os.pipe()
    pid = os.fork()
    if (pid == 0):
        # child process
        pid = os.getpid()
        os.setpgid(pid, pid)
        os.close(pipe_r)
        # route qemu's stderr back to the parent through the pipe
        os.dup2(pipe_w, sys.stderr.fileno())
        for i in [sys.stdin.fileno(), sys.stdout.fileno()]:
            try:
                os.close(i)
            except:
                pass
        # close every other inherited descriptor before exec
        for i in xrange(3, os.sysconf("SC_OPEN_MAX")):
            try:
                os.close(i)
            except:
                pass
        # XXXstroucki unfortunately no kvm option yet
        # to direct COW differences elsewhere, so change
        # this process' TMPDIR, which kvm will honour
        os.environ['TMPDIR'] = self.scratchDir
        os.execl(self.QEMU_BIN, *cmd)
        # only reached if exec failed
        sys.exit(-1)
    # parent process
    os.close(pipe_w)
    # enforce the new instance to have our hostId!
    # otherwise, a migrated VM will have its previous hostId.
    instance.hostId = self.service.id
    child = self.anonClass(pid=pid, instance=instance, stderr=os.fdopen(pipe_r, 'r'), migratingOut = False, monitorHistory=[], errorBit = True, OSchild = True)
    child.ptyFile = None
    child.vncPort = -1
    child.instance.vmId = child.pid
    # XXXstroucki what about our hostId?
    # we need to make sure we don't report up a VM
    # with an inaccurate hostId.
    # Add a token to this new child object so that
    # we don't mistakenly clean up when matchHostPids
    # runs and the child process hasn't exec'ed yet.
    child.startTime = time.time()
    self.__saveChildInfo(child)
    self.log.info("Adding vmId %d" % (child.pid))
    self.controlledVMs[child.pid] = child
    return (child.pid, cmd)
def __getPtyInfo(self, child, issueContinue):
    """Locate the qemu monitor pty by watching the child's stderr.

    qemu announces "char device redirected to /dev/pts/N" at startup;
    wait for that line, open the pty as the monitor channel, and
    optionally issue "c" (continue) for VMs started stopped (resume and
    migration targets).
    """
    ptyFile = None
    while not ptyFile:
        line = child.stderr.readline()
        if (line == ""):
            # EOF on stderr: qemu exited before announcing a pty.
            # Reap it, then fail the start.
            try:
                os.waitpid(child.pid, 0)
            except:
                self.log.exception("waitpid failed")
            raise Exception, "Failed to start VM -- ptyFile not found"
        redirLine = "char device redirected to "
        if (line.find(redirLine) != -1):
            ptyFile=line[len(redirLine):].strip()
            break
    child.ptyFile = ptyFile
    child.monitorFd = os.open(child.ptyFile, os.O_RDWR | os.O_NOCTTY)
    child.monitor = os.fdopen(child.monitorFd)
    self.__saveChildInfo(child)
    if (issueContinue):
        # XXXstroucki: receiving a vm can take a long time
        self.__enterCommand(child, "c", timeout=None)
def __stopVm(self, vmId, target, stopFirst):
    """Universal function to stop a VM -- used by suspendVM, migrateVM

    vmId: pid of the controlled qemu process.
    target: qemu migrate destination (exec:/tcp: spec) or falsy to just
        quit the VM without migrating its state.
    stopFirst: pause the guest before migrating/quitting.
    Raises RuntimeError after migrationRetries failed attempts.
    """
    child = self.__getChildFromPid(vmId)
    if (stopFirst):
        self.__enterCommand(child, "stop")
    if (target):
        retry = self.migrationRetries
        # NOTE(review): if migrationRetries is 0 the loop never runs
        # and res/success below are unbound -- assumed to be >= 1.
        while (retry > 0):
            # migrate in foreground respecting cow backed
            # images
            # XXXstroucki if we're doing this in the fg
            # then it may still be ongoing when the timeout
            # happens, and no way of interrupting it
            # trying to restart the migration by running
            # the command again (when qemu is ready to
            # listen again) is probably not helpful
            # XXXstroucki: failures observed:
            # "migration failed"
            # "Block format 'qcow' used by device '' does not support feature 'live migration'
            success = False
            # see if migration can be speeded up
            res = self.__enterCommand(child, "migrate_set_speed 1g", timeout=self.migrateTimeout)
            res = self.__enterCommand(child, "migrate -i %s" % (target), timeout=self.migrateTimeout)
            retry = retry - 1
            if (res.find("Block migration completed") != -1):
                success = True
                retry = 0
                break
            else:
                self.log.error("Migration (transiently) failed: %s\n", res)
        if (retry == 0) and (success is False):
            self.log.error("Migration failed: %s\n", res)
            child.errorBit = True
            raise RuntimeError
    # XXXstroucki what if migration is still ongoing, and
    # qemu is not listening?
    self.__enterCommand(child, "quit", expectPrompt=False)
    return vmId
# extern
def instantiateVm(self, instance):
    """Create and boot a new VM for instance; return its vmId.

    Starts qemu (no -incoming source), finds the monitor pty, registers
    the instance with the network manager and marks it Running.
    """
    # XXXstroucki: check capacity before instantiating
    try:
        (vmId, cmd) = self.__startVm(instance, None)
        child = self.__getChildFromPid(vmId)
        self.__getPtyInfo(child, False)
        child.cmd = cmd
        self.nm.createInstance(child.instance)
        self.nm.vmStateChange(vmId, None, InstanceState.Running)
        # XXXstroucki Should make sure Running state is saved
        # otherwise on restart it will appear as Activating
        # until we update the state in __matchHostPids
        child.instance.state = InstanceState.Running
        self.__saveChildInfo(child)
        return vmId
    except:
        self.log.exception("instantiateVm failed")
        raise
# extern
def suspendVm(self, vmId, target):
    """Suspend vmId, streaming its state through suspendHandler into target."""
    # XXX: Use fifo to improve performance
    # XXXstroucki: we could create a fifo on the local fs,
    # then start a thread to copy it to dfs. But if we're
    # reading from dfs directly on resume, why not write
    # directly here?
    localHandle = self.dfs.getLocalHandle("%s" % target)
    migrateSpec = "\"exec:%s > %s\"" % (self.suspendHandler, localHandle)
    # stopFirst=True: pause the guest before dumping its state
    return self.__stopVm(vmId, migrateSpec, True)
# extern
def resumeVmHelper(self, instance, source):
    """Wait for a resumed/received VM to reach the running state.

    Finds the monitor pty (issuing 'c' to continue the guest), then
    polls "info status" until qemu reports running, and finally tells
    the network manager about the state change.
    """
    vmId = instance.vmId
    child = self.__getChildFromPid(vmId)
    try:
        self.__getPtyInfo(child, True)
    except EOFError:
        self.log.error("Failed to get pty info -- VM likely died")
        child.errorBit = True
        raise
    status = "paused"
    while ("running" not in status):
        try:
            status = self.__enterCommand(child, "info status")
        except EOFError:
            pass
        # Bug fix: re-check the freshly read status before sleeping, so
        # we no longer wait a full 60 s poll interval after the VM has
        # already reported running.
        if ("running" in status):
            break
        time.sleep(60)
    self.nm.vmStateChange(vmId, None, InstanceState.Running)
    child.instance.state = InstanceState.Running
    self.__saveChildInfo(child)
# extern
def resumeVm(self, instance, source):
    """Start a VM whose saved state is streamed in from a DFS file via resumeHandler."""
    localHandle = self.dfs.getLocalHandle("%s" % (source))
    incomingSpec = "exec:%s < %s" % (self.resumeHandler, localHandle)
    (vmId, cmd) = self.__startVm(instance, incomingSpec)
    child = self.__getChildFromPid(vmId)
    child.cmd = cmd
    return vmId
def __checkPortListening(self, port):
    """Return True when something is listening on 0.0.0.0:port.

    Shells out to netstat; used to wait for qemu's incoming-migration
    socket to come up.
    """
    # XXXpipe: find whether something is listening yet on the port
    p = subprocess.Popen("netstat -ln | grep 0.0.0.0:%d | wc -l" % (port), shell = True, stdin = subprocess.PIPE, stdout = subprocess.PIPE, close_fds = True)
    # Bug fix: use communicate() so stdin is closed, stdout is drained
    # and the shell is reaped -- the previous read()-only code never
    # waited on the process and left a zombie behind.
    (out, _err) = p.communicate()
    lc = int(out.strip())
    return lc >= 1
# extern
def prepReceiveVm(self, instance, source):
    """Start a stopped VM listening for an incoming migration.

    Picks an unused TCP port, starts qemu with -incoming tcp on it, and
    returns a pickled (port, vmId, hostname) cookie for the sender to
    pass to migrateVm / back to receiveVm.
    """
    # Reserve a free migration port under the lock.
    self.usedPortsLock.acquire()
    while True:
        port = random.randint(19000, 20000)
        if port not in self.usedPorts:
            break
    self.usedPorts.append(port)
    self.usedPortsLock.release()
    (vmId, cmd) = self.__startVm(instance, "tcp:0.0.0.0:%d" % (port))
    transportCookie = cPickle.dumps((port, vmId, self.hostname))
    child = self.__getChildFromPid(vmId)
    child.instance.state = InstanceState.Running
    child.cmd = cmd
    child.transportCookie = transportCookie
    self.__saveChildInfo(child)
    # XXX: Cleanly wait until the port is listening
    while self.__checkPortListening(port) is not True:
        time.sleep(1)
    return transportCookie
# extern
def migrateVm(self, vmId, target, transportCookie):
    """Live-migrate vmId out to target on the port stored in transportCookie.

    Serialized by migrationSemaphore so only a bounded number of
    outbound migrations run concurrently; blocks until the local qemu
    process has exited.
    """
    self.migrationSemaphore.acquire()
    try:
        (port, _vmId, _hostname) = cPickle.loads(transportCookie)
        child = self.__getChildFromPid(vmId)
        child.migratingOut = True
        # tell the VM to live-migrate out
        res = self.__stopVm(vmId, "tcp:%s:%d" % (target, port), False)
        # XXX: Some sort of feedback would be nice
        # XXX: Should we block?
        # XXXstroucki: isn't this what __waitForExit does?
        self.__waitForExit(vmId)
    finally:
        self.migrationSemaphore.release()
    return res
# extern
def receiveVm(self, transportCookie):
    """Complete an incoming migration prepared earlier by prepReceiveVm.

    The cookie carries (port, vmId, sending hostname); the local child
    was created when prepReceiveVm started qemu with -incoming.
    Returns the local vmId.
    """
    (port, vmId, _hostname) = cPickle.loads(transportCookie)
    try:
        child = self.__getChildFromPid(vmId)
    except:
        # XXXstroucki: Does hostname contain the peer hostname?
        self.log.error("Failed to get child info; transportCookie = %s; hostname = %s" %
                (str(cPickle.loads(transportCookie)), _hostname))
        raise
    try:
        self.__getPtyInfo(child, True)
    except EOFError:
        self.log.error("Failed to get pty info -- VM likely died")
        child.errorBit = True
        raise
    # Return the migration port to the free pool.
    self.usedPortsLock.acquire()
    self.usedPorts = filter(lambda _port: _port != port, self.usedPorts)
    self.usedPortsLock.release()
    return vmId
# extern
def pauseVm(self, vmId):
    """Pause the guest's execution via the qemu monitor."""
    # XXXstroucki we have no Stopped state, so consider
    # the VM still Running?
    vmChild = self.__getChildFromPid(vmId)
    self.__enterCommand(vmChild, "stop")
# extern
def unpauseVm(self, vmId):
    """Resume a paused guest via the qemu monitor."""
    # XXXstroucki as above, should this be a state change
    # or not?
    vmChild = self.__getChildFromPid(vmId)
    self.__enterCommand(vmChild, "c")
# extern
def shutdownVm(self, vmId):
    """Request an ACPI power-down of the guest.

    'system_powerdown' doesn't seem to actually shutdown the VM on some
    versions of KVM with some versions of Linux. If clean shutdown is
    desired, should try on VM first, shutdownVm second and if that
    doesn't work use destroyVm.
    """
    vmChild = self.__getChildFromPid(vmId)
    self.__enterCommand(vmChild, "system_powerdown")
# extern
def destroyVm(self, vmId):
    """Forcibly terminate the qemu process backing vmId with SIGTERM."""
    vmChild = self.__getChildFromPid(vmId)
    # Clear the flag first -- presumably so the exit path does not
    # mistake this for a completed outbound migration; confirm in the
    # reaper/__waitForExit logic.
    vmChild.migratingOut = False
    # XXX: the child could have exited between these two points, but I don't know how to fix that since it might not be our child process
    os.kill(vmChild.pid, signal.SIGTERM)
def __specificStartVnc(self, vmId):
    """Enable VNC for vmId on the lowest free display number.

    Display N corresponds to TCP port 5900+N; the chosen display is
    remembered on the child record so repeated calls reuse it.
    """
    child = self.__getChildFromPid(vmId)
    if (child.vncPort == -1):
        # Allocate the smallest display number not already in use.
        self.vncPortLock.acquire()
        port = 0
        while (port in self.vncPorts):
            port += 1
        self.vncPorts.append(port)
        self.vncPortLock.release()
        self.__enterCommand(child, "change vnc :%d" % (port))
        child.vncPort = port
        self.__saveChildInfo(child)
    port = child.vncPort
    return "VNC running on %s:%d" % (self.hostname, port + 5900)
def __specificStopVnc(self, vmId):
    """Disable VNC for vmId and return its display number to the pool."""
    child = self.__getChildFromPid(vmId)
    self.__enterCommand(child, "change vnc none")
    if (child.vncPort != -1):
        freedPort = child.vncPort
        self.vncPortLock.acquire()
        self.vncPorts.remove(freedPort)
        self.vncPortLock.release()
        child.vncPort = -1
        self.__saveChildInfo(child)
    return "VNC halted"
def __specificChangeCdRom(self, vmId, iso):
    """Insert the DFS image <iso> into the VM's ide1-cd0 drive."""
    child = self.__getChildFromPid(vmId)
    isoPath = self.dfs.getLocalHandle("images/" + iso)
    self.__enterCommand(child, "change ide1-cd0 %s" % (isoPath))
    return "Changed ide1-cd0 to %s" % (iso)
def __specificStartConsole(self, vmId):
    """Spawn a control-console listener for vmId and report its address."""
    child = self.__getChildFromPid(vmId)
    # XXXstroucki why not use the existing ports scheme?
    self.consolePortLock.acquire()
    consolePort = self.consolePort
    self.consolePort += 1
    self.consolePortLock.release()
    worker = threading.Thread(target=controlConsole, args=(child, consolePort))
    worker.start()
    return "Control console listening on %s:%d" % (self.hostname, consolePort)
def __specificReset(self, vmId):
    """Hard-reset the guest via the qemu monitor."""
    vmChild = self.__getChildFromPid(vmId)
    self.__enterCommand(vmChild, "system_reset")
    return "Sent reset signal to instance"
# extern
def vmmSpecificCall(self, vmId, arg):
arg = arg.lower()
changeCdText = "changecdrom:"
if (arg == "startvnc"):
return self.__specificStartVnc(vmId)
elif (arg == "stopvnc"):
return self.__specificStopVnc(vmId)
elif (arg.startswith(changeCdText)):
iso = scrubString(arg[len(changeCdText):])
return self.__specificChangeCdRom(vmId, iso)
elif (arg == "startconsole"):
return self.__specificStartConsole(vmId)
elif (arg == "reset"):
return self.__specificReset(vmId)
elif (arg == "list"):
commands = [
"startVnc",
"stopVnc",
"changeCdrom:<image.iso>",
"startConsole",
"reset",
]
return "\n".join(commands)
else:
return "Unknown command %s" % (arg)
# extern
def listVms(self):
    """Return the vmIds (qemu pids) of every VM this node manager controls."""
    known = self.controlledVMs
    return known.keys()
def __processVmStats(self, vmId):
    """Collect cpu, memory, network and block-io stats for one VM into self.stats[vmId]."""
    try:
        f = open("/proc/%d/stat" % (vmId))
        procData = f.read()
        f.close()
    except:
        self.log.warning("Unable to get data for instance %d" % vmId)
        return
    # /proc/<pid>/stat fields: 13/14 are utime/stime in clock ticks,
    # 22 is vsize in bytes, 23 is rss in pages (4096 bytes assumed).
    ws = procData.strip().split()
    userTicks = float(ws[13])
    sysTicks = float(ws[14])
    myTicks = userTicks + sysTicks
    vsize = (int(ws[22]))/1024.0/1024.0
    rss = (int(ws[23])*4096)/1024.0/1024.0
    cpuSeconds = myTicks/self.ticksPerSecond
    # XXXstroucki be more exact here?
    last = time.time() - self.statsInterval
    # cpuLoad is the delta against the previous sample; first sample
    # defaults to the current value, yielding a load of 0.
    lastCpuSeconds = self.cpuStats.get(vmId, cpuSeconds)
    if lastCpuSeconds is None:
        lastCpuSeconds = cpuSeconds
    cpuLoad = (cpuSeconds - lastCpuSeconds)/(time.time() - last)
    self.cpuStats[vmId] = cpuSeconds
    try:
        child = self.controlledVMs[vmId]
    except:
        self.log.warning("Unable to obtain information on instance %d" % vmId)
        return
    # Sum the per-tap-device rates gathered by statsThread across all
    # of this VM's nics.
    (recvMBs, sendMBs, recvBytes, sendBytes) = (0.0, 0.0, 0.0, 0.0)
    for i in range(0, len(child.instance.nics)):
        netDev = "%s%d.%d" % (self.ifPrefix, child.instance.id, i)
        (tmpRecvMBs, tmpSendMBs, tmpRecvBytes, tmpSendBytes) = self.netStats.get(netDev, (0.0, 0.0, 0.0, 0.0))
        (recvMBs, sendMBs, recvBytes, sendBytes) = (recvMBs + tmpRecvMBs, sendMBs + tmpSendMBs, recvBytes + tmpRecvBytes, sendBytes + tmpSendBytes)
    self.stats[vmId] = self.stats.get(vmId, {})
    child = self.controlledVMs.get(vmId, None)
    if (child):
        try:
            res = self.__enterCommand(child, "info blockstats")
        except EOFError:
            # The VM is likely exiting
            return
        # Parse lines like "ide0-hd0: rd_bytes=... wr_bytes=...",
        # storing both the raw counter and a per-second rate.
        for l in res.split("\n"):
            (device, __sep, data) = stringPartition(l, ": ")
            if (data != ""):
                for field in data.split(" "):
                    (label, __sep, val) = stringPartition(field, "=")
                    if (val != ""):
                        self.stats[vmId]['%s_%s_per_s' % (device, label)] = (float(val) - float(self.stats[vmId].get('%s_%s' % (device, label), 0)))/self.statsInterval
                        self.stats[vmId]['%s_%s' % (device, label)] = int(val)
    self.stats[vmId]['cpuLoad'] = cpuLoad
    self.stats[vmId]['rss'] = rss
    self.stats[vmId]['vsize'] = vsize
    # netStats is keyed by the host-side tap device, so the crossed
    # assignment below presumably translates host-send into guest-recv
    # (and vice versa) -- confirm against statsThread before "fixing".
    self.stats[vmId]['recvMBs'] = sendMBs
    self.stats[vmId]['sendMBs'] = recvMBs
# thread
def statsThread(self):
    """Background loop: every statsInterval seconds, sample host tap-device
    byte counters from /proc/net/dev and refresh per-VM stats.

    Never returns; all exceptions are logged and the loop continues.
    """
    self.ticksPerSecond = float(os.sysconf('SC_CLK_TCK'))
    self.netStats = {}
    self.cpuStats = {}
    # XXXstroucki be more exact here?
    last = time.time() - self.statsInterval
    while True:
        now = time.time()
        try:
            f = open("/proc/net/dev")
            netData = f.readlines()
            f.close()
            for l in netData:
                # Only interfaces created for our VMs (ifPrefix match).
                if (l.find(self.ifPrefix) != -1):
                    (dev, __sep, ld) = stringPartition(l, ":")
                    dev = dev.strip()
                    ws = ld.split()
                    # /proc/net/dev: column 0 = rx bytes, column 8 = tx bytes
                    recvBytes = float(ws[0])
                    sendBytes = float(ws[8])
                    (recvMBs, sendMBs, lastRecvBytes, lastSendBytes) = self.netStats.get(dev, (0.0, 0.0, recvBytes, sendBytes))
                    if (recvBytes < lastRecvBytes):
                        # We seem to have overflowed
                        # XXXstroucki How likely is this to happen?
                        # A previous value above 2**32 implies a 64-bit
                        # counter wrapped; otherwise assume 32-bit wrap.
                        if (lastRecvBytes > 2**32):
                            lastRecvBytes = lastRecvBytes - 2**64
                        else:
                            lastRecvBytes = lastRecvBytes - 2**32
                    if (sendBytes < lastSendBytes):
                        if (lastSendBytes > 2**32):
                            lastSendBytes = lastSendBytes - 2**64
                        else:
                            lastSendBytes = lastSendBytes - 2**32
                    recvMBs = (recvBytes-lastRecvBytes)/(now-last)/1024.0/1024.0
                    sendMBs = (sendBytes-lastSendBytes)/(now-last)/1024.0/1024.0
                    self.netStats[dev] = (recvMBs, sendMBs, recvBytes, sendBytes)
            for vmId in self.controlledVMs:
                self.__processVmStats(vmId)
        except:
            self.log.exception("statsThread threw an exception")
        last = now
        time.sleep(self.statsInterval)
# extern
def getStats(self, vmId):
    """Return the collected stats dict for vmId (empty dict when unknown)."""
    collected = self.stats
    return collected.get(vmId, {})
|
main_window.py | import os
import re
import signal
import shutil
import threading
import sys
from .app_list_widget import AppList
from .eq_popover import EqPopover
from .latency_popover import LatencyPopover
from .rnnoise_popover import RnnoisePopover
from .groups_popover import JackGroupsPopover
from .port_select_popover import PortSelectPopover
from .vumeter_widget import Vumeter
from ..settings import LAYOUT_DIR
from ..socket import Client
from gi import require_version as gi_require_version
# from pulsectl import Pulse
gi_require_version('Gtk', '3.0')
gi_require_version('AppIndicator3', '0.1')
from gi.repository import Gtk, GLib, AppIndicator3
class MainWindow(Gtk.Window):
def __init__(self, isserver=False, trayonly=False):
    """Create the pulsemeeter main window and/or tray indicator.

    isserver: this process also hosts the server, so it owns the tray
        indicator and registers server callbacks.
    trayonly: start with only the indicator; the window is built later
        on demand (start_window).
    """
    self.isserver = isserver
    # Client owns the socket connection and the shared config dict.
    self.client = Client(listen=True)
    self.config = self.client.config
    self.trayonly = trayonly
    self.windowinstance = None
    self.tray = None
    if isserver:
        self.tray = self.create_indicator()
        self.client.set_callback_function('tray', self.update_tray_status)
    if trayonly:
        self.client.set_callback_function('exit', self.close_on_server_exit)
        return
    self.windowinstance = self.start_window(isserver)
def start_window(self, isserver):
    """Build the Gtk window from the configured glade layout and wire it up.

    Returns the Gtk window object. Called from __init__ and again by
    change_layout after the user switches layouts.
    """
    self.trayonly = False
    self.exit_flag = False
    GLib.threads_init()
    Gtk.Window.__init__(self)
    self.builder = Gtk.Builder()
    self.layout = self.config['layout']
    # Objects pulled from the glade file; per-channel adjustments are
    # appended below for channels 1..3 of each device type.
    component_list = [
        'window',
        'menu_popover',
        'rename_popover',
        'popover_entry',
        'latency_popover',
        'latency_adjust',
        'rnnoise_popover',
        'rnnoise_latency_adjust',
        'rnnoise_threshold_adjust',
        'jack_group_popover',
        'sink_input_list',
        'source_output_list',
        'sink_input_scroll',
        'source_output_scroll',
        'source_output_viewport',
        'sink_input_viewport',
        'vumeter_toggle',
        'vi_1_peak',
        'channel_groups',
    ]
    for i in range(1, 4):
        component_list.append(f'hi_{i}_adjust')
        component_list.append(f'vi_{i}_adjust')
        component_list.append(f'a_{i}_adjust')
        component_list.append(f'b_{i}_adjust')
    try:
        self.builder.add_objects_from_file(
            os.path.join(LAYOUT_DIR, f'{self.layout}.glade'),
            component_list
        )
    except Exception as ex:
        print('Error building main window!\n{}'.format(ex))
        sys.exit(1)
    self.devices = {}
    self.devices['a'] = self.client.list_hardware_devices('sinks')
    self.devices['hi'] = self.client.list_hardware_devices('sources')
    # self.devices['b'] = self.client.list_virtual_devices('sources')
    # self.devices['vi'] = self.client.list_virtual_devices('sinks')
    self.hardware_comboboxes = {}
    self.primary_buttons = {}
    self.volume_adjusts = {}
    self.volume_sliders = {}
    self.mute_buttons = {}
    self.loopback_buttons = {}
    self.rnnoise_buttons = {}
    self.eq_buttons = {}
    self.enable_vumeters = True
    # Bug fix: shutil.which returns None (not False) when the binary is
    # missing, so the old "is False" comparison never disabled the
    # vumeters; test for absence instead.
    if shutil.which('pulse-vumeter') is None or \
            self.config['enable_vumeters'] is False:
        self.enable_vumeters = False
    self.start_hardware_comboboxes()
    self.start_inputs()
    self.start_outputs()
    self.start_vumeters()
    self.start_app_list()
    self.start_menu_items()
    # self.start_layout_combobox()
    self.window = self.builder.get_object('window')
    # self.add_window(self.window)
    # super().__init__(self.window)
    self.listen_socket()
    self.window.connect('delete_event', self.delete_event)
    # self.window.set_type_hint(Gdk.WindowTypeHint.DIALOG)
    self.builder.connect_signals(self)
    self.window.show_all()
    if isserver is not False:
        # server process: shut down cleanly on TERM/INT
        signal.signal(signal.SIGTERM, self.delete_event)
        signal.signal(signal.SIGINT, self.delete_event)
    return self.window
def start_menu_items(self):
    """Wire up the settings menu widgets (default layout only)."""
    if self.layout == 'default':
        self.menu_button = self.builder.get_object('menu_button')
        self.menu_popover = self.builder.get_object('menu_popover')
        self.menu_popover.set_relative_to(self.menu_button)
        self.menu_button.connect('pressed', self.open_settings)
        self.vumeter_toggle = self.builder.get_object('vumeter_toggle')
        self.vumeter_toggle.set_active(self.enable_vumeters)
        self.vumeter_toggle.connect('toggled', self.toggle_vumeters)
        self.cleanup_toggle = self.builder.get_object('cleanup_toggle')
        self.cleanup_toggle.set_active(self.config['cleanup'])
        self.cleanup_toggle.connect('toggled', self.toggle_cleanup)
        self.tray_toggle = self.builder.get_object('tray_toggle')
        self.tray_toggle.set_active(self.config['tray'])
        self.tray_toggle.connect('toggled', self.toggle_tray)
        # Fill the layout chooser from the .glade files on disk;
        # the [:-6] slice strips the '.glade' extension.
        self.layout_combobox = self.builder.get_object('layout_combobox')
        layout_list = os.listdir(LAYOUT_DIR)
        i = 0
        for layout in layout_list:
            self.layout_combobox.append_text(layout[:len(layout) - 6])
            if layout[:len(layout) - 6] == self.layout:
                self.layout_combobox.set_active(i)
            i += 1
        self.layout_combobox.connect('changed', self.change_layout)
        # self.jack_toggle_button = self.builder.get_object('jack_toggle')
        # self.jack_toggle_button.set_active(self.pulse.config['jack']['enable'])
        # self.jack_toggle_button.connect('toggled', self.toggle_jack)
        # self.jack_toggle_button.set_sensitive(False)
        # self.test = self.builder.get_object('test')
        # self.test.connect('pressed', self.open_group_popover)
        # self.jack_gp_popover = self.builder.get_object('jack_group_popover')
        # self.jack_gp_popover.set_relative_to(self.test)
        # self.jack_toggle_button.connect('toggled', self.toggle_jack)
def toggle_tray(self, widget):
    """Persist the tray setting and show/hide the indicator when serving."""
    enabled = widget.get_active()
    self.client.set_tray(enabled)
    if not self.isserver:
        return
    if not enabled:
        self.tray.set_status(0)
        return
    if self.tray is None:
        self.tray = self.create_indicator()
    self.tray.set_status(1)
def toggle_cleanup(self, widget):
    """Forward the cleanup checkbox state to the server."""
    enabled = widget.get_active()
    self.client.set_cleanup(enabled)
# not perfect yet but works
def change_layout(self, combobox):
    """Switch glade layouts by tearing down and rebuilding the window."""
    self.client.set_layout(combobox.get_active_text())
    self.windowinstance.destroy()
    self.delete_event()
    self.windowinstance = self.start_window(self.isserver)
    self.trayonly = False
def open_settings(self, widget):
    """Show the settings popover anchored to the menu button."""
    popover = self.menu_popover
    popover.popup()
def toggle_jack(self, widget):
    """Mirror the jack checkbox into the config for every input device.

    NOTE(review): this reads self.pulse, which is never assigned in the
    code visible here (live handlers use self.client/self.config); it
    appears tied to the commented-out jack UI in start_menu_items --
    confirm this path is still reachable before relying on it.
    """
    self.pulse.config['jack']['enable'] = widget.get_active()
    for i in ['vi', 'hi']:
        for j in self.pulse.config[i]:
            self.pulse.config[i][j]['jack'] = widget.get_active()
def toggle_vumeters(self, widget):
    """Enable or disable every VU meter and remember the choice in config."""
    if not shutil.which('pulse-vumeter'):
        return
    enabled = widget.get_active()
    self.enable_vumeters = enabled
    self.config['enable_vumeters'] = enabled
    for dev_type in ['hi', 'vi', 'a', 'b']:
        for dev_id in self.config[dev_type]:
            # if self.config[device_type][device_id]['name'] != '':
            meter = self.vu_list[dev_type][dev_id]
            if enabled:
                meter.reload_device()
                meter.start()
            else:
                meter.close()
def start_vumeters(self):
    """Create a Vumeter widget for every device and start the enabled ones."""
    self.vu_list = {}
    for device_type in ['hi', 'vi', 'a', 'b']:
        self.vu_list[device_type] = {}
        for device_id in self.config[device_type]:
            device_config = self.config[device_type][device_id]
            grid = self.builder.get_object(f'{device_type}_{device_id}_vumeter')
            # the default layout stacks meters vertically
            vert = True if self.layout == 'default' else False
            vumeter = Vumeter(device_type, device_id,
                    self.config, vertical=vert)
            grid.add(vumeter)
            if device_config['name'] != '':
                if self.enable_vumeters is True:
                    try:
                        vumeter.start()
                    except Exception:
                        # Bug fix: this message was missing its f-prefix
                        # and printed the literal '{device_type}{device_id}'.
                        print('Could not start vumeter for',
                                f'{device_type}{device_id}')
            self.vu_list[device_type][device_id] = vumeter
def start_app_list(self):
    """Create the per-application sink-input/source-output lists and start
    the subscription thread that keeps them up to date.

    Exits the process when the audio backend cannot be queried.
    """
    # this is probably not the best solution but it handles the pactl errors fine
    sink_input_viewport = self.builder.get_object('sink_input_viewport')
    source_output_viewport = self.builder.get_object('source_output_viewport')
    try:
        self.sink_input_box = AppList('sink-input', self.client)
        self.source_output_box = AppList('source-output', self.client)
        sink_input_viewport.add(self.sink_input_box)
        source_output_viewport.add(self.source_output_box)
        # background thread watching for app stream changes
        self.subscribe_thread = threading.Thread(target=self.listen_subscribe, args=())
        self.subscribe_thread.start()
    except Exception as ex:
        print('App sinks returned an error, audio backend returned error')
        print(ex)
        if self.windowinstance is not None:
            self.windowinstance.destroy()
        self.delete_event()
        sys.exit(1)
def start_hardware_comboboxes(self):
    """Populate the hardware device comboboxes ('hi' sources, 'a' sinks).

    Each combobox gets a blank entry plus every detected device
    (description truncated to fit the layout); the currently configured
    device is pre-selected, and configs pointing at vanished devices
    are cleared.
    """
    for device_type in ['hi', 'a']:
        self.hardware_comboboxes[device_type] = {}
        # truncation length for device descriptions in this layout
        name_size = 35 if device_type == 'a' else 20
        if self.layout != 'default':
            name_size = 100
        devices = self.devices[device_type]
        # for each combobox
        for device_id in self.config[device_type]:
            # Bug fix: reset the found flag per combobox; previously a
            # match in an earlier combobox left it True, so later
            # entries whose device had disappeared were never cleared.
            found = False
            device_config = self.config[device_type][device_id]
            combobox = self.builder.get_object(f'{device_type}_{device_id}_combobox')
            combobox.append_text('')
            for i in range(0, len(devices)):
                text = devices[i]['description'][:name_size]
                if len(text) == name_size:
                    text = text + '...'
                combobox.append_text(text)
                if devices[i]['name'] == device_config['name']:
                    found = True
                    combobox.set_active(i + 1)
            if found is False and device_config['jack'] is False:
                device_config['name'] = ''
            combobox.connect('changed', self.on_combo_changed, device_type,
                    device_id, devices)
            self.hardware_comboboxes[device_type][device_id] = combobox
def start_inputs(self):
    """Wire up every input strip: hardware inputs ('hi') and virtual
    inputs ('vi').

    For each input: volume slider, mute button, and one loopback button
    per output. 'vi' strips also get a renameable label and a primary
    toggle; 'hi' strips get an rnnoise button (hidden when the LADSPA
    plugin is not installed).
    """
    self.rename_popover = self.builder.get_object('rename_popover')
    self.Popover_Entry = self.builder.get_object('popover_entry')
    self.Popover_Entry.connect('activate', self.label_rename_entry)
    self.primary_buttons['vi'] = {}
    # for each input device
    for input_type in ['hi', 'vi']:
        self.volume_adjusts[input_type] = {}
        self.volume_sliders[input_type] = {}
        self.mute_buttons[input_type] = {}
        self.loopback_buttons[input_type] = {}
        for input_id in self.config[input_type]:
            if input_type == 'vi':
                # virtual inputs: renameable label + primary toggle
                name = self.config['vi'][input_id]['name']
                label = self.builder.get_object(f'vi_{input_id}_label')
                label.set_text(name if name != '' else
                        f'Virtual Input {input_id}')
                label_evt_box = self.builder.get_object(f'vi_{input_id}_label_event_box')
                label_evt_box.connect('button_press_event', self.label_click,
                        label, 'vi', input_id)
                primary = self.builder.get_object(f'vi_{input_id}_primary')
                primary.set_active(self.config['vi'][input_id]['primary'])
                # the current primary cannot be un-set directly
                if self.config['vi'][input_id]['primary'] is True:
                    primary.set_sensitive(False)
                primary.connect('clicked', self.toggle_primary, 'vi', input_id)
                self.primary_buttons['vi'][input_id] = primary
            else:
                # noise reduction button
                rnnoise = self.builder.get_object(f'hi_{input_id}_rnnoise')
                rnnoise.set_active(self.config['hi'][input_id]['use_rnnoise'])
                rnnoise.connect('clicked', self.toggle_rnnoise, input_id)
                # right click opens the rnnoise settings popover
                rnnoise.connect('button_press_event', self.open_popover,
                        RnnoisePopover, input_type, input_id)
                self.rnnoise_buttons[input_id] = rnnoise
                # check for rnnoise plugin
                found = 0
                for lib in ['lib', 'lib64']:
                    for path in [f'/usr/{lib}/ladspa',
                            f'/usr/local/{lib}/ladspa']:
                        for plugin in ['librnnoise_ladspa.so',
                                'rnnoise_ladspa.so']:
                            if os.path.isfile(os.path.join(path, plugin)):
                                found = 1
                                break
                if found == 0:
                    rnnoise.set_visible(False)
                    rnnoise.set_no_show_all(True)
            source_config = self.config[input_type][input_id]
            # connect volume sliders
            adjust = self.builder.get_object(f'{input_type}_{input_id}_adjust')
            adjust.set_value(source_config['vol'])
            vol_slider = self.builder.get_object(f'{input_type}_{input_id}_vol')
            vol_slider.connect('value-changed', self.volume_change,
                    input_type, input_id)
            vol_slider.add_mark(100, Gtk.PositionType.TOP, '')
            self.volume_adjusts[input_type][input_id] = adjust
            self.volume_sliders[input_type][input_id] = vol_slider
            # connect mute buttons
            mute = self.builder.get_object(f'{input_type}_{input_id}_mute')
            mute.set_active(self.config[input_type][input_id]['mute'])
            mute.connect('clicked', self.toggle_mute, input_type, input_id)
            self.mute_buttons[input_type][input_id] = mute
            # connection buttons
            self.loopback_buttons[input_type][input_id] = {}
            for output_type in ['a', 'b']:
                for output_id in self.config[output_type]:
                    sink = output_type + output_id
                    button = self.builder.get_object(f'{input_type}_{input_id}_{sink}')
                    button.set_active(source_config[sink])
                    self.loopback_buttons[input_type][input_id][sink] = button
                    button.connect('clicked', self.toggle_loopback, input_type,
                            input_id, output_type, output_id)
                    # right click: latency popover (pulse) or port
                    # selection popover (jack mode)
                    if self.config['jack']['enable'] is False:
                        button.connect('button_press_event', self.latency_popover,
                                LatencyPopover, input_type, input_id, output_type, output_id)
                    else:
                        button.connect('button_press_event',
                                self.open_popover, PortSelectPopover,
                                [input_type, input_id, sink])
# start output devices
def start_outputs(self):
    """Wire up every output strip: hardware outputs ('a') and virtual
    outputs ('b').

    Each output gets a volume slider, a mute button and an EQ button
    (hidden when the mbeq LADSPA plugin is not installed); 'b' strips
    also get a primary toggle and a name label.
    """
    self.primary_buttons['b'] = {}
    for output_type in ['a', 'b']:
        self.volume_adjusts[output_type] = {}
        self.volume_sliders[output_type] = {}
        self.mute_buttons[output_type] = {}
        self.eq_buttons[output_type] = {}
        for output_id in self.config[output_type]:
            sink_config = self.config[output_type][output_id]
            if output_type == 'b':
                primary = self.builder.get_object(f'b_{output_id}_primary')
                primary.set_active(sink_config['primary'])
                # the current primary cannot be un-set directly
                if sink_config['primary'] is True:
                    primary.set_sensitive(False)
                primary.connect('clicked', self.toggle_primary, 'b', output_id)
                self.primary_buttons['b'][output_id] = primary
                label = self.builder.get_object(f'b{output_id}_label')
                if label is not None:
                    label.set_text(f'B{output_id} - {sink_config["name"]}')
            # volume slider and adjustment
            adjust = self.builder.get_object(f'{output_type}_{output_id}_adjust')
            adjust.set_value(sink_config['vol'])
            vol_slider = self.builder.get_object(f'{output_type}_{output_id}_vol')
            vol_slider.connect('value-changed', self.volume_change,
                    output_type, output_id)
            vol_slider.add_mark(100, Gtk.PositionType.TOP, '')
            self.volume_adjusts[output_type][output_id] = adjust
            self.volume_sliders[output_type][output_id] = vol_slider
            # mute button
            mute = self.builder.get_object(f'{output_type}_{output_id}_mute')
            mute.set_active(sink_config['mute'])
            mute.connect('clicked', self.toggle_mute, output_type, output_id)
            self.mute_buttons[output_type][output_id] = mute
            # eq button
            eq = self.builder.get_object(f'{output_type}_{output_id}_eq')
            eq.set_active(sink_config['use_eq'])
            eq.connect('clicked', self.toggle_eq, output_type, output_id)
            # right click opens the EQ settings popover
            eq.connect('button_press_event', self.open_popover, EqPopover,
                    output_type, output_id)
            self.eq_buttons[output_type][output_id] = eq
            # to hide eq button if plugin not found
            found = 0
            for arc in ['', '64']:
                for path in [f'/usr/lib{arc}/ladspa',
                        f'/usr/local/lib{arc}/ladspa']:
                    if os.path.isfile(os.path.join(path, 'mbeq_1197.so')):
                        found = 1
            if found == 0:
                eq.set_visible(False)
                eq.set_no_show_all(True)
def toggle_eq(self, button, output_type, output_id):
    """Forward the eq button's on/off state to the server."""
    self.client.eq(output_type, output_id, button.get_active())
def toggle_rnnoise(self, widget, input_id):
    """Forward the rnnoise (noise suppression) toggle to the server."""
    self.client.rnnoise(input_id, widget.get_active())
def toggle_mute(self, button, device_type, device_id):
    """Forward the mute button's state to the server."""
    self.client.mute(device_type, device_id, button.get_active())
def toggle_loopback(self, button, input_type, input_id, output_type, output_id):
    """Connect/disconnect an input->output loopback per the button state."""
    self.client.connect(input_type, input_id, output_type, output_id,
                        button.get_active())
def volume_change(self, slider, device_type, device_id):
    """Slider handler: push the new volume only when it actually changed."""
    new_vol = int(slider.get_value())
    if new_vol != self.config[device_type][device_id]['vol']:
        self.client.volume(device_type, device_id, new_vol)
def open_group_popover(self, widget):
    # Open the JACK groups popover anchored to `widget`.
    JackGroupsPopover(widget, self.pulse)
def open_popover(self, button, event, popover, device_type, device_id):
    """On right-click (button 3), open `popover` for a configured device."""
    right_click = (event.button == 3)
    if right_click and self.config[device_type][device_id]['name'] != '':
        popover(button, self.client, device_type, device_id)
def latency_popover(self, button, event, popover, input_type, input_id,
        output_type, output_id):
    """On right-click, open the latency popover for a configured input."""
    right_click = (event.button == 3)
    if right_click and self.config[input_type][input_id]['name'] != '':
        popover(button, self.client, [input_type, input_id],
                [output_type, output_id])
def label_rename_entry(self, widget):
    """Apply a device rename typed into the rename popover entry.

    Valid names (letters, digits, underscore; different from the current
    name) are sent to the server and the label/vumeter refreshed; invalid
    names pop an info dialog and keep the popover open.
    """
    name = widget.get_text()
    device_type = self.rename_device_type
    device_id = self.rename_device_id
    old_name = self.active_label.get_text()
    # BUG FIX: the previous pattern '^[a-zA-Z0-9"_"]*$' placed '"' inside
    # the character class, silently accepting quote characters and
    # contradicting the error dialog below.
    if re.match(r'^[a-zA-Z0-9_]*$', name) and name != old_name:
        self.client.rename(device_type, device_id, name)
        self.active_label.set_text(name)
        self.vu_list[device_type][device_id].restart()
    else:
        dialog = Gtk.MessageDialog(
            transient_for=self.windowinstance,
            flags=0,
            message_type=Gtk.MessageType.INFO,
            buttons=Gtk.ButtonsType.OK,
            text='name is not allowed'
        )
        dialog.format_secondary_text('The name can only consist of numbers, letters and "_".')
        dialog.run()
        dialog.destroy()
        # keep the popover open so the user can correct the name
        return
    self.rename_popover.popdown()
def label_click(self, widget, event, label, device_type, device_id):
    """Remember which device label is being renamed, then show the popover."""
    self.rename_device_type = device_type
    self.rename_device_id = device_id
    self.active_label = label
    popover = self.rename_popover
    popover.set_relative_to(widget)
    popover.popup()
def on_combo_changed(self, widget, output_type, output_id, device):
    """Apply a hardware-device selection; combobox index 0 means 'none'."""
    index = widget.get_active()
    selected = device[index - 1]['name'] if index > 0 else ''
    self.client.change_hardware_device(output_type, output_id, selected)
    self.vu_list[output_type][output_id].restart()
def toggle_primary(self, widget, device_type, device_id):
    """Make this device the primary one (radio-button semantics).

    Deactivating is ignored; activating locks this button and releases
    every other primary button of the same type.
    """
    if widget.get_active() is False:
        return
    widget.set_sensitive(False)
    button_list = self.primary_buttons[device_type]
    for other in button_list:
        if button_list[other] != widget:
            button_list[other].set_sensitive(True)
            button_list[other].set_active(False)
    self.client.primary(device_type, device_id)
def listen_subscribe(self):
    """Background loop: react to pulse subscription events.

    Parses 'new'/'remove' events for sink-inputs and source-outputs and
    schedules the matching app-list update on the GTK main loop.
    """
    for event in self.client.subscribe():
        # events look like '... sink-input #<id>\n'; extract the id once
        # (renamed from `id` to avoid shadowing the builtin)
        if 'remove' in event:
            dev_id = event.split('#')[1].strip('\n')
            if 'sink-input' in event:
                GLib.idle_add(self.sink_input_box.remove_app_dev, dev_id)
            elif 'source-output' in event:
                GLib.idle_add(self.source_output_box.remove_app_dev, dev_id)
        elif 'new' in event:
            dev_id = event.split('#')[1].strip('\n')
            if 'sink-input' in event:
                GLib.idle_add(self.sink_input_box.load_application_list, dev_id)
            elif 'source-output' in event:
                GLib.idle_add(self.source_output_box.load_application_list, dev_id)
def listen_socket(self):
    """Register a UI-update callback for each server event type."""
    handlers = {
        'connect': self.update_loopback_buttons,
        'mute': self.update_mute_buttons,
        'primary': self.update_primary_buttons,
        'rnnoise': self.update_rnnoise_buttons,
        'eq': self.update_eq_buttons,
        'volume': self.update_volume_slider,
        'change-hd': self.update_comboboxes,
        'exit': self.close_on_server_exit,
    }
    for event_name, handler in handlers.items():
        self.client.set_callback_function(event_name, handler)
def close_on_server_exit(self):
    # Server announced shutdown: stop the subscription thread, close all
    # vumeters, destroy the window, then leave the Gtk main loop.
    if not self.trayonly:
        self.client.end_subscribe()
        self.subscribe_thread.join()
        if self.enable_vumeters is True:
            # 'hi'/'vi' are inputs, 'a'/'b' are outputs
            for i in ['hi', 'vi', 'a', 'b']:
                for j in self.vu_list[i]:
                    self.vu_list[i][j].close()
        # widget destruction must happen on the GTK main loop
        GLib.idle_add(self.window.destroy)
    Gtk.main_quit()
def update_loopback_buttons(self, input_type, input_id, output_type,
        output_id, state, latency=None):
    """Server callback: sync a loopback toggle (state arrives as 'True'/'False')."""
    sink_key = output_type + output_id
    target = self.loopback_buttons[input_type][input_id][sink_key]
    GLib.idle_add(target.set_active, state == 'True')
def update_mute_buttons(self, input_type, input_id, state):
    """Server callback: sync a mute button (state arrives as 'True'/'False')."""
    target = self.mute_buttons[input_type][input_id]
    GLib.idle_add(target.set_active, state == 'True')
def update_volume_slider(self, device_type, device_id, val):
    """Server callback: move a volume slider via its adjustment."""
    target = self.volume_adjusts[device_type][device_id]
    GLib.idle_add(target.set_value, int(val))
def update_primary_buttons(self, device_type, device_id):
    """Server callback: activate+lock the new primary, release the rest."""
    for dev_id, button in self.primary_buttons[device_type].items():
        is_primary = (dev_id == device_id)
        GLib.idle_add(button.set_active, is_primary)
        GLib.idle_add(button.set_sensitive, not is_primary)
def update_rnnoise_buttons(self, input_id, state, control):
    """Server callback: sync an rnnoise toggle (state arrives as 'True'/'False')."""
    target = self.rnnoise_buttons[input_id]
    GLib.idle_add(target.set_active, state == 'True')
def update_eq_buttons(self, output_type, output_id, state, control):
    """Server callback: sync an eq toggle (state arrives as 'True'/'False')."""
    target = self.eq_buttons[output_type][output_id]
    GLib.idle_add(target.set_active, state == 'True')
def update_comboboxes(self, device_type, device_id, device):
    """Server callback: reflect a hardware-device change in its combobox.

    Selects the matching device entry, or entry 0 ('none') when the device
    is unknown and the output is not a JACK device.
    """
    if device == 'None':
        device = ''
    devices = self.devices[device_type]
    device_config = self.config[device_type][device_id]
    combobox = self.hardware_comboboxes[device_type][device_id]
    found = False
    for i, dev in enumerate(devices):
        if dev['name'] == device:
            found = True
            # entry 0 is the empty/none choice, hence the +1 offset
            combobox.set_active(i + 1)
            break  # first match wins; no need to keep scanning
    if found is False and device_config['jack'] is False:
        combobox.set_active(0)
def update_tray_status(self, state):
    """Sync the tray toggle and (when server) the indicator with `state`.

    `state` may arrive as a bool or as the string 'True'/'False' from the
    server protocol.
    """
    if isinstance(state, str):  # idiomatic type check (was type(...) == str)
        state = state.lower() == 'true'
    if not self.trayonly:
        GLib.idle_add(self.tray_toggle.set_active, state)
    if self.isserver:
        GLib.idle_add(self.tray.set_status, int(state))
def tray_menu(self):
    """Build and return the tray right-click menu (Open / Close)."""
    menu = Gtk.Menu()
    for text, handler in (('Open Pulsemeeter', self.open_ui),
                          ('Close', self.tray_exit)):
        item = Gtk.MenuItem(label=text)
        item.connect('activate', handler)
        menu.append(item)
    menu.show_all()
    return menu
def create_indicator(self):
    # Build the AppIndicator tray icon and attach the menu.
    indicator = AppIndicator3.Indicator.new(id='pulsemeetertray',
        icon_name='Pulsemeeter',
        category=AppIndicator3.IndicatorCategory.APPLICATION_STATUS)
    # NOTE(review): set_status expects an IndicatorStatus enum; passing
    # int(config['tray']) (0/1) relies on PASSIVE=0/ACTIVE=1 — confirm.
    indicator.set_status(int(self.config['tray']))
    indicator.set_menu(self.tray_menu())
    return indicator
    # Gtk.main()
def open_ui(self, widget):
    # Tray 'Open' action: present the existing window, or build a fresh one
    # when it no longer exists (present() raises on a closed/None instance).
    try:
        self.windowinstance.present()
    except Exception:
        self.windowinstance = self.start_window(self.isserver)
        self.trayonly = False
def tray_exit(self, widget):
    # Tray 'Close' action: close the window (if any), run the normal
    # teardown, then leave the Gtk main loop.
    if self.windowinstance is not None:
        self.windowinstance.close()
    self.delete_event()
    # maybe TODO: the self.client does not stop listening even with stop listen
    # client = Client()
    # client.close_server()
    Gtk.main_quit()
    return 0
def delete_event(self, widget=None, event=None):
    # Window-close handler: stop background listeners and vumeters, then
    # either keep running in the tray or shut the client down entirely.
    if not self.trayonly:
        self.client.end_subscribe()
        try:
            self.subscribe_thread.join()
        except Exception:
            # when the application didnt manage to start the app list there is no thread.
            print('Could not join subscribe_thread (maybe there is none)')
        if self.enable_vumeters is True:
            # 'hi'/'vi' are inputs, 'a'/'b' are outputs
            for i in ['hi', 'vi', 'a', 'b']:
                for j in self.vu_list[i]:
                    self.vu_list[i][j].close()
        self.trayonly = True
        self.windowinstance = None
    # without a tray (or when not the server) there is nothing left to do:
    # disconnect and quit the main loop
    if not self.config['tray'] or not self.isserver:
        self.client.close_connection()
        self.client.stop_listen()
        Gtk.main_quit()
    return False
|
pan_tilt_tracking.py | # the goal of this file is to run all of the 4 independent processes
# that are used to control the vision system components as well as update the variables needed for the FSM
# global variable updates allow these processes to constantly update each other
# 1. objectCenter - finds/localizes the object
# 2. panning - PID control loop determines panning angle
# 3. tilting - PID control loop determines tilting angle
# 4. setServos - drives the servos to proper angles based
# on PID feedback to keep object in center
# python pan_tilt_tracking.py
# import necessary packages
from multiprocessing import Manager
from multiprocessing import Process
from imutils.video import VideoStream
from pyimagesearch.objcenter import ObjCenter
from pyimagesearch.pid import PID
import pantilthat as pth
import argparse # used for command line arguments
import signal
import time
import sys
import cv2
# NOTE(review): `global` statements at module level are no-ops, and with
# multiprocessing each Process gets its own copy of module globals anyway —
# the Manager values created in __main__ are the real shared state.
global panAngle
global tiltAngle
global washer_detected
# define the range for the motors (degrees): servo 1 pans, servo 2 tilts
servo1Range = (-90, 90)
servo2Range = (0, 45)
# function to handle keyboard interrupt (looking for way to exit script)
# kills all 4 processes at once
def signal_handler(sig, frame):
    """Ctrl+C handler installed by every process: park servos and exit."""
    print("[INFO] You pressed `ctrl + c`! Exiting...")
    # disable both servo channels before quitting
    for channel in (1, 2):
        pth.servo_enable(channel, False)
    sys.exit()
def obj_center(args, objX, objY, centerX, centerY):  # first process
    """Process 1: locate the object and publish its (x, y) center.

    Writes the frame center into centerX/centerY and the detected object
    location into objX/objY (all manager.Value('i') shared ints).
    """
    global washer_detected  # FSM flag; was assigned as a local by mistake
    # signal trap to handle keyboard interrupt
    signal.signal(signal.SIGINT, signal_handler)
    # start the video stream and wait for the camera to warm up
    vs = VideoStream(usePiCamera=True).start()
    time.sleep(2.0)
    # initialize the object center finder
    obj = ObjCenter(args["canny"])
    # loop indefinitely
    while True:
        # grab the frame and flip it vertically (camera is upside down)
        frame = vs.read()
        frame = cv2.flip(frame, 0)
        # the frame center is the target we try to keep the object at
        (H, W) = frame.shape[:2]
        centerX.value = W // 2
        centerY.value = H // 2
        # find the object's location
        objectLoc = obj.update(frame, (centerX.value, centerY.value))
        ((objX.value, objY.value), rect) = objectLoc
        # BUG FIX: the original read undefined names (`rects`, `canny`,
        # `keypoints`, `np`, `out`) which raised NameError on the first
        # detection; derive the FSM flag from `rect` and draw on `frame`.
        washer_detected = 1 if rect is not None else 0
        if rect is not None:
            # mark the detection so the preview shows what is tracked
            cv2.circle(frame, (objX.value, objY.value), 8, (0, 0, 255), 2)
        # display the resulting frame
        cv2.imshow('Pan-Tilt blob Tracking', frame)
        cv2.waitKey(1)
def pid_process(output, p, i, d, objCoord, centerCoord):
    """Processes 2/3: run one PID loop, writing a servo angle to `output`.

    Args:
        output: manager.Value that receives the angle (degrees).
        p, i, d: manager.Values holding the PID gains.
        objCoord/centerCoord: shared object / frame-center coordinate.
    """
    # signal trap to handle keyboard interrupt
    signal.signal(signal.SIGINT, signal_handler)
    # create a PID and initialize it
    # (renamed from `p`, which shadowed and clobbered the gain parameter)
    pid = PID(p.value, i.value, d.value)
    pid.initialize()
    # loop indefinitely
    while True:
        # error = frame center - object location
        error = centerCoord.value - objCoord.value
        # update the output value (servo angle in degrees)
        output.value = pid.update(error)
def in_range(val, start, end):  # servo range checker
    """Return True when `val` lies in the inclusive range [start, end]."""
    return start <= val <= end
def set_servos(pan, tlt):
    """Process 4: drive the servos from the shared PID outputs.

    Pan is limited to servo1Range (-90..90), tilt to servo2Range (0..45).
    """
    # signal trap to handle keyboard interrupt
    signal.signal(signal.SIGINT, signal_handler)
    # loop indefinitely
    while True:
        # the pan and tilt angles are reversed
        panAngle = -1 * pan.value
        tltAngle = -1 * tlt.value
        # BUG FIX: the original referenced undefined `servoRange`
        # (NameError) and `return`ed after the first iteration, killing the
        # servo process immediately; use the per-servo ranges and loop on.
        if in_range(panAngle, servo1Range[0], servo1Range[1]):
            pth.pan(panAngle)
        if in_range(tltAngle, servo2Range[0], servo2Range[1]):
            pth.tilt(tltAngle)
# check to see if this is the main body of execution
if __name__ == "__main__":
    # construct the argument parser and parse the arguments
    ap = argparse.ArgumentParser()
    # BUG FIX: argparse rejects a bare second name ("blob_canny") after the
    # optional '-c' (ValueError at startup); it must be '--canny', which is
    # also the key obj_center reads via args["canny"].
    ap.add_argument("-c", "--canny", type=str, required=True,
                    help="path to input blob and canny detector ")
    args = vars(ap.parse_args())
    # start a manager for managing process-safe variables
    with Manager() as manager:
        # enable the servos
        pth.servo_enable(1, True)
        pth.servo_enable(2, True)
        # set integer values for the frame center (x, y)-coordinates
        centerX = manager.Value("i", 0)
        centerY = manager.Value("i", 0)
        # set integer values for the object's (x, y)-coordinates
        objX = manager.Value("i", 0)
        objY = manager.Value("i", 0)
        # pan and tilt values will be managed by independent PIDs
        pan = manager.Value("i", 0)
        tlt = manager.Value("i", 0)
        # set PID values for panning
        panP = manager.Value("f", 0.09)
        panI = manager.Value("f", 0.08)
        panD = manager.Value("f", 0.002)
        # set PID values for tilting
        tiltP = manager.Value("f", 0.11)
        tiltI = manager.Value("f", 0.10)
        tiltD = manager.Value("f", 0.002)
        # we have 4 independent processes
        # 1. objectCenter - finds/localizes the object
        # 2. panning      - PID control loop determines panning angle
        # 3. tilting      - PID control loop determines tilting angle
        # 4. setServos    - drives the servos to the PID angles
        processObjectCenter = Process(target=obj_center,
                                      args=(args, objX, objY, centerX, centerY))
        processPanning = Process(target=pid_process,
                                 args=(pan, panP, panI, panD, objX, centerX))
        processTilting = Process(target=pid_process,
                                 args=(tlt, tiltP, tiltI, tiltD, objY, centerY))
        processSetServos = Process(target=set_servos, args=(pan, tlt))
        workers = (processObjectCenter, processPanning, processTilting,
                   processSetServos)
        # start, then join, all 4 processes
        for proc in workers:
            proc.start()
        for proc in workers:
            proc.join()
        # disable the servos
        pth.servo_enable(1, False)
        pth.servo_enable(2, False)
async_rl.py |
import time
import multiprocessing as mp
import psutil
import torch
from collections import deque
import math
from rlpyt.runners.base import BaseRunner
from rlpyt.utils.quick_args import save__init__args
from rlpyt.utils.logging import logger
from rlpyt.utils.collections import AttrDict
from rlpyt.utils.seed import set_seed, make_seed
from rlpyt.utils.prog_bar import ProgBarCounter
from rlpyt.utils.synchronize import drain_queue, find_port
THROTTLE_WAIT = 0.05
class AsyncRlBase(BaseRunner):
    """
    Runs sampling and optimization asynchronously in separate Python
    processes.  May be useful to achieve higher hardware utilization, e.g.
    CPUs fully busy simulating the environment while GPU fully busy training
    the agent (there's no reason to use this CPU-only).  This setup is
    significantly more complicated than the synchronous (single- or multi-GPU)
    runners, requires use of the asynchronous sampler, and may require special
    methods in the algorithm.

    Further parallelization within the sampler and optimizer are independent.
    The asynchronous sampler can be serial, cpu-parallel, gpu-parallel, or
    multi-gpu-parallel.  The optimizer can be single- or multi-gpu.

    The algorithm must initialize a replay buffer on OS shared memory.  The
    asynchronous sampler will allocate minibatch buffers on OS shared memory,
    and yet another Python process is run to copy the completed minibatches
    over to the algorithm's replay buffer.  While that memory copy is
    underway, the sampler immediately begins gathering the next minibatch.

    Care should be taken to balance the rate at which the algorithm runs against
    the rate of the sampler, as this can affect learning performance.  In the existing
    implementations, the sampler runs at full speed, and the algorithm may be throttled
    not to exceed the specified relative rate.  This is set by the algorithm's ``replay_ratio``,
    which becomes the upper bound on the amount of training samples used in ratio with
    the amount of samples generated.  (In synchronous mode, the replay ratio is enforced
    more precisely by running a fixed batch size and number of updates per iteration.)

    The master process runs the (first) training GPU and performs all logging.

    Within the optimizer, one agent exists.  If multi-GPU, the same parameter
    values are copied across all GPUs, and PyTorch's DistributedDataParallel
    is used to all-reduce gradients (as in the synchronous multi-GPU runners).
    Within the sampler, one agent exists.  If new agent parameters are
    available from the optimizer between sampler minibatches, then those
    values are copied into the sampler before gathering the next minibatch.

    Note:
        The ``affinity`` argument should be a structure with ``sampler`` and
        ``optimizer`` attributes holding the respective hardware allocations.
        Optimizer and sampler parallelization is determined from this.
    """

    # Subclass toggle: True means the sampler runs offline evaluation and
    # posts trajectory infos behind a None sentinel (see AsyncRlEval).
    _eval = False

    def __init__(
            self,
            algo,
            agent,
            sampler,
            n_steps,
            affinity,
            seed=None,
            log_interval_steps=1e5,
            ):
        n_steps = int(n_steps)
        log_interval_steps = int(log_interval_steps)
        save__init__args(locals())  # Stores all args as self attributes.

    def train(self):
        """
        Run the optimizer in a loop.  Check whether enough new samples have
        been generated, and throttle down if necessary at each iteration.  Log
        at an interval in the number of sampler iterations, not optimizer
        iterations.
        """
        throttle_itr, delta_throttle_itr = self.startup()
        throttle_time = 0.
        sampler_itr = itr = 0
        if self._eval:
            while self.ctrl.sampler_itr.value < 1:  # Sampler does eval first.
                time.sleep(THROTTLE_WAIT)
            traj_infos = drain_queue(self.traj_infos_queue, n_sentinel=1)
            self.store_diagnostics(0, 0, traj_infos, ())
            self.log_diagnostics(0, 0, 0)
        log_counter = 0
        while True:  # Run until sampler hits n_steps and sets ctrl.quit=True.
            with logger.prefix(f"opt_itr #{itr} "):
                # Throttle: wait until the sampler has produced enough data
                # to keep the replay ratio within bounds.
                while self.ctrl.sampler_itr.value < throttle_itr:
                    if self.ctrl.quit.value:
                        break
                    time.sleep(THROTTLE_WAIT)
                    throttle_time += THROTTLE_WAIT
                if self.ctrl.quit.value:
                    break
                if self.ctrl.opt_throttle is not None:
                    self.ctrl.opt_throttle.wait()  # Sync multi-GPU workers.
                throttle_itr += delta_throttle_itr
                opt_info = self.algo.optimize_agent(itr,
                    sampler_itr=self.ctrl.sampler_itr.value)
                self.agent.send_shared_memory()  # To sampler.
                sampler_itr = self.ctrl.sampler_itr.value
                traj_infos = (list() if self._eval else
                    drain_queue(self.traj_infos_queue))
                self.store_diagnostics(itr, sampler_itr, traj_infos, opt_info)
                if (sampler_itr // self.log_interval_itrs > log_counter):
                    if self._eval:
                        # Hold the sampler's itr lock so the eval sentinel
                        # and counter stay consistent while draining.
                        with self.ctrl.sampler_itr.get_lock():
                            traj_infos = drain_queue(self.traj_infos_queue,
                                n_sentinel=1)
                        self.store_diagnostics(itr, sampler_itr, traj_infos, ())
                    self.log_diagnostics(itr, sampler_itr, throttle_time)
                    log_counter += 1
                    throttle_time = 0.
            itr += 1
        # Final log:
        sampler_itr = self.ctrl.sampler_itr.value
        traj_infos = drain_queue(self.traj_infos_queue)
        if traj_infos or not self._eval:
            self.store_diagnostics(itr, sampler_itr, traj_infos, ())
            self.log_diagnostics(itr, sampler_itr, throttle_time)
        self.shutdown()

    def startup(self):
        """
        Calls ``sampler.async_initialize()`` to get a double buffer for minibatches,
        followed by ``algo.async_initialize()`` to get a replay buffer on shared memory,
        then launches all workers (sampler, optimizer, memory copier).
        """
        if self.seed is None:
            self.seed = make_seed()
        set_seed(self.seed)
        double_buffer, examples = self.sampler.async_initialize(
            agent=self.agent,
            bootstrap_value=getattr(self.algo, "bootstrap_value", False),
            traj_info_kwargs=self.get_traj_info_kwargs(),
            seed=self.seed,
        )
        self.sampler_batch_size = self.sampler.batch_spec.size
        self.world_size = len(self.affinity.optimizer)
        n_itr = self.get_n_itr()  # Number of sampler iterations.
        replay_buffer = self.algo.async_initialize(
            agent=self.agent,
            sampler_n_itr=n_itr,
            batch_spec=self.sampler.batch_spec,
            mid_batch_reset=self.sampler.mid_batch_reset,
            examples=examples,
            world_size=self.world_size,
        )
        self.launch_workers(n_itr, double_buffer, replay_buffer)
        throttle_itr, delta_throttle_itr = self.optim_startup()
        return throttle_itr, delta_throttle_itr

    def optim_startup(self):
        """
        Sets the hardware affinity, moves the agent's model parameters onto
        device and initialize data-parallel agent, if applicable.  Computes
        optimizer throttling settings.
        """
        main_affinity = self.affinity.optimizer[0]
        p = psutil.Process()
        if main_affinity.get("set_affinity", True):
            p.cpu_affinity(main_affinity["cpus"])
        logger.log(f"Optimizer master CPU affinity: {p.cpu_affinity()}.")
        torch.set_num_threads(main_affinity["torch_threads"])
        logger.log(f"Optimizer master Torch threads: {torch.get_num_threads()}.")
        self.agent.to_device(main_affinity.get("cuda_idx", None))
        if self.world_size > 1:
            self.agent.data_parallel()
        self.algo.optim_initialize(rank=0)
        # First optimization may not begin until min_steps_learn samples exist.
        throttle_itr = 1 + getattr(self.algo,
            "min_steps_learn", 0) // self.sampler_batch_size
        delta_throttle_itr = (self.algo.batch_size * self.world_size *
            self.algo.updates_per_optimize /  # (is updates_per_sync)
            (self.sampler_batch_size * self.algo.replay_ratio))
        self.initialize_logging()
        return throttle_itr, delta_throttle_itr

    def launch_workers(self, n_itr, double_buffer, replay_buffer):
        """Create the control structure and spawn sampler/memcpy/optimizer workers."""
        self.traj_infos_queue = mp.Queue()
        self.ctrl = self.build_ctrl(self.world_size)
        self.launch_sampler(n_itr)
        self.launch_memcpy(double_buffer, replay_buffer)
        self.launch_optimizer_workers(n_itr)

    def get_n_itr(self):
        """Compute total sampler iterations, rounded up to whole log intervals."""
        log_interval_itrs = max(self.log_interval_steps //
            self.sampler_batch_size, 1)
        n_itr = math.ceil(self.n_steps / self.log_interval_steps) * log_interval_itrs
        self.log_interval_itrs = log_interval_itrs
        self.n_itr = n_itr
        logger.log(f"Running {n_itr} sampler iterations.")
        return n_itr

    def build_ctrl(self, world_size):
        """
        Builds several parallel communication mechanisms for controlling the
        workflow across processes.
        """
        opt_throttle = (mp.Barrier(world_size) if world_size > 1 else
            None)
        return AttrDict(
            quit=mp.Value('b', lock=True),
            quit_opt=mp.RawValue('b'),
            sample_ready=[mp.Semaphore(0) for _ in range(2)],  # Double buffer.
            sample_copied=[mp.Semaphore(1) for _ in range(2)],
            sampler_itr=mp.Value('l', lock=True),
            opt_throttle=opt_throttle,
            eval_time=mp.Value('d', lock=True),
        )

    def launch_optimizer_workers(self, n_itr):
        """
        If multi-GPU optimization, launches an optimizer worker for each GPU
        and initializes ``torch.distributed.``
        """
        if self.world_size == 1:
            return
        offset = self.affinity.optimizer[0].get("master_cpus", [0])[0]
        port = find_port(offset=offset)
        affinities = self.affinity.optimizer
        runners = [AsyncOptWorker(
            rank=rank,
            world_size=self.world_size,
            algo=self.algo,
            agent=self.agent,
            n_itr=n_itr,
            affinity=affinities[rank],
            seed=self.seed + 100,
            ctrl=self.ctrl,
            port=port,
        ) for rank in range(1, len(affinities))]
        procs = [mp.Process(target=r.optimize, args=()) for r in runners]
        for p in procs:
            p.start()
        # Master (rank 0) joins the NCCL group here.
        torch.distributed.init_process_group(
            backend="nccl",
            rank=0,
            world_size=self.world_size,
            init_method=f"tcp://127.0.0.1:{port}",
        )
        self.optimizer_procs = procs

    def launch_memcpy(self, sample_buffers, replay_buffer):
        """
        Fork a Python process for each of the sampler double buffers.  (It may
        be overkill to use two separate processes here, may be able to simplify
        to one and still get good performance.)
        """
        procs = list()
        for i in range(len(sample_buffers)):  # (2 for double-buffer.)
            # Each copier sees only its buffer's semaphore pair.
            ctrl = AttrDict(
                quit=self.ctrl.quit,
                sample_ready=self.ctrl.sample_ready[i],
                sample_copied=self.ctrl.sample_copied[i],
            )
            procs.append(mp.Process(target=memory_copier,
                args=(sample_buffers[i], self.algo.samples_to_buffer,
                replay_buffer, ctrl)))
        for p in procs:
            p.start()
        self.memcpy_procs = procs

    def launch_sampler(self, n_itr):
        """Spawn the sampler process (eval variant when self._eval)."""
        target = run_async_sampler
        kwargs = dict(
            sampler=self.sampler,
            affinity=self.affinity.sampler,
            ctrl=self.ctrl,
            traj_infos_queue=self.traj_infos_queue,
            n_itr=n_itr,
        )
        if self._eval:
            target = run_async_sampler_eval
            kwargs["eval_itrs"] = self.log_interval_itrs
        self.sampler_proc = mp.Process(target=target, kwargs=kwargs)
        self.sampler_proc.start()

    def shutdown(self):
        """Join sampler, memcpy, and (if any) optimizer worker processes."""
        self.pbar.stop()
        logger.log("Master optimizer shutting down, joining sampler process...")
        self.sampler_proc.join()
        logger.log("Joining memory copiers...")
        for p in self.memcpy_procs:
            p.join()
        if self.ctrl.opt_throttle is not None:
            logger.log("Joining optimizer processes...")
            self.ctrl.quit_opt.value = True
            self.ctrl.opt_throttle.wait()  # Release workers to see quit_opt.
            for p in self.optimizer_procs:
                p.join()
        logger.log("All processes shutdown.  Training complete.")

    def initialize_logging(self):
        """Reset accumulators used by store_diagnostics/log_diagnostics."""
        self._opt_infos = {k: list() for k in self.algo.opt_info_fields}
        self._start_time = self._last_time = time.time()
        self._last_itr = 0
        self._last_sampler_itr = 0
        self._last_update_counter = 0

    def get_itr_snapshot(self, itr, sampler_itr):
        """Assemble the dict saved as the experiment checkpoint."""
        return dict(
            itr=itr,
            sampler_itr=sampler_itr,
            cum_steps=sampler_itr * self.sampler_batch_size,
            cum_updates=self.algo.update_counter,
            agent_state_dict=self.agent.state_dict(),
            optimizer_state_dict=self.algo.optim_state_dict(),
        )

    def save_itr_snapshot(self, itr, sample_itr):
        """Write the checkpoint through the logger."""
        logger.log("saving snapshot...")
        params = self.get_itr_snapshot(itr, sample_itr)
        logger.save_itr_params(itr, params)
        logger.log("saved")

    def get_traj_info_kwargs(self):
        # Passed into the sampler's TrajInfo construction.
        return dict(discount=getattr(self.algo, "discount", 1))

    def store_diagnostics(self, itr, sampler_itr, traj_infos, opt_info):
        """Accumulate per-iteration trajectory and optimizer statistics."""
        self._traj_infos.extend(traj_infos)
        for k, v in self._opt_infos.items():
            new_v = getattr(opt_info, k, [])
            v.extend(new_v if isinstance(new_v, list) else [new_v])
        self.pbar.update((sampler_itr + 1) % self.log_interval_itrs)

    def log_diagnostics(self, itr, sampler_itr, throttle_time):
        """Record tabular stats for the interval and reset the progress bar."""
        self.pbar.stop()
        self.save_itr_snapshot(itr, sampler_itr)
        new_time = time.time()
        time_elapsed = new_time - self._last_time
        new_updates = self.algo.update_counter - self._last_update_counter
        new_samples = self.sampler.batch_size * (sampler_itr -
            self._last_sampler_itr)
        updates_per_second = (float('nan') if itr == 0 else
            new_updates / time_elapsed)
        samples_per_second = (float('nan') if itr == 0 else
            new_samples / time_elapsed)
        if self._eval:
            # Separate out time spent inside offline evaluation.
            new_eval_time = self.ctrl.eval_time.value
            eval_time_elapsed = new_eval_time - self._last_eval_time
            non_eval_time_elapsed = time_elapsed - eval_time_elapsed
            non_eval_samples_per_second = (float('nan') if itr == 0 else
                new_samples / non_eval_time_elapsed)
            self._last_eval_time = new_eval_time
        cum_steps = sampler_itr * self.sampler.batch_size  # No * world_size.
        replay_ratio = (new_updates * self.algo.batch_size * self.world_size /
            max(1, new_samples))
        cum_replay_ratio = (self.algo.update_counter * self.algo.batch_size *
            self.world_size / max(1, cum_steps))
        logger.record_tabular('Iteration', itr)
        logger.record_tabular('SamplerIteration', sampler_itr)
        logger.record_tabular('CumTime (s)', new_time - self._start_time)
        logger.record_tabular('CumSteps', cum_steps)
        logger.record_tabular('CumUpdates', self.algo.update_counter)
        logger.record_tabular('ReplayRatio', replay_ratio)
        logger.record_tabular('CumReplayRatio', cum_replay_ratio)
        logger.record_tabular('StepsPerSecond', samples_per_second)
        if self._eval:
            logger.record_tabular('NonEvalSamplesPerSecond', non_eval_samples_per_second)
        logger.record_tabular('UpdatesPerSecond', updates_per_second)
        logger.record_tabular('OptThrottle', (time_elapsed - throttle_time) /
            time_elapsed)
        self._log_infos()
        self._last_time = new_time
        self._last_itr = itr
        self._last_sampler_itr = sampler_itr
        self._last_update_counter = self.algo.update_counter
        logger.dump_tabular(with_prefix=False)
        logger.log(f"Optimizing over {self.log_interval_itrs} sampler "
            "iterations.")
        self.pbar = ProgBarCounter(self.log_interval_itrs)

    def _log_infos(self, traj_infos=None):
        """Dump accumulated trajectory/optimizer stats and reset opt infos."""
        if traj_infos is None:
            traj_infos = self._traj_infos
        if traj_infos:
            for k in traj_infos[0]:
                if not k.startswith("_"):
                    logger.record_tabular_misc_stat(k,
                        [info[k] for info in traj_infos])
        if self._opt_infos:
            for k, v in self._opt_infos.items():
                logger.record_tabular_misc_stat(k, v)
        self._opt_infos = {k: list() for k in self._opt_infos}  # (reset)
class AsyncRl(AsyncRlBase):
    """
    Asynchronous RL with online agent performance tracking.
    """

    def __init__(self, *args, log_traj_window=100, **kwargs):
        super().__init__(*args, **kwargs)
        # Size of the rolling window of completed trajectories to report on.
        self.log_traj_window = int(log_traj_window)

    def initialize_logging(self):
        """Add online-trajectory accumulators on top of the base logging."""
        self._traj_infos = deque(maxlen=self.log_traj_window)
        self._cum_completed_trajs = 0
        self._new_completed_trajs = 0
        super().initialize_logging()
        logger.log(f"Optimizing over {self.log_interval_itrs} sampler "
            "iterations.")
        self.pbar = ProgBarCounter(self.log_interval_itrs)

    def store_diagnostics(self, itr, sampler_itr, traj_infos, opt_info):
        # Count trajectories completed since the last log.
        self._cum_completed_trajs += len(traj_infos)
        self._new_completed_trajs += len(traj_infos)
        super().store_diagnostics(itr, sampler_itr, traj_infos, opt_info)

    def log_diagnostics(self, itr, sampler_itr, throttle_time):
        logger.record_tabular('CumCompletedTrajs', self._cum_completed_trajs)
        logger.record_tabular('NewCompletedTrajs', self._new_completed_trajs)
        logger.record_tabular('StepsInTrajWindow',
            sum(info["Length"] for info in self._traj_infos))
        super().log_diagnostics(itr, sampler_itr, throttle_time)
        self._new_completed_trajs = 0
class AsyncRlEval(AsyncRlBase):
    """
    Asynchronous RL with offline agent performance evaluation.
    """

    # Switches the base runner into eval mode (sampler posts eval trajs).
    _eval = True

    def initialize_logging(self):
        self._traj_infos = list()
        self._last_eval_time = 0.
        super().initialize_logging()
        self.pbar = ProgBarCounter(self.log_interval_itrs)

    def log_diagnostics(self, itr, sampler_itr, throttle_time):
        if not self._traj_infos:
            logger.log("WARNING: had no complete trajectories in eval.")
        steps_in_eval = sum([info["Length"] for info in self._traj_infos])
        logger.record_tabular('StepsInEval', steps_in_eval)
        logger.record_tabular('TrajsInEval', len(self._traj_infos))
        logger.record_tabular('CumEvalTime', self.ctrl.eval_time.value)
        super().log_diagnostics(itr, sampler_itr, throttle_time)
        self._traj_infos = list()  # Clear after each eval.
###############################################################################
# Worker processes.
###############################################################################
class AsyncOptWorker:
    """Non-master optimizer process used in multi-GPU asynchronous mode."""

    def __init__(
            self,
            rank,
            world_size,
            algo,
            agent,
            n_itr,
            affinity,
            seed,
            ctrl,
            port
            ):
        save__init__args(locals())

    def optimize(self):
        # Process entry point: step in lock-step with the master via the
        # opt_throttle barrier until the master raises quit_opt.
        self.startup()
        itr = 0
        while True:
            self.ctrl.opt_throttle.wait()
            if self.ctrl.quit_opt.value:
                break
            self.algo.optimize_agent(itr, sampler_itr=self.ctrl.sampler_itr.value)  # Leave un-logged.
            itr += 1
        self.shutdown()

    def startup(self):
        # Join the NCCL group, pin CPU affinity and thread count, seed,
        # and move the agent onto this worker's GPU as data-parallel.
        torch.distributed.init_process_group(
            backend="nccl",
            rank=self.rank,
            world_size=self.world_size,
            init_method=f"tcp://127.0.0.1:{self.port}",
        )
        p = psutil.Process()
        if self.affinity.get("set_affinity", True):
            p.cpu_affinity(self.affinity["cpus"])
        logger.log(f"Optimizer rank {self.rank} CPU affinity: {p.cpu_affinity()}.")
        torch.set_num_threads(self.affinity["torch_threads"])
        logger.log(f"Optimizer rank {self.rank} Torch threads: {torch.get_num_threads()}.")
        logger.log(f"Optimizer rank {self.rank} CUDA index: "
            f"{self.affinity.get('cuda_idx', None)}.")
        set_seed(self.seed)
        self.agent.to_device(cuda_idx=self.affinity.get("cuda_idx", None))
        self.agent.data_parallel()
        self.algo.optim_initialize(rank=self.rank)

    def shutdown(self):
        logger.log(f"Async optimization worker {self.rank} shutting down.")
def run_async_sampler(sampler, affinity, ctrl, traj_infos_queue, n_itr):
    """
    Target function for the process which will run the sampler, in the case of
    online performance logging.  Toggles the sampler's double-buffer for each
    iteration, waits for the memory copier to finish before writing into that
    buffer, and signals the memory copier when the sampler is done writing a
    minibatch.
    """
    sampler.initialize(affinity)
    db_idx = 0
    for itr in range(n_itr):
        # Wait until the copier has drained this half of the double buffer.
        ctrl.sample_copied[db_idx].acquire()
        traj_infos = sampler.obtain_samples(itr, db_idx)
        ctrl.sample_ready[db_idx].release()  # Hand the minibatch to memcpy.
        # Publish trajectory infos and the iteration counter atomically.
        with ctrl.sampler_itr.get_lock():
            for traj_info in traj_infos:
                traj_infos_queue.put(traj_info)
            ctrl.sampler_itr.value = itr
        db_idx ^= 1  # Double buffer.
    logger.log(f"Async sampler reached final itr: {itr + 1}, quitting.")
    ctrl.quit.value = True  # This ends the experiment.
    sampler.shutdown()
    for s in ctrl.sample_ready:
        s.release()  # Let memcpy workers finish and quit.
def run_async_sampler_eval(sampler, affinity, ctrl, traj_infos_queue,
        n_itr, eval_itrs):
    """
    Target function running the sampler with offline performance evaluation.

    Like ``run_async_sampler`` but instead of forwarding online trajectory
    infos, every ``eval_itrs`` iterations the agent is evaluated offline and
    those trajectory infos are sent through the queue, terminated with a
    ``None`` sentinel for the master to detect end-of-batch.
    """
    sampler.initialize(affinity)
    db_idx = 0
    for itr in range(n_itr + 1):  # +1 to get last eval :)
        # Wait for the memory copier to drain this buffer half before reuse.
        ctrl.sample_copied[db_idx].acquire()
        # assert not ctrl.sample_copied[db_idx].acquire(block=False)  # Debug check.
        sampler.obtain_samples(itr, db_idx)
        ctrl.sample_ready[db_idx].release()
        if itr % eval_itrs == 0:
            # Time the evaluation so it can be excluded from throughput stats.
            eval_time = -time.time()
            traj_infos = sampler.evaluate_agent(itr)
            eval_time += time.time()
            ctrl.eval_time.value += eval_time  # Not atomic but only writer.
            with ctrl.sampler_itr.get_lock():
                # Publish infos and the iteration number together so the
                # master reads a consistent snapshot.
                for traj_info in traj_infos:
                    traj_infos_queue.put(traj_info)
                traj_infos_queue.put(None)  # Master will get until None sentinel.
                ctrl.sampler_itr.value = itr
        else:
            ctrl.sampler_itr.value = itr
        db_idx ^= 1  # Double buffer
    logger.log(f"Async sampler reached final itr: {itr + 1}, quitting.")
    ctrl.quit.value = True  # This ends the experiment.
    sampler.shutdown()
    for s in ctrl.sample_ready:
        s.release()  # Let memcpy workers finish and quit.
def memory_copier(sample_buffer, samples_to_buffer, replay_buffer, ctrl):
    """
    Target function for the process which will copy the sampler's minibatch buffer
    into the algorithm's main replay buffer.

    Args:
        sample_buffer: The (single) minibatch buffer from the sampler, on shared memory.
        samples_to_buffer: A function/method from the algorithm to process samples from the minibatch buffer into the replay buffer (e.g. select which fields, compute some prioritization).
        replay_buffer: Algorithm's main replay buffer, on shared memory.
        ctrl: Structure for communicating when the minibatch is ready to copy/done copying.

    Warning:
        Although this function may use the algorithm's ``samples_to_buffer()``
        method, here it is running in a separate process, so will not be aware
        of changes in the algorithm from the optimizer process. Furthermore,
        it may not be able to store state across iterations--in the
        implemented setup, two separate memory copier processes are used, so
        each one only sees every other minibatch. (Could easily change to
        single copier if desired, and probably without performance loss.)
    """
    # Needed on some systems to avoid mysterious hang:
    torch.set_num_threads(1)
    # (Without torch.set_num_threads, experienced hang on Ubuntu Server 16.04
    # machines (but not Desktop) when appending samples to make replay buffer
    # full, but only for batch_B > 84 (dqn + r2d1 atari), regardless of replay
    # size or batch_T. Would seem to progress through all code in
    # replay.append_samples() but simply would not return from it. Some
    # tipping point for MKL threading?)
    while True:
        # Block until the sampler signals a full minibatch (or shutdown).
        ctrl.sample_ready.acquire()
        # assert not ctrl.sample_ready.acquire(block=False)  # Debug check.
        if ctrl.quit.value:
            break
        replay_buffer.append_samples(samples_to_buffer(sample_buffer))
        # Tell the sampler this minibatch buffer may be overwritten again.
        ctrl.sample_copied.release()
    logger.log("Memory copier shutting down.")
def placeholder(x):
    """No-op stub: accepts and ignores ``x``, returns ``None``."""
    return None
tello_camera_aruco_marker_detection.py | # This python script accesses Tello's camera, takes off, and then detects markers to perform certain commands
# In this case we detect marker id 33 and do a forward flip while marker id 1 commands Tello to land
# Modify this script to suit your needs and feel free to open a GitHub issue with any questions
import cv2
import numpy as np
import time
from cv2 import aruco
import socket
import threading
# Command status flag: True while a sent command is awaiting Tello's "ok" reply.
COMMAND_IN_PROGRESS = False
# Change these to whatever marker ids you prefer
FLIP_MARKER_ID = 33
LAND_MARKER_ID = 1
# IP and port of Tello (the drone's fixed SDK address)
tello_address = ('192.168.10.1', 8889)
# Create a UDP connection that we'll send the command to
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
# Let's be explicit and bind to a local port on our machine where Tello can send messages
sock.bind(('', 9000))
# Setup the aruco marker detection using the original ArUco dictionary
aruco_dict = aruco.Dictionary_get(aruco.DICT_ARUCO_ORIGINAL)
aruco_params = aruco.DetectorParameters_create()
# Function to send a command to Tello
def send(message):
    """Best-effort: encode and transmit one command string to the Tello."""
    try:
        payload = message.encode()
        sock.sendto(payload, tello_address)
        print("Sending message: " + message)
    except Exception as error:
        # Report and continue; a dropped command is not fatal here.
        print("Error sending: " + str(error))
# Function to receive the message from Tello
def receive():
    """Background listener: print Tello replies and clear the busy flag on 'ok'."""
    # Make this global so it can be updated from this thread.
    global COMMAND_IN_PROGRESS
    while True:
        try:
            packet, _ = sock.recvfrom(128)
            text = packet.decode(encoding='utf-8')
            print("Received message: " + text)
            # An "ok" reply means the outstanding command completed.
            if COMMAND_IN_PROGRESS and "ok" in text:
                print("resetting command in progress")
                COMMAND_IN_PROGRESS = False
        except Exception as err:
            # On any receive error: close the socket and stop listening.
            sock.close()
            print("Error receiving: " + str(err))
            break
# Create and start a listening thread that runs in the background
receiveThread = threading.Thread(target=receive)
receiveThread.daemon = True  # Daemon thread: won't keep the process alive on exit.
receiveThread.start()
# Initiate tello connection (puts the drone into SDK command mode)
send("command")
# Delay for 1 second to let the drone respond
time.sleep(1)
# Start the camera stream
send("streamon")
# Delay for 1 second before opening the stream
time.sleep(1)
# Get the video stream from Tello on port 11111
camera = cv2.VideoCapture('udp://127.0.0.1:11111')
# This will give the video stream some time to display
time.sleep(3)
# Takeoff
send("takeoff")
# Loop until program is stopped with q on the keyboard
while(True):
# Capture frame-by-frame
ret, frame = camera.read()
# Convert the color frame to grayscale for marker detection
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
# Get marker corners and ids
corners, ids, rejectedImgPoints = aruco.detectMarkers(gray, aruco_dict, parameters=aruco_params)
markers = aruco.drawDetectedMarkers(frame.copy(), corners, ids)
# Loop through the markers (in case more than one is detected)
for index, id in np.ndenumerate(ids):
# If we find marker 33 then let's do a front flip
if not COMMAND_IN_PROGRESS:
# Do a flip based on aruco marker
if id == FLIP_MARKER_ID:
print("Flip marker detected!!!")
send("flip f")
COMMAND_IN_PROGRESS = True
# Land based on aruco marker
elif id == LAND_MARKER_ID:
print("Land marker detected!!!")
send("land")
COMMAND_IN_PROGRESS = True
# Display the resulting frame
cv2.imshow('Tello', markers)
#time.sleep(.100)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
# When everything is done let's do some cleanup
sock.close()
camera.release()
cv2.destroyAllWindows() |
traverse_graph.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=too-many-locals,too-many-statements,too-many-branches,protected-access
"""API for graph traversing."""
import threading
import tvm
from tvm import relay, autotvm
from tvm.relay import transform
from tvm.relay.expr import Call, TupleGetItem, Var, Constant, Tuple
from tvm.relay.function import Function
from tvm.relay.ty import TupleType, TensorType
from tvm.autotvm.task import TaskExtractEnv
from .utils import has_multiple_inputs, is_boundary_node, is_skipped_node
from .._base import OPT_OUT_OP
def expr2graph(expr, target_ops, node_dict, node_list):
    """Convert a relay function into the graph-tuner node structure and
    attach autotvm workloads to each target-operator node.

    Parameters
    ----------
    expr : tvm.relay.Expr.Function
        Input relay function expression.
    target_ops : list of tvm.ir.Op
        Relay operators whose workloads should be extracted.
    node_dict : dict of tvm.relay.Expr to int
        Output mapping from expression to node index.
    node_list : list of dict
        Output list of node records in the format
        {"op": str, "node": tvm.relay.expr, "inputs": [int], "types": [tvm.relay.Type],
        "name": str, "workloads": [tuple], "topi_op": [function]}
    """
    # TODO(@kevinthesun, @icemelon9): Currently graph tuning pass relies on the fact
    # that # autotvm tasks == # ops. But this won't be true after having relay op
    # strategy. We need to find a solution to fix this.
    env = TaskExtractEnv.get(allow_duplicate=True)
    env.reset(target_ops)
    # pylint: disable=not-context-manager
    with env:
        _expr2graph_impl(expr, target_ops, node_dict, node_list)
        # Tasks were collected in topological order; pair them back up with
        # the target-op nodes in the same order.
        task_slot = 0
        for entry in node_list:
            if entry["op"] not in target_ops:
                continue
            task_name, args = env.task_collection[task_slot]
            task = autotvm.task.create(task_name, args, target="llvm", target_host=None)
            entry["workloads"] = [task.workload]
            entry["topi_op"] = [task_name]
            task_slot += 1
def _infer_type(node):
    """Run relay type inference on ``node`` and return the typed expression."""
    mod = transform.InferType()(tvm.IRModule.from_expr(node))
    checked = mod["main"]
    # A bare expression gets wrapped in a function by from_expr; unwrap it.
    if isinstance(node, relay.Function):
        return checked
    return checked.body
def _expr2graph_impl(expr, target_ops, node_dict, node_list):
    """Implementation to convert relay expr to graph data structure.

    Performs a post-order traversal of ``expr``, appending one record per
    expression to ``node_list`` and indexing it in ``node_dict``.  For each
    call to a target operator, a small standalone module is built with a
    tracing target so autotvm records its workload (see ``expr2graph``).
    """

    def _traverse_expr(node):
        # Each expression is recorded once; revisits reuse the same index.
        if node in node_dict:
            return
        node_index = len(node_list)
        node_entry = {"node": node, "inputs": [], "types": [], "op": None, "name": None}
        if isinstance(node, Call):
            op = node.op
            node_entry["op"] = node.op
            for arg in node.args:
                in_node_idx = node_dict[arg]
                if isinstance(arg, (Tuple, TupleGetItem)):
                    # Flatten tuple arguments to their underlying producers.
                    node_entry["inputs"] += node_list[in_node_idx]["inputs"]
                else:
                    # Input record format: [node index, output slot, 0].
                    node_entry["inputs"].append([in_node_idx, 0, 0])
            infer_out = _infer_type(node)
            out_type = infer_out._checked_type_
            if isinstance(out_type, TensorType):
                node_entry["types"].append(out_type)
            elif isinstance(out_type, TupleType):
                # One type entry per tuple field, addressed by the slot index.
                for tupe_type in out_type.fields:
                    node_entry["types"].append(tupe_type)
            else:
                raise RuntimeError(
                    "Unsupported output type %s in operator %s" % (type(out_type), op.name)
                )
            # Utilize tracing target to fetch workload with topo-order.
            # Since we only need workload, dummy target can be used to
            # create task.
            if op in target_ops:
                params = []
                for i, input_idx in enumerate(node_entry["inputs"]):
                    input_node_entry = node_list[input_idx[0]]
                    input_type = input_node_entry["types"][input_idx[1]]
                    if not isinstance(input_node_entry["node"], (Var, Constant, Call)):
                        raise RuntimeError(
                            "Graph tuner can only tune target "
                            "operators with input node of type "
                            "relay.expr.Var/Constant/Call. Now "
                            "find a target op %s with input type %s"
                            % (op, str(type(input_node_entry["node"])))
                        )
                    # Replace real inputs with free variables of the same type
                    # so the op can be compiled in isolation.
                    free_var = relay.Var("var_%d" % i, input_type)
                    params.append(free_var)
                call = relay.Call(node.op, params, node.attrs)
                mod = tvm.IRModule.from_expr(relay.Function(params, call))
                relay.backend.compile_engine.get().clear()
                # NOTE(review): the build runs on a helper thread — presumably
                # to isolate thread-local compile state during tracing; confirm.
                build_thread = threading.Thread(
                    target=relay.build, args=(mod, "llvm -device=tracing", None, None)
                )
                build_thread.start()
                build_thread.join()
        elif isinstance(node, Var):
            node_entry["name"] = node.name_hint
            node_entry["types"] = [node.type_annotation]
        elif isinstance(node, Function):
            # Ignore root node since it equals to input function expression
            if node != expr:
                _expr2graph_impl(node, target_ops, node_dict, node_list)
            return
        elif isinstance(node, TupleGetItem):
            in_node_idx = node_dict[node.tuple_value]
            # Record which tuple field this node selects.
            node_entry["inputs"].append([in_node_idx, node.index, 0])
        elif isinstance(node, Tuple):
            for tuple_item in node:
                in_node_idx = node_dict[tuple_item]
                if isinstance(tuple_item, TupleGetItem):
                    node_entry["inputs"] += node_list[in_node_idx]["inputs"]
                elif isinstance(tuple_item, Tuple):
                    raise RuntimeError("Graph tuner doesn't support nested tuple.")
                else:
                    node_entry["inputs"].append([in_node_idx, 0, 0])
        elif isinstance(node, Constant):
            node_entry["name"] = "Constant_" + str(node_index)
            node_entry["types"] = [node.checked_type]
        elif isinstance(node, tvm.ir.Op):
            # Operator objects themselves are not graph nodes.
            return
        else:
            raise RuntimeError(
                "Not supported relay node type in graph tuning: %s" % str(type(node))
            )
        node_dict[node] = node_index
        node_list.append(node_entry)

    relay.analysis.post_order_visit(expr, _traverse_expr)
def get_direct_ancestor(node_list, visited_dict, target_ops, node_idx, input_names):
    """Find the closest ancestors of a node that are target ops or multi-input nodes.

    Recursively walks the inputs of ``node_list[node_idx]``, stopping at any
    node whose op is in ``target_ops`` or that has multiple inputs.  Results
    are memoized in ``visited_dict``.

    Parameters
    ----------
    node_list : list of dict of str to object
        All nodes in the graph.
    visited_dict : dict of int to int
        Memoization cache: node index -> previously computed ancestors.
    target_ops : list of str
        Target relay base op names.
    node_idx : int
        Index of the node to start from.
    input_names : list of str
        Names of graph input nodes.

    Returns
    -------
    out : list of int
        Ancestor node indices.
    """
    if node_idx in visited_dict:
        return visited_dict[node_idx]
    node = node_list[node_idx]
    # Graph boundary nodes are their own (only) ancestor.
    if is_boundary_node(node, input_names):
        return [node_idx]

    ancestors = []
    for input_entry in node["inputs"]:
        parent_idx = input_entry[0]
        parent = node_list[parent_idx]
        multi_input = has_multiple_inputs(node_list, parent_idx, input_names, OPT_OUT_OP)
        if parent["op"] in target_ops or multi_input:
            ancestors.append(parent_idx)
        else:
            # Recurse past non-target single-input nodes, de-duplicating.
            for candidate in get_direct_ancestor(
                    node_list, visited_dict, target_ops, parent_idx, input_names):
                if candidate not in ancestors:
                    ancestors.append(candidate)
    visited_dict[node_idx] = ancestors
    return ancestors
def get_in_nodes(node_list, target_ops, input_names):
    """Create a dictionary mapping from op_name nodes or multi-input
    nodes to closest input ancestors.

    Parameters
    ----------
    node_list : list of dict of str to object
        List of all nodes in a graph.
    target_ops: List of str
        List of target relay op
    input_names : list of str
        Names of graph input nodes.

    Returns
    -------
    out : dict of int to list of int
        Dictionary maps node index to closest input ancestors.
    """
    visited_dict = {}
    in_node_dict = {}
    # First pass: compute direct ancestors for every non-boundary node.
    for i, node in enumerate(node_list):
        if is_boundary_node(node, input_names) or is_skipped_node(node):
            continue
        get_direct_ancestor(node_list, visited_dict, target_ops, i, input_names)
    # Keep only target-op nodes and multi-input nodes.
    for key, val in visited_dict.items():
        node = node_list[key]
        is_multiple_inputs = has_multiple_inputs(node_list, key, input_names, OPT_OUT_OP)
        if node["op"] in target_ops or is_multiple_inputs:
            in_node_dict[key] = val
    # Reduce boundary nodes
    out_node_dict = get_out_nodes(in_node_dict)
    has_reduced_node = True
    while has_reduced_node:
        boundary_nodes = []
        for key, val in in_node_dict.items():
            node = node_list[key]
            is_boundary = True
            # Target ops can't be boundary nodes
            if node["op"] not in target_ops:
                # BUGFIX: iterate over a snapshot of ``val``.  The original
                # code removed items from ``val`` while iterating it directly,
                # which silently skips the element following each removal.
                for input_idx in list(val):
                    in_node = node_list[input_idx]
                    if not is_boundary_node(in_node, input_names) and input_idx in in_node_dict:
                        is_boundary = False
                    else:
                        val.remove(input_idx)
                if is_boundary:
                    boundary_nodes.append(key)
        if boundary_nodes:
            for idx in boundary_nodes:
                if idx in in_node_dict:
                    del in_node_dict[idx]
        else:
            has_reduced_node = False
    # Remove empty nodes to ignore pre-computed sub-graph
    has_empty_node = True
    while has_empty_node:
        empty_nodes = []
        for key, val in in_node_dict.items():
            if not val:
                empty_nodes.append(key)
        if empty_nodes:
            has_empty_node = True
            for node in empty_nodes:
                del in_node_dict[node]
                # Also detach the removed node from its consumers' inputs.
                if node in out_node_dict:
                    for out_node in out_node_dict[node]:
                        in_node_dict[out_node].remove(node)
        else:
            has_empty_node = False
    return in_node_dict
def get_out_nodes(in_node_dict):
    """Invert an input-ancestor mapping into an output-consumer mapping.

    Parameters
    ----------
    in_node_dict : dict of int to list of int
        Dictionary maps node index to closest input ancestors.
        It can be created with get_in_nodes.

    Returns
    -------
    out : dict of int to list of int
        Dictionary maps node index to closest output nodes.
    """
    # Every key appears in the result, even if nothing consumes it.
    out_node_dict = {key: [] for key in in_node_dict}
    for consumer, producers in in_node_dict.items():
        for producer in producers:
            out_node_dict.setdefault(producer, []).append(consumer)
    return out_node_dict
|
main.py | """
mlperf inference benchmarking tool
"""
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import argparse
import array
import collections
import json
import logging
import os
import sys
import threading
import time
from queue import Queue
import mlperf_loadgen as lg
import numpy as np
import dataset
import imagenet
import coco
logging.basicConfig(level=logging.INFO)
log = logging.getLogger("main")

# Time unit conversion factors.
NANO_SEC = 1e9
MILLI_SEC = 1000

# pylint: disable=missing-docstring

# the datasets we support:
# name -> (dataset class, pre-process fn, post-process fn, dataset kwargs)
SUPPORTED_DATASETS = {
    "imagenet":
        (imagenet.Imagenet, dataset.pre_process_vgg, dataset.PostProcessCommon(offset=-1),
         {"image_size": [224, 224, 3]}),
    "imagenet_mobilenet":
        (imagenet.Imagenet, dataset.pre_process_mobilenet, dataset.PostProcessArgMax(offset=-1),
         {"image_size": [224, 224, 3]}),
    "imagenet_pytorch":
        (imagenet.Imagenet, dataset.pre_process_imagenet_pytorch, dataset.PostProcessArgMax(offset=0),
         {"image_size": [224, 224, 3]}),
    "coco-300":
        (coco.Coco, dataset.pre_process_coco_mobilenet, coco.PostProcessCoco(),
         {"image_size": [300, 300, 3]}),
    "coco-300-pt":
        (coco.Coco, dataset.pre_process_coco_pt_mobilenet, coco.PostProcessCocoPt(False,0.3),
         {"image_size": [300, 300, 3]}),
    "coco-1200":
        (coco.Coco, dataset.pre_process_coco_resnet34, coco.PostProcessCoco(),
         {"image_size": [1200, 1200, 3]}),
    "coco-1200-onnx":
        (coco.Coco, dataset.pre_process_coco_resnet34, coco.PostProcessCocoOnnx(),
         {"image_size": [1200, 1200, 3]}),
    "coco-1200-pt":
        (coco.Coco, dataset.pre_process_coco_resnet34, coco.PostProcessCocoPt(True,0.05),
         {"image_size": [1200, 1200, 3],"use_label_map": True}),
    "coco-1200-tf":
        (coco.Coco, dataset.pre_process_coco_resnet34, coco.PostProcessCocoTf(),
         {"image_size": [1200, 1200, 3],"use_label_map": False}),
}
# Pre-defined command line option profiles to simplify things. They are used
# as defaults and can be overwritten from the command line (see get_args).
SUPPORTED_PROFILES = {
    "defaults": {
        "dataset": "imagenet",
        "backend": "tensorflow",
        "cache": 0,
        "max-batchsize": 32,
    },

    # resnet
    "resnet50-tf": {
        "inputs": "input_tensor:0",
        "outputs": "ArgMax:0",
        "dataset": "imagenet",
        "backend": "tensorflow",
        "model-name": "resnet50",
    },
    "resnet50-onnxruntime": {
        "dataset": "imagenet",
        "outputs": "ArgMax:0",
        "backend": "onnxruntime",
        "model-name": "resnet50",
    },

    # mobilenet
    "mobilenet-tf": {
        "inputs": "input:0",
        "outputs": "MobilenetV1/Predictions/Reshape_1:0",
        "dataset": "imagenet_mobilenet",
        "backend": "tensorflow",
        "model-name": "mobilenet",
    },
    "mobilenet-onnxruntime": {
        "dataset": "imagenet_mobilenet",
        "outputs": "MobilenetV1/Predictions/Reshape_1:0",
        "backend": "onnxruntime",
        "model-name": "mobilenet",
    },

    # ssd-mobilenet
    "ssd-mobilenet-tf": {
        "inputs": "image_tensor:0",
        "outputs": "num_detections:0,detection_boxes:0,detection_scores:0,detection_classes:0",
        "dataset": "coco-300",
        "backend": "tensorflow",
        "model-name": "ssd-mobilenet",
    },
    "ssd-mobilenet-pytorch": {
        "inputs": "image",
        "outputs": "bboxes,labels,scores",
        "dataset": "coco-300-pt",
        "backend": "pytorch-native",
        "model-name": "ssd-mobilenet",
    },
    "ssd-mobilenet-onnxruntime": {
        "dataset": "coco-300",
        "outputs": "num_detections:0,detection_boxes:0,detection_scores:0,detection_classes:0",
        "backend": "onnxruntime",
        "data-format": "NHWC",
        "model-name": "ssd-mobilenet",
    },

    # ssd-resnet34
    "ssd-resnet34-tf": {
        "inputs": "image:0",
        "outputs": "detection_bboxes:0,detection_classes:0,detection_scores:0",
        "dataset": "coco-1200-tf",
        "backend": "tensorflow",
        "data-format": "NCHW",
        "model-name": "ssd-resnet34",
    },
    "ssd-resnet34-pytorch": {
        "inputs": "image",
        "outputs": "bboxes,labels,scores",
        "dataset": "coco-1200-pt",
        "backend": "pytorch-native",
        "model-name": "ssd-resnet34",
    },
    "ssd-resnet34-onnxruntime": {
        "dataset": "coco-1200-onnx",
        "inputs": "image",
        "outputs": "bboxes,labels,scores",
        "backend": "onnxruntime",
        "data-format": "NCHW",
        "max-batchsize": 1,
        "model-name": "ssd-resnet34",
    },
    "ssd-resnet34-onnxruntime-tf": {
        "dataset": "coco-1200-tf",
        "inputs": "image:0",
        "outputs": "detection_bboxes:0,detection_classes:0,detection_scores:0",
        "backend": "onnxruntime",
        "data-format": "NHWC",
        "model-name": "ssd-resnet34",
    },
}

# Map the --scenario command-line string to the loadgen scenario enum.
SCENARIO_MAP = {
    "SingleStream": lg.TestScenario.SingleStream,
    "MultiStream": lg.TestScenario.MultiStream,
    "Server": lg.TestScenario.Server,
    "Offline": lg.TestScenario.Offline,
}

# Filled in by the loadgen process_latencies callback; read by main() after the run.
last_timeing = []
def get_args():
    """Parse commandline.

    Profile values act as layered defaults: start from the "defaults"
    profile, overlay the selected --profile, then apply the result to any
    argument the user did not pass explicitly.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--dataset", choices=SUPPORTED_DATASETS.keys(), help="dataset")
    parser.add_argument("--dataset-path", required=True, help="path to the dataset")
    parser.add_argument("--dataset-list", help="path to the dataset list")
    parser.add_argument("--data-format", choices=["NCHW", "NHWC"], help="data format")
    parser.add_argument("--profile", choices=SUPPORTED_PROFILES.keys(), help="standard profiles")
    parser.add_argument("--scenario", default="SingleStream",
                        help="mlperf benchmark scenario, one of " + str(list(SCENARIO_MAP.keys())))
    parser.add_argument("--max-batchsize", type=int, help="max batch size in a single inference")
    parser.add_argument("--model", required=True, help="model file")
    parser.add_argument("--output", default="output", help="test results")
    parser.add_argument("--inputs", help="model inputs")
    parser.add_argument("--outputs", help="model outputs")
    parser.add_argument("--backend", help="runtime to use")
    parser.add_argument("--model-name", help="name of the mlperf model, ie. resnet50")
    parser.add_argument("--threads", default=os.cpu_count(), type=int, help="threads")
    parser.add_argument("--qps", type=int, help="target qps")
    parser.add_argument("--cache", type=int, default=0, help="use cache")
    parser.add_argument("--accuracy", action="store_true", help="enable accuracy pass")
    parser.add_argument("--find-peak-performance", action="store_true", help="enable finding peak performance pass")
    parser.add_argument("--debug", action="store_true", help="debug, turn traces on")

    # file to use mlperf rules compliant parameters
    parser.add_argument("--mlperf_conf", default="../../mlperf.conf", help="mlperf rules config")
    # file for user LoadGen settings such as target QPS
    parser.add_argument("--user_conf", default="user.conf", help="user config for user LoadGen settings such as target QPS")

    # below will override mlperf rules compliant settings - don't use for official submission
    parser.add_argument("--time", type=int, help="time to scan in seconds")
    parser.add_argument("--count", type=int, help="dataset items to use")
    parser.add_argument("--max-latency", type=float, help="mlperf max latency in pct tile")
    parser.add_argument("--samples-per-query", type=int, help="mlperf multi-stream sample per query")
    args = parser.parse_args()

    # don't use defaults in argparser. Instead we default to a dict, override that with a profile
    # and take this as default unless command line gives an explicit value.
    # BUGFIX: copy the defaults profile — the original code updated the
    # module-level SUPPORTED_PROFILES["defaults"] dict in place, so profile
    # values leaked into it for any later lookup.
    defaults = dict(SUPPORTED_PROFILES["defaults"])

    if args.profile:
        defaults.update(SUPPORTED_PROFILES[args.profile])
    for k, v in defaults.items():
        kc = k.replace("-", "_")
        if getattr(args, kc) is None:
            setattr(args, kc, v)
    if args.inputs:
        args.inputs = args.inputs.split(",")
    if args.outputs:
        args.outputs = args.outputs.split(",")

    if args.scenario not in SCENARIO_MAP:
        # Fixed typo in the original error message ("scanarios").
        parser.error("valid scenarios:" + str(list(SCENARIO_MAP.keys())))
    return args
def get_backend(backend):
    """Instantiate the inference backend named by ``backend``.

    Imports are deferred into each branch so only the selected backend's
    dependencies need to be installed.  Raises ValueError for an
    unrecognized name.
    """
    if backend == "tensorflow":
        from backend_tf import BackendTensorflow
        return BackendTensorflow()
    if backend == "onnxruntime":
        from backend_onnxruntime import BackendOnnxruntime
        return BackendOnnxruntime()
    if backend == "tvm":
        # The Octomizer variant is selected via an environment variable.
        if os.environ.get('CK_USE_OCTOMIZER', '').lower() != 'yes':
            from backend_tvm import BackendTVM
            return BackendTVM()
        from backend_octomizer import BackendOctomizer
        return BackendOctomizer()
    if backend == "null":
        from backend_null import BackendNull
        return BackendNull()
    if backend == "pytorch":
        from backend_pytorch import BackendPytorch
        return BackendPytorch()
    if backend == "pytorch-native":
        from backend_pytorch_native import BackendPytorchNative
        return BackendPytorchNative()
    if backend == "tflite":
        from backend_tflite import BackendTflite
        return BackendTflite()
    raise ValueError("unknown backend: " + backend)
class Item:
    """A unit of work queued for processing by the thread pool: one batch."""

    def __init__(self, query_id, content_id, img, label=None):
        # Stamp the enqueue time first so latency measurement starts here.
        self.start = time.time()
        self.query_id = query_id      # loadgen query ids for this batch
        self.content_id = content_id  # dataset sample indices
        self.img = img                # preprocessed input data
        self.label = label            # ground-truth labels (accuracy mode only)
class RunnerBase:
    """Synchronous query runner: executes each batch inline on the caller's
    thread.  Used for the SingleStream scenario (see runner_map in main);
    QueueRunner subclasses it to add a worker-thread pool for the
    throughput-oriented scenarios.
    """

    def __init__(self, model, ds, threads, post_proc=None, max_batchsize=128):
        self.take_accuracy = False
        self.ds = ds                   # dataset wrapper (provides get_samples / get_item_loc)
        self.model = model             # backend with .predict and .inputs
        self.post_process = post_proc  # callable post-processor with .start/.add_results
        self.threads = threads
        self.take_accuracy = False
        self.max_batchsize = max_batchsize
        self.result_timing = []        # per-batch latencies in seconds

    def handle_tasks(self, tasks_queue):
        # No worker threads in the base runner; QueueRunner overrides this.
        pass

    def start_run(self, result_dict, take_accuracy):
        """Reset per-run state before loadgen starts issuing queries."""
        self.result_dict = result_dict
        self.result_timing = []
        self.take_accuracy = take_accuracy
        self.post_process.start()

    def run_one_item(self, qitem):
        # run the prediction
        processed_results = []
        try:
            results = self.model.predict({self.model.inputs[0]: qitem.img})
            processed_results = self.post_process(results, qitem.content_id, qitem.label, self.result_dict)
            if self.take_accuracy:
                self.post_process.add_results(processed_results)
            self.result_timing.append(time.time() - qitem.start)
        except Exception as ex:  # pylint: disable=broad-except
            src = [self.ds.get_item_loc(i) for i in qitem.content_id]
            log.error("thread: failed on contentid=%s, %s", src, ex)
            # since post_process will not run, fake empty responses
            processed_results = [[]] * len(qitem.query_id)
        finally:
            # Always report back to loadgen, even after a failure; keep the
            # array objects alive (response_array_refs) until
            # QuerySamplesComplete has consumed their buffers.
            response_array_refs = []
            response = []
            for idx, query_id in enumerate(qitem.query_id):
                response_array = array.array("B", np.array(processed_results[idx], np.float32).tobytes())
                response_array_refs.append(response_array)
                bi = response_array.buffer_info()
                response.append(lg.QuerySampleResponse(query_id, bi[0], bi[1]))
            lg.QuerySamplesComplete(response)

    def enqueue(self, query_samples):
        """Split incoming loadgen samples into batches and run them inline."""
        idx = [q.index for q in query_samples]
        query_id = [q.id for q in query_samples]
        if len(query_samples) < self.max_batchsize:
            data, label = self.ds.get_samples(idx)
            self.run_one_item(Item(query_id, idx, data, label))
        else:
            bs = self.max_batchsize
            for i in range(0, len(idx), bs):
                data, label = self.ds.get_samples(idx[i:i+bs])
                self.run_one_item(Item(query_id[i:i+bs], idx[i:i+bs], data, label))

    def finish(self):
        # Nothing to clean up for the synchronous runner.
        pass
class QueueRunner(RunnerBase):
    """Threaded runner: hands batches to a pool of worker threads via a queue.
    Used for the MultiStream/Server/Offline scenarios (see runner_map in main).
    """

    def __init__(self, model, ds, threads, post_proc=None, max_batchsize=128):
        super().__init__(model, ds, threads, post_proc, max_batchsize)
        # Bounded queue applies back-pressure when workers fall behind.
        self.tasks = Queue(maxsize=threads * 4)
        self.workers = []
        self.result_dict = {}

        for _ in range(self.threads):
            worker = threading.Thread(target=self.handle_tasks, args=(self.tasks,))
            worker.daemon = True
            self.workers.append(worker)
            worker.start()

    def handle_tasks(self, tasks_queue):
        """Worker thread."""
        while True:
            qitem = tasks_queue.get()
            if qitem is None:
                # None in the queue indicates the parent wants us to exit
                tasks_queue.task_done()
                break
            self.run_one_item(qitem)
            tasks_queue.task_done()

    def enqueue(self, query_samples):
        """Split samples into batches and hand them to the worker pool."""
        idx = [q.index for q in query_samples]
        query_id = [q.id for q in query_samples]
        if len(query_samples) < self.max_batchsize:
            data, label = self.ds.get_samples(idx)
            self.tasks.put(Item(query_id, idx, data, label))
        else:
            bs = self.max_batchsize
            for i in range(0, len(idx), bs):
                ie = i + bs
                data, label = self.ds.get_samples(idx[i:ie])
                self.tasks.put(Item(query_id[i:ie], idx[i:ie], data, label))

    def finish(self):
        # exit all threads: one None sentinel per worker, then join them.
        for _ in self.workers:
            self.tasks.put(None)
        for worker in self.workers:
            worker.join()
def add_results(final_results, name, result_dict, result_list, took, show_accuracy=False):
    """Record summary statistics for one run and echo them to stdout.

    Args:
        final_results: dict collecting per-run summaries, keyed by name.
        name: label for this run (typically the scenario).
        result_dict: accuracy bookkeeping with "good"/"total" (optional "mAP").
        result_list: per-query latencies in seconds.
        took: wall-clock duration of the run in seconds.
        show_accuracy: include accuracy percentage in the summary.
    """
    pct_points = [50., 80., 90., 95., 99., 99.9]
    pct_values = np.percentile(result_list, pct_points).tolist()
    pct_str = ",".join(["{}:{:.4f}".format(p, b) for p, b in zip(pct_points, pct_values)])

    # When accuracy bookkeeping never ran, treat every query as counted.
    if result_dict["total"] == 0:
        result_dict["total"] = len(result_list)

    # this is what we record for each run
    summary = {
        "took": took,
        "mean": np.mean(result_list),
        "percentiles": {str(k): v for k, v in zip(pct_points, pct_values)},
        "qps": len(result_list) / took,
        "count": len(result_list),
        "good_items": result_dict["good"],
        "total_items": result_dict["total"],
    }

    acc_suffix = ""
    if show_accuracy:
        summary["accuracy"] = 100. * result_dict["good"] / result_dict["total"]
        acc_suffix = ", acc={:.3f}%".format(summary["accuracy"])
        if "mAP" in result_dict:
            summary["mAP"] = 100. * result_dict["mAP"]
            acc_suffix += ", mAP={:.3f}%".format(summary["mAP"])

    # add the result to the result dict
    final_results[name] = summary

    # to stdout
    print("{} qps={:.2f}, mean={:.4f}, time={:.3f}{}, queries={}, tiles={}".format(
        name, summary["qps"], summary["mean"], took, acc_suffix,
        len(result_list), pct_str))
def main():
global last_timeing
args = get_args()
log.info(args)
# find backend
backend = get_backend(args.backend)
# If TVM add max_batchsize
if args.backend == "tvm":
backend.max_batchsize = args.max_batchsize
# override image format if given
image_format = args.data_format if args.data_format else backend.image_format()
# --count applies to accuracy mode only and can be used to limit the number of images
# for testing. For perf model we always limit count to 200.
count_override = False
count = args.count
if count:
count_override = True
# dataset to use
wanted_dataset, pre_proc, post_proc, kwargs = SUPPORTED_DATASETS[args.dataset]
ds = wanted_dataset(data_path=args.dataset_path,
image_list=args.dataset_list,
name=args.dataset,
image_format=image_format,
pre_process=pre_proc,
use_cache=args.cache,
count=count, **kwargs)
# load model to backend
model = backend.load(args.model,inputs=args.inputs, outputs=args.outputs)
final_results = {
"runtime": model.name(),
"version": model.version(),
"time": int(time.time()),
"args": vars(args),
"cmdline": str(args),
}
mlperf_conf = os.path.abspath(args.mlperf_conf)
if not os.path.exists(mlperf_conf):
log.error("{} not found".format(mlperf_conf))
sys.exit(1)
user_conf = os.path.abspath(args.user_conf)
if not os.path.exists(user_conf):
log.error("{} not found".format(user_conf))
sys.exit(1)
if args.output:
output_dir = os.path.abspath(args.output)
os.makedirs(output_dir, exist_ok=True)
os.chdir(output_dir)
#
# make one pass over the dataset to validate accuracy
#
count = ds.get_item_count()
# warmup
ds.load_query_samples([0])
for _ in range(5):
img, _ = ds.get_samples([0])
_ = backend.predict({backend.inputs[0]: img})
ds.unload_query_samples(None)
scenario = SCENARIO_MAP[args.scenario]
runner_map = {
lg.TestScenario.SingleStream: RunnerBase,
lg.TestScenario.MultiStream: QueueRunner,
lg.TestScenario.Server: QueueRunner,
lg.TestScenario.Offline: QueueRunner
}
runner = runner_map[scenario](model, ds, args.threads, post_proc=post_proc, max_batchsize=args.max_batchsize)
def issue_queries(query_samples):
runner.enqueue(query_samples)
def flush_queries():
pass
def process_latencies(latencies_ns):
# called by loadgen to show us the recorded latencies
global last_timeing
last_timeing = [t / NANO_SEC for t in latencies_ns]
log_output_settings = lg.LogOutputSettings()
log_output_settings.outdir = output_dir
log_output_settings.copy_summary_to_stdout = False
log_settings = lg.LogSettings()
log_settings.enable_trace = args.debug
log_settings.log_output = log_output_settings
settings = lg.TestSettings()
settings.FromConfig(mlperf_conf, args.model_name, args.scenario)
settings.FromConfig(user_conf, args.model_name, args.scenario)
settings.scenario = scenario
settings.mode = lg.TestMode.PerformanceOnly
if args.accuracy:
settings.mode = lg.TestMode.AccuracyOnly
if args.find_peak_performance:
settings.mode = lg.TestMode.FindPeakPerformance
if args.time:
# override the time we want to run
settings.min_duration_ms = args.time * MILLI_SEC
settings.max_duration_ms = args.time * MILLI_SEC
if args.qps:
qps = float(args.qps)
settings.server_target_qps = qps
settings.offline_expected_qps = qps
if count_override:
settings.min_query_count = count
settings.max_query_count = count
if args.samples_per_query:
settings.multi_stream_samples_per_query = args.samples_per_query
if args.max_latency:
settings.server_target_latency_ns = int(args.max_latency * NANO_SEC)
settings.multi_stream_target_latency_ns = int(args.max_latency * NANO_SEC)
sut = lg.ConstructSUT(issue_queries, flush_queries, process_latencies)
qsl = lg.ConstructQSL(count, min(count, 500), ds.load_query_samples, ds.unload_query_samples)
log.info("starting {}".format(scenario))
result_dict = {"good": 0, "total": 0, "scenario": str(scenario)}
runner.start_run(result_dict, args.accuracy)
lg.StartTestWithLogSettings(sut, qsl, settings, log_settings)
if not last_timeing:
last_timeing = runner.result_timing
if args.accuracy:
post_proc.finalize(result_dict, ds, output_dir=args.output)
add_results(final_results, "{}".format(scenario),
result_dict, last_timeing, time.time() - ds.last_loaded, args.accuracy)
runner.finish()
lg.DestroyQSL(qsl)
lg.DestroySUT(sut)
#
# write final results
#
if args.output:
with open("results.json", "w") as f:
json.dump(final_results, f, sort_keys=True, indent=4)
if __name__ == "__main__":
main()
|
hmk.py | from pepper.framework import *
from pepper.responder import *
from pepper.knowledge import sentences, animations
from pepper.language import Utterance
from pepper.language.generation.reply import reply_to_question
from pepper import config
from threading import Thread
from random import choice
from time import time
import urllib
import re
# --- Event configuration --------------------------------------------------
# Names used when addressing / referring to the guest of honour.
SPEAKER_NAME_THIRD = "Her Majesty"   # third-person reference in speech
SPEAKER_NAME = "Your Majesty"        # direct address
SPEAKER_FACE = "Her Majesty"         # face-recognition label that triggers the start cue
DEFAULT_SPEAKER = "Human"            # fallback chat partner before the guest arrives
LOCATION_NAME = "Royal Netherlands Academy of Arts and Sciences"
# NAOqi phonetic markup so the TTS pronounces the university name correctly.
VU_NAME_PHONETIC = r"\\toi=lhp\\ fraiE universitai_t Amster_dam \\toi=orth\\"

# Images shown on the robot's tablet during the introduction speech.
IMAGE_VU = "https://www.vu.nl/nl/Images/VUlogo_NL_Wit_HR_RGB_tcm289-201376.png"
IMAGE_SELENE = "http://wordpress.let.vupr.nl/understandinglanguagebymachines/files/2019/06/7982_02_34_Selene_Orange_Unsharp_Robot_90kb.jpg"
IMAGE_LENKA = "http://wordpress.let.vupr.nl/understandinglanguagebymachines/files/2019/06/8249_Lenka_Word_Object_Reference_106kb.jpg"
IMAGE_BRAM = "http://makerobotstalk.nl/files/2018/12/41500612_1859783920753781_2612366973928996864_n.jpg"
IMAGE_PIEK = "http://www.cltl.nl/files/2019/10/8025_Classroom_Piek.jpg"

# Scripted prompts for the Brexit question/answer sections.
BREXIT_QUESTION = "What do you think are the implications of the Brexit for scientists?"
BREXIT_ANSWER = "Do you have a question for me?"

# Replies with fewer tokens than this are treated as "not understood".
MIN_ANSWER_LENGTH = 4

# Override Speech Speed for added clarity!
config.NAOQI_SPEECH_SPEED = 80

# Responders consulted when picking a reply to an utterance.
RESPONDERS = [
    BrainResponder(),
    VisionResponder(), PreviousUtteranceResponder(), IdentityResponder(), LocationResponder(), TimeResponder(),
    QnAResponder(), BrexitResponder(),
    GreetingResponder(), GoodbyeResponder(), ThanksResponder(), AffirmationResponder(), NegationResponder(),
    UnknownResponder()
]
class HMKApp(AbstractApplication, StatisticsComponent,
             BrainComponent, ContextComponent,
             ObjectDetectionComponent, FaceRecognitionComponent,
             SpeechRecognitionComponent, TextToSpeechComponent):
    """Pepper application for the royal visit, mixing in the perception,
    brain/context and speech components from the pepper framework."""

    # Tablet page that renders the given (URL-encoded) text as subtitles.
    SUBTITLES_URL = "https://bramkraai.github.io/subtitle?text={}"

    def __init__(self, backend):
        super(HMKApp, self).__init__(backend)
        self.tablet.show(IMAGE_VU)  # show the VU logo while idle

    def say(self, text, animation=None, block=True):
        # Wrap the framework say() and add a short pause so consecutive
        # sentences don't run into each other.
        # NOTE(review): `sleep` is not imported explicitly in this file --
        # presumably it comes in via `from pepper.framework import *`; confirm.
        super(HMKApp, self).say(text, animation, block)
        sleep(1.5)

    def show_text(self, text):
        # Strip TTS markup (e.g. \\vct=50\\) and non-ASCII characters, then
        # URL-encode the remainder for the subtitle page.  `urllib.quote`
        # indicates this codebase targets Python 2.
        text_websafe = urllib.quote(''.join([i for i in re.sub(r'\\\\\S+\\\\', "", text) if ord(i) < 128]))
        self.backend.tablet.show(self.SUBTITLES_URL.format(text_websafe))
class WaitForStartCueIntention(AbstractIntention, HMKApp):
    """Idle intention: wait until the guest of honour is seen (face cue) or
    announced (text cue), then hand over to :class:`IntroductionIntention`."""

    # Phrases (spoken by anyone) that signal the robot may start.
    START_CUE_TEXT = [
        "she's here",
        "she is here",
        "the queen is here",
        "you may begin",
        "you may start",
        "you can begin",
        "you can start"
    ]

    def __init__(self, application):
        super(WaitForStartCueIntention, self).__init__(application)

        # Initialize Response Picker
        self.response_picker = ResponsePicker(self, RESPONDERS)

        # Start Chat with Default Speaker
        self.context.start_chat(DEFAULT_SPEAKER)

    def on_face(self, faces):
        # If Start Face Cue is observed by Leolani -> Start Main Intention
        if any([face.name == SPEAKER_FACE for face in faces]):
            self.say("Ah, I can see {}! Let me begin!".format(SPEAKER_NAME_THIRD))
            IntroductionIntention(self.application)

    def on_chat_turn(self, utterance):
        # If Start Text Cue is observed by Leolani -> Respond Happy & Start Main Intention
        transcript = utterance.transcript.lower()
        if any([cue in transcript for cue in self.START_CUE_TEXT]):
            self.say("Oh, {}!".format(choice(sentences.HAPPY)), animation=animations.HAPPY)
            IntroductionIntention(self.application)
            return
class IntroductionIntention(AbstractIntention, HMKApp):
    """Scripted introduction speech with tablet images, followed by the
    Brexit section; ends by switching to :class:`BrexitQuestionIntention`."""

    def __init__(self, application):
        super(IntroductionIntention, self).__init__(application)

        # Start Chat with Main Speaker
        self.context.start_chat(SPEAKER_NAME)

        # Start Speech on a thread so framework event handlers stay responsive.
        Thread(target=self.speech).start()

    def speech(self):
        """Deliver the full scripted introduction."""
        # 1.1 - Welcome
        self.say("Hello {}. Welcome to the {}. ...".format(SPEAKER_NAME, LOCATION_NAME), animations.BOW)
        self.say("We are honoured to have you here!")

        # 1.2 - Introduction
        self.say(r"I am Leolani... My name means \\vct=50\\ Voice of an Angel \\vct=100\\. in Hawaiian.", animations.I)
        self.say("I am built by students from the VU that come from all over the world. ", animations.ONCE_UPON_A_TIME)
        self.tablet.show(IMAGE_SELENE)
        self.say("Selene, from Mexico, designed my brain and thoughts!", animations.TABLET)
        self.tablet.show(IMAGE_LENKA)
        self.say("Lenka, from Serbia, taught me to understand language", animations.TABLET)
        self.tablet.show(IMAGE_BRAM)
        self.say("Bram, from the Netherlands, programmed me to perceive the world around me.", animations.TABLET)
        self.tablet.show(IMAGE_PIEK)
        self.say(
            "Peek, from the Netherlands, and I, from France and Japan, work on identity, reference and perspective in language!",
            animations.TABLET)
        self.tablet.show(IMAGE_VU)
        sleep(2.5)

        # 2.1 - Brexit Introduction
        self.say("I believe you are here to talk about the implications of the Brexit for scientists", animations.THINK)
        self.say("I am just a robot, but I have my little robot thoughts...", animations.SHY)
        self.say("The students at the VU that created me, come from all over the world... "
                 "International exchange and collaboration is key to: innovation, success, "
                 "and the spread of knowledge and democratic values... "
                 "Brexit is not helping us with this... "
                 "Europe is the beginning, the Universe is our goal... ...")

        # 2.2 - Brexit Knowledge
        self.say("As a robot, I have learned a few things about the Brexit myself!", animations.THINK)

        # 2.2.1 - Brexit in the News
        self.say("I have been reading the news lately!")
        self.say(choice(sentences.BREXIT_NEWS))
        self.say("{}!".format(choice(sentences.HAPPY)), animations.HAPPY)

        # 2.2.2 - Brexit in Brain
        self.say("I also have been talking about the Brexit with my friends!")
        self.brexit_in_brain()
        self.say("I learn a lot from my friends!")

        sleep(2.5)

        # Move to Brexit QnA
        BrexitQuestionIntention(self.application)

    def brexit_in_brain(self):
        # Query the robot's knowledge brain with several phrasings of the
        # same question and speak whatever replies come back.
        self.answer_brain_query("what is the brexit about")
        self.answer_brain_query("what is the brexit")
        self.answer_brain_query("what is the brexit in")

    def answer_brain_query(self, query):
        # Best-effort: a failing brain query must never crash the speech.
        try:
            question = self.context.chat.add_utterance([UtteranceHypothesis(query, 1)], False)
            question.analyze()

            brain_response = self.brain.query_brain(question)
            reply = reply_to_question(brain_response)

            if reply: self.say(reply, block=False)
        except Exception as e:
            self.log.error(e)
# 2.3 - Brexit Question
class BrexitQuestionIntention(AbstractIntention, HMKApp):
    """Ask the scripted Brexit question and wait for a satisfactory answer;
    repeats the question once if the reply is too short / not understood."""

    def __init__(self, application):
        super(BrexitQuestionIntention, self).__init__(application)

        # Initialize Response Picker
        self.response_picker = ResponsePicker(self, RESPONDERS)
        self._retried = False  # only re-ask the question once

        # Start Chat with Speaker if not already running
        if not self.context.chatting:
            self.context.start_chat(SPEAKER_NAME)

        # Ask Brexit Question
        self.say("Oh {}, I think I have a question for you!".format(SPEAKER_NAME), animations.EXPLAIN)
        self.show_text(BREXIT_QUESTION)
        self.say(BREXIT_QUESTION)

    def on_chat_turn(self, utterance):
        responder = self.response_picker.respond(utterance)

        # Deflect counter-questions from the speaker.
        if self.context.chat.last_utterance.transcript.endswith("?"):
            self.say("Oops, nevermind me asking these questions. I'm just a very curious robot!", animations.ASHAMED)

        # If Pepper does not understand?
        if isinstance(responder, UnknownResponder) and len(utterance.tokens) < MIN_ANSWER_LENGTH and not self._retried:
            # -> Repeat Question
            self._retried = True
            self.say("But, {}".format(BREXIT_QUESTION))
        else:  # If a decent response can be formed
            # -> Thank Speaker and Move on to BrexitAnswerIntention
            self.say("Thank you for your answer!", animations.HAPPY)
            self.tablet.show(IMAGE_VU)
            BrexitAnswerIntention(self.application)
# 2.4 - Brexit Answer
class BrexitAnswerIntention(AbstractIntention, HMKApp):
    """Invite the guest to ask the robot a question; repeats the invitation
    once if the reply is too short / not understood."""

    def __init__(self, application):
        super(BrexitAnswerIntention, self).__init__(application)

        # Initialize Response Picker
        self.response_picker = ResponsePicker(self, RESPONDERS)
        self._retried = False  # only repeat the invitation once

        # Start Chat with Speaker if not already running
        if not self.context.chatting:
            self.context.start_chat(SPEAKER_NAME)

        self.show_text(BREXIT_ANSWER)
        self.say(BREXIT_ANSWER)

    def on_chat_turn(self, utterance):
        responder = self.response_picker.respond(utterance)

        # Deflect counter-questions from the speaker.
        if self.context.chat.last_utterance.transcript.endswith("?"):
            self.say("Oops, nevermind me asking these questions. I'm just a very curious robot!", animations.ASHAMED)

        # If Pepper does not understand?
        if isinstance(responder, UnknownResponder) and len(utterance.tokens) < MIN_ANSWER_LENGTH and not self._retried:
            # -> Repeat Question
            self._retried = True
            self.say("But, {}".format(BREXIT_ANSWER))
        else:  # If a decent response can be formed
            # -> Thank Speaker and Move on to OutroIntention
            self.say("Thank you!", animations.HAPPY)
            self.tablet.show(IMAGE_VU)
            OutroIntention(self.application)
class OutroIntention(AbstractIntention, HMKApp):
    """Closing speech: wish the audience well, say goodbye, then drop into
    the general-purpose :class:`DefaultIntention`."""

    def __init__(self, application):
        super(OutroIntention, self).__init__(application)

        # Initialize Response Picker
        self.response_picker = ResponsePicker(self, RESPONDERS)

        # Start Chat with Speaker if not already running
        if not self.context.chatting:
            self.context.start_chat(SPEAKER_NAME)

        Thread(target=self.speech).start()

    def speech(self):
        # 5.1 - Wish all a fruitful discussion
        # Count the currently perceived "person" objects for the send-off.
        self.say("I see that there are {0} people here... I wish all {0} of you a fruitful discussion!".format(
            len([obj for obj in self.context.objects if obj.name == "person"])), animations.HELLO)

        # 5.2 - Goodbye
        self.say("It's a pity the King is not here. Please say hello to him and your daughters for me.",
                 animations.FRIENDLY)
        self.say("It was nice having talked to you, {}! ... ...".format(SPEAKER_NAME), animations.BOW)
        self.say("If you have any questions, you can always ask me later!")
        sleep(4)
        self.say("I believe it is now time for a group picture! I love pictures!", animations.HAPPY)

        # Switch to Default Intention
        DefaultIntention(self.application)
class DefaultIntention(AbstractIntention, HMKApp):
    """Free-form chat mode: greet people who appear, respond via the
    responder pipeline, and temporarily ignore people who said goodbye."""

    # Seconds to keep ignoring a person after they said goodbye.
    IGNORE_TIMEOUT = 60

    def __init__(self, application):
        super(DefaultIntention, self).__init__(application)

        self._ignored_people = {}  # name -> time() at which they said goodbye
        self.response_picker = ResponsePicker(self, RESPONDERS)

    def on_chat_enter(self, name):
        # Drop expired entries from the ignore list first.
        self._ignored_people = {n: t for n, t in self._ignored_people.items() if time() - t < self.IGNORE_TIMEOUT}

        if name not in self._ignored_people:
            self.context.start_chat(name)
            self.say("{}, {}".format(choice(sentences.GREETING), name))

    def on_chat_exit(self):
        self.say("{}, {}".format(choice(sentences.GOODBYE), self.context.chat.speaker))
        self.context.stop_chat()

    def on_chat_turn(self, utterance):
        responder = self.response_picker.respond(utterance)

        # A goodbye ends the chat and mutes greetings for IGNORE_TIMEOUT.
        if isinstance(responder, GoodbyeResponder):
            self._ignored_people[utterance.chat.speaker] = time()
            self.context.stop_chat()
if __name__ == '__main__':
    # Initialize Application (backend selected via pepper config)
    application = HMKApp(config.get_backend())

    # Initialize Intention (robot idles until the start cue)
    WaitForStartCueIntention(application)

    # Run Application (blocks, dispatching framework events)
    application.run()
|
tasks.py | import os
import traceback
from datetime import datetime
from functools import partial
from threading import Thread
from tkinter import messagebox
import requests
from league_connection import LeagueConnection
from league_connection.exceptions import LeagueConnectionError
from client.exceptions import AccountBannedException
from client.exceptions import AuthenticationFailureException
from client.exceptions import ClientException
from client.exceptions import ConsentRequiredException
from client.exceptions import NameChangeRequiredException
from client.exceptions import RegionMissingException
from client.export import add_info_to_account
from client.export import export_account
from client.login import login
from client.session import wait_session
from client.username import check_username
from process.league import kill_league_client
from process.league import kill_riot_client
from process.league import open_league_client
from process.league import open_riot_client
from process.league import remove_lockfile
from .entries import get_config_from_entries
from .logger import logger
from .options import get_options
# Exceptions that mean the account itself is unusable -- on these the account
# is skipped (and still exported) instead of being retried.
SKIP_ACCOUNT_EXCEPTIONS = (
    AccountBannedException,
    AuthenticationFailureException,
    ConsentRequiredException,
    NameChangeRequiredException,
    RegionMissingException,
)

# Timestamp fixed once at import time so every account processed in a single
# run is exported to the same file.
now = datetime.now().strftime("%Y-%b-%d %H-%M-%S").lower()
output_file = f'output_{now}.txt'
def execute(connection, selected, options_mapped):
    """Run each selected task, in order, against the given client connection.

    :param connection: LeagueConnection passed as first argument to each task.
    :param selected: iterable of option keys chosen by the user.
    :param options_mapped: maps option key -> (_, display_name, (func, args, kwargs)).
    """
    for key in selected:
        _, label, task_spec = options_mapped[key]
        logger.info(f'Current task: {label}')
        task, positional, named = task_spec
        # The connection is always the task's leading argument.
        task(connection, *positional, **named)
def execute_tasks_single_account(username, password, selected, options_mapped):
    """Log one account into the league client, run the selected tasks and
    export the account info.

    Returns True on success, False when the account was skipped because of
    one of SKIP_ACCOUNT_EXCEPTIONS.  Transient network / client errors are
    retried indefinitely.
    """
    account = {'username': username, 'password': password}
    # The lockfiles hold the local API credentials of the two clients.
    riot_lockfile = os.path.expanduser(os.environ['RIOT_CLIENT_LOCKFILE'])
    lockfile_dir = os.path.dirname(os.environ['LEAGUE_CLIENT'])
    league_lockfile = os.path.join(lockfile_dir, 'lockfile')
    while True:
        try:
            open_riot_client(os.environ['RIOT_CLIENT_SERVICES'])
            logger.info('Getting riot client connection...')
            riot_connection = LeagueConnection(riot_lockfile)
            login(riot_connection, username, password)
            open_league_client(os.environ['LEAGUE_CLIENT'])
            logger.info('Getting league client connection...')
            league_connection = LeagueConnection(league_lockfile)
            # Kill the client UI; only the background API is needed.
            league_connection.post('/riotclient/kill-ux')
            wait_session(league_connection)
            check_username(league_connection, username)
            execute(league_connection, selected, options_mapped)
            add_info_to_account(league_connection, account)
            export_account(account, output_file)
            logger.info('Logging out...')
            kill_league_client()
            kill_riot_client()
            remove_lockfile(league_lockfile)
            return True
        except requests.RequestException as exp:
            # Transient network error: retry without restarting the clients.
            logger.error(f'{exp.__class__.__name__}. Retrying...')
        except SKIP_ACCOUNT_EXCEPTIONS as exp:
            # Account is unusable (banned, bad credentials, ...): clean up,
            # still export what we know, and move on to the next account.
            logger.error(f'{exp.__class__.__name__}. Skipping account...')
            kill_league_client()
            kill_riot_client()
            remove_lockfile(league_lockfile)
            export_account(account, output_file)
            return False
        except (ClientException, LeagueConnectionError) as exp:
            # Client got into a bad state: restart everything and retry.
            logger.error(f'{exp.__class__.__name__}. Retrying...')
            kill_league_client()
            kill_riot_client()
            remove_lockfile(league_lockfile)
def execute_tasks(accounts, variables):
    """Run the selected tasks for every account, updating the tkinter UI.

    `variables` maps widget/state names to tkinter objects; the buttons and
    input-path entry are disabled for the duration of the run and always
    re-enabled afterwards.
    """
    try:
        variables['browse_button']['state'] = 'disabled'
        variables['start_button']['state'] = 'disabled'
        variables['input_path_entry']['state'] = 'disabled'
        selected = []
        config = get_config_from_entries(variables)
        if config is None:
            # Invalid entries; get_config_from_entries handles reporting.
            return
        _, options_internal_names, options_mapped = get_options(config)
        # Collect the tasks whose checkboxes are ticked.
        for option in options_internal_names:
            if variables[f'checkbox_{option}'].get():
                selected.append(option)
        total = len(accounts)
        logger.info(f'Found {total} account(s).')
        for i, account in enumerate(accounts):
            progress = i * 100 // total
            variables['status'].set(f'{progress}% completed ({i}/{total}).')
            username, password = account
            logger.info(f'Working on account {username}...')
            execute_tasks_single_account(username, password, selected, options_mapped)
        variables['status'].set(f'100% completed ({total}/{total}).')
        logger.info('Done.')
    except Exception:
        # Surface unexpected errors to the user instead of dying silently
        # (this function runs on a background thread).
        messagebox.showerror('Contact the developer', traceback.format_exc())
        variables['status'].set('')
    finally:
        # Re-enable the UI no matter what happened.
        variables['browse_button']['state'] = 'normal'
        variables['start_button']['state'] = 'normal'
        variables['input_path_entry']['state'] = 'normal'
def execute_tasks_in_background(accounts, variables):
    """Run execute_tasks on a daemon thread so the tkinter UI stays responsive."""
    worker = Thread(target=execute_tasks, args=(accounts, variables), daemon=True)
    worker.start()
|
cli.py | # -*- coding: utf-8 -*-
"""
flask.cli
~~~~~~~~~
A simple command line application to run flask apps.
:copyright: (c) 2015 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import os
import sys
import traceback
from functools import update_wrapper
from operator import attrgetter
from threading import Lock, Thread
import click
from . import __version__
from ._compat import iteritems, reraise
from .globals import current_app
from .helpers import get_debug_flag
class NoAppException(click.UsageError):
    """Raised if an application cannot be found or loaded.

    Subclasses :class:`click.UsageError` so click renders it as a normal
    usage error message instead of a traceback.
    """
def find_best_app(module):
    """Given a module instance this tries to find the best possible
    application in the module or raises an exception.
    """
    from . import Flask

    # Conventionally named attributes take priority.
    for candidate in ('app', 'application'):
        found = getattr(module, candidate, None)
        if found is not None and isinstance(found, Flask):
            return found

    # Otherwise accept a module-level Flask instance, but only when there
    # is exactly one (anything else would be ambiguous).
    instances = [value for value in module.__dict__.values()
                 if isinstance(value, Flask)]
    if len(instances) == 1:
        return instances[0]

    raise NoAppException('Failed to find application in module "%s". Are '
                         'you sure it contains a Flask application? Maybe '
                         'you wrapped it in a WSGI middleware or you are '
                         'using a factory function.' % module.__name__)
def prepare_exec_for_file(filename):
    """Given a filename this will try to calculate the python path, add it
    to the search path and return the actual module name that is expected.
    """
    # Strip the extension / package marker to get the module's own name.
    if os.path.split(filename)[1] == '__init__.py':
        filename = os.path.dirname(filename)
    elif filename.endswith('.py'):
        filename = filename[:-3]
    else:
        raise NoAppException('The file provided (%s) does exist but is not a '
                             'valid Python file. This means that it cannot '
                             'be used as application. Please change the '
                             'extension to .py' % filename)
    filename = os.path.realpath(filename)

    # Walk up through enclosing packages until the first directory that is
    # not itself a package; that directory becomes the import root.
    parts = []
    path = filename
    while True:
        path, tail = os.path.split(path)
        parts.append(tail)
        if not os.path.isfile(os.path.join(path, '__init__.py')):
            break

    sys.path.insert(0, path)
    return '.'.join(reversed(parts))
def locate_app(app_id):
    """Attempts to locate the application.

    ``app_id`` is either a plain module name or ``module:attribute``.
    """
    __traceback_hide__ = True
    if ':' in app_id:
        module, app_obj = app_id.split(':', 1)
    else:
        module = app_id
        app_obj = None
    try:
        __import__(module)
    except ImportError:
        # Reraise the ImportError if it occurred within the imported module.
        # Determine this by checking whether the trace has a depth > 1.
        if sys.exc_info()[-1].tb_next:
            stack_trace = traceback.format_exc()
            raise NoAppException('There was an error trying to import'
                                 ' the app (%s):\n%s' % (module, stack_trace))
        else:
            raise NoAppException('The file/path provided (%s) does not appear'
                                 ' to exist. Please verify the path is '
                                 'correct. If app is not on PYTHONPATH, '
                                 'ensure the extension is .py' % module)
    mod = sys.modules[module]
    if app_obj is None:
        # No attribute given: search the module for a Flask instance.
        app = find_best_app(mod)
    else:
        app = getattr(mod, app_obj, None)
        if app is None:
            raise RuntimeError('Failed to find application in module "%s"'
                               % module)

    return app
def find_default_import_path():
    """Return the import path configured via the FLASK_APP environment
    variable, or None when it is not set.  A value pointing at an existing
    file is converted to a module import path first.
    """
    app = os.environ.get('FLASK_APP')
    if app is None:
        return None
    return prepare_exec_for_file(app) if os.path.isfile(app) else app
def get_version(ctx, param, value):
    # click eager callback for --version: print version info and exit.
    # `param` is unused but required by the click callback signature.
    if not value or ctx.resilient_parsing:
        return
    message = 'Flask %(version)s\nPython %(python_version)s'
    click.echo(message % {
        'version': __version__,
        'python_version': sys.version,
    }, color=ctx.color)
    ctx.exit()
# Reusable --version option; eager so it runs before command resolution.
version_option = click.Option(['--version'],
                              help='Show the flask version',
                              expose_value=False,
                              callback=get_version,
                              is_flag=True, is_eager=True)
class DispatchingApp(object):
    """Special application that dispatches to a Flask application which
    is imported by name in a background thread.  If an error happens
    it is recorded and shown as part of the WSGI handling which in case
    of the Werkzeug debugger means that it shows up in the browser.
    """

    def __init__(self, loader, use_eager_loading=False):
        # `loader` is a zero-argument callable returning the Flask app
        # (typically ScriptInfo.load_app).
        self.loader = loader
        self._app = None
        self._lock = Lock()  # guards lazy loading via _load_unlocked
        self._bg_loading_exc_info = None  # exception captured by background loader
        if use_eager_loading:
            self._load_unlocked()
        else:
            self._load_in_background()

    def _load_in_background(self):
        # Import the app on a separate thread so the server can start
        # accepting connections immediately; any failure is stashed and
        # re-raised on the first request.
        def _load_app():
            __traceback_hide__ = True
            with self._lock:
                try:
                    self._load_unlocked()
                except Exception:
                    self._bg_loading_exc_info = sys.exc_info()
        t = Thread(target=_load_app, args=())
        t.start()

    def _flush_bg_loading_exception(self):
        # Re-raise (once) an exception recorded by the background loader.
        __traceback_hide__ = True
        exc_info = self._bg_loading_exc_info
        if exc_info is not None:
            self._bg_loading_exc_info = None
            reraise(*exc_info)

    def _load_unlocked(self):
        # Caller must hold self._lock (or be single-threaded, as in __init__).
        __traceback_hide__ = True
        self._app = rv = self.loader()
        self._bg_loading_exc_info = None
        return rv

    def __call__(self, environ, start_response):
        """WSGI entry point: dispatch to the (possibly lazily loaded) app."""
        __traceback_hide__ = True
        if self._app is not None:
            return self._app(environ, start_response)
        self._flush_bg_loading_exception()
        with self._lock:
            if self._app is not None:
                rv = self._app
            else:
                rv = self._load_unlocked()
        return rv(environ, start_response)
class ScriptInfo(object):
    """Help object to deal with Flask applications.  This is usually not
    necessary to interface with as it's used internally in the dispatching
    to click.  In future versions of Flask this object will most likely play
    a bigger role.  Typically it's created automatically by the
    :class:`FlaskGroup` but you can also manually create it and pass it
    onwards as click object.
    """

    def __init__(self, app_import_path=None, create_app=None):
        # A factory callback takes precedence: when it is given, any import
        # path is ignored.  Otherwise fall back to the FLASK_APP environment
        # variable when no explicit path was supplied.
        # (Fix: the original assigned self.app_import_path twice -- once
        # inside this branch and once unconditionally below; the redundant
        # first assignment is removed.)
        if create_app is None:
            if app_import_path is None:
                app_import_path = find_default_import_path()
        else:
            app_import_path = None

        #: Optionally the import path for the Flask application.
        self.app_import_path = app_import_path
        #: Optionally a function that is passed the script info to create
        #: the instance of the application.
        self.create_app = create_app
        #: A dictionary with arbitrary data that can be associated with
        #: this script info.
        self.data = {}
        self._loaded_app = None  # cache populated by load_app()

    def load_app(self):
        """Loads the Flask app (if not yet loaded) and returns it.  Calling
        this multiple times will just result in the already loaded app to
        be returned.

        :raises NoAppException: if neither a factory nor an import path is
            configured.
        """
        __traceback_hide__ = True
        if self._loaded_app is not None:
            return self._loaded_app
        if self.create_app is not None:
            rv = self.create_app(self)
        else:
            if not self.app_import_path:
                raise NoAppException(
                    'Could not locate Flask application. You did not provide '
                    'the FLASK_APP environment variable.\n\nFor more '
                    'information see '
                    'http://flask.pocoo.org/docs/latest/quickstart/')
            rv = locate_app(self.app_import_path)
        debug = get_debug_flag()
        if debug is not None:
            # Propagate FLASK_DEBUG onto the app unless it is unset.
            rv.debug = debug
        self._loaded_app = rv
        return rv
pass_script_info = click.make_pass_decorator(ScriptInfo, ensure=True)
def with_appcontext(f):
    """Wraps a callback so that it's guaranteed to be executed with the
    script's application context.  If callbacks are registered directly
    to the ``app.cli`` object then they are wrapped with this function
    by default unless it's disabled.
    """
    @click.pass_context
    def decorator(__ctx, *args, **kwargs):
        # Load the app via the shared ScriptInfo and push its application
        # context around the actual callback invocation.
        with __ctx.ensure_object(ScriptInfo).load_app().app_context():
            return __ctx.invoke(f, *args, **kwargs)
    return update_wrapper(decorator, f)
class AppGroup(click.Group):
    """This works similar to a regular click :class:`~click.Group` but it
    changes the behavior of the :meth:`command` decorator so that it
    automatically wraps the functions in :func:`with_appcontext`.

    Not to be confused with :class:`FlaskGroup`.
    """

    def command(self, *args, **kwargs):
        """This works exactly like the method of the same name on a regular
        :class:`click.Group` but it wraps callbacks in :func:`with_appcontext`
        unless it's disabled by passing ``with_appcontext=False``.
        """
        use_appcontext = kwargs.pop('with_appcontext', True)

        def wrapper(func):
            callback = with_appcontext(func) if use_appcontext else func
            return click.Group.command(self, *args, **kwargs)(callback)

        return wrapper

    def group(self, *args, **kwargs):
        """This works exactly like the method of the same name on a regular
        :class:`click.Group` but it defaults the group class to
        :class:`AppGroup`.
        """
        kwargs.setdefault('cls', AppGroup)
        return click.Group.group(self, *args, **kwargs)
class FlaskGroup(AppGroup):
    """Special subclass of the :class:`AppGroup` group that supports
    loading more commands from the configured Flask app.  Normally a
    developer does not have to interface with this class but there are
    some very advanced use cases for which it makes sense to create an
    instance of this.

    For information as of why this is useful see :ref:`custom-scripts`.

    :param add_default_commands: if this is True then the default run and
                                 shell commands wil be added.
    :param add_version_option: adds the ``--version`` option.
    :param create_app: an optional callback that is passed the script info
                       and returns the loaded app.
    """

    def __init__(self, add_default_commands=True, create_app=None,
                 add_version_option=True, **extra):
        params = list(extra.pop('params', None) or ())

        if add_version_option:
            params.append(version_option)

        AppGroup.__init__(self, params=params, **extra)
        self.create_app = create_app

        if add_default_commands:
            self.add_command(run_command)
            self.add_command(shell_command)
            self.add_command(routes_command)

        self._loaded_plugin_commands = False

    def _load_plugin_commands(self):
        # Register commands contributed by installed packages through the
        # "flask.commands" entry point group (at most once per group).
        if self._loaded_plugin_commands:
            return
        try:
            import pkg_resources
        except ImportError:
            # setuptools not available: plugin commands simply don't exist.
            self._loaded_plugin_commands = True
            return

        for ep in pkg_resources.iter_entry_points('flask.commands'):
            self.add_command(ep.load(), ep.name)
        self._loaded_plugin_commands = True

    def get_command(self, ctx, name):
        self._load_plugin_commands()

        # We load built-in commands first as these should always be the
        # same no matter what the app does.  If the app does want to
        # override this it needs to make a custom instance of this group
        # and not attach the default commands.
        #
        # This also means that the script stays functional in case the
        # application completely fails.
        rv = AppGroup.get_command(self, ctx, name)
        if rv is not None:
            return rv

        info = ctx.ensure_object(ScriptInfo)
        try:
            rv = info.load_app().cli.get_command(ctx, name)
            if rv is not None:
                return rv
        except NoAppException:
            # No app available: only built-in commands can be resolved
            # (implicitly returns None).
            pass

    def list_commands(self, ctx):
        self._load_plugin_commands()

        # The commands available is the list of both the application (if
        # available) plus the builtin commands.
        rv = set(click.Group.list_commands(self, ctx))
        info = ctx.ensure_object(ScriptInfo)
        try:
            rv.update(info.load_app().cli.list_commands(ctx))
        except Exception:
            # Here we intentionally swallow all exceptions as we don't
            # want the help page to break if the app does not exist.
            # If someone attempts to use the command we try to create
            # the app again and this will give us the error.
            # However, we will not do so silently because that would confuse
            # users.
            traceback.print_exc()
        return sorted(rv)

    def main(self, *args, **kwargs):
        # Ensure a shared ScriptInfo object exists and let FLASK_* env
        # vars act as automatic option sources.
        obj = kwargs.get('obj')
        if obj is None:
            obj = ScriptInfo(create_app=self.create_app)
        kwargs['obj'] = obj
        kwargs.setdefault('auto_envvar_prefix', 'FLASK')
        return AppGroup.main(self, *args, **kwargs)
@click.command('run', short_help='Runs a development server.')
@click.option('--host', '-h', default='127.0.0.1',
              help='The interface to bind to.')
@click.option('--port', '-p', default=5000,
              help='The port to bind to.')
@click.option('--reload/--no-reload', default=None,
              help='Enable or disable the reloader. By default the reloader '
              'is active if debug is enabled.')
@click.option('--debugger/--no-debugger', default=None,
              help='Enable or disable the debugger. By default the debugger '
              'is active if debug is enabled.')
@click.option('--eager-loading/--lazy-loader', default=None,
              help='Enable or disable eager loading. By default eager '
              'loading is enabled if the reloader is disabled.')
@click.option('--with-threads/--without-threads', default=False,
              help='Enable or disable multithreading.')
@pass_script_info
def run_command(info, host, port, reload, debugger, eager_loading,
                with_threads):
    """Runs a local development server for the Flask application.

    This local server is recommended for development purposes only but it
    can also be used for simple intranet deployments. By default it will
    not support any sort of concurrency at all to simplify debugging. This
    can be changed with the --with-threads option which will enable basic
    multithreading.

    The reloader and debugger are by default enabled if the debug flag of
    Flask is enabled and disabled otherwise.
    """
    from werkzeug.serving import run_simple

    # Set a global flag that indicates that we were invoked from the
    # command line interface provided server command. This is detected
    # by Flask.run to make the call into a no-op. This is necessary to
    # avoid ugly errors when the script that is loaded here also attempts
    # to start a server.
    os.environ['FLASK_RUN_FROM_CLI_SERVER'] = '1'

    debug = get_debug_flag()
    # Options left at None default to the app's debug flag.
    if reload is None:
        reload = bool(debug)
    if debugger is None:
        debugger = bool(debug)
    if eager_loading is None:
        # Lazy loading interacts badly with the reloader, so eager-load
        # only when the reloader is off.
        eager_loading = not reload

    app = DispatchingApp(info.load_app, use_eager_loading=eager_loading)

    # Extra startup messages. This depends a bit on Werkzeug internals to
    # not double execute when the reloader kicks in.
    if os.environ.get('WERKZEUG_RUN_MAIN') != 'true':
        # If we have an import path we can print it out now which can help
        # people understand what's being served. If we do not have an
        # import path because the app was loaded through a callback then
        # we won't print anything.
        if info.app_import_path is not None:
            print(' * Serving Flask app "%s"' % info.app_import_path)
        if debug is not None:
            print(' * Forcing debug mode %s' % (debug and 'on' or 'off'))

    run_simple(host, port, app, use_reloader=reload,
               use_debugger=debugger, threaded=with_threads)
@click.command('shell', short_help='Runs a shell in the app context.')
@with_appcontext
def shell_command():
    """Runs an interactive Python shell in the context of a given
    Flask application. The application will populate the default
    namespace of this shell according to it's configuration.

    This is useful for executing small snippets of management code
    without having to manually configuring the application.
    """
    import code
    from flask.globals import _app_ctx_stack
    app = _app_ctx_stack.top.app
    # Banner mirrors the default interpreter banner plus app details.
    banner = 'Python %s on %s\nApp: %s%s\nInstance: %s' % (
        sys.version,
        sys.platform,
        app.import_name,
        app.debug and ' [debug]' or '',
        app.instance_path,
    )
    ctx = {}

    # Support the regular Python interpreter startup script if someone
    # is using it.
    startup = os.environ.get('PYTHONSTARTUP')
    if startup and os.path.isfile(startup):
        with open(startup, 'r') as f:
            eval(compile(f.read(), startup, 'exec'), ctx)

    ctx.update(app.make_shell_context())

    code.interact(banner=banner, local=ctx)
@click.command('routes', short_help='Show the routes for the app.')
@click.option(
    '--sort', '-s',
    type=click.Choice(('endpoint', 'methods', 'rule', 'match')),
    default='endpoint',
    help=(
        'Method to sort routes by. "match" is the order that Flask will match '
        'routes when dispatching a request.'
    )
)
@click.option(
    '--all-methods',
    is_flag=True,
    help="Show HEAD and OPTIONS methods."
)
@with_appcontext
def routes_command(sort, all_methods):
    """Show all registered routes with endpoints and methods."""
    rules = list(current_app.url_map.iter_rules())

    # BUG FIX: with no registered routes every max() below would raise
    # ValueError (empty sequence); report and bail out instead.
    if not rules:
        click.echo('No routes were registered.')
        return

    # HEAD/OPTIONS are auto-added by Werkzeug; hide them unless asked for
    ignored_methods = set(() if all_methods else ('HEAD', 'OPTIONS'))

    if sort in ('endpoint', 'rule'):
        rules = sorted(rules, key=attrgetter(sort))
    elif sort == 'methods':
        rules = sorted(rules, key=lambda rule: sorted(rule.methods))
    # 'match' keeps the url_map's own matching order

    rule_methods = [
        ', '.join(sorted(rule.methods - ignored_methods)) for rule in rules
    ]

    # column widths: the wider of header and longest cell in each column
    headers = ('Endpoint', 'Methods', 'Rule')
    widths = (
        max(len(rule.endpoint) for rule in rules),
        max(len(methods) for methods in rule_methods),
        max(len(rule.rule) for rule in rules),
    )
    widths = [max(len(h), w) for h, w in zip(headers, widths)]
    row = '{{0:<{0}}} {{1:<{1}}} {{2:<{2}}}'.format(*widths)

    click.echo(row.format(*headers).strip())
    click.echo(row.format(*('-' * width for width in widths)))

    for rule, methods in zip(rules, rule_methods):
        click.echo(row.format(rule.endpoint, methods, rule.rule).rstrip())
cli = FlaskGroup(help="""\
This shell command acts as general utility script for Flask applications.
It loads the application configured (through the FLASK_APP environment
variable) and then provides commands either provided by the application or
Flask itself.
The most useful commands are the "run" and "shell" command.
Example usage:
\b
%(prefix)s%(cmd)s FLASK_APP=hello.py
%(prefix)s%(cmd)s FLASK_DEBUG=1
%(prefix)sflask run
""" % {
'cmd': os.name == 'posix' and 'export' or 'set',
'prefix': os.name == 'posix' and '$ ' or '',
})
def main(as_module=False):
    """Entry point for the ``flask`` command line interface.

    When *as_module* is true we were launched via ``python -m``, so the
    program name shown in help output is adjusted and ``sys.argv`` is
    restored to a re-executable form for the reloader.
    """
    this_module = __package__ + '.cli'
    args = sys.argv[1:]
    name = None

    if as_module:
        if sys.version_info >= (2, 7):
            # 'python -m flask' works directly on 2.7+
            name = 'python -m ' + this_module.rsplit('.', 1)[0]
        else:
            name = 'python -m ' + this_module

        # This module is always executed as "python -m flask.run" and as such
        # we need to ensure that we restore the actual command line so that
        # the reloader can properly operate.
        sys.argv = ['-m', this_module] + args

    cli.main(args=args, prog_name=name)


if __name__ == '__main__':
    main(as_module=True)
|
test_lab_view.py | import time
from multiprocessing import Process
from threading import Thread
from traceback import print_exc
from unittest import TestCase
from bson import ObjectId
from alab_management.device_view import DeviceView
from alab_management.lab_view import LabView
from alab_management.sample_view import SampleView
from alab_management.scripts.cleanup_lab import cleanup_lab
from alab_management.scripts.setup_lab import setup_lab
from alab_management.task_manager import TaskManager
from alab_management.task_view import TaskView
def launch_task_manager():
    """Run a TaskManager loop in a child process.

    Prints the traceback of any failure before re-raising so the error is
    visible even though this runs in a separate process.
    """
    try:
        task_manager = TaskManager()
        task_manager.run()
    except Exception:
        # BUG FIX: was `print(print_exc())` -- print_exc() already writes the
        # traceback to stderr and returns None, so that printed "None".
        print_exc()
        raise
class TestLabView(TestCase):
    """Integration tests for LabView resource requests.

    Each test runs against a freshly set-up lab database with a real
    TaskManager running in a daemon child process; sleeps give the manager
    time to process requests/releases.
    """

    def setUp(self) -> None:
        # start from a clean lab state, then launch the task manager process
        cleanup_lab(all_collections=True, _force_i_know_its_dangerous=True)
        setup_lab()
        self.device_view = DeviceView()
        self.device_list = self.device_view._device_list
        self.sample_view = SampleView()
        self.task_view = TaskView()
        self.process = Process(target=launch_task_manager)
        self.process.daemon = True
        self.process.start()
        # give the task manager a moment to come up before tests run
        time.sleep(1)

    def tearDown(self) -> None:
        self.process.terminate()
        cleanup_lab(all_collections=True, _force_i_know_its_dangerous=True)
        time.sleep(1)

    def test_request_resources(self):
        # map device class name -> class for the devices registered in the lab
        device_types = {device.__name__: device
                        for device in {device.__class__ for device in self.device_list.values()}}
        Furnace = device_types["Furnace"]
        RobotArm = device_types["RobotArm"]
        task_id = self.task_view.create_task(**{
            "task_type": "Heating",
            "samples": {"sample": ObjectId()},
            "parameters": {"setpoints": [[10, 600]]}
        })
        lab_view = LabView(task_id=task_id)
        # request a furnace slot, a robot arm (no positions), and one
        # device-less "furnace_table" position
        with lab_view.request_resources({Furnace: ["$/inside"], RobotArm: [],
                                         None: [{"prefix": "furnace_table",
                                                 "number": 1}]}) \
                as (devices, sample_positions):
            self.assertDictEqual({Furnace: {"$/inside": ["furnace_1/inside"]}, RobotArm: {},
                                  None: {"furnace_table": ["furnace_table"]}}, sample_positions)
            # while held: devices occupied, positions locked
            self.assertEqual("OCCUPIED", self.device_view.get_status("furnace_1").name)
            self.assertEqual("OCCUPIED", self.device_view.get_status("dummy").name)
            self.assertEqual("LOCKED", self.sample_view.get_sample_position_status("furnace_1/inside")[0].name)
            self.assertEqual("LOCKED", self.sample_view.get_sample_position_status("furnace_table")[0].name)
        # after release (plus a grace period) everything is freed again
        time.sleep(1)
        self.assertEqual("IDLE", self.device_view.get_status("furnace_1").name)
        self.assertEqual("IDLE", self.device_view.get_status("dummy").name)
        self.assertEqual("EMPTY", self.sample_view.get_sample_position_status("furnace_1/inside")[0].name)
        self.assertEqual("EMPTY", self.sample_view.get_sample_position_status("furnace_table")[0].name)

    def test_request_resources_empty(self):
        # an empty request should succeed and yield empty mappings
        task_id = self.task_view.create_task(**{
            "task_type": "Heating",
            "samples": {"sample": ObjectId()},
            "parameters": {"setpoints": [[10, 600]]}
        })
        lab_view = LabView(task_id=task_id)
        with lab_view.request_resources({}) as (devices, sample_positions):
            self.assertDictEqual({}, devices)
            self.assertEqual({}, sample_positions)
|
mqtt_s3_multi_clients_comm_manager.py | # -*-coding:utf-8-*-
import json
import logging
import time
import traceback
import uuid
from typing import List
import paho.mqtt.client as mqtt
import yaml
from .remote_storage import S3Storage
from ..base_com_manager import BaseCommunicationManager
from ..message import Message
from ..observer import Observer
class MqttS3MultiClientsCommManager(BaseCommunicationManager):
    """Communication manager that uses MQTT for control messages and S3 for
    large payloads: model parameters are uploaded to S3 and the MQTT message
    carries only the S3 key/URL.

    Convention in this class: ``client_id == 0`` (rank 0) is the server; any
    other rank is an edge client.
    """

    def __init__(
        self,
        config_path,
        s3_config_path,
        topic="fedml",
        client_rank=0,
        client_num=0,
        args=None,
        bind_port=0,
    ):
        # Normalize args.client_id_list into parseable JSON.
        # NOTE(review): the first replace() call's two arguments render as the
        # same ASCII quote here -- presumably the original replaced a
        # non-ASCII quote character; confirm against upstream source.
        client_objects_str = str(args.client_id_list).replace('"', '"')
        client_objects_str = client_objects_str.replace("'", "")
        logging.info("origin client object " + str(args.client_id_list))
        logging.info("client object " + client_objects_str)
        self.client_id_list = json.loads(client_objects_str)

        # all topics used by this manager are prefixed "fedml_<topic>_"
        self._topic = "fedml_" + str(topic) + "_"
        self.s3_storage = S3Storage(s3_config_path)
        self.client_real_ids = []
        if args.client_id_list is not None:
            logging.info(
                "MqttS3CommManager args client_id_list: " + str(args.client_id_list)
            )
            self.client_real_ids = json.loads(args.client_id_list)

        # rank 0 is the server (edge_id 0); a single-entry id list names this
        # edge's real id, otherwise fall back to 0
        if args.rank == 0:
            self.edge_id = 0
        else:
            if len(self.client_real_ids) == 1:
                self.edge_id = self.client_real_ids[0]
            else:
                self.edge_id = 0

        self.model_params_key_map = list()
        # subscription message-ids still awaiting SUBACK (see _on_subscribe)
        self._unacked_sub = list()
        self._observers: List[Observer] = []
        if client_rank is None:
            # no rank supplied: fall back to a random base62 id
            self._client_id = mqtt.base62(uuid.uuid4().int, padding=22)
        else:
            self._client_id = client_rank
        self.client_num = client_num
        logging.info("mqtt_s3.init: client_num = %d" % client_num)

        # NOTE(review): set_config_from_objects() indexes its argument like a
        # dict but receives the same *path string* given to
        # set_config_from_file(); verify which call is actually intended.
        self.set_config_from_file(config_path)
        self.set_config_from_objects(config_path)

        # Construct a Client
        self.mqtt_connection_id = mqtt.base62(uuid.uuid4().int, padding=22)
        self._client = mqtt.Client(
            client_id=str(self.mqtt_connection_id), clean_session=True
        )
        self._client.enable_logger()
        self._client.on_connect = self._on_connect
        self._client.on_disconnect = self._on_disconnect
        self._client.on_message = self._on_message
        self._client.on_subscribe = self._on_subscribe
        # self._client.on_log = self.on_log
        self._client.username_pw_set(self.mqtt_user, self.mqtt_pwd)
        # last-will message the broker publishes if this client drops
        _will_msg = {"ID": f"{self.mqtt_connection_id}", "stat": "Online"}
        self._client.will_set(
            "W/topic", payload=json.dumps(_will_msg), qos=0, retain=True
        )
        logging.info(
            "mqtt_s3.init: connecting to MQTT server(local port %d..." % bind_port
        )
        # keepalive of 180 seconds
        self._client.connect(
            self.broker_host, self.broker_port, 180, bind_port=bind_port
        )

    def on_log(self, mqttc, obj, level, string):
        # optional paho log hook (currently not wired up; see __init__)
        logging.info("mqtt_s3.on_log: " + string)

    def run_loop_forever(self):
        """Block forever processing MQTT network traffic."""
        self._client.loop_forever()

    def __del__(self):
        self._client.loop_stop()
        self._client.disconnect()

    @property
    def client_id(self):
        # rank of this participant; 0 means server
        return self._client_id

    @property
    def topic(self):
        return self._topic

    def _on_connect_impl(self, client, userdata, flags, rc):
        """
        [server]
        sending message topic (publish): serverID_clientID
        receiving message topic (subscribe): clientID

        [client]
        sending message topic (publish): clientID
        receiving message topic (subscribe): serverID_clientID
        """
        logging.info(
            "mqtt_s3.on_connect: connection returned with result code:" + str(rc)
        )
        # subscribe one topic
        if self.client_id == 0:
            # server: subscribe to one topic per known client
            for client_rank in range(0, self.client_num):
                real_topic = self._topic + str(self.client_real_ids[client_rank])
                result, mid = client.subscribe(real_topic, 0)

                self._unacked_sub.append(mid)

                logging.info(
                    "mqtt_s3.on_connect: subscribes real_topic = %s, mid = %s, result = %s"
                    % (real_topic, mid, str(result))
                )
            self._notify_connection_ready()
            logging.info("mqtt_s3.on_connect: server subscribes")
        else:
            # client: subscribe to the server->me topic ("<prefix>0_<my id>")
            real_topic = self._topic + str(0) + "_" + str(self.client_real_ids[0])
            result, mid = client.subscribe(real_topic, 0)

            self._unacked_sub.append(mid)

            self._notify_connection_ready()

            logging.info(
                "mqtt_s3.on_connect: client subscribes real_topic = %s, mid = %s, result = %s"
                % (real_topic, mid, str(result))
            )

    def _on_connect(self, client, userdata, flags, rc):
        # paho swallows callback exceptions; print and exit so failures are
        # not silent
        try:
            self._on_connect_impl(client, userdata, flags, rc)
        except:
            traceback.print_exc()
            quit(0)

    @staticmethod
    def _on_disconnect(client, userdata, rc):
        logging.info(
            "mqtt_s3.on_disconnect: disconnection returned result %s, user data %s"
            % (str(rc), str(userdata))
        )

    def _on_subscribe(self, client, userdata, mid, granted_qos):
        # SUBACK received: this subscription is now confirmed
        logging.info("mqtt_s3.onSubscribe: mid = %s" % str(mid))
        self._unacked_sub.remove(mid)

    def add_observer(self, observer: Observer):
        self._observers.append(observer)

    def remove_observer(self, observer: Observer):
        self._observers.remove(observer)

    def _notify_connection_ready(self):
        """Tell observers the connection is up (msg_type 0, empty message)."""
        msg_params = Message()
        MSG_TYPE_CONNECTION_IS_READY = 0
        msg_type = MSG_TYPE_CONNECTION_IS_READY
        for observer in self._observers:
            observer.receive_message(msg_type, msg_params)

    def _notify(self, msg_obj):
        """Dispatch a decoded JSON message object to all observers."""
        msg_params = Message()
        msg_params.init_from_json_object(msg_obj)
        msg_type = msg_params.get_type()
        logging.info("mqtt_s3.notify: msg type = %d" % msg_type)
        for observer in self._observers:
            observer.receive_message(msg_type, msg_params)

    def _on_message_impl(self, client, userdata, msg):
        """Decode an incoming MQTT message, pulling model params from S3 when
        the payload carries an S3 key instead of raw parameters."""
        logging.info("--------------------------")
        json_payload = str(msg.payload, encoding="utf-8")
        payload_obj = json.loads(json_payload)
        sender_id = payload_obj.get(Message.MSG_ARG_KEY_SENDER, "")
        receiver_id = payload_obj.get(Message.MSG_ARG_KEY_RECEIVER, "")
        s3_key_str = payload_obj.get(Message.MSG_ARG_KEY_MODEL_PARAMS, "")
        s3_key_str = str(s3_key_str).strip(" ")

        # NOTE(review): channel_id is computed but unused in this method
        if self.client_id == 0:
            channel_id = sender_id
        else:
            channel_id = self.edge_id

        if s3_key_str != "":
            logging.info(
                "mqtt_s3.on_message: use s3 pack, s3 message key %s" % s3_key_str
            )

            # read S3 object
            # s3_obj = self.s3_storage.read_json(s3_key_str)
            # model_params = str(s3_obj, encoding="utf-8")
            # model_params = json.loads(model_params)
            logging.info("mqtt_s3.on_message: from python client.")
            model_params = self.s3_storage.read_model(s3_key_str)

            logging.info(
                "mqtt_s3.on_message: model params length %d" % len(model_params)
            )

            # replace the S3 object key with raw model params
            payload_obj[Message.MSG_ARG_KEY_MODEL_PARAMS] = model_params
        else:
            logging.info("mqtt_s3.on_message: not use s3 pack")

        self._notify(payload_obj)

    def _on_message(self, client, userdata, msg):
        # same rationale as _on_connect: surface exceptions, then exit
        try:
            self._on_message_impl(client, userdata, msg)
        except:
            traceback.print_exc()
            quit(0)

    def send_message(self, msg: Message):
        """
        [server]
        sending message topic (publish): fedml_runid_serverID_clientID
        receiving message topic (subscribe): fedml_runid_clientID

        [client]
        sending message topic (publish): fedml_runid_clientID
        receiving message topic (subscribe): fedml_runid_serverID_clientID
        """
        logging.info("mqtt_s3.send_message: starting...")
        sender_id = msg.get_sender_id()
        receiver_id = msg.get_receiver_id()
        if self.client_id == 0:
            # server -> client path
            # topic = "fedml" + "_" + "run_id" + "_0" + "_" + "client_id"
            topic = self._topic + str(0) + "_" + str(receiver_id)
            logging.info("mqtt_s3.send_message: msg topic = %s" % str(topic))

            payload = msg.get_params()
            model_params_obj = payload.get(Message.MSG_ARG_KEY_MODEL_PARAMS, "")
            message_key = topic + "_" + str(uuid.uuid4())
            if model_params_obj != "":
                # S3: upload the model, then publish only key + URL over MQTT
                logging.info(
                    "mqtt_s3.send_message: S3+MQTT msg sent, s3 message key = %s"
                    % message_key
                )

                # self.s3_storage.write_json(message_key, json.dumps(model_params_obj))
                # model_uploaded = False
                # for model_params_key_item in self.model_params_key_map:
                #     if model_params_obj == model_params_key_item["obj"]:
                #         model_uploaded = True
                #         model_params_key_url = model_params_key_item
                #         break
                # if not model_uploaded:
                logging.info("mqtt_s3.send_message: to python client.")
                model_url = self.s3_storage.write_model(message_key, model_params_obj)
                model_params_key_url = {
                    "key": message_key,
                    "url": model_url,
                    "obj": model_params_obj,
                }
                # self.model_params_key_map.append(model_params_key_url)

                payload[Message.MSG_ARG_KEY_MODEL_PARAMS] = model_params_key_url["key"]
                payload[Message.MSG_ARG_KEY_MODEL_PARAMS_URL] = model_params_key_url[
                    "url"
                ]
                self._client.publish(topic, payload=json.dumps(payload))
            else:
                # pure MQTT: payload is small enough to send inline
                logging.info("mqtt_s3.send_message: MQTT msg sent")
                self._client.publish(topic, payload=json.dumps(payload))
        else:
            # client -> server path: topic is "<prefix><sender id>"
            topic = self._topic + str(msg.get_sender_id())
            message_key = topic + "_" + str(uuid.uuid4())

            payload = msg.get_params()
            model_params_obj = payload.get(Message.MSG_ARG_KEY_MODEL_PARAMS, "")
            if model_params_obj != "":
                # S3: same key/URL indirection as the server path above
                logging.info(
                    "mqtt_s3.send_message: S3+MQTT msg sent, message_key = %s"
                    % message_key
                )

                # self.s3_storage.write_json(message_key, json.dumps(model_params_obj))
                # model_uploaded = False
                # for model_params_key_item in self.model_params_key_map:
                #     if model_params_obj == model_params_key_item["obj"]:
                #         model_uploaded = True
                #         model_params_key_url = model_params_key_item
                #         break
                # if not model_uploaded:
                logging.info("mqtt_s3.send_message: to python client.")
                model_url = self.s3_storage.write_model(message_key, model_params_obj)
                model_params_key_url = {
                    "key": message_key,
                    "url": model_url,
                    "obj": model_params_obj,
                }
                # self.model_params_key_map.append(model_params_key_url)

                payload[Message.MSG_ARG_KEY_MODEL_PARAMS] = model_params_key_url["key"]
                payload[Message.MSG_ARG_KEY_MODEL_PARAMS_URL] = model_params_key_url[
                    "url"
                ]
                self._client.publish(topic, payload=json.dumps(payload))
            else:
                logging.info("mqtt_s3.send_message: MQTT msg sent")
                self._client.publish(topic, payload=json.dumps(payload))

    def send_message_json(self, topic_name, json_message):
        """Publish an already-serialized JSON string to an arbitrary topic."""
        self._client.publish(topic_name, payload=json_message)

    def handle_receive_message(self):
        # blocks forever; callers run this on a dedicated thread/process
        self.run_loop_forever()
        # multiprocessing.Process(target=self.run_loop_forever).start()
        # self.is_running = True
        # while self.is_running:
        #     time.sleep(0.003)
        # logging.info("mqtt_s3.handle_receive_message: completed...")

    def stop_receive_message(self):
        logging.info("mqtt_s3.stop_receive_message: stopping...")
        self._client.loop_stop()
        self._client.disconnect()

    def set_config_from_file(self, config_file_path):
        """Load broker host/port and optional credentials from a YAML file.

        NOTE(review): any failure (missing file, bad YAML, missing keys) is
        silently ignored, leaving attributes possibly unset -- confirm this
        best-effort behavior is intended.
        """
        try:
            with open(config_file_path, "r") as f:
                config = yaml.load(f, Loader=yaml.FullLoader)
                self.broker_host = config["BROKER_HOST"]
                self.broker_port = config["BROKER_PORT"]
                self.mqtt_user = None
                self.mqtt_pwd = None
                if "MQTT_USER" in config:
                    self.mqtt_user = config["MQTT_USER"]
                if "MQTT_PWD" in config:
                    self.mqtt_pwd = config["MQTT_PWD"]
        except Exception as e:
            pass

    def set_config_from_objects(self, mqtt_config):
        """Load broker host/port and optional credentials from a dict-like
        config object (expects BROKER_HOST/BROKER_PORT keys)."""
        self.broker_host = mqtt_config["BROKER_HOST"]
        self.broker_port = mqtt_config["BROKER_PORT"]
        self.mqtt_user = None
        self.mqtt_pwd = None
        if "MQTT_USER" in mqtt_config:
            self.mqtt_user = mqtt_config["MQTT_USER"]
        if "MQTT_PWD" in mqtt_config:
            self.mqtt_pwd = mqtt_config["MQTT_PWD"]
if __name__ == "__main__":
    # Manual smoke test: connect to a local FedML experiment config, send one
    # message and print whatever comes back.
    class Obs(Observer):
        """Minimal observer that just prints every received message."""

        def receive_message(self, msg_type, msg_params) -> None:
            print("receive_message(%s, %s)" % (msg_type, msg_params.to_string()))

    mqtt_config = (
        "../../../../fedml_experiments/distributed/fedavg_cross_silo/mqtt_config.yaml"
    )
    s3_config = (
        "../../../../fedml_experiments/distributed/fedavg_cross_silo/s3_config.yaml"
    )
    # BUG FIX: the constructor has no `client_id` parameter -- the rank is
    # passed as `client_rank` -- so the original call raised TypeError.
    # NOTE(review): __init__ also dereferences args.client_id_list, so the
    # default args=None will still fail at runtime; confirm intended usage.
    client = MqttS3MultiClientsCommManager(
        mqtt_config, s3_config, topic="fedml_168_", client_rank=1, client_num=1
    )
    client.add_observer(Obs())
    time.sleep(3)

    print("client ID:%s" % client.client_id)

    message = Message(0, 1, 2)
    message.add_params("key1", 1)
    client.send_message(message)

    time.sleep(10)
    print("client, finished to send...")
|
websiteCheck.py | from dotenv import load_dotenv
from WebsiteChecker.WCKafka import KafkaProducer
from WebsiteChecker.Website import website
import os
import signal
import sys
import logging
import time
import threading
logger = logging.getLogger(__name__)

# bookkeeping for the worker threads started in main()
num_threads = 0
thread_running = False

# Lock for the Website-List
lock_ws = threading.Lock()
# Lock for thread-handling
lock_threads = threading.Lock()

# shared list of Website objects, filled by read_websites()
websites = []
# Signal-handler for SIGINT and SIGTERM
def handler_int(signum, frame):
    """Log the interruption and exit the process cleanly."""
    logger.info("WebsiteCheck got interupted")
    # simply exit
    sys.exit(0)
# Measure the websites and publish the results to Kafka
def check_websites():
    """Worker loop: poll every configured website and publish the results
    to Kafka every POLL_INTERVALL seconds. Never returns."""
    global lock_ws, lock_threads, thread_running, num_threads, websites

    # register this worker in the shared thread counter
    lock_threads.acquire()
    num_threads += 1
    thread_running = True
    lock_threads.release()

    # get all necessary config-values from the environment
    bootstrap_servers = os.getenv("KAFKA_BOOTSTRAP_SERVER")
    security_protocol = os.getenv("KAFKA_SECURITY_PROTOCOL")
    ssl_cafile = os.getenv("KAFKA_SSL_CAFILE")
    ssl_certfile = os.getenv("KAFKA_SSL_CERTFILE")
    ssl_keyfile = os.getenv("KAFKA_SSL_KEYFILE")
    topic = os.getenv("WC_TOPIC")

    # start the KafkaProducer
    producer = KafkaProducer.WcKafkaProducer(bootstrap_servers,
                                             security_protocol, ssl_cafile,
                                             ssl_certfile, ssl_keyfile)
    # create a new topic
    producer.create_topic(topic)

    # get measurements of the configured websites
    while True:
        # website-list is safed with a lock to prevent simultaneous access
        lock_ws.acquire()
        for ws in websites:
            ws.get_response()
            # publish results
            producer.send(topic, ws.as_json().encode("utf-8"))
        lock_ws.release()
        producer.flush()
        # measure every POLL_INTERVALL seconds
        time.sleep(int(os.getenv("POLL_INTERVALL")))

    # NOTE(review): unreachable -- the `while True` loop above has no break,
    # so the thread counter below is never decremented.
    lock_threads.acquire()
    num_threads -= 1
    lock_threads.release()
# get all websites which should get checked
# for simplicity this is hardcoded in this demo.
def read_websites():
    """Register the websites to be checked in the shared list.

    The targets are hard-coded for simplicity in this demo.
    """
    global lock_ws, lock_threads, thread_running, num_threads, websites

    # register this worker in the shared thread counter
    lock_threads.acquire()
    num_threads += 1
    thread_running = True
    lock_threads.release()

    targets = [
        website.Website("https://vpn.haas.works", regex="Werben"),
        website.Website("http://www.google.de", regex="Werben"),
        website.Website("http://www.haribo.de"),
    ]

    # the shared website list is guarded against concurrent access
    lock_ws.acquire()
    websites.extend(targets)
    lock_ws.release()

    # deregister from the thread counter
    lock_threads.acquire()
    num_threads -= 1
    lock_threads.release()
def main():
    """Configure logging/signals from the environment and start the
    website-reader and website-checker worker threads."""
    # load environment configuration from .env file
    load_dotenv(verbose=True)

    # set the loglevel
    # BUG FIX: the original `getattr(logging, name)` raised AttributeError for
    # any invalid LOGLEVEL (and `.upper()` crashed when LOGLEVEL was unset),
    # so the WARNING fallback below could never run. Use a default instead.
    level_name = (os.getenv("LOGLEVEL") or "WARNING").upper()
    loglevel = getattr(logging, level_name, None)
    if not isinstance(loglevel, int):
        print(f"{os.getenv('LOGLEVEL')} as LOGLEVEL is not valid. "
              "Using WARNING instead")
        loglevel = logging.WARNING
    logging.basicConfig(level=loglevel)

    # set signal handlers
    signal.signal(signal.SIGINT, handler_int)
    signal.signal(signal.SIGTERM, handler_int)

    # create and start website-reader thread
    reader_thread = threading.Thread(target=read_websites)
    reader_thread.start()

    # create and start website-checker thread
    checker_thread = threading.Thread(target=check_websites)
    checker_thread.start()


main()
|
pyminer.py | #!/usr/bin/python
#
# Copyright (c) 2011 The Bitcoin developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
import time
import json
import pprint
import hashlib
import struct
import re
import base64
import httplib
import sys
from multiprocessing import Process
# seconds to wait before retrying after an RPC failure
ERR_SLEEP = 15
# nonces scanned per getwork before fetching fresh work (Python 2 long)
MAX_NONCE = 1000000L

# key=value pairs parsed from the config file given on the command line
settings = {}
pp = pprint.PrettyPrinter(indent=4)
class BitcoinRPC:
    """Minimal JSON-RPC 1.1 client for a bitcoind node (Python 2)."""

    # request id counter; incremented per call
    OBJID = 1

    def __init__(self, host, port, username, password):
        # HTTP basic auth header reused for every request
        authpair = "%s:%s" % (username, password)
        self.authhdr = "Basic %s" % (base64.b64encode(authpair))
        # persistent connection, 30 second timeout
        self.conn = httplib.HTTPConnection(host, port, False, 30)

    def rpc(self, method, params=None):
        """POST one JSON-RPC call and return its 'result' field.

        Returns None on transport/decode problems.
        NOTE(review): on an RPC-level error this returns the *error object*
        instead of raising -- callers must check for that shape.
        """
        self.OBJID += 1
        obj = { 'version' : '1.1',
                'method' : method,
                'id' : self.OBJID }
        if params is None:
            obj['params'] = []
        else:
            obj['params'] = params
        self.conn.request('POST', '/', json.dumps(obj),
            { 'Authorization' : self.authhdr,
              'Content-type' : 'application/json' })

        resp = self.conn.getresponse()
        if resp is None:
            print "JSON-RPC: no response"
            return None

        body = resp.read()
        resp_obj = json.loads(body)
        if resp_obj is None:
            print "JSON-RPC: cannot JSON-decode body"
            return None
        if 'error' in resp_obj and resp_obj['error'] != None:
            return resp_obj['error']
        if 'result' not in resp_obj:
            print "JSON-RPC: no result in object"
            return None

        return resp_obj['result']

    def getblockcount(self):
        """Return the node's current block height."""
        return self.rpc('getblockcount')

    def getwork(self, data=None):
        """Fetch new work (data=None) or submit a solved block."""
        return self.rpc('getwork', data)
def uint32(x):
    """Truncate x to an unsigned 32-bit value (Python 2 long)."""
    return x & 0xffffffffL
def bytereverse(x):
    """Swap the byte order of a 32-bit integer (endianness flip)."""
    return uint32(( ((x) << 24) | (((x) << 8) & 0x00ff0000) |
                    (((x) >> 8) & 0x0000ff00) | ((x) >> 24) ))
def bufreverse(in_buf):
    """Byte-swap every 32-bit word of a binary string in place-order.

    Assumes len(in_buf) is a multiple of 4 (Python 2 str of bytes).
    """
    out_words = []
    for i in range(0, len(in_buf), 4):
        # native-endian unpack, byte-reverse, repack
        word = struct.unpack('@I', in_buf[i:i+4])[0]
        out_words.append(struct.pack('@I', bytereverse(word)))
    return ''.join(out_words)
def wordreverse(in_buf):
    """Reverse the order of the 32-bit (4-char) words in a buffer."""
    words = [in_buf[off:off + 4] for off in range(0, len(in_buf), 4)]
    return ''.join(reversed(words))
class Miner:
    """Single CPU mining worker: fetches getwork, scans nonces, submits."""

    def __init__(self, id):
        # worker id, used only for hashmeter output
        self.id = id
        # nonces to scan per work unit; retuned each iteration from scantime
        self.max_nonce = MAX_NONCE

    def work(self, datastr, targetstr):
        """Scan nonces against one work unit.

        Returns (hashes_done, nonce_bin) where nonce_bin is the winning
        nonce bytes or None if no solution was found.
        """
        # decode work data hex string to binary
        static_data = datastr.decode('hex')
        static_data = bufreverse(static_data)

        # the first 76b of 80b do not change
        blk_hdr = static_data[:76]

        # decode 256-bit target value
        targetbin = targetstr.decode('hex')
        targetbin = targetbin[::-1]	# byte-swap and dword-swap
        targetbin_str = targetbin.encode('hex')
        target = long(targetbin_str, 16)

        # pre-hash first 76b of block header
        static_hash = hashlib.sha256()
        static_hash.update(blk_hdr)

        for nonce in xrange(self.max_nonce):

            # encode 32-bit nonce value
            nonce_bin = struct.pack("<I", nonce)

            # hash final 4b, the nonce value
            hash1_o = static_hash.copy()
            hash1_o.update(nonce_bin)
            hash1 = hash1_o.digest()

            # sha256 hash of sha256 hash
            hash_o = hashlib.sha256()
            hash_o.update(hash1)
            hash = hash_o.digest()

            # quick test for winning solution: high 32 bits zero?
            if hash[-4:] != '\0\0\0\0':
                continue

            # convert binary hash to 256-bit Python long
            hash = bufreverse(hash)
            hash = wordreverse(hash)

            hash_str = hash.encode('hex')
            l = long(hash_str, 16)

            # proof-of-work test:  hash < target
            if l < target:
                print time.asctime(), "PROOF-OF-WORK found: %064x" % (l,)
                return (nonce + 1, nonce_bin)
            else:
                print time.asctime(), "PROOF-OF-WORK false positive %064x" % (l,)
#				return (nonce + 1, nonce_bin)

        return (nonce + 1, None)

    def submit_work(self, rpc, original_data, nonce_bin):
        """Splice the winning nonce into the work data and submit upstream."""
        nonce_bin = bufreverse(nonce_bin)
        nonce = nonce_bin.encode('hex')
        # nonce occupies hex chars [152:160) of the 256-char data field
        solution = original_data[:152] + nonce + original_data[160:256]
        param_arr = [ solution ]
        result = rpc.getwork(param_arr)
        print time.asctime(), "--> Upstream RPC result:", result

    def iterate(self, rpc):
        """One mining round: fetch work, scan, retune max_nonce, submit."""
        work = rpc.getwork()
        if work is None:
            time.sleep(ERR_SLEEP)
            return
        if 'data' not in work or 'target' not in work:
            time.sleep(ERR_SLEEP)
            return

        time_start = time.time()

        (hashes_done, nonce_bin) = self.work(work['data'],
                                             work['target'])

        time_end = time.time()
        time_diff = time_end - time_start

        # scale next scan so one pass takes roughly settings['scantime']
        self.max_nonce = long(
            (hashes_done * settings['scantime']) / time_diff)
        if self.max_nonce > 0xfffffffaL:
            self.max_nonce = 0xfffffffaL

        if settings['hashmeter']:
            print "HashMeter(%d): %d hashes, %.2f Khash/sec" % (
                self.id, hashes_done,
                (hashes_done / 1000.0) / time_diff)

        if nonce_bin is not None:
            self.submit_work(rpc, work['data'], nonce_bin)

    def loop(self):
        """Connect to the RPC node and mine forever."""
        rpc = BitcoinRPC(settings['host'], settings['port'],
                         settings['rpcuser'], settings['rpcpass'])
        if rpc is None:
            return

        while True:
            self.iterate(rpc)
def miner_thread(id):
    """Worker-process entry point: run one Miner instance forever."""
    Miner(id).loop()
if __name__ == '__main__':
    if len(sys.argv) != 2:
        print "Usage: pyminer.py CONFIG-FILE"
        sys.exit(1)

    # parse the config file: one "key = value" per line, '#' comments allowed
    f = open(sys.argv[1])
    for line in f:
        # skip comment lines
        m = re.search('^\s*#', line)
        if m:
            continue

        # parse key=value lines
        m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
        if m is None:
            continue
        settings[m.group(1)] = m.group(2)
    f.close()

    # defaults for any setting the config file did not provide
    if 'host' not in settings:
        settings['host'] = '127.0.0.1'
    if 'port' not in settings:
        settings['port'] = 7554
    if 'threads' not in settings:
        settings['threads'] = 1
    if 'hashmeter' not in settings:
        settings['hashmeter'] = 0
    if 'scantime' not in settings:
        settings['scantime'] = 30L
    if 'rpcuser' not in settings or 'rpcpass' not in settings:
        print "Missing username and/or password in cfg file"
        sys.exit(1)

    # config values arrive as strings; coerce the numeric ones
    settings['port'] = int(settings['port'])
    settings['threads'] = int(settings['threads'])
    settings['hashmeter'] = int(settings['hashmeter'])
    settings['scantime'] = long(settings['scantime'])

    # one mining process per configured thread, staggered by a second
    thr_list = []
    for thr_id in range(settings['threads']):
        p = Process(target=miner_thread, args=(thr_id,))
        p.start()
        thr_list.append(p)
        time.sleep(1)			# stagger threads

    print settings['threads'], "mining threads started"

    print time.asctime(), "Miner Starts - %s:%s" % (settings['host'], settings['port'])
    try:
        for thr_proc in thr_list:
            thr_proc.join()
    except KeyboardInterrupt:
        pass
    print time.asctime(), "Miner Stops - %s:%s" % (settings['host'], settings['port'])
|
main.py | from instapy_cli import client
from os import listdir
from os.path import isfile, join
import time
from tkinter import *
import tkinter as tk
from tkinter import filedialog
import threading
from PIL import ImageTk,Image
from tkinter.ttk import Progressbar
# main application window
window = Tk()
window.geometry("500x400")
window.title("IMPU | 1.1")
def choose():
    """Ask the user for a folder and record its files for upload()."""
    global filename
    filename = filedialog.askdirectory()
    print(filename)
    global onlyfiles
    # non-recursive listing: plain files only, sub-directories are skipped
    onlyfiles = [f for f in listdir
                 (filename) if isfile(join(filename, f))]
    global length_onlyfiles
    length_onlyfiles = len(onlyfiles)
    print(length_onlyfiles)
    # show the chosen path in the folder entry widget
    e.delete(0,END)
    e.insert(0,filename)
    global gaythanos
    # status text displayed next to the progress bar while uploading
    gaythanos = ("")
def upload():
    """Upload every file of the chosen folder to Instagram sequentially,
    updating the progress bar. Runs on a background thread."""
    quantity = 0
    username = e_login.get()
    password = e_password.get()
    # determinate progress bar sized to the file count found by choose()
    progress=Progressbar(window,orient=HORIZONTAL,length=100,
                         mode='determinate')
    progress["maximum"] = length_onlyfiles
    progress.place(y=190,x=197)
    # single logged-in session reused for all uploads
    with client(username, password) as cli:
        while quantity < length_onlyfiles:
            file_da_caricare = (filename +"/"+ onlyfiles[quantity])
            info_file1 = str(quantity+1)
            info_quantity = str(length_onlyfiles)
            info_file2 = ("["+info_file1+"/"+info_quantity+"] "+file_da_caricare)
            print(info_file2)
            time.sleep(1)
            cli.upload(file_da_caricare)
            quantity += 1
            global gaythanos
            # NOTE(review): quantity was already incremented, so this shows
            # the index of the NEXT file (one past the one just uploaded) --
            # confirm whether that is intended.
            bo2 = str(quantity+1)
            gaythanos = ("Uploading... "+"["+bo2+"/"+info_quantity+"] ")
            progress['value']=quantity
def start_submit_thread(event):
    """Run upload() on a daemon thread and start polling its status."""
    global submit_thread
    submit_thread = threading.Thread(target=upload, daemon=True)
    submit_thread.start()
    # poll the worker every 20 ms from the Tk event loop
    window.after(20, check_submit_thread)
def progress(currentValue):
    # NOTE(review): `progressbar` is not defined anywhere in this module
    # (upload() uses a local named `progress`, shadowed by this function) --
    # calling this would raise NameError; appears to be dead code.
    progressbar["value"]=currentValue
def check_submit_thread():
    """Refresh the status label; reschedule itself while the upload runs."""
    file_label.config(text=gaythanos)
    if not submit_thread.is_alive():
        file_label.config(text="Upload Complete")
    else:
        # still working: poll again in 20 ms
        window.after(20, check_submit_thread)
def stop_upload_thread():
    # NOTE(review): threading.Thread has no stop() method and `stop_Button`
    # is never defined, so both branches would raise at runtime; Python
    # threads cannot be force-stopped -- a cooperative flag checked inside
    # upload() would be needed. Appears to be unused/dead code.
    if submit_thread.is_alive():
        submit_thread.stop()
        print("Thread stopped")
        stop_Button.delete()
    else:
        print("something occured")
# label updated by check_submit_thread() with the current upload status
file_label=Label(window,text="")
file_label.place(y=170,x=167)
def on_entry_click(event):
    """Clear the login placeholder when the field gains focus.

    The name is reused below for the password field; that is safe because
    e_login.bind() captures THIS function object before the redefinition.
    """
    if e_login.get() == 'Login...':
        e_login.delete(0, "end")
        e_login.insert(0, '')
        e_login.config(fg = 'black')
def on_focusout(event):
    """Restore the grey login placeholder if the field was left empty."""
    if e_login.get() == '':
        e_login.insert(0, 'Login...')
        e_login.config(fg = 'grey')
# login entry with grey placeholder text managed by focus handlers
e_login = Entry(window,width=20)
e_login.pack()
e_login.insert(0, 'Login...')
e_login.bind('<FocusIn>', on_entry_click)
e_login.bind('<FocusOut>', on_focusout)
e_login.config(fg = 'grey')
def on_entry_click(event):
    """Clear the password placeholder and enable '*' masking on focus.

    Intentionally reuses the handler name used for the login field above;
    the earlier binding already captured the previous function object.
    """
    if e_password.get() == 'Password...':
        e_password.delete(0, "end")
        e_password.insert(0, '')
        e_password.config(fg = 'black',show= '*')
def on_focusout(event):
    """Restore the unmasked grey password placeholder if left empty."""
    if e_password.get() == '':
        e_password.insert(0, 'Password...')
        e_password.config(fg = 'grey',show= '')
# password entry with grey placeholder text managed by focus handlers
e_password = Entry(window,width=20)
e_password.pack()
e_password.insert(0, 'Password...')
e_password.bind('<FocusIn>', on_entry_click)
e_password.bind('<FocusOut>', on_focusout)
e_password.config(fg = 'grey')
def show_password():
    """Toggle password masking according to the 'show' checkbox state."""
    if check_password_var.get():
        e_password.config(show='')   # reveal the typed characters
    else:
        e_password.config(show='*')  # mask them again
# 'show password' checkbox wired to show_password()
check_password_var = tk.BooleanVar()
check_password = Checkbutton(window, text="show", var=check_password_var,
                             command=show_password)
check_password.place(y=17,x=335)

# folder path entry plus the folder-picker and upload buttons
e = Entry(window,width=20)
e.place(y = 100, x =167)
Button(window, text="choose folder", command=choose).place(y=98, x=335)
Button(window, text="Upload",
       command=lambda:start_submit_thread(None)).place(y=130,x=210)

# enter the Tk event loop (blocks until the window is closed)
window.mainloop()
|
Binance_Detect_Moonings.py | """
Disclaimer
All investment strategies and investments involve risk of loss.
Nothing contained in this program, scripts, code or repositoy should be
construed as investment advice.Any reference to an investment's past or
potential performance is not, and should not be construed as, a recommendation
or as a guarantee of any specific outcome or profit.
By using this program you accept all liabilities,
and that no claims can be made against the developers,
or others connected with the program.
"""
# use for environment variables
import os
import math
from decimal import Decimal
# use if needed to pass args to external modules
import sys
# used to create threads & dynamic loading of modules
import threading
import multiprocessing
import importlib
# used for directory handling
import glob
#discord needs import request
import requests
# Needed for colorful console output Install with: python3 -m pip install colorama (Mac/Linux) or pip install colorama (PC)
from colorama import init
init()
# needed for the binance API / websockets / Exception handling
from binance.client import Client
from binance.exceptions import BinanceAPIException
from binance.helpers import round_step_size
from requests.exceptions import ReadTimeout, ConnectionError
# used for dates
from datetime import date, datetime, timedelta
import time
# used to repeatedly execute the code
from itertools import count
# used to store trades and sell assets
import json
# Load helper modules
from helpers.parameters import (
parse_args, load_config
)
# Load creds modules
from helpers.handle_creds import (
load_correct_creds, test_api_key
)
# for colourful logging to the console
class txcolors:
    """ANSI escape sequences used to colour console output."""
    BUY = '\033[92m'
    WARNING = '\033[93m'
    SELL_LOSS = '\033[91m'
    SELL_PROFIT = '\033[32m'
    DIM = '\033[2m\033[35m'
    DEFAULT = '\033[39m'
# print with timestamps
# keep a reference to the real stdout before it is replaced below
old_out = sys.stdout
class St_ampe_dOut:
    """stdout proxy that prefixes the start of every line with a dimmed
    timestamp, forwarding everything to the original stdout."""

    # True when the next write() begins a fresh line
    nl = True

    def write(self, x):
        """Write function overloaded."""
        if x == '\n':
            self.nl = True
            old_out.write(x)
        elif self.nl:
            # first chunk of a new line: emit the timestamp prefix
            self.nl = False
            old_out.write(f'{txcolors.DIM}[{str(datetime.now().replace(microsecond=0))}]{txcolors.DEFAULT} {x}')
        else:
            old_out.write(x)

    def flush(self):
        # file-like API compatibility; old_out handles actual buffering
        pass
# route all print() output through the timestamping proxy
sys.stdout = St_ampe_dOut()

# tracks profit/loss each session
# NOTE(review): `global` at module scope is a no-op; these two statements
# only document which names the functions below treat as globals.
global session_profit, trade_wins, trade_losses, sellall, PriceChange, Pending_sum, Pending_perc
global WIN_LOSS_PERCENT, coins_up, coins_down, coins_unchanged

session_profit = 0
bot_started_datetime = datetime.now()
trade_wins = 0
trade_losses = 0
sellall = ''
PriceChange = 0.0
Pending_sum = 0.0
Pending_perc = 0.0
WIN_LOSS_PERCENT = 0.0
coins_up = 0
coins_down = 0
coins_unchanged = 0
def stop_signal_threads():
    """Terminate every external signalling process started at boot.

    Best-effort cleanup: if the module-level ``signalthreads`` list does not
    exist yet (no signal modules were loaded) or a process is already dead,
    the error is ignored so shutdown can continue.
    """
    try:
        for signalthread in signalthreads:
            print(f'Terminating thread {str(signalthread.name)}')
            signalthread.terminate()
    # a bare `except:` would also swallow SystemExit/KeyboardInterrupt,
    # which must propagate during shutdown
    except Exception:
        pass
def get_price(add_to_historical=True):
    '''Return the current price for all coins on binance'''
    # Reads module globals: client, CUSTOM_LIST, tickers, PAIR_WITH, FIATS,
    # RECHECK_INTERVAL. When add_to_historical is True, also advances the
    # cyclical write head and stores this snapshot in historical_prices.
    global historical_prices, hsp_head
    initial_price = {}
    prices = client.get_all_tickers()
    for coin in prices:
        if CUSTOM_LIST:
            # keep only symbols built from the user's ticker list quoted in
            # PAIR_WITH, excluding any symbol that contains a fiat currency
            if any(item + PAIR_WITH == coin['symbol'] for item in tickers) and all(item not in coin['symbol'] for item in FIATS):
                initial_price[coin['symbol']] = { 'price': coin['price'], 'time': datetime.now()}
        else:
            # keep every PAIR_WITH-quoted symbol that is not a fiat pair
            if PAIR_WITH in coin['symbol'] and all(item not in coin['symbol'] for item in FIATS):
                initial_price[coin['symbol']] = { 'price': coin['price'], 'time': datetime.now()}
    if add_to_historical:
        hsp_head += 1
        if hsp_head == RECHECK_INTERVAL:
            # NOTE(review): historical_prices is sized
            # TIME_DIFFERENCE * RECHECK_INTERVAL (see __main__) but the head
            # wraps at RECHECK_INTERVAL, so only the first RECHECK_INTERVAL
            # slots are ever written — confirm which size is intended.
            hsp_head = 0
        historical_prices[hsp_head] = initial_price
    return initial_price
def print_stats(PriceChange,Pending_sum,Pending_perc,coins_up,coins_down,coins_unchanged):
    """Print the periodic status report to the console.

    Output only; reads many module globals (session_profit, trade counters,
    QUANTITY, coins_bought, SL/TP settings, ...) besides the explicit
    arguments. Note: the PriceChange parameter is currently unused here.
    """
    # average realised profit per closed trade (guard divide-by-zero)
    if trade_wins+trade_losses > 0:
        Profit_Per_Trade = (session_profit/(trade_wins+trade_losses))
    else:
        Profit_Per_Trade = 0
    print(f'')
    print(f'--------')
    print(f'')
    print(f'Working...')
    # https://algotrading101.com/learn/binance-python-api-guide/
    # Gets all the coin balances on Binance
    # print(client.get_account())
    if TEST_MODE:
        print(f'Mode : Test mode, {txcolors.SELL_PROFIT}no real money is used')
    else:
        print(f'Mode : {txcolors.WARNING}You are using REAL money!')
    # realised, unrealised and combined profit summaries
    print(f'Session profit : {txcolors.SELL_PROFIT if session_profit > 0. else txcolors.SELL_LOSS}{session_profit:.2f}% Est : ${(QUANTITY * session_profit)/100:.2f}, ~{Profit_Per_Trade:.2f}% per trade {txcolors.DEFAULT}')
    print(f'Pending profit :{txcolors.SELL_PROFIT if Pending_perc > 0. else txcolors.SELL_LOSS} {Pending_perc:.2f}%, {Pending_sum:.2f} USDT{txcolors.DEFAULT}')
    print(f'Overall profit :{txcolors.SELL_PROFIT if session_profit+Pending_perc > 0. else txcolors.SELL_LOSS} {(session_profit+Pending_perc):.2f}%, {(QUANTITY * (session_profit+Pending_perc))/100:.2f} USDT')
    print(f'Trades total : {trade_wins+trade_losses}, Wins {trade_wins}, Losses {trade_losses}, Win ratio {WIN_LOSS_PERCENT}%')
    print('Last sell :',last_sell_datetime)
    print(f'Started : {bot_started_datetime} | Run time : {datetime.now() - bot_started_datetime}')
    # current portfolio occupancy vs the configured cap
    if MAX_COINS > 0:
        print(f'Coins Currently : {len(coins_bought)}/{MAX_COINS} ({float(len(coins_bought)*QUANTITY):g}/{float(MAX_COINS*QUANTITY):g} {PAIR_WITH})')
    else:
        print(f'Coins Currently : {len(coins_bought)}/0 ({float(len(coins_bought)*QUANTITY):g}/0 {PAIR_WITH})')
    print(f'Coin\'s Status : {txcolors.SELL_PROFIT}Up {coins_up}, {txcolors.SELL_LOSS}Down: {coins_down}{txcolors.DEFAULT}, Unchanged: {coins_unchanged}')
    print(f'Stop Loss : {STOP_LOSS}%')
    print(f'Take Profit : {TAKE_PROFIT}%')
    print('Use TSL :',USE_TRAILING_STOP_LOSS, end = '')
    if USE_TRAILING_STOP_LOSS: print(f', TSL {TRAILING_STOP_LOSS}%, TTP {TRAILING_TAKE_PROFIT}%')
    else: print(f'')
    # list the external signalling modules currently running, if any
    print('Used sig. mod(s):', end=" ")
    if len(SIGNALLING_MODULES) > 0:
        #for module in SIGNALLING_MODULES:
        #print(module, end=" ")
        print(*tuple(module for module in SIGNALLING_MODULES))
    #print(f'')
    print(f'')
    print(f'--------')
    print(f'')
def wait_for_price():
    '''calls the initial price and ensures the correct amount of time has passed
    before reading the current price again'''
    # Core scan loop body: sleeps out the remainder of the tick, refreshes
    # settings and prices, then returns the coins considered volatile enough
    # to buy. Returns (volatile_coins, count, latest price snapshot).
    global historical_prices, hsp_head, volatility_cooloff, WIN_LOSS_PERCENT
    global coins_up,coins_down,coins_unchanged
    volatile_coins = {}
    externals = {}
    coins_up = 0
    coins_down = 0
    coins_unchanged = 0
    WIN_LOSS_PERCENT = 0
    # blocks while signals/paused.exc exists (selling continues inside)
    pause_bot()
    # use the BNB pair's timestamp as a proxy for the whole snapshot's age
    if historical_prices[hsp_head]['BNB' + PAIR_WITH]['time'] > datetime.now() - timedelta(minutes=float(TIME_DIFFERENCE / RECHECK_INTERVAL)):
        # sleep for exactly the amount of time required
        time.sleep((timedelta(minutes=float(TIME_DIFFERENCE / RECHECK_INTERVAL)) - (datetime.now() - historical_prices[hsp_head]['BNB' + PAIR_WITH]['time'])).total_seconds())
    # truncating some of the above values to the correct decimal places before printing
    if (trade_wins > 0) and (trade_losses > 0):
        WIN_LOSS_PERCENT = round((trade_wins / (trade_wins+trade_losses)) * 100, 2)
    if (trade_wins > 0) and (trade_losses == 0):
        WIN_LOSS_PERCENT = 100
    #print(f'Wins : {trade_wins}, Losses : {trade_losses}, {WIN_LOSS_PERCENT}% ')
    # re-read config.yml every tick so settings can be changed live
    load_settings()
    # retreive latest prices
    get_price()
    if MAX_COINS < 1:
        print(f'')
        print(f'{txcolors.WARNING}MAX_COINS is set to zero or below({MAX_COINS}), no coins will be bought.')
        print(f'{txcolors.WARNING}If you want the bot to buy more coins, set MAX_COINS > {len(coins_bought) if len(coins_bought) > 0. else 0} and save settings file !')
    # MAX_COINS == -1 is the "wind down and quit" sentinel
    if MAX_COINS == -1:
        print(f'')
        print(f'{txcolors.WARNING}The bot is set to terminate after all the coins are sold')
        if len(coins_bought) == 0:
            print(f'')
            print(f'{txcolors.WARNING}All the coins are sold, terminating the bot now')
            # stop external signal threads
            stop_signal_threads()
            sys.exit(0)
    # calculate the difference in prices
    for coin in historical_prices[hsp_head]:
        # minimum and maximum prices over time period
        # NOTE(review): assumes every snapshot in the window contains `coin`;
        # a coin newly listed mid-window would raise KeyError here.
        min_price = min(historical_prices, key = lambda x: float("inf") if x is None else float(x[coin]['price']))
        max_price = max(historical_prices, key = lambda x: -1 if x is None else float(x[coin]['price']))
        # percent move over the window, negated when the high came first
        threshold_check = (-1.0 if min_price[coin]['time'] > max_price[coin]['time'] else 1.0) * (float(max_price[coin]['price']) - float(min_price[coin]['price'])) / float(min_price[coin]['price']) * 100
        # each coin with higher gains than our CHANGE_IN_PRICE is added to the volatile_coins dict if less than MAX_COINS is not reached.
        if threshold_check > CHANGE_IN_PRICE:
            coins_up +=1
            if coin not in volatility_cooloff:
                volatility_cooloff[coin] = datetime.now() - timedelta(minutes=TIME_DIFFERENCE)
            # only include coin as volatile if it hasn't been picked up in the last TIME_DIFFERENCE minutes already
            if datetime.now() >= volatility_cooloff[coin] + timedelta(minutes=TIME_DIFFERENCE):
                volatility_cooloff[coin] = datetime.now()
                if len(coins_bought) + len(volatile_coins) < MAX_COINS: # or MAX_COINS == 0
                    volatile_coins[coin] = round(threshold_check, 3)
                    print(f'{coin} has gained {volatile_coins[coin]}% within the last {TIME_DIFFERENCE} minute(s), calculating volume in {PAIR_WITH}')
                else:
                    if MAX_COINS > 0:
                        print(f'{txcolors.WARNING}{coin} has gained {round(threshold_check, 3)}% within the last {TIME_DIFFERENCE} minute(s), but you are holding max number of coins{txcolors.DEFAULT}')
        elif threshold_check < CHANGE_IN_PRICE:
            coins_down +=1
        else:
            coins_unchanged +=1
    # Here goes new code for external signalling
    externals = external_signals()
    exnumber = 0
    for excoin in externals:
        # externally signalled coins bypass the volatility check but still
        # respect MAX_COINS and skip coins already held or already selected
        if excoin not in volatile_coins and excoin not in coins_bought and \
                (len(coins_bought) + exnumber + len(volatile_coins)) < MAX_COINS:
            volatile_coins[excoin] = 1
            exnumber +=1
            print(f'External signal received on {excoin}, calculating volume in {PAIR_WITH}')
    print_stats(PriceChange,Pending_sum,Pending_perc,coins_up,coins_down,coins_unchanged)
    return volatile_coins, len(volatile_coins), historical_prices[hsp_head]
def external_signals():
    """Collect buy signals dropped by external signalling modules.

    Reads every ``signals/*.exs`` file, takes each stripped line as a ticker
    symbol, and deletes the file afterwards so a signal is only acted on once.

    :return: dict mapping symbol -> symbol for every signalled ticker
    """
    external_list = {}
    # check directory and load pairs from files into external_list
    for filename in glob.glob("signals/*.exs"):
        # close the handle before deleting the file (the original leaked the
        # handle and called os.remove once per line, while the file was open)
        with open(filename) as signal_file:
            for line in signal_file:
                symbol = line.strip()
                external_list[symbol] = symbol
        try:
            os.remove(filename)
        except OSError:
            if DEBUG: print(f'{txcolors.WARNING}Could not remove external signalling file{txcolors.DEFAULT}')
    return external_list
def pause_bot():
    '''Pause the script when exeternal indicators detect a bearish trend in the market'''
    # Blocks while signals/paused.exc exists. Buying is suspended, but
    # sell_coins() keeps running so SL/TP protection stays active.
    global bot_paused, session_profit, hsp_head
    # start counting for how long the bot's been paused
    start_time = time.perf_counter()
    while os.path.isfile("signals/paused.exc"):
        if bot_paused == False:
            print(f'{txcolors.WARNING}Pausing buying due to change in market conditions, stop loss and take profit will continue to work...{txcolors.DEFAULT}')
            bot_paused = True
        # Sell function needs to work even while paused
        coins_sold = sell_coins()
        remove_from_portfolio(coins_sold)
        get_price(True)
        # pausing here; print the status block once per TIME_DIFFERENCE cycle
        if hsp_head == 1:
            print(f'')
            print(f'Paused...')
            #Session profit : {session_profit:.2f}% Est : ${(QUANTITY * session_profit)/100:.2f}')
            print_stats(PriceChange,Pending_sum,Pending_perc,coins_up,coins_down,coins_unchanged)
        time.sleep((TIME_DIFFERENCE * 60) / RECHECK_INTERVAL)
    else:
        # while/else: this runs whenever the loop condition turns false,
        # i.e. every call once the pause file is absent (there is no break)
        # stop counting the pause time
        stop_time = time.perf_counter()
        time_elapsed = timedelta(seconds=int(stop_time-start_time))
        # resume the bot and ser pause_bot to False
        if bot_paused == True:
            print(f'{txcolors.WARNING}Resuming buying due to change in market conditions, total sleep time : {time_elapsed}{txcolors.DEFAULT}')
            bot_paused = False
    return
def convert_volume():
    '''Converts the volume given in QUANTITY from USDT to the each coin's volume'''
    # Blocks in wait_for_price() until the next tick, then sizes an order of
    # QUANTITY (quote currency) for every volatile coin, rounded to each
    # symbol's lot-size precision. Returns (volume dict, last price snapshot).
    volatile_coins, number_of_coins, last_price = wait_for_price()
    lot_size = {}
    volume = {}
    for coin in volatile_coins:
        # Find the correct step size for each coin
        # max accuracy for BTC for example is 6 decimal points
        # while XRP is only 1
        try:
            info = client.get_symbol_info(coin)
            # NOTE(review): assumes LOT_SIZE is always filters[2] — confirm
            # against the exchange info; filter order is not guaranteed.
            step_size = info['filters'][2]['stepSize']
            lot_size[coin] = step_size.index('1') - 1
            if lot_size[coin] < 0:
                lot_size[coin] = 0
        # was a bare `except:` (also swallowed KeyboardInterrupt/SystemExit);
        # on any lookup failure we fall back to the 1-decimal default below
        except Exception:
            pass
        # calculate the volume in coin from QUANTITY in USDT (default)
        volume[coin] = float(QUANTITY / float(last_price[coin]['price']))
        # define the volume with the correct step size
        if coin not in lot_size:
            volume[coin] = float('{:.1f}'.format(volume[coin]))
        else:
            # if lot size has 0 decimal points, make the volume an integer
            if lot_size[coin] == 0:
                volume[coin] = int(volume[coin])
            else:
                volume[coin] = float('{:.{}f}'.format(volume[coin], lot_size[coin]))
    return volume, last_price
def dropzeros(number):
    """Normalise *number*: return an int when it has no fractional part,
    otherwise a float (e.g. '22000' -> 22000, '2.50' -> 2.5)."""
    normalised = Decimal(number).normalize()
    if normalised % 1:
        return float(normalised)
    # e.g 22000 --> Decimal('2.2E+4'); truncate back to a plain int
    return normalised.__trunc__()
def remove_zeros(num):
    """Return *num* with every trailing '0' character removed."""
    # the original deleted trailing list entries one by one from the end,
    # stopping at the first non-'0' — which is exactly str.rstrip('0')
    return num.rstrip('0')
def buy():
    '''Place Buy market orders for each volatile coin found'''
    # Returns (orders, last_price, volume); orders only contains coins for
    # which an order was actually recorded (test or live).
    volume, last_price = convert_volume()
    orders = {}
    for coin in volume:
        # only buy if the there are no active trades on the coin
        if coin not in coins_bought:
            # NOTE(review): truncating to 4 decimals here overrides the
            # per-symbol lot_size rounding done in convert_volume — confirm
            # this is intended for symbols with coarser step sizes.
            volume[coin] = math.floor(volume[coin]*10000)/10000
            print(f"{txcolors.BUY}Preparing to buy {volume[coin]} {coin}{txcolors.DEFAULT}")
            if TEST_MODE:
                # fabricate a minimal order record instead of hitting the API
                orders[coin] = [{
                    'symbol': coin,
                    'orderId': 0,
                    'time': datetime.now().timestamp()
                }]
                # Log trade
                if LOG_TRADES:
                    write_log(f"Buy : {volume[coin]} {coin} - {last_price[coin]['price']}")
                continue
            # try to create a real order if the test orders did not raise an exception
            try:
                buy_limit = client.create_order(
                    symbol = coin,
                    side = 'BUY',
                    type = 'MARKET',
                    quantity = volume[coin]
                )
            # error handling here in case position cannot be placed
            except Exception as e:
                print(e)
            # run the else block if the position has been placed and return order info
            else:
                orders[coin] = client.get_all_orders(symbol=coin, limit=1)
                # binance sometimes returns an empty list, the code will wait here until binance returns the order
                while orders[coin] == []:
                    print('Binance is being slow in returning the order, calling the API again...')
                    orders[coin] = client.get_all_orders(symbol=coin, limit=1)
                    time.sleep(1)
                else:
                    # while/else: runs whenever the poll loop exits normally
                    # (i.e. every time, since there is no break)
                    print('Order returned, saving order to file')
                    # Log trade
                    if LOG_TRADES:
                        write_log(f"Buy : {volume[coin]} {coin} - {last_price[coin]['price']}")
        else:
            print(f'Buy signal detected, but there is already an active trade on {coin}')
    return orders, last_price, volume
def sell_all(msgreason, session_tspl_ovr = False):
    """Stop the signal threads and liquidate the whole portfolio immediately.

    :param msgreason: human-readable reason (currently only used in the
        commented-out Discord notification)
    :param session_tspl_ovr: passed through to sell_coins() as an override
    """
    # NOTE(review): the assignment below rebinds the global name
    # ``sell_all_coins`` from the *function* defined later in this file to a
    # boolean flag — any subsequent call to sell_all_coins() would fail with
    # TypeError. The flag should be renamed (e.g. sell_all_now).
    global sell_all_coins, PriceChange
    #msg_discord(f'SELL ALL COINS: {msgreason}')
    # stop external signals so no buying/selling/pausing etc can occur
    stop_signal_threads()
    # sell all coins NOW!
    sell_all_coins = True
    coins_sold = sell_coins(session_tspl_ovr)
    remove_from_portfolio(coins_sold)
    # display final info to screen
    last_price = get_price()
    #discordmsg = balance_report(last_price)
    #msg_discord(discordmsg)
def sell_all_coins(msg=''):
    """Market-sell every held coin and print a final profit summary.

    NOTE(review): this function appears unused (its only call site is
    commented out in __main__) and has several latent defects:
    - ``sell_coins(session_tspl_ovr)`` passes an argument that sell_coins()
      does not accept, and ``session_tspl_ovr`` is never assigned anywhere;
    - in TEST_MODE the summary below references loop variables that were
      never bound (coin, BuyPrice, ...), raising NameError;
    - sell_all() clobbers this function's name with a boolean flag.
    Confirm intent before relying on it.
    """
    global session_tspl_ovr
    # reload the saved portfolio from disk rather than trusting memory
    with open(coins_bought_file_path, 'r') as f:
        coins_bought = json.load(f)
    total_profit = 0
    total_price_change = 0
    if not TEST_MODE:
        for coin in list(coins_bought):
            # liquidate at market and price the fill against the buy price
            sell_coin = client.create_order(
                symbol = coin,
                side = 'SELL',
                type = 'MARKET',
                quantity = coins_bought[coin]['volume'])
            BuyPrice = float(coins_bought[coin]['bought_at'])
            LastPrice = float(sell_coin['fills'][0]['price'])
            profit = (LastPrice - BuyPrice) * coins_bought[coin]['volume']
            PriceChange = float((LastPrice - BuyPrice) / BuyPrice * 100)
            total_profit += profit
            total_price_change += PriceChange
            coins_sold = sell_coins(session_tspl_ovr)
            remove_from_portfolio(coins_sold)
            text_color = txcolors.SELL_PROFIT if PriceChange >= 0. else txcolors.SELL_LOSS
            console_log_text = f"{text_color}Sell: {coins_bought[coin]['volume']} {coin} - {BuyPrice} - {LastPrice} Profit: {profit:.2f} {PriceChange:.2f}%{txcolors.DEFAULT}"
            print(console_log_text)
            if LOG_TRADES:
                timestamp = datetime.now().strftime("%d/%m %H:%M:%S")
                write_log(f"Sell: {coins_bought[coin]['volume']} {coin} - {BuyPrice} - {LastPrice} Profit: {profit:.2f} {PriceChange:.2f}%")
    # fold in the unrealised figures and report the grand total
    total_profit += Pending_sum
    total_price_change += Pending_perc
    print(f'')
    print(f'Pending Profit: {Pending_perc}%, {Pending_sum} USDT')
    text_color = txcolors.SELL_PROFIT if total_price_change >= 0. else txcolors.SELL_LOSS
    print(f"Total Profit: {text_color}{total_profit:.2f}{txcolors.DEFAULT}. Total Price Change: {text_color}{total_price_change:.2f}%{txcolors.DEFAULT}")
    with open(coins_bought_file_path, 'r') as f:
        coins_bought = json.load(f)
    #coins_bought = {}
    #os.remove(coins_bought_file_path)
def sell_coins():
    '''sell coins that have reached the STOP LOSS or TAKE PROFIT threshold'''
    # Also maintains trailing SL/TP, the pending-profit aggregates and the
    # win/loss counters. Returns the dict of coins sold this pass.
    global hsp_head, session_profit, trade_wins, trade_losses, Pending_sum, Pending_perc, PriceChange, last_sell_datetime
    Pending_sum = 0.0
    Pending_perc = 0.0
    last_price = get_price(False) # don't populate rolling window
    #last_price = get_price(add_to_historical=True) # don't populate rolling window
    coins_sold = {}
    #print(f'')
    for coin in list(coins_bought):
        # define stop loss and take profit as absolute prices from the
        # stored per-coin percent thresholds
        TP = float(coins_bought[coin]['bought_at']) + (float(coins_bought[coin]['bought_at']) * coins_bought[coin]['take_profit']) / 100
        SL = float(coins_bought[coin]['bought_at']) + (float(coins_bought[coin]['bought_at']) * coins_bought[coin]['stop_loss']) / 100
        LastPrice = float(last_price[coin]['price'])
        BuyPrice = float(coins_bought[coin]['bought_at'])
        PriceChange = float((LastPrice - BuyPrice) / BuyPrice * 100)
        # accumulate unrealised profit net of round-trip fees
        Pending_perc += PriceChange-(TRADING_FEE*2)
        Pending_sum += (QUANTITY*(PriceChange-(TRADING_FEE*2)))/100
        # check that the price is above the take profit and readjust SL and TP accordingly if trailing stop loss used
        if LastPrice > TP and USE_TRAILING_STOP_LOSS:
            # increasing TP by TRAILING_TAKE_PROFIT (essentially next time to readjust SL)
            coins_bought[coin]['take_profit'] = PriceChange + TRAILING_TAKE_PROFIT
            coins_bought[coin]['stop_loss'] = coins_bought[coin]['take_profit'] - TRAILING_STOP_LOSS
            if DEBUG: print(f"{coin} TP reached, adjusting TP {coins_bought[coin]['take_profit']:.2f} and SL {coins_bought[coin]['stop_loss']:.2f} accordingly to lock-in profit, , SL {SL}, TP {TP}")
            continue
        # check that the price is below the stop loss or above take profit (if trailing stop loss not used) and sell if this is the case
        if ((LastPrice < SL or LastPrice > TP) and not USE_TRAILING_STOP_LOSS) or (LastPrice < SL and USE_TRAILING_STOP_LOSS):
            print(f"{txcolors.SELL_PROFIT if PriceChange >= 0. else txcolors.SELL_LOSS}TP or SL reached, selling {coins_bought[coin]['volume']} {coin} - {BuyPrice} - {LastPrice} : {PriceChange-(TRADING_FEE*2):.2f}% Est:${(QUANTITY*(PriceChange-(TRADING_FEE*2)))/100:.2f}{txcolors.DEFAULT}, SL {SL:.2f}, TP {TP:.2f}")
            last_sell_datetime = datetime.now()
            # try to create a real order
            try:
                if not TEST_MODE:
                    sell_coins_limit = client.create_order(
                        symbol = coin,
                        side = 'SELL',
                        type = 'MARKET',
                        quantity = coins_bought[coin]['volume']
                    )
            # error handling here in case position cannot be placed
            except Exception as e:
                print(e)
            # run the else block if coin has been sold and create a dict for each coin sold
            else:
                coins_sold[coin] = coins_bought[coin]
                # prevent system from buying this coin for the next TIME_DIFFERENCE minutes
                volatility_cooloff[coin] = datetime.now()
                # NOTE(review): adding TRADING_FEE to both sides cancels out;
                # this is equivalent to LastPrice >= BuyPrice — confirm the
                # intended fee adjustment.
                if (LastPrice+TRADING_FEE) >= (BuyPrice+TRADING_FEE):
                    trade_wins += 1
                else:
                    trade_losses += 1
                # Log trade
                if LOG_TRADES:
                    # adjust for trading fee here
                    profit = ((LastPrice - BuyPrice) * coins_sold[coin]['volume'])* (1-(TRADING_FEE*2))
                    write_log(f"Sell: {coins_sold[coin]['volume']} {coin} - {BuyPrice} - {LastPrice} Profit: {profit:.2f} {PriceChange-(TRADING_FEE*2):.2f}%")
                session_profit=session_profit + (PriceChange-(TRADING_FEE*2))
                continue
        # no action; print once every TIME_DIFFERENCE
        if hsp_head == 1:
            if len(coins_bought) > 0:
                print(f"TP or SL not yet reached, not selling {coin} for now {BuyPrice} - {LastPrice} : {txcolors.SELL_PROFIT if PriceChange >= 0.0 else txcolors.SELL_LOSS}{PriceChange-(TRADING_FEE*2):.2f}% Est:${(QUANTITY*(PriceChange-(TRADING_FEE*2)))/100:.2f}{txcolors.DEFAULT}, SL {coins_bought[coin]['stop_loss']:.2f}%")
                # NOTE(review): resetting take_profit here while the coin is
                # held undoes any trailing-TP adjustment made above — confirm
                # this periodic reset is intentional.
                coins_bought[coin]['take_profit'] = TAKE_PROFIT
                if coins_bought[coin]['stop_loss'] < 0: coins_bought[coin]['stop_loss'] = STOP_LOSS
    if hsp_head == 1 and len(coins_bought) == 0: print(f'Not holding any coins')
    #print(f'')
    return coins_sold
def update_portfolio(orders, last_price, volume):
    '''add every coin bought to our portfolio for tracking/selling later'''
    # Records each filled order in the module-level coins_bought dict with
    # its buy price, volume and initial SL/TP thresholds, then persists the
    # whole portfolio to coins_bought_file_path as JSON.
    if DEBUG: print(orders)
    for coin in orders:
        coins_bought[coin] = {
            'symbol': orders[coin][0]['symbol'],
            'orderid': orders[coin][0]['orderId'],
            'timestamp': orders[coin][0]['time'],
            'bought_at': last_price[coin]['price'],
            'volume': volume[coin],
            'take_profit': TAKE_PROFIT,
            'stop_loss': STOP_LOSS,
        }
        print(f'Order with id {orders[coin][0]["orderId"]} placed and saved to file')
    # persist once per batch instead of re-serialising the whole portfolio
    # JSON file for every single coin (the original rewrote it in the loop)
    if orders:
        with open(coins_bought_file_path, 'w') as file:
            json.dump(coins_bought, file, indent=4)
def remove_from_portfolio(coins_sold):
    '''Remove coins sold due to SL or TP from portfolio'''
    # drop each sold symbol from the in-memory portfolio ...
    for symbol in coins_sold:
        del coins_bought[symbol]
    # ... then persist the trimmed portfolio to disk
    with open(coins_bought_file_path, 'w') as portfolio_file:
        json.dump(coins_bought, portfolio_file, indent=4)
def write_log(logline):
    """Append *logline* to LOG_FILE, prefixed with a day/month timestamp."""
    stamp = datetime.now().strftime("%d/%m %H:%M:%S")
    with open(LOG_FILE, 'a+') as logfile:
        logfile.write(f"{stamp} {logline}\n")
def remove_external_signals(fileext):
    """Delete every stale external-signal file with the given extension.

    :param fileext: signal file extension, e.g. 'buy', 'sell', 'pause'
    """
    # BUG FIX: the pattern was the literal string 'signals/*.{fileext}'
    # (missing the f prefix), so no file ever matched; the original also
    # opened each file and tried to delete it once per line while open.
    for filename in glob.glob(f'signals/*.{fileext}'):
        try:
            os.remove(filename)
        except OSError:
            if DEBUG: print(f'{txcolors.WARNING}Could not remove external signalling file (unknown){txcolors.DEFAULT}')
def load_settings():
    """Parse config.yml (or --config) and publish every setting as a module
    global. Called at startup and again on every tick so edits to the config
    file take effect live. Also resets bot_paused to False."""
    # Load arguments then parse settings
    #mymodule = {}
    # set to false at Start
    global bot_paused
    bot_paused = False
    DEFAULT_CONFIG_FILE = 'config.yml'
    config_file = args.config if args.config else DEFAULT_CONFIG_FILE
    # NOTE(review): parsed_config is local, but __main__ reads it when DEBUG
    # is on — that read raises NameError; consider making it global here.
    parsed_config = load_config(config_file)
    # Default no debugging
    global DEBUG, TEST_MODE, LOG_TRADES, LOG_FILE, DEBUG_SETTING, AMERICAN_USER, PAIR_WITH, QUANTITY, MAX_COINS, FIATS, TIME_DIFFERENCE, RECHECK_INTERVAL, CHANGE_IN_PRICE, STOP_LOSS, TAKE_PROFIT, CUSTOM_LIST, TICKERS_LIST, USE_TRAILING_STOP_LOSS, TRAILING_STOP_LOSS, TRAILING_TAKE_PROFIT, TRADING_FEE, SIGNALLING_MODULES
    DEBUG = False
    # Load system vars (.get() tolerates missing optional keys -> None)
    TEST_MODE = parsed_config['script_options']['TEST_MODE']
    LOG_TRADES = parsed_config['script_options'].get('LOG_TRADES')
    LOG_FILE = parsed_config['script_options'].get('LOG_FILE')
    DEBUG_SETTING = parsed_config['script_options'].get('DEBUG')
    AMERICAN_USER = parsed_config['script_options'].get('AMERICAN_USER')
    # Load trading vars (required keys -> KeyError if absent)
    PAIR_WITH = parsed_config['trading_options']['PAIR_WITH']
    QUANTITY = parsed_config['trading_options']['QUANTITY']
    MAX_COINS = parsed_config['trading_options']['MAX_COINS']
    FIATS = parsed_config['trading_options']['FIATS']
    TIME_DIFFERENCE = parsed_config['trading_options']['TIME_DIFFERENCE']
    RECHECK_INTERVAL = parsed_config['trading_options']['RECHECK_INTERVAL']
    CHANGE_IN_PRICE = parsed_config['trading_options']['CHANGE_IN_PRICE']
    STOP_LOSS = parsed_config['trading_options']['STOP_LOSS']
    TAKE_PROFIT = parsed_config['trading_options']['TAKE_PROFIT']
    CUSTOM_LIST = parsed_config['trading_options']['CUSTOM_LIST']
    TICKERS_LIST = parsed_config['trading_options']['TICKERS_LIST']
    USE_TRAILING_STOP_LOSS = parsed_config['trading_options']['USE_TRAILING_STOP_LOSS']
    TRAILING_STOP_LOSS = parsed_config['trading_options']['TRAILING_STOP_LOSS']
    TRAILING_TAKE_PROFIT = parsed_config['trading_options']['TRAILING_TAKE_PROFIT']
    TRADING_FEE = parsed_config['trading_options']['TRADING_FEE']
    SIGNALLING_MODULES = parsed_config['trading_options']['SIGNALLING_MODULES']
    # either the config file or the --debug CLI flag enables debug output
    if DEBUG_SETTING or args.debug:
        DEBUG = True
def load_profit(file):
    """Load a YAML profit file, exiting the program on any failure.

    NOTE(review): this helper appears unused; it relies on ``yaml`` which is
    never imported in this module, so calling it would raise NameError
    (caught by the broad handler below and reported as a generic exception).
    The ``with ... as file`` also rebinds the parameter name — harmless but
    confusing; rename one of them.
    """
    try:
        with open(file) as file:
            return yaml.load(file, Loader=yaml.FullLoader)
    except FileNotFoundError as fe:
        exit(f'Could not find {file}')
    except Exception as e:
        exit(f'Encountered exception...\n {e}')
if __name__ == '__main__':
    # ---- startup: CLI args, config, credentials, API client -------------
    args = parse_args()
    DEFAULT_CREDS_FILE = 'creds.yml'
    DEFAULT_PROFIT_FILE = 'profit.yml'
    mainnet_wait = 10  # safety delay (seconds) before trading live funds
    load_settings()
    creds_file = args.creds if args.creds else DEFAULT_CREDS_FILE
    # Load creds for correct environment
    parsed_creds = load_config(creds_file)
    access_key, secret_key = load_correct_creds(parsed_creds)
    if DEBUG:
        # NOTE(review): parsed_config is local to load_settings(), so this
        # line raises NameError when DEBUG is enabled — needs a global.
        print(f'loaded config below\n{json.dumps(parsed_config, indent=4)}')
        print(f'Your credentials have been loaded from {creds_file}')
    # Authenticate with the client, Ensure API key is good before continuing
    if AMERICAN_USER:
        client = Client(access_key, secret_key, tld='us')
    else:
        client = Client(access_key, secret_key)
    # If the users has a bad / incorrect API key.
    # this will stop the script from starting, and display a helpful error.
    api_ready, msg = test_api_key(client, BinanceAPIException)
    if api_ready is not True:
        exit(f'{txcolors.SELL_LOSS}{msg}{txcolors.DEFAULT}')
    # Use CUSTOM_LIST symbols if CUSTOM_LIST is set to True
    if CUSTOM_LIST: tickers=[line.strip() for line in open(TICKERS_LIST)]
    # try to load all the coins bought by the bot if the file exists and is not empty
    coins_bought = {}
    # NOTE(review): `global` at module level is a no-op; kept for documentation
    global coins_bought_file_path, last_sell_datetime
    last_sell_datetime = "Never"
    # path to the saved coins_bought file
    coins_bought_file_path = 'coins_bought.json'
    # use separate files for testing and live trading
    if TEST_MODE:
        coins_bought_file_path = 'test_' + coins_bought_file_path
    # rolling window of prices; cyclical queue
    historical_prices = [None] * (TIME_DIFFERENCE * RECHECK_INTERVAL)
    hsp_head = -1
    # prevent including a coin in volatile_coins if it has already appeared there less than TIME_DIFFERENCE minutes ago
    volatility_cooloff = {}
    # if saved coins_bought json file exists and it's not empty then load it
    if os.path.isfile(coins_bought_file_path) and os.stat(coins_bought_file_path).st_size!= 0:
        with open(coins_bought_file_path) as file:
            coins_bought = json.load(file)
    print('Press Ctrl-C to stop the script')
    if not TEST_MODE:
        if not args.notimeout: # if notimeout skip this (fast for dev tests)
            print('WARNING: You are using the Mainnet and live funds. Waiting',mainnet_wait,'seconds as a security measure')
            time.sleep(mainnet_wait)
    else:
        print('You are using Test Mode')
    # ---- clear stale signal files, then launch signalling modules -------
    # remove leftover .exs signals from a previous run so old signals are
    # not acted on (NOTE(review): removal is attempted once per *line* of
    # each file while it is open — works on POSIX, fails on Windows)
    signals = glob.glob("signals/*.exs")
    for filename in signals:
        for line in open(filename):
            try:
                os.remove(filename)
            except:
                if DEBUG: print(f'{txcolors.WARNING}Could not remove external signalling file (unknown){txcolors.DEFAULT}')
    # clear a stale pause flag so the bot does not start paused
    if os.path.isfile("signals/paused.exc"):
        try:
            os.remove("signals/paused.exc")
        except:
            if DEBUG: print(f'{txcolors.WARNING}Could not remove external signalling file (unknown){txcolors.DEFAULT}')
    remove_external_signals('buy')
    remove_external_signals('sell')
    remove_external_signals('pause')
    # load signalling modules
    signalthreads = []
    mymodule = {}
    try:
        if len(SIGNALLING_MODULES) > 0:
            for module in SIGNALLING_MODULES:
                print(f'Starting {module}')
                mymodule[module] = importlib.import_module(module)
                # t = threading.Thread(target=mymodule[module].do_work, args=())
                # each module's do_work() runs in its own process so it can
                # be terminate()d on shutdown
                t = multiprocessing.Process(target=mymodule[module].do_work, args=())
                t.name = module
                t.daemon = True
                t.start()
                # add process to a list. This is so the thread can be terminated at a later time
                signalthreads.append(t)
                time.sleep(2)
        else:
            print(f'No modules to load {SIGNALLING_MODULES}')
    except Exception as e:
        print(f'Loading external signals exception: {e}')
    # ---- main trading loop ----------------------------------------------
    # seed initial prices
    get_price()
    READ_TIMEOUT_COUNT=0
    CONNECTION_ERROR_COUNT = 0
    while True:
        try:
            #print(f'bot_paused while try ',bot_paused)
            # buy only while not paused; selling happens inside pause_bot()
            # too, so SL/TP protection never stops
            if bot_paused == False:
                orders, last_price, volume = buy()
                update_portfolio(orders, last_price, volume)
            coins_sold = sell_coins()
            remove_from_portfolio(coins_sold)
        # transient network errors: count them and keep looping
        except ReadTimeout as rt:
            READ_TIMEOUT_COUNT += 1
            print(f'{txcolors.WARNING}We got a timeout error from from binance. Going to re-loop. Current Count: {READ_TIMEOUT_COUNT}\n{rt}{txcolors.DEFAULT}')
        except ConnectionError as ce:
            CONNECTION_ERROR_COUNT +=1
            print(f'{txcolors.WARNING}We got a timeout error from from binance. Going to re-loop. Current Count: {CONNECTION_ERROR_COUNT}\n{ce}{txcolors.DEFAULT}')
        # Ctrl-C: interactive shutdown, optionally liquidating the portfolio
        except KeyboardInterrupt as ki:
            # stop external signal threads
            stop_signal_threads()
            # ask user if they want to sell all coins
            print(f'\n\n\n')
            sellall = input(f'{txcolors.WARNING}Program execution ended by user!\n\nDo you want to sell all coins (y/N)?{txcolors.DEFAULT}')
            sellall = sellall.upper()
            if sellall == "Y":
                bot_paused = True
                # sell all coins
                #sell_all_coins('Program execution ended by user!')
                # delegated to a helper script rather than sell_all_coins()
                os.system('python sell-remaining-coins.py')
                coins_bought = {}
                print(f'Removing file and resetting session profit : ',coins_bought_file_path)
                if os.path.isfile(coins_bought_file_path):
                    os.remove(coins_bought_file_path)
                coins_bought = {}
                #with open(coins_bought_file_path) as file:
                #coins_bought = json.load(file)
                print(f'Program execution ended by user and all held coins sold !')
                #print(f'Amount of held coins left : ',len(coins_bought))
            print(f'')
            sys.exit(0)
|
dag_processing.py | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import enum
import importlib
import logging
import multiprocessing
import os
import re
import signal
import sys
import time
import zipfile
from abc import ABCMeta, abstractmethod
from datetime import datetime
from importlib import import_module
from typing import Iterable, NamedTuple, Optional
import psutil
from setproctitle import setproctitle
from tabulate import tabulate
# To avoid circular imports
import airflow.models
from airflow.configuration import conf
from airflow.dag.base_dag import BaseDag, BaseDagBag
from airflow.exceptions import AirflowException
from airflow.models import errors
from airflow.stats import Stats
from airflow.utils import timezone
from airflow.utils.db import provide_session
from airflow.utils.helpers import reap_process_group
from airflow.utils.log.logging_mixin import LoggingMixin
class SimpleDag(BaseDag):
    """
    A simplified representation of a DAG that contains all attributes
    required for instantiating and scheduling its associated tasks.
    """

    def __init__(self, dag, pickle_id=None):
        """
        :param dag: the DAG
        :type dag: airflow.models.DAG
        :param pickle_id: ID associated with the pickled version of this DAG.
        :type pickle_id: unicode
        """
        self._dag_id = dag.dag_id
        self._task_ids = [task.task_id for task in dag.tasks]
        self._full_filepath = dag.full_filepath
        self._is_paused = dag.is_paused
        self._concurrency = dag.concurrency
        self._pickle_id = pickle_id
        # Per-task scheduling overrides; a task only appears here when it
        # actually defines one (currently just task_concurrency).
        self._task_special_args = {
            task.task_id: {'task_concurrency': task.task_concurrency}
            for task in dag.tasks
            if task.task_concurrency is not None
        }

    @property
    def dag_id(self):
        """The DAG ID (unicode)."""
        return self._dag_id

    @property
    def task_ids(self):
        """List of the IDs of all tasks in this DAG."""
        return self._task_ids

    @property
    def full_filepath(self):
        """Absolute path to the file that contains this DAG's definition."""
        return self._full_filepath

    @property
    def concurrency(self):
        """Maximum number of tasks that can run simultaneously from this DAG."""
        return self._concurrency

    @property
    def is_paused(self):
        """Whether this DAG is paused or not."""
        return self._is_paused

    @property
    def pickle_id(self):
        """The pickle ID for this DAG if it has one, otherwise None."""
        return self._pickle_id

    @property
    def task_special_args(self):
        """Mapping of task_id -> dict of per-task special arguments."""
        return self._task_special_args

    def get_task_special_arg(self, task_id, special_arg_name):
        """Return the named special argument for *task_id*, or None if absent."""
        return self._task_special_args.get(task_id, {}).get(special_arg_name)
class SimpleTaskInstance:
    """A lightweight, picklable snapshot of a TaskInstance's scheduling
    attributes, decoupled from the ORM object it was built from."""

    def __init__(self, ti):
        """
        :param ti: the TaskInstance to snapshot
        :type ti: airflow.models.TaskInstance
        """
        self._dag_id = ti.dag_id
        self._task_id = ti.task_id
        self._execution_date = ti.execution_date
        self._start_date = ti.start_date
        self._end_date = ti.end_date
        self._try_number = ti.try_number
        self._state = ti.state
        self._executor_config = ti.executor_config
        # hasattr guards: presumably these tolerate TaskInstance variants
        # that lack the newer attributes — TODO confirm why they are needed
        if hasattr(ti, 'run_as_user'):
            self._run_as_user = ti.run_as_user
        else:
            self._run_as_user = None
        if hasattr(ti, 'pool'):
            self._pool = ti.pool
        else:
            self._pool = None
        if hasattr(ti, 'priority_weight'):
            self._priority_weight = ti.priority_weight
        else:
            self._priority_weight = None
        self._queue = ti.queue
        self._key = ti.key

    @property
    def dag_id(self):
        """ID of the DAG this task instance belongs to."""
        return self._dag_id

    @property
    def task_id(self):
        """ID of the task within the DAG."""
        return self._task_id

    @property
    def execution_date(self):
        """Logical execution date of the task instance."""
        return self._execution_date

    @property
    def start_date(self):
        """When the task instance started, or None."""
        return self._start_date

    @property
    def end_date(self):
        """When the task instance finished, or None."""
        return self._end_date

    @property
    def try_number(self):
        """Current try number of the task instance."""
        return self._try_number

    @property
    def state(self):
        """State of the task instance at snapshot time."""
        return self._state

    @property
    def pool(self):
        """Pool the task runs in, or None when unavailable."""
        return self._pool

    @property
    def priority_weight(self):
        """Priority weight of the task, or None when unavailable."""
        return self._priority_weight

    @property
    def queue(self):
        """Executor queue the task is routed to."""
        return self._queue

    @property
    def key(self):
        """The (dag_id, task_id, execution_date, try_number) identity key."""
        return self._key

    @property
    def executor_config(self):
        """Executor-specific configuration captured from the TaskInstance."""
        return self._executor_config

    @provide_session
    def construct_task_instance(self, session=None, lock_for_update=False):
        """
        Construct a TaskInstance from the database based on the primary key

        :param session: DB session.
        :param lock_for_update: if True, indicates that the database should
            lock the TaskInstance (issuing a FOR UPDATE clause) until the
            session is committed.
        """
        TI = airflow.models.TaskInstance
        qry = session.query(TI).filter(
            TI.dag_id == self._dag_id,
            TI.task_id == self._task_id,
            TI.execution_date == self._execution_date)
        if lock_for_update:
            ti = qry.with_for_update().first()
        else:
            ti = qry.first()
        return ti
class SimpleDagBag(BaseDagBag):
    """
    A collection of SimpleDag objects with some convenience methods.
    """

    def __init__(self, simple_dags):
        """
        Constructor.

        :param simple_dags: SimpleDag objects that should be in this
        :type list(airflow.utils.dag_processing.SimpleDagBag)
        """
        self.simple_dags = simple_dags
        # index by dag_id for O(1) lookup in get_dag()
        self.dag_id_to_simple_dag = {dag.dag_id: dag for dag in simple_dags}

    @property
    def dag_ids(self):
        """IDs of all the DAGs in this bag."""
        return self.dag_id_to_simple_dag.keys()

    def get_dag(self, dag_id):
        """
        :param dag_id: DAG ID
        :type dag_id: unicode
        :return: the SimpleDag registered under *dag_id*
        :raises AirflowException: if the DAG ID is not in the bag
        :rtype: airflow.utils.dag_processing.SimpleDag
        """
        if dag_id not in self.dag_id_to_simple_dag:
            raise AirflowException("Unknown DAG ID {}".format(dag_id))
        return self.dag_id_to_simple_dag[dag_id]
def correct_maybe_zipped(fileloc):
    """
    If the path contains a folder with a .zip suffix, then
    the folder is treated as a zip archive and path to zip is returned.
    """
    # Group 2 captures the leading "....zip" folder component, if any.
    match = re.search(r'((.*\.zip){})?(.*)'.format(re.escape(os.sep)), fileloc)
    archive = match.group(2)
    if archive and zipfile.is_zipfile(archive):
        return archive
    # Not inside a (real) zip archive: hand back the path unchanged.
    return fileloc
# Matches a "#" comment (with optional leading whitespace); used to strip
# comments from .airflowignore lines in list_py_file_paths().
COMMENT_PATTERN = re.compile(r"\s*#.*")
def list_py_file_paths(directory, safe_mode=conf.getboolean('core', 'DAG_DISCOVERY_SAFE_MODE', fallback=True),
                       include_examples=None):
    """
    Traverse a directory and look for Python files.

    :param directory: the directory to traverse
    :type directory: unicode
    :param safe_mode: whether to use a heuristic to determine whether a file
        contains Airflow DAG definitions. If not provided, use the
        core.DAG_DISCOVERY_SAFE_MODE configuration setting. If not set, default
        to safe.
    :param include_examples: whether to also include the bundled example DAGs.
        Defaults to the core.LOAD_EXAMPLES configuration setting.
    :return: a list of paths to Python files in the specified directory
    :rtype: list[unicode]
    """
    if include_examples is None:
        include_examples = conf.getboolean('core', 'LOAD_EXAMPLES')
    file_paths = []
    if directory is None:
        return []
    elif os.path.isfile(directory):
        return [directory]
    elif os.path.isdir(directory):
        patterns_by_dir = {}
        for root, dirs, files in os.walk(directory, followlinks=True):
            patterns = patterns_by_dir.get(root, [])
            ignore_file = os.path.join(root, '.airflowignore')
            if os.path.isfile(ignore_file):
                with open(ignore_file, 'r') as file:
                    # If we have new patterns create a copy so we don't change
                    # the previous list (which would affect other subdirs)
                    lines_no_comments = [COMMENT_PATTERN.sub("", line) for line in file.read().split("\n")]
                    # BUGFIX: rebind with "+" instead of "+=". "patterns" can be
                    # the very list object stored in patterns_by_dir and shared
                    # with sibling directories; augmented assignment mutates it
                    # in place and leaks this directory's ignore rules into its
                    # siblings, contradicting the comment above.
                    patterns = patterns + [re.compile(line) for line in lines_no_comments if line]
            # If we can ignore any subdirs entirely we should - fewer paths
            # to walk is better. We have to modify the ``dirs`` array in
            # place for this to affect os.walk
            dirs[:] = [
                d
                for d in dirs
                if not any(p.search(os.path.join(root, d)) for p in patterns)
            ]
            # We want patterns defined in a parent folder's .airflowignore to
            # apply to subdirs too
            for d in dirs:
                patterns_by_dir[os.path.join(root, d)] = patterns
            for f in files:
                try:
                    file_path = os.path.join(root, f)
                    if not os.path.isfile(file_path):
                        continue
                    _, file_ext = os.path.splitext(os.path.split(file_path)[-1])
                    if file_ext != '.py' and not zipfile.is_zipfile(file_path):
                        continue
                    # Use the same matching primitive (Pattern.search) as the
                    # directory filter above, for consistent ignore semantics.
                    if any(p.search(file_path) for p in patterns):
                        continue
                    # Heuristic that guesses whether a Python file contains an
                    # Airflow DAG definition.
                    might_contain_dag = True
                    if safe_mode and not zipfile.is_zipfile(file_path):
                        with open(file_path, 'rb') as fp:
                            content = fp.read()
                            might_contain_dag = all(
                                s in content for s in (b'DAG', b'airflow'))
                    if not might_contain_dag:
                        continue
                    file_paths.append(file_path)
                except Exception:
                    # Best-effort discovery: log and keep scanning other files.
                    log = LoggingMixin().log
                    log.exception("Error while examining %s", f)
    if include_examples:
        import airflow.example_dags
        example_dag_folder = airflow.example_dags.__path__[0]
        file_paths.extend(list_py_file_paths(example_dag_folder, safe_mode, False))
    return file_paths
class AbstractDagFileProcessor(metaclass=ABCMeta):
    """
    Processes a DAG file. See SchedulerJob.process_file() for more details.

    Abstract interface: concrete implementations wrap a subprocess that parses
    a single DAG definition file, and are driven by DagFileProcessorManager
    via start()/terminate() and the status properties below.
    """

    @abstractmethod
    def start(self):
        """
        Launch the process to process the file
        """
        raise NotImplementedError()

    @abstractmethod
    def terminate(self, sigkill=False):
        """
        Terminate (and then kill) the process launched to process the file

        :param sigkill: whether to escalate to SIGKILL after terminating.
        """
        raise NotImplementedError()

    @property
    @abstractmethod
    def pid(self):
        """
        :return: the PID of the process launched to process the given file
        :rtype: int
        """
        raise NotImplementedError()

    @property
    @abstractmethod
    def exit_code(self):
        """
        After the process is finished, this can be called to get the return code

        :return: the exit code of the process
        :rtype: int
        """
        raise NotImplementedError()

    @property
    @abstractmethod
    def done(self):
        """
        Check if the process launched to process this file is done.

        :return: whether the process is finished running
        :rtype: bool
        """
        raise NotImplementedError()

    @property
    @abstractmethod
    def result(self):
        """
        A list of simple dags found, and the number of import errors

        :return: result of running SchedulerJob.process_file()
        :rtype: tuple[list[airflow.utils.dag_processing.SimpleDag], int]
        """
        raise NotImplementedError()

    @property
    @abstractmethod
    def start_time(self):
        """
        :return: When this started to process the file
        :rtype: datetime
        """
        raise NotImplementedError()

    @property
    @abstractmethod
    def file_path(self):
        """
        :return: the path to the file that this is processing
        :rtype: unicode
        """
        raise NotImplementedError()
# Status snapshot sent over the pipe from DagFileProcessorManager to the agent:
#   file_paths           -- all DAG files currently known to the manager
#   done                 -- manager has reached its max_runs limit
#   all_files_processed  -- every known file has finished at least one parse
DagParsingStat = NamedTuple('DagParsingStat', [
    ('file_paths', Iterable[str]),
    ('done', bool),
    ('all_files_processed', bool)
])
# Per-file parsing statistics kept by DagFileProcessorManager:
#   num_dags / import_errors  -- results of the last completed parse
#   last_finish_time          -- when the last parse finished
#   last_duration             -- how long the last parse took, in seconds
#   run_count                 -- how many times the file has been parsed
DagFileStat = NamedTuple('DagFileStat', [
    ('num_dags', int),
    ('import_errors', int),
    ('last_finish_time', datetime),
    ('last_duration', float),
    ('run_count', int),
])
class DagParsingSignal(enum.Enum):
    """Control signals sent from DagFileProcessorAgent to DagFileProcessorManager."""
    AGENT_HEARTBEAT = 'agent_heartbeat'
    TERMINATE_MANAGER = 'terminate_manager'
    END_MANAGER = 'end_manager'
class DagFileProcessorAgent(LoggingMixin):
    """
    Agent for DAG file processing. It is responsible for all DAG parsing
    related jobs in scheduler process. Mainly it can spin up DagFileProcessorManager
    in a subprocess, collect DAG parsing results from it and communicate
    signal/DAG parsing stat with it.
    This class runs in the main `airflow scheduler` process.
    """

    def __init__(self,
                 dag_directory,
                 file_paths,
                 max_runs,
                 processor_factory,
                 processor_timeout,
                 async_mode):
        """
        :param dag_directory: Directory where DAG definitions are kept. All
            files in file_paths should be under this directory
        :type dag_directory: unicode
        :param file_paths: list of file paths that contain DAG definitions
        :type file_paths: list[unicode]
        :param max_runs: The number of times to parse and schedule each file. -1
            for unlimited.
        :type max_runs: int
        :param processor_factory: function that creates processors for DAG
            definition files. Arguments are (dag_definition_path, log_file_path)
        :type processor_factory: (unicode, unicode, list) -> (AbstractDagFileProcessor)
        :param processor_timeout: How long to wait before timing out a DAG file processor
        :type processor_timeout: timedelta
        :param async_mode: Whether to start agent in async mode
        :type async_mode: bool
        """
        self._file_paths = file_paths
        self._file_path_queue = []
        self._dag_directory = dag_directory
        self._max_runs = max_runs
        self._processor_factory = processor_factory
        self._processor_timeout = processor_timeout
        self._async_mode = async_mode
        # Map from file path to the processor
        self._processors = {}
        # The manager subprocess; created lazily in start().
        self._process = None
        self._done = False
        # Initialized as true so we do not deactivate w/o any actual DAG parsing.
        self._all_files_processed = True
        # Our end of the pipe to the manager subprocess; created in start().
        self._parent_signal_conn = None
        # SimpleDags received from the manager but not yet handed to the caller.
        self._collected_dag_buffer = []

    def start(self):
        """
        Launch DagFileProcessorManager processor and start DAG parsing loop in manager.
        """
        self._parent_signal_conn, child_signal_conn = multiprocessing.Pipe()
        self._process = multiprocessing.Process(
            target=type(self)._run_processor_manager,
            args=(
                self._dag_directory,
                self._file_paths,
                self._max_runs,
                self._processor_factory,
                self._processor_timeout,
                child_signal_conn,
                self._async_mode,
            )
        )
        self._process.start()
        self.log.info("Launched DagFileProcessorManager with pid: %s", self._process.pid)

    def heartbeat(self):
        """
        Should only be used when launched DAG file processor manager in sync mode.
        Send agent heartbeat signal to the manager, requesting that it runs one
        processing "loop".
        Call wait_until_finished to ensure that any launched processors have
        finished before continuing
        """
        if not self._process.is_alive():
            return
        try:
            self._parent_signal_conn.send(DagParsingSignal.AGENT_HEARTBEAT)
        except ConnectionError:
            # If the manager died because of an error, that will be noticed
            # and it will be restarted when harvest_simple_dags calls
            # _heartbeat_manager.
            pass

    def wait_until_finished(self):
        # Drain messages until the manager reports a DagParsingStat; in sync
        # mode that message is only sent once all processors have finished.
        while self._parent_signal_conn.poll():
            try:
                result = self._parent_signal_conn.recv()
            except EOFError:
                break
            self._process_message(result)
            if isinstance(result, DagParsingStat):
                # In sync mode we don't send this message from the Manager
                # until all the running processors have finished
                return

    @staticmethod
    def _run_processor_manager(dag_directory,
                               file_paths,
                               max_runs,
                               processor_factory,
                               processor_timeout,
                               signal_conn,
                               async_mode):
        # Make this process start as a new process group - that makes it easy
        # to kill all sub-process of this at the OS-level, rather than having
        # to iterate the child processes
        os.setpgid(0, 0)
        setproctitle("airflow scheduler -- DagFileProcessorManager")
        # Reload configurations and settings to avoid collision with parent process.
        # Because this process may need custom configurations that cannot be shared,
        # e.g. RotatingFileHandler. And it can cause connection corruption if we
        # do not recreate the SQLA connection pool.
        os.environ['CONFIG_PROCESSOR_MANAGER_LOGGER'] = 'True'
        os.environ['AIRFLOW__CORE__COLORED_CONSOLE_LOG'] = 'False'
        # Replicating the behavior of how logging module was loaded
        # in logging_config.py
        importlib.reload(import_module(airflow.settings.LOGGING_CLASS_PATH.rsplit('.', 1)[0]))
        importlib.reload(airflow.settings)
        airflow.settings.initialize()
        del os.environ['CONFIG_PROCESSOR_MANAGER_LOGGER']
        processor_manager = DagFileProcessorManager(dag_directory,
                                                    file_paths,
                                                    max_runs,
                                                    processor_factory,
                                                    processor_timeout,
                                                    signal_conn,
                                                    async_mode)
        # Blocks here, running the manager's parsing loop until it exits.
        processor_manager.start()

    def harvest_simple_dags(self):
        """
        Harvest DAG parsing results from result queue and sync metadata from stat queue.

        :return: List of parsing result in SimpleDag format.
        """
        # Receive any pending messages before checking if the process has exited.
        while self._parent_signal_conn.poll():
            try:
                result = self._parent_signal_conn.recv()
            except (EOFError, ConnectionError):
                break
            self._process_message(result)
        # Hand over (and reset) everything buffered so far.
        simple_dags = self._collected_dag_buffer
        self._collected_dag_buffer = []
        # If it died unexpectedly restart the manager process
        self._heartbeat_manager()
        return simple_dags

    def _process_message(self, message):
        # Messages are either DagParsingStat snapshots or individual SimpleDags.
        self.log.debug("Received message of type %s", type(message).__name__)
        if isinstance(message, DagParsingStat):
            self._sync_metadata(message)
        else:
            self._collected_dag_buffer.append(message)

    def _heartbeat_manager(self):
        """
        Heartbeat DAG file processor and restart it if we are not done.
        """
        if self._process and not self._process.is_alive():
            # join(timeout=0) reaps the dead child without blocking.
            self._process.join(timeout=0)
            if not self.done:
                self.log.warning(
                    "DagFileProcessorManager (PID=%d) exited with exit code %d - re-launching",
                    self._process.pid, self._process.exitcode
                )
                self.start()

    def _sync_metadata(self, stat):
        """
        Sync metadata from stat queue and only keep the latest stat.
        """
        self._file_paths = stat.file_paths
        self._done = stat.done
        self._all_files_processed = stat.all_files_processed

    @property
    def file_paths(self):
        # Latest file-path list reported by the manager.
        return self._file_paths

    @property
    def done(self):
        # True once the manager reported that max_runs has been reached.
        return self._done

    @property
    def all_files_processed(self):
        # True once every known file has finished at least one parse.
        return self._all_files_processed

    def terminate(self):
        """
        Send termination signal to DAG parsing processor manager
        and expect it to terminate all DAG file processors.
        """
        if self._process and self._process.is_alive():
            self.log.info("Sending termination message to manager.")
            try:
                self._parent_signal_conn.send(DagParsingSignal.TERMINATE_MANAGER)
            except ConnectionError:
                # Manager end already gone; nothing to signal.
                pass

    def end(self):
        """
        Terminate (and then kill) the manager process launched.
        :return:
        """
        if not self._process:
            self.log.warning('Ending without manager process.')
            return
        # The manager runs in its own process group (see _run_processor_manager),
        # so reaping the group takes down its child processors as well.
        reap_process_group(self._process.pid, log=self.log)
        self._parent_signal_conn.close()
class DagFileProcessorManager(LoggingMixin):
"""
Given a list of DAG definition files, this kicks off several processors
in parallel to process them and put the results to a multiprocessing.Queue
for DagFileProcessorAgent to harvest. The parallelism is limited and as the
processors finish, more are launched. The files are processed over and
over again, but no more often than the specified interval.
:type _file_path_queue: list[unicode]
:type _processors: dict[unicode, AbstractDagFileProcessor]
"""
def __init__(self,
dag_directory,
file_paths,
max_runs,
processor_factory,
processor_timeout,
signal_conn,
async_mode=True):
"""
:param dag_directory: Directory where DAG definitions are kept. All
files in file_paths should be under this directory
:type dag_directory: unicode
:param file_paths: list of file paths that contain DAG definitions
:type file_paths: list[unicode]
:param max_runs: The number of times to parse and schedule each file. -1
for unlimited.
:type max_runs: int
:param processor_factory: function that creates processors for DAG
definition files. Arguments are (dag_definition_path)
:type processor_factory: (unicode, unicode, list) -> (AbstractDagFileProcessor)
:param processor_timeout: How long to wait before timing out a DAG file processor
:type processor_timeout: timedelta
:param signal_conn: connection to communicate signal with processor agent.
:type signal_conn: airflow.models.connection.Connection
:param async_mode: whether to start the manager in async mode
:type async_mode: bool
"""
self._file_paths = file_paths
self._file_path_queue = []
self._dag_directory = dag_directory
self._max_runs = max_runs
self._processor_factory = processor_factory
self._signal_conn = signal_conn
self._async_mode = async_mode
self._parallelism = conf.getint('scheduler', 'max_threads')
if 'sqlite' in conf.get('core', 'sql_alchemy_conn') and self._parallelism > 1:
self.log.error("Cannot use more than 1 thread when using sqlite. "
"Setting parallelism to 1")
self._parallelism = 1
# Parse and schedule each file no faster than this interval.
self._file_process_interval = conf.getint('scheduler',
'min_file_process_interval')
# How often to print out DAG file processing stats to the log. Default to
# 30 seconds.
self.print_stats_interval = conf.getint('scheduler',
'print_stats_interval')
# Map from file path to the processor
self._processors = {}
self._heartbeat_count = 0
# Map from file path to stats about the file
self._file_stats = {} # type: dict(str, DagFileStat)
self._last_zombie_query_time = timezone.utcnow()
# Last time that the DAG dir was traversed to look for files
self.last_dag_dir_refresh_time = timezone.utcnow()
# Last time stats were printed
self.last_stat_print_time = timezone.datetime(2000, 1, 1)
# TODO: Remove magic number
self._zombie_query_interval = 10
# How long to wait before timing out a process to parse a DAG file
self._processor_timeout = processor_timeout
# How often to scan the DAGs directory for new files. Default to 5 minutes.
self.dag_dir_list_interval = conf.getint('scheduler',
'dag_dir_list_interval')
self._log = logging.getLogger('airflow.processor_manager')
signal.signal(signal.SIGINT, self._exit_gracefully)
signal.signal(signal.SIGTERM, self._exit_gracefully)
def _exit_gracefully(self, signum, frame):
"""
Helper method to clean up DAG file processors to avoid leaving orphan processes.
"""
self.log.info("Exiting gracefully upon receiving signal %s", signum)
self.terminate()
self.end()
self.log.debug("Finished terminating DAG processors.")
sys.exit(os.EX_OK)
def start(self):
"""
Use multiple processes to parse and generate tasks for the
DAGs in parallel. By processing them in separate processes,
we can get parallelism and isolation from potentially harmful
user code.
"""
self.log.info("Processing files using up to %s processes at a time ", self._parallelism)
self.log.info("Process each file at most once every %s seconds", self._file_process_interval)
self.log.info(
"Checking for new files in %s every %s seconds", self._dag_directory, self.dag_dir_list_interval
)
# In sync mode we want timeout=None -- wait forever until a message is received
poll_time = None # type: Optional[float]
if self._async_mode:
poll_time = 0.0
self.log.debug("Starting DagFileProcessorManager in async mode")
else:
poll_time = None
self.log.debug("Starting DagFileProcessorManager in sync mode")
# Used to track how long it takes us to get once around every file in the DAG folder.
self._parsing_start_time = timezone.utcnow()
while True:
loop_start_time = time.time()
if self._signal_conn.poll(poll_time):
agent_signal = self._signal_conn.recv()
self.log.debug("Recived %s singal from DagFileProcessorAgent", agent_signal)
if agent_signal == DagParsingSignal.TERMINATE_MANAGER:
self.terminate()
break
elif agent_signal == DagParsingSignal.END_MANAGER:
self.end()
sys.exit(os.EX_OK)
elif agent_signal == DagParsingSignal.AGENT_HEARTBEAT:
# continue the loop to parse dags
pass
elif not self._async_mode:
# In "sync" mode we don't want to parse the DAGs until we
# are told to (as that would open another connection to the
# SQLite DB which isn't a good practice
continue
self._refresh_dag_dir()
simple_dags = self.heartbeat()
for simple_dag in simple_dags:
self._signal_conn.send(simple_dag)
if not self._async_mode:
self.log.debug(
"Waiting for processors to finish since we're using sqlite")
# Wait until the running DAG processors are finished before
# sending a DagParsingStat message back. This means the Agent
# can tell we've got to the end of this iteration when it sees
# this type of message
self.wait_until_finished()
# Collect anything else that has finished, but don't kick off any more processors
simple_dags = self.collect_results()
for simple_dag in simple_dags:
self._signal_conn.send(simple_dag)
self._print_stat()
all_files_processed = all(self.get_last_finish_time(x) is not None for x in self.file_paths)
max_runs_reached = self.max_runs_reached()
dag_parsing_stat = DagParsingStat(self._file_paths,
max_runs_reached,
all_files_processed,
)
self._signal_conn.send(dag_parsing_stat)
if max_runs_reached:
self.log.info("Exiting dag parsing loop as all files "
"have been processed %s times", self._max_runs)
break
if self._async_mode:
loop_duration = time.time() - loop_start_time
if loop_duration < 1:
poll_time = 1 - loop_duration
else:
poll_time = 0.0
def _refresh_dag_dir(self):
"""
Refresh file paths from dag dir if we haven't done it for too long.
"""
now = timezone.utcnow()
elapsed_time_since_refresh = (now - self.last_dag_dir_refresh_time).total_seconds()
if elapsed_time_since_refresh > self.dag_dir_list_interval:
# Build up a list of Python files that could contain DAGs
self.log.info("Searching for files in %s", self._dag_directory)
self._file_paths = list_py_file_paths(self._dag_directory)
self.last_dag_dir_refresh_time = now
self.log.info("There are %s files in %s", len(self._file_paths), self._dag_directory)
self.set_file_paths(self._file_paths)
try:
self.log.debug("Removing old import errors")
self.clear_nonexistent_import_errors()
except Exception:
self.log.exception("Error removing old import errors")
def _print_stat(self):
"""
Occasionally print out stats about how fast the files are getting processed
"""
if ((timezone.utcnow() - self.last_stat_print_time).total_seconds() > self.print_stats_interval):
if len(self._file_paths) > 0:
self._log_file_processing_stats(self._file_paths)
self.last_stat_print_time = timezone.utcnow()
@provide_session
def clear_nonexistent_import_errors(self, session):
"""
Clears import errors for files that no longer exist.
:param session: session for ORM operations
:type session: sqlalchemy.orm.session.Session
"""
query = session.query(errors.ImportError)
if self._file_paths:
query = query.filter(
~errors.ImportError.filename.in_(self._file_paths)
)
query.delete(synchronize_session='fetch')
session.commit()
def _log_file_processing_stats(self, known_file_paths):
"""
Print out stats about how files are getting processed.
:param known_file_paths: a list of file paths that may contain Airflow
DAG definitions
:type known_file_paths: list[unicode]
:return: None
"""
# File Path: Path to the file containing the DAG definition
# PID: PID associated with the process that's processing the file. May
# be empty.
# Runtime: If the process is currently running, how long it's been
# running for in seconds.
# Last Runtime: If the process ran before, how long did it take to
# finish in seconds
# Last Run: When the file finished processing in the previous run.
headers = ["File Path",
"PID",
"Runtime",
"# DAGs",
"# Errors",
"Last Runtime",
"Last Run"]
rows = []
now = timezone.utcnow()
for file_path in known_file_paths:
last_runtime = self.get_last_runtime(file_path)
num_dags = self.get_last_dag_count(file_path)
num_errors = self.get_last_error_count(file_path)
file_name = os.path.basename(file_path)
file_name = os.path.splitext(file_name)[0].replace(os.sep, '.')
processor_pid = self.get_pid(file_path)
processor_start_time = self.get_start_time(file_path)
runtime = ((now - processor_start_time).total_seconds() if processor_start_time else None)
last_run = self.get_last_finish_time(file_path)
if last_run:
seconds_ago = (now - last_run).total_seconds()
Stats.gauge('dag_processing.last_run.seconds_ago.{}'.format(file_name), seconds_ago)
if runtime:
Stats.timing('dag_processing.last_duration.{}'.format(file_name), runtime)
# TODO: Remove before Airflow 2.0
Stats.timing('dag_processing.last_runtime.{}'.format(file_name), runtime)
rows.append((file_path,
processor_pid,
runtime,
num_dags,
num_errors,
last_runtime,
last_run))
# Sort by longest last runtime. (Can't sort None values in python3)
rows = sorted(rows, key=lambda x: x[3] or 0.0)
formatted_rows = []
for file_path, pid, runtime, num_dags, num_errors, last_runtime, last_run in rows:
formatted_rows.append((file_path,
pid,
"{:.2f}s".format(runtime) if runtime else None,
num_dags,
num_errors,
"{:.2f}s".format(last_runtime) if last_runtime else None,
last_run.strftime("%Y-%m-%dT%H:%M:%S") if last_run else None
))
log_str = ("\n" +
"=" * 80 +
"\n" +
"DAG File Processing Stats\n\n" +
tabulate(formatted_rows, headers=headers) +
"\n" +
"=" * 80)
self.log.info(log_str)
@property
def file_paths(self):
return self._file_paths
def get_pid(self, file_path):
"""
:param file_path: the path to the file that's being processed
:type file_path: unicode
:return: the PID of the process processing the given file or None if
the specified file is not being processed
:rtype: int
"""
if file_path in self._processors:
return self._processors[file_path].pid
return None
def get_all_pids(self):
"""
:return: a list of the PIDs for the processors that are running
:rtype: List[int]
"""
return [x.pid for x in self._processors.values()]
def get_last_runtime(self, file_path):
"""
:param file_path: the path to the file that was processed
:type file_path: unicode
:return: the runtime (in seconds) of the process of the last run, or
None if the file was never processed.
:rtype: float
"""
stat = self._file_stats.get(file_path)
return stat.last_duration if stat else None
def get_last_dag_count(self, file_path):
"""
:param file_path: the path to the file that was processed
:type file_path: unicode
:return: the number of dags loaded from that file, or None if the file
was never processed.
:rtype: int
"""
stat = self._file_stats.get(file_path)
return stat.num_dags if stat else None
def get_last_error_count(self, file_path):
"""
:param file_path: the path to the file that was processed
:type file_path: unicode
:return: the number of import errors from processing, or None if the file
was never processed.
:rtype: int
"""
stat = self._file_stats.get(file_path)
return stat.import_errors if stat else None
def get_last_finish_time(self, file_path):
"""
:param file_path: the path to the file that was processed
:type file_path: unicode
:return: the finish time of the process of the last run, or None if the
file was never processed.
:rtype: datetime
"""
stat = self._file_stats.get(file_path)
return stat.last_finish_time if stat else None
def get_start_time(self, file_path):
"""
:param file_path: the path to the file that's being processed
:type file_path: unicode
:return: the start time of the process that's processing the
specified file or None if the file is not currently being processed
:rtype: datetime
"""
if file_path in self._processors:
return self._processors[file_path].start_time
return None
def get_run_count(self, file_path):
"""
:param file_path: the path to the file that's being processed
:type file_path: unicode
:return: the number of times the given file has been parsed
:rtype: int
"""
stat = self._file_stats.get(file_path)
return stat.run_count if stat else 0
def set_file_paths(self, new_file_paths):
"""
Update this with a new set of paths to DAG definition files.
:param new_file_paths: list of paths to DAG definition files
:type new_file_paths: list[unicode]
:return: None
"""
self._file_paths = new_file_paths
self._file_path_queue = [x for x in self._file_path_queue
if x in new_file_paths]
# Stop processors that are working on deleted files
filtered_processors = {}
for file_path, processor in self._processors.items():
if file_path in new_file_paths:
filtered_processors[file_path] = processor
else:
self.log.warning("Stopping processor for %s", file_path)
Stats.decr('dag_processing.processes')
processor.terminate()
self._file_stats.pop(file_path)
self._processors = filtered_processors
def wait_until_finished(self):
"""
Sleeps until all the processors are done.
"""
for processor in self._processors.values():
while not processor.done:
time.sleep(0.1)
def collect_results(self):
"""
Collect the result from any finished DAG processors
:return: a list of SimpleDags that were produced by processors that
have finished since the last time this was called
:rtype: list[airflow.utils.dag_processing.SimpleDag]
"""
self._kill_timed_out_processors()
finished_processors = {}
""":type : dict[unicode, AbstractDagFileProcessor]"""
running_processors = {}
""":type : dict[unicode, AbstractDagFileProcessor]"""
for file_path, processor in self._processors.items():
if processor.done:
self.log.debug("Processor for %s finished", file_path)
Stats.decr('dag_processing.processes')
now = timezone.utcnow()
finished_processors[file_path] = processor
stat = DagFileStat(
len(processor.result[0]) if processor.result is not None else 0,
processor.result[1] if processor.result is not None else -1,
now,
(now - processor.start_time).total_seconds(),
self.get_run_count(file_path) + 1,
)
self._file_stats[file_path] = stat
else:
running_processors[file_path] = processor
self._processors = running_processors
self.log.debug("%s/%s DAG parsing processes running",
len(self._processors), self._parallelism)
self.log.debug("%s file paths queued for processing",
len(self._file_path_queue))
# Collect all the DAGs that were found in the processed files
simple_dags = []
for file_path, processor in finished_processors.items():
if processor.result is None:
self.log.warning(
"Processor for %s exited with return code %s.",
processor.file_path, processor.exit_code
)
else:
for simple_dag in processor.result[0]:
simple_dags.append(simple_dag)
return simple_dags
def heartbeat(self):
"""
This should be periodically called by the manager loop. This method will
kick off new processes to process DAG definition files and read the
results from the finished processors.
:return: a list of SimpleDags that were produced by processors that
have finished since the last time this was called
:rtype: list[airflow.utils.dag_processing.SimpleDag]
"""
simple_dags = self.collect_results()
# Generate more file paths to process if we processed all the files
# already.
if len(self._file_path_queue) == 0:
self.emit_metrics()
self._parsing_start_time = timezone.utcnow()
# If the file path is already being processed, or if a file was
# processed recently, wait until the next batch
file_paths_in_progress = self._processors.keys()
now = timezone.utcnow()
file_paths_recently_processed = []
for file_path in self._file_paths:
last_finish_time = self.get_last_finish_time(file_path)
if (last_finish_time is not None and
(now - last_finish_time).total_seconds() <
self._file_process_interval):
file_paths_recently_processed.append(file_path)
files_paths_at_run_limit = [file_path
for file_path, stat in self._file_stats.items()
if stat.run_count == self._max_runs]
files_paths_to_queue = list(set(self._file_paths) -
set(file_paths_in_progress) -
set(file_paths_recently_processed) -
set(files_paths_at_run_limit))
for file_path, processor in self._processors.items():
self.log.debug(
"File path %s is still being processed (started: %s)",
processor.file_path, processor.start_time.isoformat()
)
self.log.debug(
"Queuing the following files for processing:\n\t%s",
"\n\t".join(files_paths_to_queue)
)
for file_path in files_paths_to_queue:
if file_path not in self._file_stats:
self._file_stats[file_path] = DagFileStat(0, 0, None, None, 0)
self._file_path_queue.extend(files_paths_to_queue)
# Start more processors if we have enough slots and files to process
while (self._parallelism - len(self._processors) > 0 and
len(self._file_path_queue) > 0):
file_path = self._file_path_queue.pop(0)
processor = self._processor_factory(file_path)
Stats.incr('dag_processing.processes')
processor.start()
self.log.debug(
"Started a process (PID: %s) to generate tasks for %s",
processor.pid, file_path
)
self._processors[file_path] = processor
# Update heartbeat count.
self._heartbeat_count += 1
return simple_dags
def _kill_timed_out_processors(self):
"""
Kill any file processors that timeout to defend against process hangs.
"""
now = timezone.utcnow()
for file_path, processor in self._processors.items():
duration = now - processor.start_time
if duration > self._processor_timeout:
self.log.info(
"Processor for %s with PID %s started at %s has timed out, "
"killing it.",
processor.file_path, processor.pid, processor.start_time.isoformat())
Stats.decr('dag_processing.processes')
Stats.incr('dag_processing.processor_timeouts')
# TODO: Remove ater Airflow 2.0
Stats.incr('dag_file_processor_timeouts')
processor.kill()
def max_runs_reached(self):
"""
:return: whether all file paths have been processed max_runs times
"""
if self._max_runs == -1: # Unlimited runs.
return False
for stat in self._file_stats.values():
if stat.run_count < self._max_runs:
return False
if self._heartbeat_count < self._max_runs:
return False
return True
def terminate(self):
"""
Stops all running processors
:return: None
"""
for processor in self._processors.values():
Stats.decr('dag_processing.processes')
processor.terminate()
def end(self):
"""
Kill all child processes on exit since we don't want to leave
them as orphaned.
"""
pids_to_kill = self.get_all_pids()
if len(pids_to_kill) > 0:
# First try SIGTERM
this_process = psutil.Process(os.getpid())
# Only check child processes to ensure that we don't have a case
# where we kill the wrong process because a child process died
# but the PID got reused.
child_processes = [x for x in this_process.children(recursive=True)
if x.is_running() and x.pid in pids_to_kill]
for child in child_processes:
self.log.info("Terminating child PID: %s", child.pid)
child.terminate()
# TODO: Remove magic number
timeout = 5
self.log.info("Waiting up to %s seconds for processes to exit...", timeout)
try:
psutil.wait_procs(
child_processes, timeout=timeout,
callback=lambda x: self.log.info('Terminated PID %s', x.pid))
except psutil.TimeoutExpired:
self.log.debug("Ran out of time while waiting for processes to exit")
# Then SIGKILL
child_processes = [x for x in this_process.children(recursive=True)
if x.is_running() and x.pid in pids_to_kill]
if len(child_processes) > 0:
self.log.info("SIGKILL processes that did not terminate gracefully")
for child in child_processes:
self.log.info("Killing child PID: %s", child.pid)
child.kill()
child.wait()
    def emit_metrics(self):
        """
        Emit summary metrics about DAG parsing.

        This is called once every time around the parsing "loop" - i.e. after
        all files have been parsed.
        """
        # Wall-clock duration of the parsing pass that just completed.
        parse_time = (timezone.utcnow() - self._parsing_start_time).total_seconds()
        Stats.gauge('dag_processing.total_parse_time', parse_time)
        Stats.gauge('dagbag_size', sum(stat.num_dags for stat in self._file_stats.values()))
        Stats.gauge('dag_processing.import_errors',
                    sum(stat.import_errors for stat in self._file_stats.values()))
        # TODO: Remove before Airflow 2.0
        # Legacy metric names kept for backwards compatibility.
        Stats.gauge('collect_dags', parse_time)
        Stats.gauge('dagbag_import_errors', sum(stat.import_errors for stat in self._file_stats.values()))
|
main.py | #To run flask in its own thread
import threading
import sys
import os
import logging
import time
#To catch CTRL+C
import signal
import requests
import traceback
from config import *
from client_api import *
from encapsulator.userspace import *
CLIENT_HOST = "0.0.0.0"
CLIENT_PORT = 4243
# Gracefully handle CTRL+C
def cleanup(sig, frame):
    """SIGINT handler: detach the BPF program, remove installed rules, then exit.

    ``sig`` and ``frame`` are the standard signal-handler arguments; unused here.
    """
    print("Removing BPF program...")
    try:
        conf.encap.remove_bpf_program()
    except Exception:
        # `except Exception` instead of a bare `except:` so SystemExit and
        # KeyboardInterrupt are not silently swallowed.
        print("ERROR: Couldn't remove BPF filter from the interface.")
    else:
        # Only claim success when removal actually succeeded (the old code
        # printed this even after the error path).
        print("BPF program removed")
    print("Removing installed rules...")
    clean_installed_rules(conf.installed_subnets, conf.interface, conf.installed_ips)
    print("Removed all rules")
    os._exit(0)  # os._exit: sys.exit() would only raise in this thread and leave the flask thread running
if __name__ == "__main__":
    # Install the SIGINT handler so CTRL+C tears down BPF state and rules cleanly.
    signal.signal(signal.SIGINT, cleanup)
    # Minimal hand-rolled CLI parsing: supports --debug and -i <interface>.
    if len(sys.argv) > 1:
        for i in range(1, len(sys.argv)):
            if sys.argv[i] == "--debug":
                conf.debug = True
            elif sys.argv[i] == "-i":
                if i+1 < len(sys.argv):
                    conf.interface = sys.argv[i+1]
    if not conf.debug:
        # Silence per-request connection logs from urllib3 unless debugging.
        logging.getLogger("urllib3.connectionpool").setLevel(logging.ERROR)
    # Run encapsulator in another thread
    # NOTE(review): bare `except:` below also catches KeyboardInterrupt/SystemExit;
    # consider narrowing to Exception.
    try:
        conf.encap.attach_bpf_program(conf.interface)
    except:
        print("ERROR: Unable to install bpf program to interface %s. Does the interface exist?" % conf.interface)
        exit(1)
    bpf = threading.Thread(target=conf.encap.manage_runtime)
    bpf.daemon = True  # daemon: dies with the main thread
    bpf.start()
    # Run flask in another thread
    api = ClientAPI(CLIENT_HOST, CLIENT_PORT)  # NOTE(review): `api` looks unused afterwards; presumably ClientAPI registers routes as a side effect - confirm
    flask = threading.Thread(target=run_client_server)
    flask.daemon = True
    flask.start()
    # Hang
    print(""" _____ _ _ ________ ___ ___ _ _ _
 / __ \| | | |_ _| \/ | / _ \ | (_) | |
 | / \/| |_| | | | | . . |/ /_\ \ ___| |_ ___ _ __ | |_
 | | | _ | | | | |\/| || _ |/ __| | |/ _ \ '_ \| __|
 | \__/\| | | |_| |_| | | || | | | (__| | | __/ | | | |_
 \____/\_| |_/\___/\_| |_/\_| |_/\___|_|_|\___|_| |_|\__|
 """)
    # Keep the main thread alive forever; all workers are daemon threads and
    # the process exits via cleanup()'s os._exit on SIGINT.
    while True:
        time.sleep(60)
train_pg_f18.py | """
Original code from John Schulman for CS294 Deep Reinforcement Learning Spring 2017
Adapted for CS294-112 Fall 2017 by Abhishek Gupta and Joshua Achiam
Adapted for CS294-112 Fall 2018 by Michael Chang and Soroush Nasiriany
"""
import numpy as np
import tensorflow as tf
import gym
import logz
import os
import time
import inspect
from multiprocessing import Process
# ============================================================================================#
# Utilities
# ============================================================================================#
# ========================================================================================#
# ----------PROBLEM 2----------
# ========================================================================================#
def normalize(random_v):
    """Shift and scale *random_v* to zero mean and unit std.

    A small epsilon in the denominator keeps the division safe when the
    input has zero standard deviation.
    """
    mean = np.mean(random_v)
    std = np.std(random_v)
    return (random_v - mean) / (std + 1e-08)
def build_mlp(input_placeholder, output_size, scope, n_layers, size, activation=tf.tanh, output_activation=None):
    """
    Builds a feed forward neural network
    arguments:
        input_placeholder: placeholder variable for the state (batch_size, input_size)
        output_size: size of the output layer
        scope: variable scope of the network
        n_layers: number of hidden layers
        size: dimension of the hidden layer
        activation: activation of the hidden layers
        output_activation: activation of the ouput layers
    returns:
        output placeholder of the network (the result of a forward pass)
    """
    with tf.variable_scope(scope):
        hidden = input_placeholder
        # Build exactly `n_layers` hidden layers as documented. The previous
        # `range(n_layers - 1)` produced one fewer hidden layer than requested.
        for _ in range(n_layers):
            hidden = tf.layers.dense(inputs=hidden, units=size, activation=activation)
        # Final linear projection to the requested output size.
        output_placeholder = tf.layers.dense(inputs=hidden, units=output_size, activation=output_activation)
    return output_placeholder
def pathlength(path):
    """Length of a rollout: the number of reward entries it recorded."""
    rewards = path["reward"]
    return len(rewards)
def setup_logger(logdir, locals_):
    """Point logz at *logdir* and record the train_PG call parameters.

    arguments:
        logdir: output directory for logz
        locals_: the caller's locals(), used to look up each train_PG argument
    """
    # Configure output directory for logging
    logz.configure_output_dir(logdir)
    # Log experimental parameters.
    # getfullargspec replaces inspect.getargspec, which is deprecated and was
    # removed in Python 3.11; index 0 is still the positional argument names.
    args = inspect.getfullargspec(train_PG)[0]
    params = {k: locals_[k] if k in locals_ else None for k in args}
    logz.save_params(params)
# ============================================================================================#
# Policy Gradient
# ============================================================================================#
class Agent(object):
    """Policy-gradient agent.

    Builds the TF1 computation graph (policy network, action sampling,
    log-probability and loss ops, optional NN baseline), samples rollouts
    from a gym environment, estimates returns/advantages, and applies
    REINFORCE-style updates.
    """

    def __init__(self, computation_graph_args, sample_trajectory_args, estimate_return_args):
        super(Agent, self).__init__()
        # Graph / architecture hyperparameters.
        self.ob_dim = computation_graph_args[ 'ob_dim' ]
        self.ac_dim = computation_graph_args[ 'ac_dim' ]
        self.discrete = computation_graph_args[ 'discrete' ]
        self.size = computation_graph_args[ 'size' ]
        self.n_layers = computation_graph_args[ 'n_layers' ]
        self.learning_rate = computation_graph_args[ 'learning_rate' ]
        # Rollout-collection settings.
        self.animate = sample_trajectory_args[ 'animate' ]
        self.max_path_length = sample_trajectory_args[ 'max_path_length' ]
        self.min_timesteps_per_batch = sample_trajectory_args[ 'min_timesteps_per_batch' ]
        # Return-estimation settings.
        self.gamma = estimate_return_args[ 'gamma' ]
        self.reward_to_go = estimate_return_args[ 'reward_to_go' ]
        self.nn_baseline = estimate_return_args[ 'nn_baseline' ]
        self.normalize_advantages = estimate_return_args[ 'normalize_advantages' ]

    def init_tf_sess(self):
        # Single-threaded session config for reproducible runs.
        tf_config = tf.ConfigProto(inter_op_parallelism_threads=1, intra_op_parallelism_threads=1)
        self.sess = tf.Session(config=tf_config)
        self.sess.__enter__()  # equivalent to `with self.sess:`
        tf.global_variables_initializer().run()  # pylint: disable=E1101

    # ========================================================================================#
    #                           ----------PROBLEM 2----------
    # ========================================================================================#
    def define_placeholders(self):
        """
        Placeholders for batch batch observations / actions / advantages in policy gradient
        loss function.
        See Agent.build_computation_graph for notation

        returns:
            sy_ob_no: placeholder for observations
            sy_ac_na: placeholder for actions
            sy_adv_n: placeholder for advantages
        """
        sy_ob_no = tf.placeholder(shape=[None, self.ob_dim], name="ob", dtype=tf.float32)
        # Discrete actions are integer indices; continuous ones are real vectors.
        if self.discrete:
            sy_ac_na = tf.placeholder(shape=[None], name="ac", dtype=tf.int32)
        else:
            sy_ac_na = tf.placeholder(shape=[None, self.ac_dim], name="ac", dtype=tf.float32)
        sy_adv_n = tf.placeholder(shape=[None], name="adv", dtype=tf.float32)
        return sy_ob_no, sy_ac_na, sy_adv_n

    # ========================================================================================#
    #                           ----------PROBLEM 2----------
    # ========================================================================================#
    def policy_forward_pass(self, sy_ob_no):
        """ Constructs the symbolic operation for the policy network outputs,
            which are the parameters of the policy distribution p(a|s)

            arguments:
                sy_ob_no: (batch_size, self.ob_dim)

            returns:
                the parameters of the policy.

                if discrete, the parameters are the logits of a categorical distribution
                    over the actions
                    sy_logits_na: (batch_size, self.ac_dim)

                if continuous, the parameters are a tuple (mean, log_std) of a Gaussian
                    distribution over actions. log_std should just be a trainable
                    variable, not a network output.
                    sy_mean: (batch_size, self.ac_dim)
                    sy_logstd: (self.ac_dim,)

            Hint: use the 'build_mlp' function to output the logits (in the discrete case)
                and the mean (in the continuous case).
                Pass in self.n_layers for the 'n_layers' argument, and
                pass in self.size for the 'size' argument.
        """
        if self.discrete:
            sy_logits_na = build_mlp(sy_ob_no, self.ac_dim, "policy", n_layers=self.n_layers, size=self.size)
            return sy_logits_na
        else:
            sy_mean = build_mlp(sy_ob_no, self.ac_dim, "policy", n_layers=self.n_layers, size=self.size)
            # log-std is a free trainable variable, shared across the batch.
            sy_logstd = tf.get_variable("std", [1, self.ac_dim],
                                        initializer=tf.contrib.layers.xavier_initializer(seed=1))
            return (sy_mean, sy_logstd)

    # ========================================================================================#
    #                           ----------PROBLEM 2----------
    # ========================================================================================#
    def sample_action(self, policy_parameters):
        """ Constructs a symbolic operation for stochastically sampling from the policy
            distribution

            arguments:
                policy_parameters
                    if discrete: logits of a categorical distribution over actions
                        sy_logits_na: (batch_size, self.ac_dim)
                    if continuous: (mean, log_std) of a Gaussian distribution over actions
                        sy_mean: (batch_size, self.ac_dim)
                        sy_logstd: (self.ac_dim,)

            returns:
                sy_sampled_ac:
                    if discrete: (batch_size,)
                    if continuous: (batch_size, self.ac_dim)

            Hint: for the continuous case, use the reparameterization trick:
                 The output from a Gaussian distribution with mean 'mu' and std 'sigma' is

                      mu + sigma * z,         z ~ N(0, I)

                 This reduces the problem to just sampling z. (Hint: use tf.random_normal!)
        """
        if self.discrete:
            sy_logits_na = policy_parameters
            # tf.multinomial samples one action index per row; squeeze to (batch,).
            sy_sampled_ac = tf.squeeze(tf.multinomial(sy_logits_na, 1), axis=1)
        else:
            sy_mean, sy_logstd = policy_parameters
            # Reparameterization trick: mu + sigma * z, z ~ N(0, I).
            sy_sampled_ac = sy_mean + tf.random_normal(tf.shape(sy_mean)) * tf.exp(sy_logstd)
        return sy_sampled_ac

    # ========================================================================================#
    #                           ----------PROBLEM 2----------
    # ========================================================================================#
    def get_log_prob(self, policy_parameters, sy_ac_na):
        """ Constructs a symbolic operation for computing the log probability of a set of actions
            that were actually taken according to the policy

            arguments:
                policy_parameters
                    if discrete: logits of a categorical distribution over actions
                        sy_logits_na: (batch_size, self.ac_dim)
                    if continuous: (mean, log_std) of a Gaussian distribution over actions
                        sy_mean: (batch_size, self.ac_dim)
                        sy_logstd: (self.ac_dim,)

                sy_ac_na:
                    if discrete: (batch_size,)
                    if continuous: (batch_size, self.ac_dim)

            returns:
                sy_logprob_n: (batch_size)

            Hint:
                For the discrete case, use the log probability under a categorical distribution.
                For the continuous case, use the log probability under a multivariate gaussian.
        """
        if self.discrete:
            sy_logits_na = policy_parameters
            # Negated cross-entropy == categorical log-probability of the taken action.
            sy_logprob_n = -tf.nn.sparse_softmax_cross_entropy_with_logits(logits=sy_logits_na, labels=sy_ac_na)
        else:
            sy_mean, sy_logstd = policy_parameters
            # Gaussian log-prob up to an additive constant; constants (and the
            # 2*logstd vs logstd discrepancy noted by the original author) do not
            # change the gradient direction w.r.t. the mean.
            sy_logprob_n = -(0.5) * (
                tf.reduce_sum(sy_logstd + tf.square((sy_ac_na - sy_mean) / tf.exp(sy_logstd)), axis=1))
        return sy_logprob_n

    def build_computation_graph(self):
        """
        Notes on notation:

        Symbolic variables have the prefix sy_, to distinguish them from the numerical values
        that are computed later in the function

        Prefixes and suffixes:
        ob - observation
        ac - action
        _no - this tensor should have shape (batch self.size /n/, observation dim)
        _na - this tensor should have shape (batch self.size /n/, action dim)
        _n  - this tensor should have shape (batch self.size /n/)

        Note: batch self.size /n/ is defined at runtime, and until then, the shape for that axis
        is None

        ----------------------------------------------------------------------------------
        loss: a function of self.sy_logprob_n and self.sy_adv_n that we will differentiate
            to get the policy gradient.
        """
        self.sy_ob_no, self.sy_ac_na, self.sy_adv_n = self.define_placeholders()

        # The policy takes in an observation and produces a distribution over the action space
        self.policy_parameters = self.policy_forward_pass(self.sy_ob_no)

        # We can sample actions from this action distribution.
        # This will be called in Agent.sample_trajectory() where we generate a rollout.
        self.sy_sampled_ac = self.sample_action(self.policy_parameters)

        # We can also compute the logprob of the actions that were actually taken by the policy
        # This is used in the loss function.
        self.sy_logprob_n = self.get_log_prob(self.policy_parameters, self.sy_ac_na)

        # ========================================================================================#
        # ----------PROBLEM 2----------
        # Loss Function and Training Operation
        # ========================================================================================#
        # Negative mean of (log-prob * advantage): minimizing this ascends the PG objective.
        loss = -1* tf.reduce_mean(self.sy_logprob_n * self.sy_adv_n)
        self.update_op = tf.train.AdamOptimizer(self.learning_rate).minimize(loss)

        # ========================================================================================#
        # ----------PROBLEM 6----------
        # Optional Baseline
        #
        # Define placeholders for targets, a loss function and an update op for fitting a
        # neural network baseline. These will be used to fit the neural network baseline.
        # ========================================================================================#
        if self.nn_baseline:
            self.baseline_prediction = tf.squeeze(build_mlp(
                self.sy_ob_no,
                1,
                "nn_baseline",
                n_layers=self.n_layers,
                size=self.size))
            # Regression target for the baseline (normalized Q-values; see update_parameters).
            self.sy_target_n = tf.placeholder(shape=[ None ], name="bl_target", dtype=tf.float32)
            baseline_loss = tf.losses.mean_squared_error(self.sy_target_n, self.baseline_prediction)
            self.baseline_update_op = tf.train.AdamOptimizer(self.learning_rate).minimize(baseline_loss)

    def sample_trajectories(self, itr, env):
        # Collect paths until we have enough timesteps
        timesteps_this_batch = 0
        paths = [ ]
        while True:
            # Optionally render only the first episode of every 10th iteration.
            animate_this_episode = (len(paths) == 0 and (itr % 10 == 0) and self.animate)
            path = self.sample_trajectory(env, animate_this_episode)
            paths.append(path)
            timesteps_this_batch += pathlength(path)
            if timesteps_this_batch > self.min_timesteps_per_batch:
                break
        return paths, timesteps_this_batch

    def sample_trajectory(self, env, animate_this_episode):
        # Roll out one episode, recording observations, actions and rewards.
        ob = env.reset()
        obs, acs, rewards = [ ], [ ], [ ]
        steps = 0
        while True:
            if animate_this_episode:
                env.render()
                time.sleep(0.1)
            obs.append(ob)
            # ====================================================================================#
            # ----------PROBLEM 3----------
            # ====================================================================================#
            # Feed a single observation as a batch of one; take the single sampled action.
            ob_feed = np.array(ob)
            ac = self.sess.run(self.sy_sampled_ac, feed_dict={self.sy_ob_no: ob_feed[None, :]})
            ac = ac[0]
            acs.append(ac)
            ob, rew, done, _ = env.step(ac)
            rewards.append(rew)
            steps += 1
            if done or steps > self.max_path_length:
                break
        path = {"observation": np.array(obs, dtype=np.float32),
                "reward": np.array(rewards, dtype=np.float32),
                "action": np.array(acs, dtype=np.float32),
                "steps":steps}
        return path

    # ====================================================================================#
    # ----------PROBLEM 3----------
    # ====================================================================================#
    def sum_of_rewards(self, re_n):
        """
        Monte Carlo estimation of the Q function.

        let sum_of_path_lengths be the sum of the lengths of the paths sampled from
            Agent.sample_trajectories
        let num_paths be the number of paths sampled from Agent.sample_trajectories

        arguments:
            re_n: length: num_paths. Each element in re_n is a numpy array
                containing the rewards for the particular path

        returns:
            q_n: shape: (sum_of_path_lengths). A single vector for the estimated q values
                whose length is the sum of the lengths of the paths

        ----------------------------------------------------------------------------------

        Your code should construct numpy arrays for Q-values which will be used to compute
        advantages (which will in turn be fed to the placeholder you defined in
        Agent.define_placeholders).

        Recall that the expression for the policy gradient PG is

              PG = E_{tau} [sum_{t=0}^T grad log pi(a_t|s_t) * (Q_t - b_t )]

        where

              tau=(s_0, a_0, ...) is a trajectory,
              Q_t is the Q-value at time t, Q^{pi}(s_t, a_t),
              and b_t is a baseline which may depend on s_t.

        You will write code for two cases, controlled by the flag 'reward_to_go':

          Case 1: trajectory-based PG
              (reward_to_go = False)
              Instead of Q^{pi}(s_t, a_t), we use the total discounted reward summed over
              entire trajectory (regardless of which time step the Q-value should be for).
              For this case, the policy gradient estimator is
                  E_{tau} [sum_{t=0}^T grad log pi(a_t|s_t) * Ret(tau)]
              where
                  Ret(tau) = sum_{t'=0}^T gamma^t' r_{t'}.
              Thus, you should compute
                  Q_t = Ret(tau)

          Case 2: reward-to-go PG
              (reward_to_go = True)
              Here, you estimate Q^{pi}(s_t, a_t) by the discounted sum of rewards starting
              from time step t. Thus, you should compute
                  Q_t = sum_{t'=t}^T gamma^(t'-t) * r_{t'}

        Store the Q-values for all timesteps and all trajectories in a variable 'q_n',
        like the 'ob_no' and 'ac_na' above.
        """
        if self.reward_to_go:
            q_n = []
            for r_path in re_n:
                # Backward pass accumulates the discounted suffix sum in place.
                r_path_c=np.copy(r_path)
                for ind in reversed(range(r_path.shape[0] - 1)):
                    r_path_c[ind] = r_path_c[ind] + self.gamma * r_path_c[ind + 1]
                q_n.extend(r_path_c)
        else:
            q_n = []
            for r_path in re_n:
                r_path_c = np.copy(r_path)
                for ind in reversed(range(r_path.shape[0] - 1)):
                    r_path_c[ind] = r_path_c[ind] + self.gamma * r_path_c[ind + 1]
                # Full-trajectory return: repeat Ret(tau) (element 0) for every timestep.
                q_n.extend([r_path_c[0] for _ in range(r_path.shape[0])])
        return q_n

    def compute_advantage(self, ob_no, q_n):
        """
        Computes advantages by (possibly) subtracting a baseline from the estimated Q values

        let sum_of_path_lengths be the sum of the lengths of the paths sampled from
            Agent.sample_trajectories
        let num_paths be the number of paths sampled from Agent.sample_trajectories

        arguments:
            ob_no: shape: (sum_of_path_lengths, ob_dim)
            q_n: shape: (sum_of_path_lengths). A single vector for the estimated q values
                whose length is the sum of the lengths of the paths

        returns:
            adv_n: shape: (sum_of_path_lengths). A single vector for the estimated
                advantages whose length is the sum of the lengths of the paths
        """
        # ====================================================================================#
        # ----------PROBLEM 6----------
        # Computing Baselines
        # ====================================================================================#
        if self.nn_baseline:
            # If nn_baseline is True, use your neural network to predict reward-to-go
            # at each timestep for each trajectory, and save the result in a variable 'b_n'
            # like 'ob_no', 'ac_na', and 'q_n'.
            #
            # Hint #bl1: rescale the output from the nn_baseline to match the statistics
            # (mean and std) of the current batch of Q-values. (Goes with Hint
            # #bl2 in Agent.update_parameters.
            # NOTE(review): this normalizes to mean 0 / std 1 rather than rescaling to
            # the Q-value batch statistics the hint describes - confirm intended.
            b_n = normalize(self.sess.run(self.baseline_prediction, feed_dict={self.sy_ob_no: ob_no}))
            adv_n = q_n - b_n
        else:
            adv_n = q_n.copy()
        return adv_n

    def estimate_return(self, ob_no, re_n):
        """
        Estimates the returns over a set of trajectories.

        let sum_of_path_lengths be the sum of the lengths of the paths sampled from
            Agent.sample_trajectories
        let num_paths be the number of paths sampled from Agent.sample_trajectories

        arguments:
            ob_no: shape: (sum_of_path_lengths, ob_dim)
            re_n: length: num_paths. Each element in re_n is a numpy array
                containing the rewards for the particular path

        returns:
            q_n: shape: (sum_of_path_lengths). A single vector for the estimated q values
                whose length is the sum of the lengths of the paths
            adv_n: shape: (sum_of_path_lengths). A single vector for the estimated
                advantages whose length is the sum of the lengths of the paths
        """
        q_n = self.sum_of_rewards(re_n)
        adv_n = self.compute_advantage(ob_no, q_n)
        # ====================================================================================#
        # ----------PROBLEM 3----------
        # Advantage Normalization
        # ====================================================================================#
        if self.normalize_advantages:
            # On the next line, implement a trick which is known empirically to reduce variance
            # in policy gradient methods: normalize adv_n to have mean zero and std=1.
            adv_n = normalize(adv_n)
        return q_n, adv_n

    def update_parameters(self, ob_no, ac_na, q_n, adv_n):
        """
        Update the parameters of the policy and (possibly) the neural network baseline,
        which is trained to approximate the value function.

        arguments:
            ob_no: shape: (sum_of_path_lengths, ob_dim)
            ac_na: shape: (sum_of_path_lengths).
            q_n: shape: (sum_of_path_lengths). A single vector for the estimated q values
                whose length is the sum of the lengths of the paths
            adv_n: shape: (sum_of_path_lengths). A single vector for the estimated
                advantages whose length is the sum of the lengths of the paths

        returns:
            nothing
        """
        # ====================================================================================#
        # ----------PROBLEM 6----------
        # Optimizing Neural Network Baseline
        # ====================================================================================#
        if self.nn_baseline:
            # If a neural network baseline is used, set up the targets and the inputs for the
            # baseline.
            #
            # Fit it to the current batch in order to use for the next iteration. Use the
            # baseline_update_op you defined earlier.
            #
            # Hint #bl2: Instead of trying to target raw Q-values directly, rescale the
            # targets to have mean zero and std=1. (Goes with Hint #bl1 in
            # Agent.compute_advantage.)
            target_n = normalize(q_n)
            _ = self.sess.run(self.baseline_update_op, feed_dict={self.sy_target_n: target_n, self.sy_ob_no: ob_no})

        # ====================================================================================#
        # ----------PROBLEM 3----------
        # Performing the Policy Update
        # ====================================================================================#
        # Call the update operation necessary to perform the policy gradient update based on
        # the current batch of rollouts.
        #
        # For debug purposes, you may wish to save the value of the loss function before
        # and after an update, and then log them below.
        _ = self.sess.run(self.update_op, feed_dict={self.sy_ob_no: ob_no, self.sy_ac_na: ac_na, self.sy_adv_n: adv_n})
def train_PG(
        exp_name,
        env_name,
        n_iter,
        gamma,
        min_timesteps_per_batch,
        max_path_length,
        learning_rate,
        reward_to_go,
        animate,
        logdir,
        normalize_advantages,
        nn_baseline,
        seed,
        n_layers,
        size):
    """Top-level policy-gradient training driver.

    Builds the gym environment and an Agent, then runs `n_iter` iterations of
    sample-trajectories -> estimate-return -> update-parameters, logging
    per-iteration diagnostics through logz.
    """
    start = time.time()

    # ========================================================================================#
    # Set Up Logger
    # ========================================================================================#
    # locals() captures every argument above so setup_logger can record them.
    setup_logger(logdir, locals())

    # ========================================================================================#
    # Set Up Env
    # ========================================================================================#

    # Make the gym environment
    env = gym.make(env_name)

    # Set random seeds
    tf.set_random_seed(seed)
    np.random.seed(seed)
    env.seed(seed)

    # Maximum length for episodes (fall back to the env's own limit)
    max_path_length = max_path_length or env.spec.max_episode_steps

    # Is this env continuous, or discrete?
    discrete = isinstance(env.action_space, gym.spaces.Discrete)

    # Observation and action sizes
    ob_dim = env.observation_space.shape[ 0 ]
    ac_dim = env.action_space.n if discrete else env.action_space.shape[ 0 ]

    # ========================================================================================#
    # Initialize Agent
    # ========================================================================================#
    computation_graph_args = {
        'n_layers': n_layers,
        'ob_dim': ob_dim,
        'ac_dim': ac_dim,
        'discrete': discrete,
        'size': size,
        'learning_rate': learning_rate,
    }

    sample_trajectory_args = {
        'animate': animate,
        'max_path_length': max_path_length,
        'min_timesteps_per_batch': min_timesteps_per_batch,
    }

    estimate_return_args = {
        'gamma': gamma,
        'reward_to_go': reward_to_go,
        'nn_baseline': nn_baseline,
        'normalize_advantages': normalize_advantages,
    }

    agent = Agent(computation_graph_args, sample_trajectory_args, estimate_return_args)

    # build computation graph
    agent.build_computation_graph()

    # tensorflow: config, session, variable initialization
    agent.init_tf_sess()

    # ========================================================================================#
    # Training Loop
    # ========================================================================================#
    total_timesteps = 0
    for itr in range(n_iter):
        print("********** Iteration %i ************" % itr)
        paths, timesteps_this_batch = agent.sample_trajectories(itr, env)
        total_timesteps += timesteps_this_batch

        # Build arrays for observation, action for the policy gradient update by concatenating
        # across paths
        ob_no = np.concatenate([ path[ "observation" ] for path in paths ])
        ac_na = np.concatenate([ path[ "action" ] for path in paths ])
        re_n = [ path[ "reward" ] for path in paths ]

        q_n, adv_n = agent.estimate_return(ob_no, re_n)

        agent.update_parameters(ob_no, ac_na, q_n, adv_n)

        # Log diagnostics
        returns = [ path[ "reward" ].sum() for path in paths ]
        steps = [ path[ "steps" ] for path in paths ]
        ep_lengths = [ pathlength(path) for path in paths ]
        logz.log_tabular("Time", time.time() - start)
        logz.log_tabular("Iteration", itr)
        logz.log_tabular("AverageReturn", np.mean(returns))
        logz.log_tabular("StdReturn", np.std(returns))
        logz.log_tabular("MaxReturn", np.max(returns))
        logz.log_tabular("MinReturn", np.min(returns))
        logz.log_tabular("EpLenMean", np.mean(ep_lengths))
        logz.log_tabular("EpLenStd", np.std(ep_lengths))
        logz.log_tabular("TimestepsThisBatch", timesteps_this_batch)
        logz.log_tabular("TimestepsSoFar", total_timesteps)
        logz.dump_tabular()
        logz.pickle_tf_vars()
def main():
    """Parse CLI arguments and launch one training process per experiment seed."""
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('env_name', type=str)
    parser.add_argument('--exp_name', type=str, default='vpg')
    parser.add_argument('--render', action='store_true')
    parser.add_argument('--discount', type=float, default=1.0)
    parser.add_argument('--n_iter', '-n', type=int, default=100)
    parser.add_argument('--batch_size', '-b', type=int, default=1000)
    parser.add_argument('--ep_len', '-ep', type=float, default=-1.)
    parser.add_argument('--learning_rate', '-lr', type=float, default=5e-3)
    parser.add_argument('--reward_to_go', '-rtg', action='store_true')
    parser.add_argument('--dont_normalize_advantages', '-dna', action='store_true')
    parser.add_argument('--nn_baseline', '-bl', action='store_true')
    parser.add_argument('--seed', type=int, default=1)
    parser.add_argument('--n_experiments', '-e', type=int, default=1)
    parser.add_argument('--n_layers', '-l', type=int, default=2)
    parser.add_argument('--size', '-s', type=int, default=64)
    args = parser.parse_args()

    if not os.path.exists('data'):
        os.makedirs('data')
    # Per-run log directory: data/<exp>_<env>_<timestamp>/<seed>
    logdir = args.exp_name + '_' + args.env_name + '_' + time.strftime("%d-%m-%Y_%H-%M-%S")
    logdir = os.path.join('data', logdir)
    if not os.path.exists(logdir):
        os.makedirs(logdir)

    max_path_length = args.ep_len if args.ep_len > 0 else None

    processes = []
    for e in range(args.n_experiments):
        seed = args.seed + 10 * e
        print('Running experiment with seed %d' % seed)

        # `seed` is bound as a default argument to avoid the late-binding
        # closure pitfall: without it, a train_func that runs after the loop
        # advances would read the *latest* seed instead of its own.
        def train_func(seed=seed):
            train_PG(
                exp_name=args.exp_name,
                env_name=args.env_name,
                n_iter=args.n_iter,
                gamma=args.discount,
                min_timesteps_per_batch=args.batch_size,
                max_path_length=max_path_length,
                learning_rate=args.learning_rate,
                reward_to_go=args.reward_to_go,
                animate=True,  # NOTE: hard-coded on; args.render is ignored here
                logdir=os.path.join(logdir, '%d' % seed),
                normalize_advantages=not args.dont_normalize_advantages,
                nn_baseline=args.nn_baseline,
                seed=seed,
                n_layers=args.n_layers,
                size=args.size
            )
        # Awkward hacky process runs, because Tensorflow does not like
        # repeatedly calling train_PG in the same thread.
        p = Process(target=train_func, args=tuple())
        p.start()
        processes.append(p)
        # Calling p.join() here instead would serialize the experiments.

    for p in processes:
        p.join()


if __name__ == "__main__":
    main()
|
test_browser.py | import BaseHTTPServer, multiprocessing, os, shutil, subprocess, unittest, zlib, webbrowser, time, shlex
from runner import BrowserCore, path_from_root, nonfastcomp
from tools.shared import *
# User can specify an environment variable EMSCRIPTEN_BROWSER to force the browser test suite to
# run using another browser command line than the default system browser.
emscripten_browser = os.environ.get('EMSCRIPTEN_BROWSER')
if emscripten_browser:
    # Split the command shell-style so arguments in the env var are preserved.
    cmd = shlex.split(emscripten_browser)
    def run_in_other_browser(url):
        # Launch the user-specified browser command with the test URL appended.
        Popen(cmd + [url])
    # Monkey-patch the stdlib webbrowser module so every test opens its pages
    # in the configured browser instead of the system default.
    webbrowser.open_new = run_in_other_browser
def test_chunked_synchronous_xhr_server(support_byte_ranges, chunkSize, data, checksum):
    """Serve *data* on localhost:11111 for the chunked-XHR browser test.

    Handles a fixed number of requests, optionally honouring HTTP Range
    requests, then returns.  (Python 2 / BaseHTTPServer code.)
    """
    class ChunkedServerHandler(BaseHTTPServer.BaseHTTPRequestHandler):
        # NOTE(review): `extra=[]` is a mutable default, but it is only
        # iterated (never mutated), so it is harmless here.
        def sendheaders(s, extra=[], length=len(data)):
            s.send_response(200)
            s.send_header("Content-Length", str(length))
            # CORS headers so the page served from :8888 may read this origin.
            s.send_header("Access-Control-Allow-Origin", "http://localhost:8888")
            s.send_header("Access-Control-Expose-Headers", "Content-Length, Accept-Ranges")
            s.send_header("Content-type", "application/octet-stream")
            if support_byte_ranges:
                s.send_header("Accept-Ranges", "bytes")
            for i in extra:
                s.send_header(i[0], i[1])
            s.end_headers()

        def do_HEAD(s):
            s.sendheaders()

        def do_OPTIONS(s):
            # CORS preflight: advertise Range support with an empty body.
            s.sendheaders([("Access-Control-Allow-Headers", "Range")], 0)

        def do_GET(s):
            if not support_byte_ranges:
                # No range support: send the whole payload in one response.
                s.sendheaders()
                s.wfile.write(data)
            else:
                # Parse "Range: bytes=start-end" and serve just that slice.
                (start, end) = s.headers.get("range").split("=")[1].split("-")
                start = int(start)
                end = int(end)
                end = min(len(data)-1, end)
                length = end-start+1
                s.sendheaders([],length)
                s.wfile.write(data[start:end+1])
                s.wfile.close()

    # Handle a fixed number of requests then fall off the end (no serve_forever).
    expectedConns = 11
    httpd = BaseHTTPServer.HTTPServer(('localhost', 11111), ChunkedServerHandler)
    for i in range(expectedConns+1):
        httpd.handle_request()
class browser(BrowserCore):
    @classmethod
    def setUpClass(self):
        # One-time setup for the whole browser suite; the base class starts
        # the local harness web server. (Python 2 print statements.)
        super(browser, self).setUpClass()
        print
        print 'Running the browser tests. Make sure the browser allows popups from localhost.'
        print
def test_html(self):
# test HTML generation.
self.btest('hello_world_sdl.cpp', reference='htmltest.png',
message='You should see "hello, world!" and a colored cube.')
    def test_html_source_map(self):
        # Manual/semi-manual test: compile with -g4 (source maps), open the page
        # in a browser, and check the C++ source is visible in devtools.
        cpp_file = os.path.join(self.get_dir(), 'src.cpp')
        html_file = os.path.join(self.get_dir(), 'src.html')
        # browsers will try to 'guess' the corresponding original line if a
        # generated line is unmapped, so if we want to make sure that our
        # numbering is correct, we need to provide a couple of 'possible wrong
        # answers'. thus, we add some printf calls so that the cpp file gets
        # multiple mapped lines. in other words, if the program consists of a
        # single 'throw' statement, browsers may just map any thrown exception to
        # that line, because it will be the only mapped line.
        with open(cpp_file, 'w') as f:
            f.write(r'''
      #include <cstdio>

      int main() {
        printf("Starting test\n");
        try {
          throw 42; // line 8
        } catch (int e) { }
        printf("done\n");
        return 0;
      }
      ''')
        # use relative paths when calling emcc, because file:// URIs can only load
        # sourceContent when the maps are relative paths
        try_delete(html_file)
        try_delete(html_file + '.map')
        Popen([PYTHON, EMCC, 'src.cpp', '-o', 'src.html', '-g4'],
              cwd=self.get_dir()).communicate()
        assert os.path.exists(html_file)
        assert os.path.exists(html_file + '.map')
        # Open the built page for manual inspection (no automated assertion here).
        webbrowser.open_new('file://' + html_file)
        time.sleep(1)
        print '''
If manually bisecting:
  Check that you see src.cpp among the page sources.
  Even better, add a breakpoint, e.g. on the printf, then reload, then step through and see the print (best to run with EM_SAVE_DIR=1 for the reload).
'''
def test_emscripten_log(self):
src = os.path.join(self.get_dir(), 'src.cpp')
open(src, 'w').write(self.with_report_result(open(path_from_root('tests', 'emscripten_log', 'emscripten_log.cpp')).read()))
Popen([PYTHON, EMCC, src, '--pre-js', path_from_root('src', 'emscripten-source-map.min.js'), '-g', '-o', 'page.html']).communicate()
self.run_browser('page.html', None, '/report_result?1')
def build_native_lzma(self):
  # Build the native lzma.js helper executable; a no-op when a built,
  # executable binary is already present.
  native_bin = path_from_root('third_party', 'lzma.js', 'lzma-native')
  if os.path.isfile(native_bin) and os.access(native_bin, os.X_OK):
    return
  prev_dir = os.getcwd()
  try:
    os.chdir(path_from_root('third_party', 'lzma.js'))
    # On Windows prefer using MinGW make if it exists, otherwise fall back to
    # hoping we have cygwin make.
    if WINDOWS and Building.which('mingw32-make'):
      Popen(['doit.bat']).communicate()
    else:
      Popen(['sh', './doit.sh']).communicate()
  finally:
    # Always restore the original working directory.
    os.chdir(prev_dir)
def test_split(self):
  # Exercise emcc's --split mode: output is split into a main js file, a
  # functions js file and an html include, which are stitched together here
  # into a hand-written shell page. Runs only under non-fastcomp.
  def nfc():
    # test HTML generation.
    self.reftest(path_from_root('tests', 'htmltest.png'))
    output = Popen([PYTHON, EMCC, path_from_root('tests', 'hello_world_sdl.cpp'), '-o', 'something.js', '--split', '100', '--pre-js', 'reftest.js']).communicate()
    assert os.path.exists(os.path.join(self.get_dir(), 'something.js')), 'must be main js file'
    assert os.path.exists(os.path.join(self.get_dir(), 'something_functions.js')), 'must be functions js file'
    assert os.path.exists(os.path.join(self.get_dir(), 'something.include.html')), 'must be js include file'
    # Hand-written shell page; the generated include is spliced in below.
    open(os.path.join(self.get_dir(), 'something.html'), 'w').write('''
<!doctype html>
<html lang="en-us">
<head>
<meta charset="utf-8">
<meta http-equiv="Content-Type" content="text/html; charset=utf-8">
<title>Emscripten-Generated Code</title>
<style>
.emscripten { padding-right: 0; margin-left: auto; margin-right: auto; display: block; }
canvas.emscripten { border: 1px solid black; }
textarea.emscripten { font-family: monospace; width: 80%; }
div.emscripten { text-align: center; }
</style>
</head>
<body>
<hr/>
<div class="emscripten" id="status">Downloading...</div>
<div class="emscripten">
<progress value="0" max="100" id="progress" hidden=1></progress>
</div>
<canvas class="emscripten" id="canvas" oncontextmenu="event.preventDefault()"></canvas>
<hr/>
<div class="emscripten"><input type="button" value="fullscreen" onclick="Module.requestFullScreen()"></div>
<hr/>
<textarea class="emscripten" id="output" rows="8"></textarea>
<hr>
<script type='text/javascript'>
// connect to canvas
var Module = {
preRun: [],
postRun: [],
print: (function() {
var element = document.getElementById('output');
element.value = ''; // clear browser cache
return function(text) {
// These replacements are necessary if you render to raw HTML
//text = text.replace(/&/g, "&");
//text = text.replace(/</g, "<");
//text = text.replace(/>/g, ">");
//text = text.replace('\\n', '<br>', 'g');
element.value += text + "\\n";
element.scrollTop = element.scrollHeight; // focus on bottom
};
})(),
printErr: function(text) {
if (0) { // XXX disabled for safety typeof dump == 'function') {
dump(text + '\\n'); // fast, straight to the real console
} else {
console.log(text);
}
},
canvas: document.getElementById('canvas'),
setStatus: function(text) {
if (Module.setStatus.interval) clearInterval(Module.setStatus.interval);
var m = text.match(/([^(]+)\((\d+(\.\d+)?)\/(\d+)\)/);
var statusElement = document.getElementById('status');
var progressElement = document.getElementById('progress');
if (m) {
text = m[1];
progressElement.value = parseInt(m[2])*100;
progressElement.max = parseInt(m[4])*100;
progressElement.hidden = false;
} else {
progressElement.value = null;
progressElement.max = null;
progressElement.hidden = true;
}
statusElement.innerHTML = text;
},
totalDependencies: 0,
monitorRunDependencies: function(left) {
this.totalDependencies = Math.max(this.totalDependencies, left);
Module.setStatus(left ? 'Preparing... (' + (this.totalDependencies-left) + '/' + this.totalDependencies + ')' : 'All downloads complete.');
}
};
Module.setStatus('Downloading...');
</script>''' + open(os.path.join(self.get_dir(), 'something.include.html')).read() + '''
</body>
</html>
''')
    self.run_browser('something.html', 'You should see "hello, world!" and a colored cube.', '/report_result?0')
  nonfastcomp(nfc)
def test_split_in_source_filenames(self):
  # Like test_split, but with -g, which makes --split place the function
  # files in a directory named after the source file. Runs only under
  # non-fastcomp.
  def nfc():
    self.reftest(path_from_root('tests', 'htmltest.png'))
    output = Popen([PYTHON, EMCC, path_from_root('tests', 'hello_world_sdl.cpp'), '-o', 'something.js', '-g', '--split', '100', '--pre-js', 'reftest.js']).communicate()
    assert os.path.exists(os.path.join(self.get_dir(), 'something.js')), 'must be main js file'
    assert os.path.exists(os.path.join(self.get_dir(), 'something', 'hello_world_sdl.cpp.js')), 'must be functions js file'
    assert os.path.exists(os.path.join(self.get_dir(), 'something.include.html')), 'must be js include file'
    # Hand-written shell page; the generated include is spliced in below.
    open(os.path.join(self.get_dir(), 'something.html'), 'w').write('''
<!doctype html>
<html lang="en-us">
<head>
<meta charset="utf-8">
<meta http-equiv="Content-Type" content="text/html; charset=utf-8">
<title>Emscripten-Generated Code</title>
<style>
.emscripten { padding-right: 0; margin-left: auto; margin-right: auto; display: block; }
canvas.emscripten { border: 1px solid black; }
textarea.emscripten { font-family: monospace; width: 80%; }
div.emscripten { text-align: center; }
</style>
</head>
<body>
<hr/>
<div class="emscripten" id="status">Downloading...</div>
<div class="emscripten">
<progress value="0" max="100" id="progress" hidden=1></progress>
</div>
<canvas class="emscripten" id="canvas" oncontextmenu="event.preventDefault()"></canvas>
<hr/>
<div class="emscripten"><input type="button" value="fullscreen" onclick="Module.requestFullScreen()"></div>
<hr/>
<textarea class="emscripten" id="output" rows="8"></textarea>
<hr>
<script type='text/javascript'>
// connect to canvas
var Module = {
preRun: [],
postRun: [],
print: (function() {
var element = document.getElementById('output');
element.value = ''; // clear browser cache
return function(text) {
// These replacements are necessary if you render to raw HTML
//text = text.replace(/&/g, "&");
//text = text.replace(/</g, "<");
//text = text.replace(/>/g, ">");
//text = text.replace('\\n', '<br>', 'g');
element.value += text + "\\n";
element.scrollTop = element.scrollHeight; // focus on bottom
};
})(),
printErr: function(text) {
if (0) { // XXX disabled for safety typeof dump == 'function') {
dump(text + '\\n'); // fast, straight to the real console
} else {
console.log(text);
}
},
canvas: document.getElementById('canvas'),
setStatus: function(text) {
if (Module.setStatus.interval) clearInterval(Module.setStatus.interval);
var m = text.match(/([^(]+)\((\d+(\.\d+)?)\/(\d+)\)/);
var statusElement = document.getElementById('status');
var progressElement = document.getElementById('progress');
if (m) {
text = m[1];
progressElement.value = parseInt(m[2])*100;
progressElement.max = parseInt(m[4])*100;
progressElement.hidden = false;
} else {
progressElement.value = null;
progressElement.max = null;
progressElement.hidden = true;
}
statusElement.innerHTML = text;
},
totalDependencies: 0,
monitorRunDependencies: function(left) {
this.totalDependencies = Math.max(this.totalDependencies, left);
Module.setStatus(left ? 'Preparing... (' + (this.totalDependencies-left) + '/' + this.totalDependencies + ')' : 'All downloads complete.');
}
};
Module.setStatus('Downloading...');
</script>''' + open(os.path.join(self.get_dir(), 'something.include.html')).read() + '''
</body>
</html>
''')
    self.run_browser('something.html', 'You should see "hello, world!" and a colored cube.', '/report_result?0')
  nonfastcomp(nfc)
def test_compression(self):
  # Build with --compression (native lzma compressor + js decoder) and check
  # the page still runs after the uncompressed side js is renamed away, which
  # proves the .compress file is actually what gets loaded.
  open(os.path.join(self.get_dir(), 'main.cpp'), 'w').write(self.with_report_result(r'''
#include <stdio.h>
#include <emscripten.h>
int main() {
printf("hello compressed world\n");
int result = 1;
REPORT_RESULT();
return 0;
}
'''))
  self.build_native_lzma()
  Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp'), '-o', 'page.html',
         '--compression', '%s,%s,%s' % (path_from_root('third_party', 'lzma.js', 'lzma-native'),
                                        path_from_root('third_party', 'lzma.js', 'lzma-decoder.js'),
                                        'LZMA.decompress')]).communicate()
  assert os.path.exists(os.path.join(self.get_dir(), 'page.js')), 'must be side js'
  assert os.path.exists(os.path.join(self.get_dir(), 'page.js.compress')), 'must be side compressed js'
  assert os.stat(os.path.join(self.get_dir(), 'page.js')).st_size > os.stat(os.path.join(self.get_dir(), 'page.js.compress')).st_size, 'compressed file must be smaller'
  # Remove the uncompressed js so only the compressed one can be served.
  shutil.move(os.path.join(self.get_dir(), 'page.js'), 'page.js.renamedsoitcannotbefound');
  self.run_browser('page.html', '', '/report_result?1')
def test_preload_file(self):
  # End-to-end coverage of --preload-file: various src@dst mappings,
  # --no-heap-copy, absolute source paths, directory packaging with
  # --exclude-file, output into a subdirectory, and FS.createPreloadedFile
  # from a pre-js.
  absolute_src_path = os.path.join(self.get_dir(), 'somefile.txt').replace('\\', '/')
  open(absolute_src_path, 'w').write('''load me right before running the code please''')
  absolute_src_path2 = os.path.join(self.get_dir(), '.somefile.txt').replace('\\', '/')
  open(absolute_src_path2, 'w').write('''load me right before running the code please''')
  def make_main(path):
    # Write a main.cpp that opens `path` on the virtual FS and reports
    # whether its contents match the preloaded text.
    print 'make main at', path
    open(os.path.join(self.get_dir(), 'main.cpp'), 'w').write(self.with_report_result(r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
int main() {
FILE *f = fopen("%s", "r");
char buf[100];
fread(buf, 1, 20, f);
buf[20] = 0;
fclose(f);
printf("|%%s|\n", buf);
int result = !strcmp("load me right before", buf);
REPORT_RESULT();
return 0;
}
''' % path))
  test_cases = [
    # (source preload-file string, file on target FS to load)
    ("somefile.txt", "somefile.txt"),
    (".somefile.txt@somefile.txt", "somefile.txt"),
    ("./somefile.txt", "somefile.txt"),
    ("somefile.txt@file.txt", "file.txt"),
    ("./somefile.txt@file.txt", "file.txt"),
    ("./somefile.txt@./file.txt", "file.txt"),
    ("somefile.txt@/file.txt", "file.txt"),
    ("somefile.txt@/", "somefile.txt"),
    (absolute_src_path + "@file.txt", "file.txt"),
    (absolute_src_path + "@/file.txt", "file.txt"),
    (absolute_src_path + "@/", "somefile.txt"),
    ("somefile.txt@/directory/file.txt", "/directory/file.txt"),
    ("somefile.txt@/directory/file.txt", "directory/file.txt"),
    (absolute_src_path + "@/directory/file.txt", "directory/file.txt")]
  for test in test_cases:
    (srcpath, dstpath) = test
    print 'Testing', srcpath, dstpath
    make_main(dstpath)
    Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp'), '--preload-file', srcpath, '-o', 'page.html']).communicate()
    self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?1')
  # Test that '--no-heap-copy' works.
  make_main('somefile.txt')
  Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp'), '--preload-file', 'somefile.txt', '--no-heap-copy', '-o', 'page.html']).communicate()
  self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?1')
  # By absolute path
  make_main('somefile.txt') # absolute becomes relative
  Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp'), '--preload-file', absolute_src_path, '-o', 'page.html']).communicate()
  self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?1')
  # Test subdirectory handling with asset packaging.
  try_delete(self.in_dir('assets'))
  os.makedirs(os.path.join(self.get_dir(), 'assets/sub/asset1/').replace('\\', '/'))
  os.makedirs(os.path.join(self.get_dir(), 'assets/sub/asset1/.git').replace('\\', '/')) # Test adding directory that shouldn't exist.
  os.makedirs(os.path.join(self.get_dir(), 'assets/sub/asset2/').replace('\\', '/'))
  open(os.path.join(self.get_dir(), 'assets/sub/asset1/file1.txt'), 'w').write('''load me right before running the code please''')
  open(os.path.join(self.get_dir(), 'assets/sub/asset1/.git/shouldnt_be_embedded.txt'), 'w').write('''this file should not get embedded''')
  open(os.path.join(self.get_dir(), 'assets/sub/asset2/file2.txt'), 'w').write('''load me right before running the code please''')
  absolute_assets_src_path = os.path.join(self.get_dir(), 'assets').replace('\\', '/')
  def make_main_two_files(path1, path2, nonexistingpath):
    # main.cpp that checks path1's contents, requires path2 to exist and
    # requires nonexistingpath to be absent (i.e. excluded from the package).
    open(os.path.join(self.get_dir(), 'main.cpp'), 'w').write(self.with_report_result(r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
int main() {
FILE *f = fopen("%s", "r");
char buf[100];
fread(buf, 1, 20, f);
buf[20] = 0;
fclose(f);
printf("|%%s|\n", buf);
int result = !strcmp("load me right before", buf);
f = fopen("%s", "r");
if (f == NULL)
result = 0;
fclose(f);
f = fopen("%s", "r");
if (f != NULL)
result = 0;
REPORT_RESULT();
return 0;
}
''' % (path1, path2, nonexistingpath)))
  test_cases = [
    # (source directory to embed, file1 on target FS to load, file2 on target FS to load, name of a file that *shouldn't* exist on VFS)
    ("assets", "assets/sub/asset1/file1.txt", "assets/sub/asset2/file2.txt", "assets/sub/asset1/.git/shouldnt_be_embedded.txt"),
    ("assets/", "assets/sub/asset1/file1.txt", "assets/sub/asset2/file2.txt", "assets/sub/asset1/.git/shouldnt_be_embedded.txt"),
    ("assets@/", "/sub/asset1/file1.txt", "/sub/asset2/file2.txt", "/sub/asset1/.git/shouldnt_be_embedded.txt"),
    ("assets/@/", "/sub/asset1/file1.txt", "/sub/asset2/file2.txt", "/sub/asset1/.git/shouldnt_be_embedded.txt"),
    ("assets@./", "/sub/asset1/file1.txt", "/sub/asset2/file2.txt", "/sub/asset1/.git/shouldnt_be_embedded.txt"),
    (absolute_assets_src_path + "@/", "/sub/asset1/file1.txt", "/sub/asset2/file2.txt", "/sub/asset1/.git/shouldnt_be_embedded.txt"),
    (absolute_assets_src_path + "@/assets", "/assets/sub/asset1/file1.txt", "/assets/sub/asset2/file2.txt", "assets/sub/asset1/.git/shouldnt_be_embedded.txt")]
  for test in test_cases:
    (srcpath, dstpath1, dstpath2, nonexistingpath) = test
    make_main_two_files(dstpath1, dstpath2, nonexistingpath)
    print srcpath
    Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp'), '--preload-file', srcpath, '--exclude-file', '*/.*', '-o', 'page.html']).communicate()
    self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?1')
  # Should still work with -o subdir/..
  make_main('somefile.txt') # absolute becomes relative
  try:
    os.mkdir(os.path.join(self.get_dir(), 'dirrey'))
  except:
    # NOTE(review): bare except swallows every error; presumably this only
    # guards against the directory already existing -- consider narrowing to
    # `except OSError`.
    pass
  Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp'), '--preload-file', absolute_src_path, '-o', 'dirrey/page.html']).communicate()
  self.run_browser('dirrey/page.html', 'You should see |load me right before|.', '/report_result?1')
  # With FS.preloadFile
  open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
Module.preRun = function() {
FS.createPreloadedFile('/', 'someotherfile.txt', 'somefile.txt', true, false);
};
''')
  make_main('someotherfile.txt')
  Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp'), '--pre-js', 'pre.js', '-o', 'page.html']).communicate()
  self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?1')
def test_preload_caching(self):
  # Verify --use-preload-cache: the first page load reports 1 (file contents
  # only), the second reports 2 (contents + one package served from cache,
  # counted by the checkPreloadResults js library function below).
  open(os.path.join(self.get_dir(), 'somefile.txt'), 'w').write('''load me right before running the code please''')
  def make_main(path):
    # main.cpp reads `path` and adds the cache-hit count to the result.
    print path
    open(os.path.join(self.get_dir(), 'main.cpp'), 'w').write(self.with_report_result(r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
extern "C" {
extern int checkPreloadResults();
}
int main(int argc, char** argv) {
FILE *f = fopen("%s", "r");
char buf[100];
fread(buf, 1, 20, f);
buf[20] = 0;
fclose(f);
printf("|%%s|\n", buf);
int result = 0;
result += !strcmp("load me right before", buf);
result += checkPreloadResults();
REPORT_RESULT();
return 0;
}
''' % path))
  # JS library providing checkPreloadResults(): counts packages that
  # Module['preloadResults'] says came from the cache.
  open(os.path.join(self.get_dir(), 'test.js'), 'w').write('''
mergeInto(LibraryManager.library, {
checkPreloadResults: function() {
var cached = 0;
var packages = Object.keys(Module['preloadResults']);
packages.forEach(function(package) {
var fromCache = Module['preloadResults'][package]['fromCache'];
if (fromCache)
++ cached;
});
return cached;
}
});
''')
  make_main('somefile.txt')
  Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp'), '--use-preload-cache', '--js-library', os.path.join(self.get_dir(), 'test.js'), '--preload-file', 'somefile.txt', '-o', 'page.html']).communicate()
  self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?1')
  self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?2')
def test_multifile(self):
  # a few files inside a directory
  # Preload two files, first individually and then by preloading the whole
  # directory (removing the on-disk files to prove they come from the package).
  self.clear()
  os.makedirs(os.path.join(self.get_dir(), 'subdirr'));
  os.makedirs(os.path.join(self.get_dir(), 'subdirr', 'moar'));
  open(os.path.join(self.get_dir(), 'subdirr', 'data1.txt'), 'w').write('''1214141516171819''')
  open(os.path.join(self.get_dir(), 'subdirr', 'moar', 'data2.txt'), 'w').write('''3.14159265358979''')
  open(os.path.join(self.get_dir(), 'main.cpp'), 'w').write(self.with_report_result(r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
int main() {
char buf[17];
FILE *f = fopen("subdirr/data1.txt", "r");
fread(buf, 1, 16, f);
buf[16] = 0;
fclose(f);
printf("|%s|\n", buf);
int result = !strcmp("1214141516171819", buf);
FILE *f2 = fopen("subdirr/moar/data2.txt", "r");
fread(buf, 1, 16, f2);
buf[16] = 0;
fclose(f2);
printf("|%s|\n", buf);
result = result && !strcmp("3.14159265358979", buf);
REPORT_RESULT();
return 0;
}
'''))
  # by individual files
  Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp'), '--preload-file', 'subdirr/data1.txt', '--preload-file', 'subdirr/moar/data2.txt', '-o', 'page.html']).communicate()
  self.run_browser('page.html', 'You should see two cool numbers', '/report_result?1')
  os.remove('page.html')
  # by directory, and remove files to make sure
  Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp'), '--preload-file', 'subdirr', '-o', 'page.html']).communicate()
  shutil.rmtree(os.path.join(self.get_dir(), 'subdirr'))
  self.run_browser('page.html', 'You should see two cool numbers', '/report_result?1')
def test_custom_file_package_url(self):
  # a few files inside a directory
  # Verify filePackagePrefixURL: the .data package is moved into a "cdn"
  # subdirectory and the shell is patched so the runtime fetches it from there.
  self.clear()
  os.makedirs(os.path.join(self.get_dir(), 'subdirr'));
  os.makedirs(os.path.join(self.get_dir(), 'cdn'));
  open(os.path.join(self.get_dir(), 'subdirr', 'data1.txt'), 'w').write('''1214141516171819''')
  # change the file package base dir to look in a "cdn". note that normally you would add this in your own custom html file etc., and not by
  # modifying the existing shell in this manner
  open(self.in_dir('shell.html'), 'w').write(open(path_from_root('src', 'shell.html')).read().replace('var Module = {', 'var Module = { filePackagePrefixURL: "cdn/", '))
  open(os.path.join(self.get_dir(), 'main.cpp'), 'w').write(self.with_report_result(r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
int main() {
char buf[17];
FILE *f = fopen("subdirr/data1.txt", "r");
fread(buf, 1, 16, f);
buf[16] = 0;
fclose(f);
printf("|%s|\n", buf);
int result = !strcmp("1214141516171819", buf);
REPORT_RESULT();
return 0;
}
'''))
  def test():
    # Build against the patched shell, then move the data package into cdn/.
    Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp'), '--shell-file', 'shell.html', '--preload-file', 'subdirr/data1.txt', '-o', 'test.html']).communicate()
    shutil.move('test.data', os.path.join('cdn', 'test.data'))
    self.run_browser('test.html', '', '/report_result?1')
  test()
  # TODO: CORS, test using a full url for filePackagePrefixURL
  #open(self.in_dir('shell.html'), 'w').write(open(path_from_root('src', 'shell.html')).read().replace('var Module = {', 'var Module = { filePackagePrefixURL: "http:/localhost:8888/cdn/", '))
  #test()
def test_compressed_file(self):
  # Preload data files with --compression enabled and check they are read
  # correctly after the original datafile is renamed away (so only the
  # compressed package can be the source).
  open(os.path.join(self.get_dir(), 'datafile.txt'), 'w').write('compress this please' + (2000*'.'))
  open(os.path.join(self.get_dir(), 'datafile2.txt'), 'w').write('moar' + (100*'!'))
  open(os.path.join(self.get_dir(), 'main.cpp'), 'w').write(self.with_report_result(r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
int main() {
char buf[21];
FILE *f = fopen("datafile.txt", "r");
fread(buf, 1, 20, f);
buf[20] = 0;
fclose(f);
printf("file says: |%s|\n", buf);
int result = !strcmp("compress this please", buf);
FILE *f2 = fopen("datafile2.txt", "r");
fread(buf, 1, 5, f2);
buf[5] = 0;
fclose(f2);
result = result && !strcmp("moar!", buf);
printf("file 2 says: |%s|\n", buf);
REPORT_RESULT();
return 0;
}
'''))
  self.build_native_lzma()
  Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp'), '-o', 'page.html', '--preload-file', 'datafile.txt', '--preload-file', 'datafile2.txt',
         '--compression', '%s,%s,%s' % (path_from_root('third_party', 'lzma.js', 'lzma-native'),
                                        path_from_root('third_party', 'lzma.js', 'lzma-decoder.js'),
                                        'LZMA.decompress')]).communicate()
  assert os.path.exists(os.path.join(self.get_dir(), 'datafile.txt')), 'must be data file'
  assert os.path.exists(os.path.join(self.get_dir(), 'page.data.compress')), 'must be data file in compressed form'
  assert os.stat(os.path.join(self.get_dir(), 'page.js')).st_size != os.stat(os.path.join(self.get_dir(), 'page.js.compress')).st_size, 'compressed file must be different'
  shutil.move(os.path.join(self.get_dir(), 'datafile.txt'), 'datafile.txt.renamedsoitcannotbefound');
  self.run_browser('page.html', '', '/report_result?1')
def test_sdl_swsurface(self):
  # SDL software-surface rendering; the test program reports 1 on success.
  self.btest('sdl_swsurface.c', expected='1')
def test_sdl_image(self):
  # load an image file, get pixel data. Also O2 coverage for --preload-file,
  # and memory-init file on/off.
  shutil.copyfile(path_from_root('tests', 'screenshot.jpg'),
                  os.path.join(self.get_dir(), 'screenshot.jpg'))
  src = self.with_report_result(open(path_from_root('tests', 'sdl_image.c')).read())
  with open(os.path.join(self.get_dir(), 'sdl_image.c'), 'w') as out:
    out.write(src)
  # Each case: (--preload-file argument, VFS dirname, VFS basename).
  cases = [('screenshot.jpg', '/', 'screenshot.jpg'),
           ('screenshot.jpg@/assets/screenshot.jpg', '/assets', 'screenshot.jpg')]
  for mem in (0, 1):
    for dest, dirname, basename in cases:
      cmd = [PYTHON, EMCC, os.path.join(self.get_dir(), 'sdl_image.c'),
             '-o', 'page.html', '-O2', '--memory-init-file', str(mem),
             '--preload-file', dest,
             '-DSCREENSHOT_DIRNAME="' + dirname + '"',
             '-DSCREENSHOT_BASENAME="' + basename + '"']
      Popen(cmd).communicate()
      self.run_browser('page.html', '', '/report_result?600')
def test_sdl_image_jpeg(self):
  # Same image test, but with a .jpeg extension to exercise jpeg detection.
  shutil.copyfile(path_from_root('tests', 'screenshot.jpg'),
                  os.path.join(self.get_dir(), 'screenshot.jpeg'))
  src = self.with_report_result(open(path_from_root('tests', 'sdl_image.c')).read())
  with open(os.path.join(self.get_dir(), 'sdl_image_jpeg.c'), 'w') as out:
    out.write(src)
  cmd = [PYTHON, EMCC, os.path.join(self.get_dir(), 'sdl_image_jpeg.c'),
         '-o', 'page.html',
         '--preload-file', 'screenshot.jpeg',
         '-DSCREENSHOT_DIRNAME="/"',
         '-DSCREENSHOT_BASENAME="screenshot.jpeg"']
  Popen(cmd).communicate()
  self.run_browser('page.html', '', '/report_result?600')
def test_sdl_image_compressed(self):
  # Image loading with --compression; the original image file is renamed away
  # so the data must come from the compressed package. Expected result is the
  # image width.
  for image, width in [(path_from_root('tests', 'screenshot2.png'), 300),
                       (path_from_root('tests', 'screenshot.jpg'), 600)]:
    self.clear()
    print image
    basename = os.path.basename(image)
    shutil.copyfile(image, os.path.join(self.get_dir(), basename))
    open(os.path.join(self.get_dir(), 'sdl_image.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'sdl_image.c')).read()))
    self.build_native_lzma()
    Popen([
      PYTHON, EMCC, os.path.join(self.get_dir(), 'sdl_image.c'), '-o', 'page.html',
      '--preload-file', basename, '-DSCREENSHOT_DIRNAME="/"', '-DSCREENSHOT_BASENAME="' + basename + '"',
      '--compression', '%s,%s,%s' % (path_from_root('third_party', 'lzma.js', 'lzma-native'),
                                     path_from_root('third_party', 'lzma.js', 'lzma-decoder.js'),
                                     'LZMA.decompress')
    ]).communicate()
    shutil.move(os.path.join(self.get_dir(), basename), basename + '.renamedsoitcannotbefound');
    self.run_browser('page.html', '', '/report_result?' + str(width))
def test_sdl_image_prepare(self):
  # load an image file, get pixel data.
  # The .not extension forces the prepare path to sniff the format.
  shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), os.path.join(self.get_dir(), 'screenshot.not'))
  self.btest('sdl_image_prepare.c', reference='screenshot.jpg', args=['--preload-file', 'screenshot.not'], also_proxied=True)
def test_sdl_image_prepare_data(self):
  # load an image file, get pixel data.
  # Variant of test_sdl_image_prepare using the *_data API.
  shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), os.path.join(self.get_dir(), 'screenshot.not'))
  self.btest('sdl_image_prepare_data.c', reference='screenshot.jpg', args=['--preload-file', 'screenshot.not'])
def test_sdl_stb_image(self):
  # load an image file, get pixel data.
  # Uses the stb_image decoder (-s STB_IMAGE=1) instead of browser decoding.
  shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), os.path.join(self.get_dir(), 'screenshot.not'))
  self.btest('sdl_stb_image.c', reference='screenshot.jpg', args=['-s', 'STB_IMAGE=1', '--preload-file', 'screenshot.not'])
def test_sdl_stb_image_data(self):
  # load an image file, get pixel data.
  # stb_image variant using the *_data API.
  shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), os.path.join(self.get_dir(), 'screenshot.not'))
  self.btest('sdl_stb_image_data.c', reference='screenshot.jpg', args=['-s', 'STB_IMAGE=1', '--preload-file', 'screenshot.not'])
def test_sdl_canvas(self):
  # SDL canvas rendering under LEGACY_GL_EMULATION, plus extra coverage with
  # SAFE_HEAP at -O0 and -O2. Each build starts from a cleared directory.
  base_args = ['-s', 'LEGACY_GL_EMULATION=1']
  for extra_args in ([],
                     ['-O0', '-s', 'SAFE_HEAP=1'],
                     ['-O2', '-s', 'SAFE_HEAP=1']):
    self.clear()
    self.btest('sdl_canvas.c', expected='1', args=base_args + extra_args)
def post_manual_reftest(self, reference=None):
  # Post-build hook for manual_reference tests: injects reftest.js into
  # test.html and overrides window.close so the reftest runs after the final
  # frame has been rendered, then the window closes.
  # `reference` defaults to self.reference when not given.
  self.reftest(path_from_root('tests', self.reference if reference is None else reference))
  html = open('test.html').read()
  html = html.replace('</body>', '''
<script>
function assert(x, y) { if (!x) throw 'assertion failed ' + y }
%s
var windowClose = window.close;
window.close = function() {
// wait for rafs to arrive and the screen to update before reftesting
setTimeout(function() {
doReftest();
setTimeout(windowClose, 1000);
}, 1000);
};
</script>
</body>''' % open('reftest.js').read())
  open('test.html', 'w').write(html)
def test_sdl_canvas_proxy(self):
  # Canvas rendering proxied to a worker; also preloads a small data file.
  with open('data.txt', 'w') as out:
    out.write('datum')
  self.btest('sdl_canvas_proxy.c', reference='sdl_canvas_proxy.png',
             args=['--proxy-to-worker', '--preload-file', 'data.txt'],
             manual_reference=True, post_build=self.post_manual_reftest)
def test_glgears_proxy(self):
  # GLES gears rendered through --proxy-to-worker, reftested manually.
  self.btest('hello_world_gles_proxy.c', reference='gears.png', args=['--proxy-to-worker', '-s', 'GL_TESTING=1'], manual_reference=True, post_build=self.post_manual_reftest)
def test_glgears_proxy_jstarget(self):
  # test .js target with --proxy-worker; emits 2 js files, client and worker.
  # A minimal shell page is written by hand to load the client js.
  Popen([PYTHON, EMCC, path_from_root('tests', 'hello_world_gles_proxy.c'),
         '-o', 'test.js', '--proxy-to-worker', '-s', 'GL_TESTING=1']).communicate()
  shell = open(path_from_root('src', 'shell_minimal.html')).read()
  with open('test.html', 'w') as out:
    out.write(shell.replace('{{{ SCRIPT }}}', '<script src="test.js"></script>'))
  self.post_manual_reftest('gears.png')
  self.run_browser('test.html', None, '/report_result?0')
def test_sdl_canvas_alpha(self):
  # Alpha blending on the canvas; reference_slack allows small pixel drift.
  self.btest('sdl_canvas_alpha.c', reference='sdl_canvas_alpha.png', reference_slack=9)
def test_sdl_key(self):
  # SDL keyboard events, with and without emscripten_SDL_SetEventHandler.
  # pre.js drives Module._one each frame and defines keydown/keyup helpers
  # that synthesize DOM KeyboardEvents.
  for defines in [[], ['-DTEST_EMSCRIPTEN_SDL_SETEVENTHANDLER']]:
    open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
Module.postRun = function() {
function doOne() {
Module._one();
setTimeout(doOne, 1000/60);
}
setTimeout(doOne, 1000/60);
}
function keydown(c) {
var event = document.createEvent("KeyboardEvent");
event.initKeyEvent("keydown", true, true, window,
0, 0, 0, 0,
c, c);
document.dispatchEvent(event);
}
function keyup(c) {
var event = document.createEvent("KeyboardEvent");
event.initKeyEvent("keyup", true, true, window,
0, 0, 0, 0,
c, c);
document.dispatchEvent(event);
}
''')
    open(os.path.join(self.get_dir(), 'sdl_key.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'sdl_key.c')).read()))
    Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'sdl_key.c'), '-o', 'page.html'] + defines + ['--pre-js', 'pre.js', '-s', '''EXPORTED_FUNCTIONS=['_main', '_one']''', '-s', 'NO_EXIT_RUNTIME=1']).communicate()
    self.run_browser('page.html', '', '/report_result?223092870')
def test_sdl_key_proxy(self):
  # Keyboard events with --proxy-to-worker: the key synthesis script is
  # injected into the generated html by the post() hook, since the page runs
  # in the main thread while the app runs in the worker.
  open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
var Module = {};
Module.postRun = function() {
function doOne() {
Module._one();
setTimeout(doOne, 1000/60);
}
setTimeout(doOne, 1000/60);
}
''')
  def post():
    # Append a script that fires the expected key sequence, then triggers the
    # end of the test with keycode 100.
    html = open('test.html').read()
    html = html.replace('</body>', '''
<script>
function keydown(c) {
var event = document.createEvent("KeyboardEvent");
event.initKeyEvent("keydown", true, true, window,
0, 0, 0, 0,
c, c);
document.dispatchEvent(event);
}
function keyup(c) {
var event = document.createEvent("KeyboardEvent");
event.initKeyEvent("keyup", true, true, window,
0, 0, 0, 0,
c, c);
document.dispatchEvent(event);
}
keydown(1250);keydown(38);keyup(38);keyup(1250); // alt, up
keydown(1248);keydown(1249);keydown(40);keyup(40);keyup(1249);keyup(1248); // ctrl, shift, down
keydown(37);keyup(37); // left
keydown(39);keyup(39); // right
keydown(65);keyup(65); // a
keydown(66);keyup(66); // b
keydown(100);keyup(100); // trigger the end
</script>
</body>''')
    open('test.html', 'w').write(html)
  self.btest('sdl_key_proxy.c', '223092870', args=['--proxy-to-worker', '--pre-js', 'pre.js', '-s', '''EXPORTED_FUNCTIONS=['_main', '_one']''', '-s', 'NO_EXIT_RUNTIME=1'], manual_reference=True, post_build=post)
def test_sdl_text(self):
  # SDL text input: pre.js pumps Module._one each frame and defines
  # simulateKeyEvent, which synthesizes keypress events with a charCode.
  open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
Module.postRun = function() {
function doOne() {
Module._one();
setTimeout(doOne, 1000/60);
}
setTimeout(doOne, 1000/60);
}
function simulateKeyEvent(charCode) {
var event = document.createEvent("KeyboardEvent");
event.initKeyEvent("keypress", true, true, window,
0, 0, 0, 0, 0, charCode);
document.body.dispatchEvent(event);
}
''')
  open(os.path.join(self.get_dir(), 'sdl_text.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'sdl_text.c')).read()))
  Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'sdl_text.c'), '-o', 'page.html', '--pre-js', 'pre.js', '-s', '''EXPORTED_FUNCTIONS=['_main', '_one']''']).communicate()
  self.run_browser('page.html', '', '/report_result?1')
def test_sdl_mouse(self):
  # SDL mouse events: simulateMouseEvent dispatches mousedown/mouseup (when
  # button >= 0) or mousemove, with coordinates offset by the canvas position.
  open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
function simulateMouseEvent(x, y, button) {
var event = document.createEvent("MouseEvents");
if (button >= 0) {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousedown', true, true, window,
1, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event1);
var event2 = document.createEvent("MouseEvents");
event2.initMouseEvent('mouseup', true, true, window,
1, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event2);
} else {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousemove', true, true, window,
0, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
0, 0, 0, 0,
0, null);
Module['canvas'].dispatchEvent(event1);
}
}
window['simulateMouseEvent'] = simulateMouseEvent;
''')
  open(os.path.join(self.get_dir(), 'sdl_mouse.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'sdl_mouse.c')).read()))
  Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'sdl_mouse.c'), '-O2', '--minify', '0', '-o', 'page.html', '--pre-js', 'pre.js']).communicate()
  self.run_browser('page.html', '', '/report_result?740')
def test_sdl_mouse_offsets(self):
  # Like test_sdl_mouse, but the canvas is absolutely positioned inside an
  # offset container and raw page coordinates are dispatched (no canvas
  # offset added), so the runtime's own offset handling is what is tested.
  open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
function simulateMouseEvent(x, y, button) {
var event = document.createEvent("MouseEvents");
if (button >= 0) {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousedown', true, true, window,
1, x, y, x, y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event1);
var event2 = document.createEvent("MouseEvents");
event2.initMouseEvent('mouseup', true, true, window,
1, x, y, x, y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event2);
} else {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousemove', true, true, window,
0, x, y, x, y,
0, 0, 0, 0,
0, null);
Module['canvas'].dispatchEvent(event1);
}
}
window['simulateMouseEvent'] = simulateMouseEvent;
''')
  # Hand-written page that places the canvas at a 5px offset.
  open(os.path.join(self.get_dir(), 'page.html'), 'w').write('''
<html>
<head>
<style type="text/css">
html, body { margin: 0; padding: 0; }
#container {
position: absolute;
left: 5px; right: 0;
top: 5px; bottom: 0;
}
#canvas {
position: absolute;
left: 0; width: 600px;
top: 0; height: 450px;
}
textarea {
margin-top: 500px;
margin-left: 5px;
width: 600px;
}
</style>
</head>
<body>
<div id="container">
<canvas id="canvas"></canvas>
</div>
<textarea id="output" rows="8"></textarea>
<script type="text/javascript">
var Module = {
canvas: document.getElementById('canvas'),
print: (function() {
var element = document.getElementById('output');
element.value = ''; // clear browser cache
return function(text) {
text = Array.prototype.slice.call(arguments).join(' ');
element.value += text + "\\n";
element.scrollTop = element.scrollHeight; // focus on bottom
};
})()
};
</script>
<script type="text/javascript" src="sdl_mouse.js"></script>
</body>
</html>
''')
  open(os.path.join(self.get_dir(), 'sdl_mouse.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'sdl_mouse.c')).read()))
  Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'sdl_mouse.c'), '-O2', '--minify', '0', '-o', 'sdl_mouse.js', '--pre-js', 'pre.js']).communicate()
  self.run_browser('page.html', '', '/report_result?600')
  # GLUT input plumbing: each native test reports '1' on success.
  def test_glut_touchevents(self):
    self.btest('glut_touchevents.c', '1')
  def test_glut_wheelevents(self):
    self.btest('glut_wheelevents.c', '1')
def test_sdl_joystick_1(self):
# Generates events corresponding to the Working Draft of the HTML5 Gamepad API.
# http://www.w3.org/TR/2012/WD-gamepad-20120529/#gamepad-interface
open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
var gamepads = [];
// Spoof this function.
navigator['getGamepads'] = function() {
return gamepads;
};
window['addNewGamepad'] = function(id, numAxes, numButtons) {
var index = gamepads.length;
gamepads.push({
axes: new Array(numAxes),
buttons: new Array(numButtons),
id: id,
index: index
});
var i;
for (i = 0; i < numAxes; i++) gamepads[index].axes[i] = 0;
for (i = 0; i < numButtons; i++) gamepads[index].buttons[i] = 0;
};
window['simulateGamepadButtonDown'] = function (index, button) {
gamepads[index].buttons[button] = 1;
};
window['simulateGamepadButtonUp'] = function (index, button) {
gamepads[index].buttons[button] = 0;
};
window['simulateAxisMotion'] = function (index, axis, value) {
gamepads[index].axes[axis] = value;
};
''')
open(os.path.join(self.get_dir(), 'sdl_joystick.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'sdl_joystick.c')).read()))
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'sdl_joystick.c'), '-O2', '--minify', '0', '-o', 'page.html', '--pre-js', 'pre.js']).communicate()
self.run_browser('page.html', '', '/report_result?2')
def test_sdl_joystick_2(self):
# Generates events corresponding to the Editor's Draft of the HTML5 Gamepad API.
# https://dvcs.w3.org/hg/gamepad/raw-file/default/gamepad.html#idl-def-Gamepad
open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
var gamepads = [];
// Spoof this function.
navigator['getGamepads'] = function() {
return gamepads;
};
window['addNewGamepad'] = function(id, numAxes, numButtons) {
var index = gamepads.length;
gamepads.push({
axes: new Array(numAxes),
buttons: new Array(numButtons),
id: id,
index: index
});
var i;
for (i = 0; i < numAxes; i++) gamepads[index].axes[i] = 0;
// Buttons are objects
for (i = 0; i < numButtons; i++) gamepads[index].buttons[i] = { pressed: false, value: 0 };
};
// FF mutates the original objects.
window['simulateGamepadButtonDown'] = function (index, button) {
gamepads[index].buttons[button].pressed = true;
gamepads[index].buttons[button].value = 1;
};
window['simulateGamepadButtonUp'] = function (index, button) {
gamepads[index].buttons[button].pressed = false;
gamepads[index].buttons[button].value = 0;
};
window['simulateAxisMotion'] = function (index, axis, value) {
gamepads[index].axes[axis] = value;
};
''')
open(os.path.join(self.get_dir(), 'sdl_joystick.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'sdl_joystick.c')).read()))
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'sdl_joystick.c'), '-O2', '--minify', '0', '-o', 'page.html', '--pre-js', 'pre.js']).communicate()
self.run_browser('page.html', '', '/report_result?2')
def test_webgl_context_attributes(self):
# Javascript code to check the attributes support we want to test in the WebGL implementation
# (request the attribute, create a context and check its value afterwards in the context attributes).
# Tests will succeed when an attribute is not supported.
open(os.path.join(self.get_dir(), 'check_webgl_attributes_support.js'), 'w').write('''
mergeInto(LibraryManager.library, {
webglAntialiasSupported: function() {
canvas = document.createElement('canvas');
context = canvas.getContext('experimental-webgl', {antialias: true});
attributes = context.getContextAttributes();
return attributes.antialias;
},
webglDepthSupported: function() {
canvas = document.createElement('canvas');
context = canvas.getContext('experimental-webgl', {depth: true});
attributes = context.getContextAttributes();
return attributes.depth;
},
webglStencilSupported: function() {
canvas = document.createElement('canvas');
context = canvas.getContext('experimental-webgl', {stencil: true});
attributes = context.getContextAttributes();
return attributes.stencil;
}
});
''')
# Copy common code file to temporary directory
filepath = path_from_root('tests/test_webgl_context_attributes_common.c')
temp_filepath = os.path.join(self.get_dir(), os.path.basename(filepath))
shutil.copyfile(filepath, temp_filepath)
# perform tests with attributes activated
self.btest('test_webgl_context_attributes_glut.c', '1', args=['--js-library', 'check_webgl_attributes_support.js', '-DAA_ACTIVATED', '-DDEPTH_ACTIVATED', '-DSTENCIL_ACTIVATED'])
self.btest('test_webgl_context_attributes_sdl.c', '1', args=['--js-library', 'check_webgl_attributes_support.js', '-DAA_ACTIVATED', '-DDEPTH_ACTIVATED', '-DSTENCIL_ACTIVATED'])
self.btest('test_webgl_context_attributes_glfw.c', '1', args=['--js-library', 'check_webgl_attributes_support.js', '-DAA_ACTIVATED', '-DDEPTH_ACTIVATED', '-DSTENCIL_ACTIVATED'])
# perform tests with attributes desactivated
self.btest('test_webgl_context_attributes_glut.c', '1', args=['--js-library', 'check_webgl_attributes_support.js'])
self.btest('test_webgl_context_attributes_sdl.c', '1', args=['--js-library', 'check_webgl_attributes_support.js'])
self.btest('test_webgl_context_attributes_glfw.c', '1', args=['--js-library', 'check_webgl_attributes_support.js'])
  # emscripten_get_now() timing API: native test reports '1' on success.
  def test_emscripten_get_now(self):
    self.btest('emscripten_get_now.cpp', '1')
def test_file_db(self):
secret = str(time.time())
open('moar.txt', 'w').write(secret)
self.btest('file_db.cpp', '1', args=['--preload-file', 'moar.txt', '-DFIRST'])
shutil.copyfile('test.html', 'first.html')
self.btest('file_db.cpp', secret)
shutil.copyfile('test.html', 'second.html')
open('moar.txt', 'w').write('aliantha')
self.btest('file_db.cpp', secret, args=['--preload-file', 'moar.txt']) # even with a file there, we load over it
shutil.move('test.html', 'third.html')
  def test_fs_idbfs_sync(self):
    # IDBFS persistence via FS.syncfs(): the first build (-DFIRST) writes a
    # time-based secret to IndexedDB; the second build must read it back.
    # Exercised both with and without MEMFS_APPEND_TO_TYPED_ARRAYS.
    for mode in [[], ['-s', 'MEMFS_APPEND_TO_TYPED_ARRAYS=1']]:
      secret = str(time.time())
      self.btest(path_from_root('tests', 'fs', 'test_idbfs_sync.c'), '1', force_c=True, args=mode + ['-DFIRST', '-DSECRET=\'' + secret + '\'', '-s', '''EXPORTED_FUNCTIONS=['_main', '_success']'''])
      self.btest(path_from_root('tests', 'fs', 'test_idbfs_sync.c'), '1', force_c=True, args=mode + ['-DSECRET=\'' + secret + '\'', '-s', '''EXPORTED_FUNCTIONS=['_main', '_success']'''])
def test_sdl_pumpevents(self):
# key events should be detected using SDL_PumpEvents
open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
function keydown(c) {
var event = document.createEvent("KeyboardEvent");
event.initKeyEvent("keydown", true, true, window,
0, 0, 0, 0,
c, c);
document.dispatchEvent(event);
}
''')
self.btest('sdl_pumpevents.c', expected='7', args=['--pre-js', 'pre.js'])
  # Canvas sizing through a custom shell HTML file.
  def test_sdl_canvas_size(self):
    self.btest('sdl_canvas_size.c', expected='1',
               args=['-O2', '--minify', '0', '--shell-file', path_from_root('tests', 'sdl_canvas_size.html')])
def test_sdl_gl_read(self):
# SDL, OpenGL, readPixels
open(os.path.join(self.get_dir(), 'sdl_gl_read.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'sdl_gl_read.c')).read()))
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'sdl_gl_read.c'), '-o', 'something.html']).communicate()
self.run_browser('something.html', '.', '/report_result?1')
  # SDL + legacy (fixed-function) OpenGL emulation variants. Each copies the
  # reference screenshot into the test dir, preloads it, and compares the
  # rendered canvas against a reference image.
  def test_sdl_ogl(self):
    shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
    self.btest('sdl_ogl.c', reference='screenshot-gray-purple.png', reference_slack=1,
               args=['-O2', '--minify', '0', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1'],
               message='You should see an image with gray at the top.')
  # Same scene, but the app never calls glMatrixMode: checks the default mode.
  def test_sdl_ogl_defaultmatrixmode(self):
    shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
    self.btest('sdl_ogl_defaultMatrixMode.c', reference='screenshot-gray-purple.png', reference_slack=1,
               args=['--minify', '0', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1'],
               message='You should see an image with gray at the top.')
  def test_sdl_ogl_p(self):
    # Immediate mode with pointers
    shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
    self.btest('sdl_ogl_p.c', reference='screenshot-gray.png', reference_slack=1,
               args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1'],
               message='You should see an image with gray at the top.')
  # GL functions fetched through SDL_GL_GetProcAddress (aliased proc pointers).
  def test_sdl_ogl_proc_alias(self):
    shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
    self.btest('sdl_ogl_proc_alias.c', reference='screenshot-gray-purple.png', reference_slack=1,
               args=['-O2', '-g2', '-s', 'INLINING_LIMIT=1', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1', '-s', 'VERBOSE=1'])
  # Fixed-function fog modes under LEGACY_GL_EMULATION; one test per fog
  # equation (simple, negative coords, GL_EXP density, GL_EXP2, GL_LINEAR),
  # each compared against its own reference image.
  def test_sdl_fog_simple(self):
    shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
    self.btest('sdl_fog_simple.c', reference='screenshot-fog-simple.png',
               args=['-O2', '--minify', '0', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1'],
               message='You should see an image with fog.')
  def test_sdl_fog_negative(self):
    shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
    self.btest('sdl_fog_negative.c', reference='screenshot-fog-negative.png',
               args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1'],
               message='You should see an image with fog.')
  def test_sdl_fog_density(self):
    shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
    self.btest('sdl_fog_density.c', reference='screenshot-fog-density.png',
               args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1'],
               message='You should see an image with fog.')
  def test_sdl_fog_exp2(self):
    shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
    self.btest('sdl_fog_exp2.c', reference='screenshot-fog-exp2.png',
               args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1'],
               message='You should see an image with fog.')
  def test_sdl_fog_linear(self):
    shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
    self.btest('sdl_fog_linear.c', reference='screenshot-fog-linear.png', reference_slack=1,
               args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1'],
               message='You should see an image with fog.')
  # GLFW windowing support with legacy GL emulation; reports '1' on success.
  def test_glfw(self):
    self.btest('glfw.c', '1', args=['-s', 'LEGACY_GL_EMULATION=1'])
def test_egl(self):
open(os.path.join(self.get_dir(), 'test_egl.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'test_egl.c')).read()))
Popen([PYTHON, EMCC, '-O2', os.path.join(self.get_dir(), 'test_egl.c'), '-o', 'page.html']).communicate()
self.run_browser('page.html', '', '/report_result?1')
def test_egl_width_height(self):
open(os.path.join(self.get_dir(), 'test_egl_width_height.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'test_egl_width_height.c')).read()))
Popen([PYTHON, EMCC, '-O2', os.path.join(self.get_dir(), 'test_egl_width_height.c'), '-o', 'page.html']).communicate()
self.run_browser('page.html', 'Should print "(300, 150)" -- the size of the canvas in pixels', '/report_result?1')
def test_worker(self):
# Test running in a web worker
open('file.dat', 'w').write('data for worker')
html_file = open('main.html', 'w')
html_file.write('''
<html>
<body>
Worker Test
<script>
var worker = new Worker('worker.js');
worker.onmessage = function(event) {
var xhr = new XMLHttpRequest();
xhr.open('GET', 'http://localhost:8888/report_result?' + event.data);
xhr.send();
setTimeout(function() { window.close() }, 1000);
};
</script>
</body>
</html>
''')
html_file.close()
# no file data
for file_data in [0, 1]:
print 'file data', file_data
output = Popen([PYTHON, EMCC, path_from_root('tests', 'hello_world_worker.cpp'), '-o', 'worker.js'] + (['--preload-file', 'file.dat'] if file_data else []) , stdout=PIPE, stderr=PIPE).communicate()
assert len(output[0]) == 0, output[0]
assert os.path.exists('worker.js'), output
if not file_data: self.assertContained('you should not see this text when in a worker!', run_js('worker.js')) # code should run standalone
self.run_browser('main.html', '', '/report_result?hello%20from%20worker,%20and%20|' + ('data%20for%20w' if file_data else '') + '|')
  def test_chunked_synchronous_xhr(self):
    # Synchronous chunked XHR from inside a web worker: a local server
    # (spawned as a separate process) serves random data in small chunks;
    # the worker downloads it via a lazy file and reports an adler32
    # checksum, which must match the one computed here.
    main = 'chunked_sync_xhr.html'
    worker_filename = "download_and_checksum_worker.js"
    # Harness page: forwards the worker's stdout line (the checksum) to the
    # result server; anything else is treated as stderr/trace output.
    html_file = open(main, 'w')
    html_file.write(r"""
      <!doctype html>
      <html>
      <head><meta charset="utf-8"><title>Chunked XHR</title></head>
      <html>
      <body>
        Chunked XHR Web Worker Test
        <script>
          var worker = new Worker(""" + json.dumps(worker_filename) + r""");
          var buffer = [];
          worker.onmessage = function(event) {
            if (event.data.channel === "stdout") {
              var xhr = new XMLHttpRequest();
              xhr.open('GET', 'http://localhost:8888/report_result?' + event.data.line);
              xhr.send();
              setTimeout(function() { window.close() }, 1000);
            } else {
              if (event.data.trace) event.data.trace.split("\n").map(function(v) { console.error(v); });
              if (event.data.line) {
                console.error(event.data.line);
              } else {
                var v = event.data.char;
                if (v == 10) {
                  var line = buffer.splice(0);
                  console.error(line = line.map(function(charCode){return String.fromCharCode(charCode);}).join(''));
                } else {
                  buffer.push(v);
                }
              }
            }
          };
        </script>
      </body>
      </html>
    """)
    html_file.close()
    c_source_filename = "checksummer.c"
    prejs_filename = "worker_prejs.js"
    # pre.js: mount the server-backed file lazily and route the program's
    # stdout/stderr through postMessage so the harness page can see them.
    prejs_file = open(prejs_filename, 'w')
    prejs_file.write(r"""
      if (typeof(Module) === "undefined") Module = {};
      Module["arguments"] = ["/bigfile"];
      Module["preInit"] = function() {
        FS.createLazyFile('/', "bigfile", "http://localhost:11111/bogus_file_path", true, false);
      };
      var doTrace = true;
      Module["print"] = function(s) { self.postMessage({channel: "stdout", line: s}); };
      Module["stderr"] = function(s) { self.postMessage({channel: "stderr", char: s, trace: ((doTrace && s === 10) ? new Error().stack : null)}); doTrace = false; };
    """)
    prejs_file.close()
    # vs. os.path.join(self.get_dir(), filename)
    # vs. path_from_root('tests', 'hello_world_gles.c')
    Popen([PYTHON, EMCC, path_from_root('tests', c_source_filename), '-g', '-s', 'SMALL_CHUNKS=1', '-o', worker_filename,
           '--pre-js', prejs_filename]).communicate()
    chunkSize = 1024
    data = os.urandom(10*chunkSize+1) # 10 full chunks and one 1 byte chunk
    checksum = zlib.adler32(data)
    # serve the data from a separate process so the synchronous XHR has
    # something to talk to while the browser runs
    server = multiprocessing.Process(target=test_chunked_synchronous_xhr_server, args=(True,chunkSize,data,checksum,))
    server.start()
    self.run_browser(main, 'Chunked binary synchronous XHR in Web Workers!', '/report_result?' + str(checksum))
    server.terminate()
    # Avoid race condition on cleanup, wait a bit so that processes have released file locks so that test tearDown won't
    # attempt to rmdir() files in use.
    if WINDOWS:
      time.sleep(2)
  # GLES port of the classic glxgears demo, compared against a reference frame.
  def test_glgears(self):
    self.btest('hello_world_gles.c', reference='gears.png', reference_slack=2,
               args=['-DHAVE_BUILTIN_SINCOS'], outfile='something.html',
               message='You should see animating gears.')
  # Long-running variant: accepts any frame count in [30, 500) as a result,
  # run both directly and proxied to a worker.
  def test_glgears_long(self):
    for proxy in [0, 1]:
      print 'proxy', proxy
      self.btest('hello_world_gles.c', expected=map(str, range(30, 500)), args=['-DHAVE_BUILTIN_SINCOS', '-DLONGTEST'] + (['--proxy-to-worker'] if proxy else []))
  # Gears animation through a GL_TESTING shell; three source variants
  # ('', '_full', '_full_944'), with FULL_ES2 enabled for the latter two.
  def test_glgears_animation(self):
    es2_suffix = ['', '_full', '_full_944']
    for full_es2 in [0, 1, 2]:
      print full_es2
      Popen([PYTHON, EMCC, path_from_root('tests', 'hello_world_gles%s.c' % es2_suffix[full_es2]), '-o', 'something.html',
             '-DHAVE_BUILTIN_SINCOS', '-s', 'GL_TESTING=1',
             '--shell-file', path_from_root('tests', 'hello_world_gles_shell.html')] +
            (['-s', 'FULL_ES2=1'] if full_es2 else []),
            ).communicate()
      self.run_browser('something.html', 'You should see animating gears.', '/report_gl_result?true')
  # FULL_ES2 with an SDL proc-address lookup path.
  def test_fulles2_sdlproc(self):
    self.btest('full_es2_sdlproc.c', '1', args=['-s', 'GL_TESTING=1', '-DHAVE_BUILTIN_SINCOS', '-s', 'FULL_ES2=1'])
  # Gears variant using shader derivatives; additionally asserts the emitted
  # HTML does not bundle glMatrix when the program doesn't need it.
  def test_glgears_deriv(self):
    self.btest('hello_world_gles_deriv.c', reference='gears.png', reference_slack=2,
               args=['-DHAVE_BUILTIN_SINCOS'], outfile='something.html',
               message='You should see animating gears.')
    with open('something.html') as f:
      assert 'gl-matrix' not in f.read(), 'Should not include glMatrix when not needed'
  def test_glbook(self):
    # Build the "OpenGL ES 2.0 Programming Guide" sample programs to bitcode,
    # then run each in the browser and compare against its reference image.
    programs = self.get_library('glbook', [
      os.path.join('Chapter_2', 'Hello_Triangle', 'CH02_HelloTriangle.bc'),
      os.path.join('Chapter_8', 'Simple_VertexShader', 'CH08_SimpleVertexShader.bc'),
      os.path.join('Chapter_9', 'Simple_Texture2D', 'CH09_SimpleTexture2D.bc'),
      os.path.join('Chapter_9', 'Simple_TextureCubemap', 'CH09_TextureCubemap.bc'),
      os.path.join('Chapter_9', 'TextureWrap', 'CH09_TextureWrap.bc'),
      os.path.join('Chapter_10', 'MultiTexture', 'CH10_MultiTexture.bc'),
      os.path.join('Chapter_13', 'ParticleSystem', 'CH13_ParticleSystem.bc'),
    ], configure=None)
    # helper: absolute path into the glbook test assets
    def book_path(*pathelems):
      return path_from_root('tests', 'glbook', *pathelems)
    for program in programs:
      print program
      basename = os.path.basename(program)
      args = []
      # two samples need their texture assets preloaded
      if basename == 'CH10_MultiTexture.bc':
        shutil.copyfile(book_path('Chapter_10', 'MultiTexture', 'basemap.tga'), os.path.join(self.get_dir(), 'basemap.tga'))
        shutil.copyfile(book_path('Chapter_10', 'MultiTexture', 'lightmap.tga'), os.path.join(self.get_dir(), 'lightmap.tga'))
        args = ['--preload-file', 'basemap.tga', '--preload-file', 'lightmap.tga']
      elif basename == 'CH13_ParticleSystem.bc':
        shutil.copyfile(book_path('Chapter_13', 'ParticleSystem', 'smoke.tga'), os.path.join(self.get_dir(), 'smoke.tga'))
        args = ['--preload-file', 'smoke.tga', '-O2'] # test optimizations and closure here as well for more coverage
      self.btest(program,
                 reference=book_path(basename.replace('.bc', '.png')), args=args)
  def test_gles2_emulation(self):
    # Build selected glbook samples from source (not bitcode) with FULL_ES2
    # client-side emulation, comparing each against its reference image.
    # All texture assets are preloaded for every sample for simplicity.
    shutil.copyfile(path_from_root('tests', 'glbook', 'Chapter_10', 'MultiTexture', 'basemap.tga'), self.in_dir('basemap.tga'))
    shutil.copyfile(path_from_root('tests', 'glbook', 'Chapter_10', 'MultiTexture', 'lightmap.tga'), self.in_dir('lightmap.tga'))
    shutil.copyfile(path_from_root('tests', 'glbook', 'Chapter_13', 'ParticleSystem', 'smoke.tga'), self.in_dir('smoke.tga'))
    for source, reference in [
      (os.path.join('glbook', 'Chapter_2', 'Hello_Triangle', 'Hello_Triangle_orig.c'), path_from_root('tests', 'glbook', 'CH02_HelloTriangle.png')),
      #(os.path.join('glbook', 'Chapter_8', 'Simple_VertexShader', 'Simple_VertexShader_orig.c'), path_from_root('tests', 'glbook', 'CH08_SimpleVertexShader.png')), # XXX needs INT extension in WebGL
      (os.path.join('glbook', 'Chapter_9', 'TextureWrap', 'TextureWrap_orig.c'), path_from_root('tests', 'glbook', 'CH09_TextureWrap.png')),
      #(os.path.join('glbook', 'Chapter_9', 'Simple_TextureCubemap', 'Simple_TextureCubemap_orig.c'), path_from_root('tests', 'glbook', 'CH09_TextureCubemap.png')), # XXX needs INT extension in WebGL
      (os.path.join('glbook', 'Chapter_9', 'Simple_Texture2D', 'Simple_Texture2D_orig.c'), path_from_root('tests', 'glbook', 'CH09_SimpleTexture2D.png')),
      (os.path.join('glbook', 'Chapter_10', 'MultiTexture', 'MultiTexture_orig.c'), path_from_root('tests', 'glbook', 'CH10_MultiTexture.png')),
      (os.path.join('glbook', 'Chapter_13', 'ParticleSystem', 'ParticleSystem_orig.c'), path_from_root('tests', 'glbook', 'CH13_ParticleSystem.png')),
    ]:
      print source
      self.btest(source,
                 reference=reference,
                 args=['-I' + path_from_root('tests', 'glbook', 'Common'),
                       path_from_root('tests', 'glbook', 'Common', 'esUtil.c'),
                       path_from_root('tests', 'glbook', 'Common', 'esShader.c'),
                       path_from_root('tests', 'glbook', 'Common', 'esShapes.c'),
                       path_from_root('tests', 'glbook', 'Common', 'esTransform.c'),
                       '-s', 'FULL_ES2=1',
                       '--preload-file', 'basemap.tga', '--preload-file', 'lightmap.tga', '--preload-file', 'smoke.tga'])
  # Browser-side emscripten.h API; '_third' must be kept alive for JS to call.
  def test_emscripten_api(self):
    self.btest('emscripten_api_browser.cpp', '1', args=['-s', '''EXPORTED_FUNCTIONS=['_main', '_third']'''])
def test_emscripten_api2(self):
def setup():
open('script1.js', 'w').write('''
Module._set(456);
''')
open('file1.txt', 'w').write('first');
open('file2.txt', 'w').write('second');
setup()
Popen([PYTHON, FILE_PACKAGER, 'test.data', '--preload', 'file1.txt', 'file2.txt'], stdout=open('script2.js', 'w')).communicate()
self.btest('emscripten_api_browser2.cpp', '1', args=['-s', '''EXPORTED_FUNCTIONS=['_main', '_set']'''])
# check using file packager to another dir
self.clear()
setup()
os.mkdir('sub')
Popen([PYTHON, FILE_PACKAGER, 'sub/test.data', '--preload', 'file1.txt', 'file2.txt'], stdout=open('script2.js', 'w')).communicate()
shutil.copyfile(os.path.join('sub', 'test.data'), 'test.data')
self.btest('emscripten_api_browser2.cpp', '1', args=['-s', '''EXPORTED_FUNCTIONS=['_main', '_set']'''])
  # Infinite-main-loop API (emscripten_set_main_loop and friends).
  def test_emscripten_api_infloop(self):
    self.btest('emscripten_api_browser_infloop.cpp', '7')
  # FS API from the browser; screenshot.png is fetched *after* startup.
  def test_emscripten_fs_api(self):
    shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png')) # preloaded *after* run
    self.btest('emscripten_fs_api_browser.cpp', '1')
  # SDL lifecycle events.
  def test_sdl_quit(self):
    self.btest('sdl_quit.c', '1')
  def test_sdl_resize(self):
    self.btest('sdl_resize.c', '1')
  # Shader introspection and SDL+GLSL rendering tests.
  def test_glshaderinfo(self):
    self.btest('glshaderinfo.cpp', '1')
  def test_glgetattachedshaders(self):
    self.btest('glgetattachedshaders.c', '1')
  def test_sdlglshader(self):
    self.btest('sdlglshader.c', reference='sdlglshader.png', args=['-O2', '--closure', '1', '-s', 'LEGACY_GL_EMULATION=1'])
  def test_sdlglshader2(self):
    self.btest('sdlglshader2.c', expected='1', args=['-s', 'LEGACY_GL_EMULATION=1'], also_proxied=True)
  def test_gl_glteximage(self):
    self.btest('gl_teximage.c', '1')
  def test_gl_ps(self):
    # pointers and a shader
    shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
    self.btest('gl_ps.c', reference='gl_ps.png', args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1'], reference_slack=1)
  def test_gl_ps_packed(self):
    # packed data that needs to be strided
    shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
    self.btest('gl_ps_packed.c', reference='gl_ps.png', args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1'], reference_slack=1)
  # explicit stride values in client-side arrays
  def test_gl_ps_strides(self):
    shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
    self.btest('gl_ps_strides.c', reference='gl_ps_strides.png', args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1'])
  # Renderer / vertex-buffer paths with GL_UNSAFE_OPTS disabled, plus ES2
  # uniform-array and conformance checks under GL_ASSERTIONS.
  def test_gl_renderers(self):
    self.btest('gl_renderers.c', reference='gl_renderers.png', args=['-s', 'GL_UNSAFE_OPTS=0', '-s', 'LEGACY_GL_EMULATION=1'])
  def test_gl_stride(self):
    self.btest('gl_stride.c', reference='gl_stride.png', args=['-s', 'GL_UNSAFE_OPTS=0', '-s', 'LEGACY_GL_EMULATION=1'])
  def test_gl_vertex_buffer_pre(self):
    self.btest('gl_vertex_buffer_pre.c', reference='gl_vertex_buffer_pre.png', args=['-s', 'GL_UNSAFE_OPTS=0', '-s', 'LEGACY_GL_EMULATION=1'])
  def test_gl_vertex_buffer(self):
    self.btest('gl_vertex_buffer.c', reference='gl_vertex_buffer.png', args=['-s', 'GL_UNSAFE_OPTS=0', '-s', 'LEGACY_GL_EMULATION=1'], reference_slack=1)
  def test_gles2_uniform_arrays(self):
    self.btest('gles2_uniform_arrays.cpp', args=['-s', 'GL_ASSERTIONS=1'], expected=['1'], also_proxied=True)
  def test_gles2_conformance(self):
    self.btest('gles2_conformance.cpp', args=['-s', 'GL_ASSERTIONS=1'], expected=['1'])
  # matrix stack behavior; expected values are checksums of the rendered state
  def test_matrix_identity(self):
    self.btest('gl_matrix_identity.c', expected=['-1882984448', '460451840', '1588195328'], args=['-s', 'LEGACY_GL_EMULATION=1'])
  # cubegeom family: a textured spinning cube exercising many legacy-GL
  # client-state paths, each compared against a reference image.
  def test_cubegeom_pre(self):
    self.btest('cubegeom_pre.c', reference='cubegeom_pre.png', args=['-s', 'LEGACY_GL_EMULATION=1'])
  def test_cubegeom_pre2(self):
    self.btest('cubegeom_pre2.c', reference='cubegeom_pre2.png', args=['-s', 'GL_DEBUG=1', '-s', 'LEGACY_GL_EMULATION=1']) # some coverage for GL_DEBUG not breaking the build
  def test_cubegeom_pre3(self):
    self.btest('cubegeom_pre3.c', reference='cubegeom_pre2.png', args=['-s', 'LEGACY_GL_EMULATION=1'])
  def test_cubegeom(self):
    self.btest('cubegeom.c', reference='cubegeom.png', args=['-O2', '-g', '-s', 'LEGACY_GL_EMULATION=1'], also_proxied=True)
def test_cubegeom_proc(self):
open('side.c', 'w').write(r'''
extern void* SDL_GL_GetProcAddress(const char *);
void *glBindBuffer = 0; // same name as the gl function, to check that the collision does not break us
void *getBindBuffer() {
if (!glBindBuffer) glBindBuffer = SDL_GL_GetProcAddress("glBindBuffer");
return glBindBuffer;
}
''')
for opts in [0, 1]:
self.btest('cubegeom_proc.c', reference='cubegeom.png', args=['-O' + str(opts), 'side.c', '-s', 'LEGACY_GL_EMULATION=1'])
  # More cubegeom variants, each exercising one legacy-GL feature (GLEW init,
  # per-vertex color/normal arrays, direct client-side draw pointers,
  # glDrawRangeElements, glDrawArrays, multitexture, texture matrices, fog,
  # vertex-array objects) against a reference image.
  def test_cubegeom_glew(self):
    self.btest('cubegeom_glew.c', reference='cubegeom.png', args=['-O2', '--closure', '1', '-s', 'LEGACY_GL_EMULATION=1'])
  def test_cubegeom_color(self):
    self.btest('cubegeom_color.c', reference='cubegeom_color.png', args=['-s', 'LEGACY_GL_EMULATION=1'])
  def test_cubegeom_normal(self):
    self.btest('cubegeom_normal.c', reference='cubegeom_normal.png', args=['-s', 'LEGACY_GL_EMULATION=1'], also_proxied=True)
  def test_cubegeom_normal_dap(self): # draw is given a direct pointer to clientside memory, no element array buffer
    self.btest('cubegeom_normal_dap.c', reference='cubegeom_normal.png', args=['-s', 'LEGACY_GL_EMULATION=1'], also_proxied=True)
  def test_cubegeom_normal_dap_far(self): # indices do nto start from 0
    self.btest('cubegeom_normal_dap_far.c', reference='cubegeom_normal.png', args=['-s', 'LEGACY_GL_EMULATION=1'])
  def test_cubegeom_normal_dap_far_range(self): # glDrawRangeElements
    self.btest('cubegeom_normal_dap_far_range.c', reference='cubegeom_normal.png', args=['-s', 'LEGACY_GL_EMULATION=1'])
  def test_cubegeom_normal_dap_far_glda(self): # use glDrawArrays
    self.btest('cubegeom_normal_dap_far_glda.c', reference='cubegeom_normal_dap_far_glda.png', args=['-s', 'LEGACY_GL_EMULATION=1'])
  def test_cubegeom_normal_dap_far_glda_quad(self): # with quad
    self.btest('cubegeom_normal_dap_far_glda_quad.c', reference='cubegeom_normal_dap_far_glda_quad.png', args=['-s', 'LEGACY_GL_EMULATION=1'])
  def test_cubegeom_mt(self):
    self.btest('cubegeom_mt.c', reference='cubegeom_mt.png', args=['-s', 'LEGACY_GL_EMULATION=1']) # multitexture
  def test_cubegeom_color2(self):
    self.btest('cubegeom_color2.c', reference='cubegeom_color2.png', args=['-s', 'LEGACY_GL_EMULATION=1'], also_proxied=True)
  def test_cubegeom_texturematrix(self):
    self.btest('cubegeom_texturematrix.c', reference='cubegeom_texturematrix.png', args=['-s', 'LEGACY_GL_EMULATION=1'])
  def test_cubegeom_fog(self):
    self.btest('cubegeom_fog.c', reference='cubegeom_fog.png', args=['-s', 'LEGACY_GL_EMULATION=1'])
  def test_cubegeom_pre_vao(self):
    self.btest('cubegeom_pre_vao.c', reference='cubegeom_pre_vao.png', args=['-s', 'LEGACY_GL_EMULATION=1'])
  def test_cubegeom_pre2_vao(self):
    self.btest('cubegeom_pre2_vao.c', reference='cubegeom_pre_vao.png', args=['-s', 'LEGACY_GL_EMULATION=1'])
  def test_cubegeom_pre2_vao2(self):
    self.btest('cubegeom_pre2_vao2.c', reference='cubegeom_pre2_vao2.png', args=['-s', 'LEGACY_GL_EMULATION=1'])
  def test_cube_explosion(self):
    self.btest('cube_explosion.c', reference='cube_explosion.png', args=['-s', 'LEGACY_GL_EMULATION=1'], also_proxied=True)
  def test_glgettexenv(self):
    self.btest('glgettexenv.c', args=['-s', 'LEGACY_GL_EMULATION=1'], expected=['1'])
  # SDL software-surface canvas rendering variants compared to reference images.
  def test_sdl_canvas_blank(self):
    self.btest('sdl_canvas_blank.c', reference='sdl_canvas_blank.png')
  def test_sdl_canvas_palette(self):
    self.btest('sdl_canvas_palette.c', reference='sdl_canvas_palette.png')
  def test_sdl_canvas_twice(self):
    self.btest('sdl_canvas_twice.c', reference='sdl_canvas_twice.png')
  def test_sdl_maprgba(self):
    self.btest('sdl_maprgba.c', reference='sdl_maprgba.png', reference_slack=3)
  # SDL_gfx rotozoom of a preloaded screenshot
  def test_sdl_rotozoom(self):
    shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
    self.btest('sdl_rotozoom.c', reference='sdl_rotozoom.png', args=['--preload-file', 'screenshot.png'], reference_slack=3)
  def test_sdl_gfx_primitives(self):
    self.btest('sdl_gfx_primitives.c', reference='sdl_gfx_primitives.png', reference_slack=1)
def test_sdl_canvas_palette_2(self):
open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
Module['preRun'].push(function() {
SDL.defaults.copyOnLock = false;
});
''')
open(os.path.join(self.get_dir(), 'args-r.js'), 'w').write('''
Module['arguments'] = ['-r'];
''')
open(os.path.join(self.get_dir(), 'args-g.js'), 'w').write('''
Module['arguments'] = ['-g'];
''')
open(os.path.join(self.get_dir(), 'args-b.js'), 'w').write('''
Module['arguments'] = ['-b'];
''')
self.btest('sdl_canvas_palette_2.c', reference='sdl_canvas_palette_r.png', args=['--pre-js', 'pre.js', '--pre-js', 'args-r.js'])
self.btest('sdl_canvas_palette_2.c', reference='sdl_canvas_palette_g.png', args=['--pre-js', 'pre.js', '--pre-js', 'args-g.js'])
self.btest('sdl_canvas_palette_2.c', reference='sdl_canvas_palette_b.png', args=['--pre-js', 'pre.js', '--pre-js', 'args-b.js'])
  # SDL text allocation under a small (8MB) heap.
  def test_sdl_alloctext(self):
    self.btest('sdl_alloctext.c', expected='1', args=['-O2', '-s', 'TOTAL_MEMORY=' + str(1024*1024*8)])
  def test_sdl_surface_refcount(self):
    self.btest('sdl_surface_refcount.c', expected='1')
  def test_sdl_free_screen(self):
    self.btest('sdl_free_screen.cpp', reference='htmltest.png')
  # GL_POINTS via glBegin/glEnd immediate mode
  def test_glbegin_points(self):
    shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
    self.btest('glbegin_points.c', reference='glbegin_points.png', args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION=1'])
  # S3TC/DXT compressed textures from a preloaded .dds file.
  def test_s3tc(self):
    shutil.copyfile(path_from_root('tests', 'screenshot.dds'), os.path.join(self.get_dir(), 'screenshot.dds'))
    self.btest('s3tc.c', reference='s3tc.png', args=['--preload-file', 'screenshot.dds', '-s', 'LEGACY_GL_EMULATION=1'])
  # same, restricted to the fixed-function-only fast path
  def test_s3tc_ffp_only(self):
    shutil.copyfile(path_from_root('tests', 'screenshot.dds'), os.path.join(self.get_dir(), 'screenshot.dds'))
    self.btest('s3tc.c', reference='s3tc.png', args=['--preload-file', 'screenshot.dds', '-s', 'LEGACY_GL_EMULATION=1', '-s', 'GL_FFP_ONLY=1'])
  def test_s3tc_crunch(self):
    # Package .dds textures with file_packager's --crunch compression, then
    # verify the compressed data is actually smaller and that textures load
    # from the package (originals are renamed so they cannot be found).
    shutil.copyfile(path_from_root('tests', 'ship.dds'), 'ship.dds')
    shutil.copyfile(path_from_root('tests', 'bloom.dds'), 'bloom.dds')
    shutil.copyfile(path_from_root('tests', 'water.dds'), 'water.dds')
    Popen([PYTHON, FILE_PACKAGER, 'test.data', '--crunch', '--preload', 'ship.dds', 'bloom.dds', 'water.dds'], stdout=open('pre.js', 'w')).communicate()
    assert os.stat('test.data').st_size < 0.5*(os.stat('ship.dds').st_size+os.stat('bloom.dds').st_size+os.stat('water.dds').st_size), 'Compressed should be smaller than dds'
    shutil.move('ship.dds', 'ship.donotfindme.dds') # make sure we load from the compressed
    shutil.move('bloom.dds', 'bloom.donotfindme.dds') # make sure we load from the compressed
    shutil.move('water.dds', 'water.donotfindme.dds') # make sure we load from the compressed
    self.btest('s3tc_crunch.c', reference='s3tc_crunch.png', reference_slack=11, args=['--pre-js', 'pre.js', '-s', 'LEGACY_GL_EMULATION=1'])
def test_s3tc_crunch_split(self): # load several datafiles/outputs of file packager
shutil.copyfile(path_from_root('tests', 'ship.dds'), 'ship.dds')
shutil.copyfile(path_from_root('tests', 'bloom.dds'), 'bloom.dds')
shutil.copyfile(path_from_root('tests', 'water.dds'), 'water.dds')
Popen([PYTHON, FILE_PACKAGER, 'asset_a.data', '--crunch', '--preload', 'ship.dds', 'bloom.dds'], stdout=open('asset_a.js', 'w')).communicate()
Popen([PYTHON, FILE_PACKAGER, 'asset_b.data', '--crunch', '--preload', 'water.dds'], stdout=open('asset_b.js', 'w')).communicate()
shutil.move('ship.dds', 'ship.donotfindme.dds') # make sure we load from the compressed
shutil.move('bloom.dds', 'bloom.donotfindme.dds') # make sure we load from the compressed
shutil.move('water.dds', 'water.donotfindme.dds') # make sure we load from the compressed
self.btest('s3tc_crunch.c', reference='s3tc_crunch.png', reference_slack=11, args=['--pre-js', 'asset_a.js', '--pre-js', 'asset_b.js', '-s', 'LEGACY_GL_EMULATION=1'])
# Anisotropic texture filtering; when SpiderMonkey is available, first verify
# the -O2 build validates as asm.js before running the browser test.
def test_aniso(self):
    if SPIDERMONKEY_ENGINE in JS_ENGINES:
        # asm.js-ification check
        Popen([PYTHON, EMCC, path_from_root('tests', 'aniso.c'), '-O2', '-g2', '-s', 'LEGACY_GL_EMULATION=1']).communicate()
        Settings.ASM_JS = 1
        self.run_generated_code(SPIDERMONKEY_ENGINE, 'a.out.js', assert_returncode=None)
        print 'passed asm test'
    shutil.copyfile(path_from_root('tests', 'water.dds'), 'water.dds')
    self.btest('aniso.c', reference='aniso.png', reference_slack=2, args=['--preload-file', 'water.dds', '-s', 'LEGACY_GL_EMULATION=1'])

# Non-byte texture data formats under GL emulation.
def test_tex_nonbyte(self):
    self.btest('tex_nonbyte.c', reference='tex_nonbyte.png', args=['-s', 'LEGACY_GL_EMULATION=1'])

# Floating-point textures.
def test_float_tex(self):
    self.btest('float_tex.cpp', reference='float_tex.png')

# glBufferSubData; reuses the float_tex reference image.
def test_subdata(self):
    self.btest('gl_subdata.cpp', reference='float_tex.png')

# Perspective projection rendering under GL emulation.
def test_perspective(self):
    self.btest('perspective.c', reference='perspective.png', args=['-s', 'LEGACY_GL_EMULATION=1'])
# Runtime linking against a BUILD_AS_SHARED_LIB=2 library.
# Intentionally skipped via the early return: the feature is deprecated.
# The code below the return is unreachable and kept for reference only.
def test_runtimelink(self):
    return self.skip('BUILD_AS_SHARED_LIB=2 is deprecated')
    main, supp = self.setup_runtimelink_test()
    open(self.in_dir('supp.cpp'), 'w').write(supp)
    Popen([PYTHON, EMCC, self.in_dir('supp.cpp'), '-o', 'supp.js', '-s', 'LINKABLE=1', '-s', 'NAMED_GLOBALS=1', '-s', 'BUILD_AS_SHARED_LIB=2', '-O2', '-s', 'ASM_JS=0']).communicate()
    shutil.move(self.in_dir('supp.js'), self.in_dir('supp.so'))
    self.btest(main, args=['-s', 'LINKABLE=1', '-s', 'NAMED_GLOBALS=1', '-s', 'RUNTIME_LINKED_LIBS=["supp.so"]', '-DBROWSER=1', '-O2', '-s', 'ASM_JS=0'], expected='76')
# A run dependency added in preRun must delay run() until it is removed.
def test_pre_run_deps(self):
    # Adding a dependency in preRun will delay run
    open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
Module.preRun = function() {
addRunDependency();
Module.print('preRun called, added a dependency...');
setTimeout(function() {
Module.okk = 10;
removeRunDependency()
}, 2000);
};
''')
    # Exercise both with and without a separate memory-init file.
    for mem in [0, 1]:
        self.btest('pre_run_deps.cpp', expected='10', args=['--pre-js', 'pre.js', '--memory-init-file', str(mem)])
# Memory-init file timing: a write to program memory before the mem-init file
# arrives is detected with ASSERTIONS, and silently overwritten without them.
def test_mem_init(self):
    open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
function myJSCallback() { // called from main()
Module._note(1);
}
Module.preRun = function() {
addOnPreMain(function() {
Module._note(2);
});
};
''')
    open(os.path.join(self.get_dir(), 'post.js'), 'w').write('''
var assert = function(check, text) {
if (!check) {
var xhr = new XMLHttpRequest();
xhr.open('GET', 'http://localhost:8888/report_result?9');
xhr.onload = function() {
window.close();
};
xhr.send();
}
}
Module._note(4); // this happens too early! and is overwritten when the mem init arrives
''')
    # with assertions, we notice when memory was written to too early
    self.btest('mem_init.cpp', expected='9', args=['--pre-js', 'pre.js', '--post-js', 'post.js', '--memory-init-file', '1'])
    # otherwise, we just overwrite
    self.btest('mem_init.cpp', expected='3', args=['--pre-js', 'pre.js', '--post-js', 'post.js', '--memory-init-file', '1', '-s', 'ASSERTIONS=0'])
# ccall/cwrap must abort while the runtime is not yet ready (mem-init file
# still loading) and must work once main() has run (via myJSCallback).
def test_mem_init_ccall(self):
    open(os.path.join(self.get_dir(), 'post.js'), 'w').write('''
function doCcall() {
ccall('note', 'string', ['number'], [2]);
}
var wrapped = cwrap('note', 'string', ['number']); // returns a string to suppress cwrap optimization
function doCwrapCall() {
var str = wrapped(3);
Module.print('got ' + str);
assert(str === 'silly-string');
}
var ok = true;
try {
doCcall();
ok = false; // should fail and not reach here, runtime is not ready yet so ccall will abort
} catch(e) {
Module.print('expected fail 1');
ABORT = false; // hackish
}
assert(ok);
ok = true;
try {
doCwrapCall();
ok = false; // should fail and not reach here, runtime is not ready yet so cwrap call will abort
} catch(e) {
Module.print('expected fail 2');
ABORT = false; // hackish
}
assert(ok);
function myJSCallback() {
// called from main, this is an ok time
doCcall();
doCwrapCall();
}
''')
    self.btest('mem_init.cpp', expected='3', args=['--post-js', 'post.js', '--memory-init-file', '1'])
# Worker API: build a worker module with BUILD_AS_WORKER, drive it from a main program.
def test_worker_api(self):
    Popen([PYTHON, EMCC, path_from_root('tests', 'worker_api_worker.cpp'), '-o', 'worker.js', '-s', 'BUILD_AS_WORKER=1', '-s', 'EXPORTED_FUNCTIONS=["_one"]']).communicate()
    self.btest('worker_api_main.cpp', expected='566')

# Worker API with several exported functions and an optimized, unminified build.
def test_worker_api_2(self):
    Popen([PYTHON, EMCC, path_from_root('tests', 'worker_api_2_worker.cpp'), '-o', 'worker.js', '-s', 'BUILD_AS_WORKER=1', '-O2', '--minify', '0', '-s', 'EXPORTED_FUNCTIONS=["_one", "_two", "_three", "_four"]']).communicate()
    self.btest('worker_api_2_main.cpp', args=['-O2', '--minify', '0'], expected='11')

# Worker API, third variant.
def test_worker_api_3(self):
    Popen([PYTHON, EMCC, path_from_root('tests', 'worker_api_3_worker.cpp'), '-o', 'worker.js', '-s', 'BUILD_AS_WORKER=1', '-s', 'EXPORTED_FUNCTIONS=["_one"]']).communicate()
    self.btest('worker_api_3_main.cpp', expected='5')

# emscripten_async_wget2 HTTP API.
def test_emscripten_async_wget2(self):
    self.btest('http.cpp', expected='0', args=['-I' + path_from_root('tests')])

# dlopen of a SIDE_MODULE from a MAIN_MODULE (runs under non-fastcomp only).
def test_module(self):
    def nfc():
        Popen([PYTHON, EMCC, path_from_root('tests', 'browser_module.cpp'), '-o', 'module.js', '-O2', '-s', 'SIDE_MODULE=1', '-s', 'DLOPEN_SUPPORT=1', '-s', 'EXPORTED_FUNCTIONS=["_one", "_two"]']).communicate()
        self.btest('browser_main.cpp', args=['-O2', '-s', 'MAIN_MODULE=1', '-s', 'DLOPEN_SUPPORT=1'], expected='8')
    nonfastcomp(nfc)
# mmap of a preloaded file, with and without --no-heap-copy.
def test_mmap_file(self):
    open(self.in_dir('data.dat'), 'w').write('data from the file ' + ('.' * 9000))
    for extra_args in [[], ['--no-heap-copy']]:
        self.btest(path_from_root('tests', 'mmap_file.c'), expected='1', args=['--preload-file', 'data.dat'] + extra_args)

# emrun --system_info/--browser_info/--list_browsers must succeed without tracebacks.
def test_emrun_info(self):
    result = subprocess.check_output([PYTHON, path_from_root('emrun'), '--system_info', '--browser_info'])
    assert 'CPU' in result
    assert 'Browser' in result
    assert 'Traceback' not in result
    result = subprocess.check_output([PYTHON, path_from_root('emrun'), '--list_browsers'])
    assert 'Traceback' not in result

# End-to-end emrun: launch a page, capture stdout/stderr logs and the exit code.
def test_emrun(self):
    Popen([PYTHON, EMCC, path_from_root('tests', 'hello_world_exit.c'), '--emrun', '-o', 'hello_world.html']).communicate()
    outdir = os.getcwd()
    # We cannot run emrun from the temp directory the suite will clean up afterwards, since the browser that is launched will have that directory as startup directory,
    # and the browser will not close as part of the test, pinning down the cwd on Windows and it wouldn't be possible to delete it. Therefore switch away from that directory
    # before launching.
    os.chdir(path_from_root())
    args = [PYTHON, path_from_root('emrun'), '--timeout', '30', '--verbose', os.path.join(outdir, 'hello_world.html'), '1', '2', '3', '--log_stdout', os.path.join(outdir, 'stdout.txt'), '--log_stderr', os.path.join(outdir, 'stderr.txt')]
    if emscripten_browser is not None:
        args += ['--browser', emscripten_browser]
    process = subprocess.Popen(args)
    process.communicate()
    stdout = open(os.path.join(outdir, 'stdout.txt'), 'r').read()
    stderr = open(os.path.join(outdir, 'stderr.txt'), 'r').read()
    assert process.returncode == 100
    assert 'argc: 4' in stdout
    assert 'argv[3]: 3' in stdout
    assert 'hello, world!' in stdout
    assert 'hello, error stream!' in stderr
# libuuid: check closure does not strip the crypto RNG hooks, then run in shell
# engines and in the browser.
def test_uuid(self):
    # Run with ./runner.py browser.test_uuid
    # We run this test in Node/SPIDERMONKEY and browser environments because we try to make use of
    # high quality crypto random number generators such as crypto.getRandomValues or randomBytes (if available).
    # First run tests in Node and/or SPIDERMONKEY using run_js. Use closure compiler so we can check that
    # require('crypto').randomBytes and window.crypto.getRandomValues doesn't get minified out.
    Popen([PYTHON, EMCC, '-O2', '--closure', '1', path_from_root('tests', 'uuid', 'test.c'), '-o', path_from_root('tests', 'uuid', 'test.js')], stdout=PIPE, stderr=PIPE).communicate()
    test_js_closure = open(path_from_root('tests', 'uuid', 'test.js')).read()
    # Check that test.js compiled with --closure 1 contains ").randomBytes" and "window.crypto.getRandomValues"
    assert ").randomBytes" in test_js_closure
    assert "window.crypto.getRandomValues" in test_js_closure
    out = run_js(path_from_root('tests', 'uuid', 'test.js'), full_output=True)
    print out
    # Tidy up files that might have been created by this test.
    try_delete(path_from_root('tests', 'uuid', 'test.js'))
    try_delete(path_from_root('tests', 'uuid', 'test.js.map'))
    # Now run test in browser
    self.btest(path_from_root('tests', 'uuid', 'test.c'), '1')

# GLEW initialization in all combinations of GL emulation and GLEW_MX.
def test_glew(self):
    self.btest(path_from_root('tests', 'glew.c'), expected='1')
    self.btest(path_from_root('tests', 'glew.c'), args=['-s', 'LEGACY_GL_EMULATION=1'], expected='1')
    self.btest(path_from_root('tests', 'glew.c'), args=['-DGLEW_MX'], expected='1')
    self.btest(path_from_root('tests', 'glew.c'), args=['-s', 'LEGACY_GL_EMULATION=1', '-DGLEW_MX'], expected='1')
# Regression test: adding and immediately removing a run dependency in preRun
# must not cause the program to start twice.
def test_doublestart_bug(self):
    open('pre.js', 'w').write(r'''
if (typeof Module === 'undefined') Module = eval('(function() { try { return Module || {} } catch(e) { return {} } })()');
if (!Module['preRun']) Module['preRun'] = [];
Module["preRun"].push(function () {
Module['addRunDependency']('test_run_dependency');
Module['removeRunDependency']('test_run_dependency');
});
''')
    self.btest('doublestart.c', args=['--pre-js', 'pre.js', '-o', 'test.html'], expected='1')

# HTML5 C API, plain and with optimizations + closure.
def test_html5(self):
    for opts in [[], ['-O2', '-g1', '--closure', '1']]:
        print opts
        self.btest(path_from_root('tests', 'test_html5.c'), args=opts, expected='0')

# SDL touch events, plain and with optimizations + closure.
def test_sdl_touch(self):
    for opts in [[], ['-O2', '-g1', '--closure', '1']]:
        print opts
        self.btest(path_from_root('tests', 'sdl_touch.c'), args=opts + ['-DAUTOMATE_SUCCESS=1'], expected='0')

# HTML5 mouse events, plain and with optimizations + closure.
def test_html5_mouse(self):
    for opts in [[], ['-O2', '-g1', '--closure', '1']]:
        print opts
        self.btest(path_from_root('tests', 'test_html5_mouse.c'), args=opts + ['-DAUTOMATE_SUCCESS=1'], expected='0')
# PRECISE_F32 code modifications: check behaviour with and without browser
# Math.fround support, and that PRECISE_F32=2 strips polyfill calls entirely.
def test_codemods(self):
    for opt_level in [0, 2]:
        print 'opt level', opt_level
        opts = '-O' + str(opt_level)
        # sanity checks, building with and without precise float semantics generates different results
        self.btest(path_from_root('tests', 'codemods.cpp'), expected='2', args=[opts])
        self.btest(path_from_root('tests', 'codemods.cpp'), expected='1', args=[opts, '-s', 'PRECISE_F32=1'])
        self.btest(path_from_root('tests', 'codemods.cpp'), expected='1', args=[opts, '-s', 'PRECISE_F32=2']) # empty polyfill, but browser has support, so semantics are like float
        # now use a shell to remove the browser's fround support
        open(self.in_dir('shell.html'), 'w').write(open(path_from_root('src', 'shell.html')).read().replace('var Module = {', '''
Math.fround = null;
var Module = {
'''))
        self.btest(path_from_root('tests', 'codemods.cpp'), expected='2', args=[opts, '--shell-file', 'shell.html'])
        self.btest(path_from_root('tests', 'codemods.cpp'), expected='1', args=[opts, '--shell-file', 'shell.html', '-s', 'PRECISE_F32=1'])
        self.btest(path_from_root('tests', 'codemods.cpp'), expected='2', args=[opts, '--shell-file', 'shell.html', '-s', 'PRECISE_F32=2']) # empty polyfill, no browser support, so semantics are like double
        # finally, remove fround, patch up fround as the code executes (after polyfilling etc.), to verify that we got rid of it entirely on the client side
        fixer = 'python fix.py'
        open('fix.py', 'w').write(r'''
import sys
filename = sys.argv[1]
js = open(filename).read()
replaced = js.replace("var Math_fround = Math.fround;", "var Math_fround = Math.fround = function(x) { return 0; }")
assert js != replaced
open(filename, 'w').write(replaced)
''')
        self.btest(path_from_root('tests', 'codemods.cpp'), expected='2', args=[opts, '--shell-file', 'shell.html', '--js-transform', fixer]) # no fround anyhow
        self.btest(path_from_root('tests', 'codemods.cpp'), expected='121378', args=[opts, '--shell-file', 'shell.html', '--js-transform', fixer, '-s', 'PRECISE_F32=1']) # proper polyfill was enstated, then it was replaced by the fix so 0 is returned all the time, hence a different result here
        self.btest(path_from_root('tests', 'codemods.cpp'), expected='2', args=[opts, '--shell-file', 'shell.html', '--js-transform', fixer, '-s', 'PRECISE_F32=2']) # we should remove the calls to the polyfill ENTIRELY here, on the clientside, so we should NOT see any calls to fround here, and result should be like double
|
PythonLock.py | import time, threading
# Shared account balance mutated by multiple threads in the demo below.
balance = 0


def change_it(n):
    """Deposit then withdraw n: net effect on the shared balance is zero when run serially."""
    global balance
    balance = balance + n
    balance = balance - n
def run_thread(n):
    """Thread body: hammer the shared balance 100000 times to expose the unsynchronized race."""
    for _ in range(100000):
        change_it(n)
# Race-condition demo: two threads update the shared balance without a lock,
# so the printed final value may be nonzero.
t1 = threading.Thread(target=run_thread, args=(5,))
t2 = threading.Thread(target=run_thread, args=(8,))
t1.start()
t2.start()
t1.join()
t2.join()
print(balance)
# A lock is created via threading.Lock():
balance = 0
lock = threading.Lock()
def run_thread(n):
    """Thread body: perform 100000 balance updates, each protected by the global lock.

    Uses the `with lock:` context manager instead of the manual
    acquire/try/finally/release pattern -- it guarantees the lock is
    released even if change_it() raises, with less boilerplate.
    """
    for i in range(100000):
        with lock:
            change_it(n)
print(balance) |
serverprox.py | from flcore.clients.clientprox import clientProx
from flcore.servers.serverbase import Server
from threading import Thread
class FedProx(Server):
    """Federated-learning server for FedProx.

    NOTE(review): the FedProx-specific (proximal-term) behaviour appears to live
    in clientProx; this server only orchestrates rounds -- confirm against the
    clientProx implementation.
    """

    def __init__(self, args, times):
        super().__init__(args, times)
        # select slow clients
        self.set_slow_clients()
        # Instantiate one clientProx per client.
        self.set_clients(args, clientProx)
        print(f"\nJoin ratio / total clients: {self.join_ratio} / {self.num_clients}")
        print("Finished creating server and clients.")
        # self.load_model()

    def train(self):
        """Run global_rounds+1 federated rounds: select, broadcast, local train, aggregate."""
        for i in range(self.global_rounds+1):
            self.selected_clients = self.select_clients()
            self.send_models()
            # Evaluate the global model every eval_gap rounds.
            if i%self.eval_gap == 0:
                print(f"\n-------------Round number: {i}-------------")
                print("\nEvaluate global model")
                self.evaluate()
            # Clients train sequentially; the threaded variant is kept below for reference.
            for client in self.selected_clients:
                client.train()
            # threads = [Thread(target=client.train)
            #            for client in self.selected_clients]
            # [t.start() for t in threads]
            # [t.join() for t in threads]
            self.receive_models()
            self.aggregate_parameters()
        print("\nBest global accuracy.")
        # self.print_(max(self.rs_test_acc), max(
        #     self.rs_train_acc), min(self.rs_train_loss))
        print(max(self.rs_test_acc))
        self.save_results()
        self.save_global_model()
|
test_runner.py | # Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "LICENSE.txt" file accompanying this file.
# This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, express or implied.
# See the License for the specific language governing permissions and limitations under the License.
import datetime
import logging
import multiprocessing
import os
import re
import sys
import time
import urllib.request
from tempfile import TemporaryDirectory
import argparse
import boto3
import pytest
from assertpy import assert_that
from framework.tests_configuration.config_renderer import dump_rendered_config_file, read_config_file
from framework.tests_configuration.config_utils import get_all_regions
from framework.tests_configuration.config_validator import assert_valid_config
from reports_generator import generate_cw_report, generate_json_report, generate_junitxml_merged_report
from retrying import retry
from utils import InstanceTypesData
logger = logging.getLogger()
logging.basicConfig(format="%(asctime)s - %(levelname)s - %(module)s - %(message)s", level=logging.INFO)

# Timestamps used to name the per-run log/output directories.
START_TIME = time.time()
START_TIME_ISO = datetime.datetime.fromtimestamp(START_TIME).isoformat()
LOGS_DIR = "{0}.logs".format(START_TIME)
OUT_DIR = "{0}.out".format(START_TIME)

# Default value for every CLI option of the test runner (see _init_argparser).
TEST_DEFAULTS = {
    "parallelism": None,
    "retry_on_failures": False,
    "features": "",  # empty string means all
    "regions": [],
    "oss": [],
    "schedulers": [],
    "instances": [],
    "dry_run": False,
    "reports": [],
    "cw_region": "us-east-1",
    "cw_namespace": "ParallelCluster/IntegrationTests",
    "cw_timestamp_day_start": False,
    "sequential": False,
    "output_dir": "tests_outputs",
    "custom_node_url": None,
    "custom_cookbook_url": None,
    "createami_custom_cookbook_url": None,
    "cookbook_git_ref": None,
    "node_git_ref": None,
    "ami_owner": None,
    "createami_custom_node_url": None,
    "custom_awsbatchcli_url": None,
    "custom_ami": None,
    "pre_install": None,
    "post_install": None,
    "vpc_stack": None,
    "api_uri": None,
    "cluster": None,
    "api_definition_s3_uri": None,
    "api_infrastructure_s3_uri": None,
    "public_ecr_image_uri": None,
    "no_delete": False,
    "benchmarks": False,
    "benchmarks_target_capacity": 200,
    "benchmarks_max_time": 30,
    "stackname_suffix": "",
    "delete_logs_on_success": False,
    "tests_root_dir": "./tests",
    "instance_types_data": None,
    "use_default_iam_credentials": False,
    "iam_user_role_stack_name": None,
    "directory_stack_name": None,
    "ldaps_nlb_stack_name": None,
    # Added: these keys are read via TEST_DEFAULTS.get(...) in _init_argparser but
    # were missing from the dict, so the lookups silently returned None. The values
    # below are falsy, matching the previous effective behaviour.
    "show_output": False,
    "pcluster_git_ref": None,
}
def _init_argparser():
    """Build the argparse parser for the integration-tests runner CLI.

    Defaults come from TEST_DEFAULTS; returns the configured ArgumentParser.

    Fixes vs previous revision:
    - `--api-infrastructure-s3-uri` default read the "api_definition_s3_uri"
      key of TEST_DEFAULTS (copy-paste bug).
    - the help texts of `--api-definition-s3-uri` and `--public-ecr-image-uri`
      were swapped.
    """
    parser = argparse.ArgumentParser(
        description="Run integration tests suite.", formatter_class=argparse.ArgumentDefaultsHelpFormatter
    )
    parser.add_argument("--key-name", help="Key to use for EC2 instances", required=True)
    parser.add_argument("--key-path", help="Path to the key to use for SSH connections", required=True, type=_is_file)
    parser.add_argument(
        "-n", "--parallelism", help="Tests parallelism for every region.", default=TEST_DEFAULTS.get("parallelism")
    )
    parser.add_argument(
        "--sequential",
        help="Run tests in a single process. When not specified tests will spawn a process for each region under test.",
        action="store_true",
        default=TEST_DEFAULTS.get("sequential"),
    )
    parser.add_argument(
        "--credential",
        action="append",
        help="STS credential to assume when running tests in a specific region."
        "Credentials need to be in the format <region>,<endpoint>,<ARN>,<externalId> and can"
        " be specified multiple times. <region> represents the region credentials are used for, <endpoint> is the sts "
        " endpoint to contact in order to assume credentials, <account-id> is the id of the account where the role to "
        " assume is defined, <externalId> is the id to use when assuming the role. "
        "(e.g. ap-east-1,https://sts.us-east-1.amazonaws.com,arn:aws:iam::<account-id>:role/role-to-assume,externalId)",
        required=False,
    )
    parser.add_argument(
        "--use-default-iam-credentials",
        help="Use the default IAM creds to run pcluster CLI commands. Skips the creation of pcluster CLI IAM role.",
        action="store_true",
        default=TEST_DEFAULTS.get("use_default_iam_credentials"),
    )
    parser.add_argument(
        "--retry-on-failures",
        help="Retry once more the failed tests after a delay of 60 seconds.",
        action="store_true",
        default=TEST_DEFAULTS.get("retry_on_failures"),
    )
    parser.add_argument(
        "--tests-root-dir",
        help="Root dir where integration tests are defined",
        default=TEST_DEFAULTS.get("tests_root_dir"),
    )
    dimensions_group = parser.add_argument_group("Test dimensions")
    dimensions_group.add_argument(
        "-c",
        "--tests-config",
        # NOTE(review): _test_config_file is defined in this module but unused; this
        # option may be intended to take type=_test_config_file -- confirm before changing.
        help="Config file that specifies the tests to run and the dimensions to enable for each test. "
        "Note that when a config file is used the following flags are ignored: instances, regions, oss, schedulers. "
        "Refer to the docs for further details on the config format: "
        "https://github.com/aws/aws-parallelcluster/blob/develop/tests/integration-tests/README.md",
    )
    dimensions_group.add_argument(
        "-i",
        "--instances",
        help="AWS instances under test. Ignored when tests-config is used.",
        default=TEST_DEFAULTS.get("instances"),
        nargs="*",
    )
    dimensions_group.add_argument(
        "-o",
        "--oss",
        help="OSs under test. Ignored when tests-config is used.",
        default=TEST_DEFAULTS.get("oss"),
        nargs="*",
    )
    dimensions_group.add_argument(
        "-s",
        "--schedulers",
        help="Schedulers under test. Ignored when tests-config is used.",
        default=TEST_DEFAULTS.get("schedulers"),
        nargs="*",
    )
    dimensions_group.add_argument(
        "-r",
        "--regions",
        help="AWS regions where tests are executed. Ignored when tests-config is used.",
        default=TEST_DEFAULTS.get("regions"),
        nargs="*",
    )
    dimensions_group.add_argument(
        "-f",
        "--features",
        help="Run only tests for the listed features. Prepending the not keyword to the feature name causes the "
        "feature to be excluded.",
        default=TEST_DEFAULTS.get("features"),
        nargs="+",
    )
    reports_group = parser.add_argument_group("Test reports")
    reports_group.add_argument(
        "--show-output",
        help="Do not redirect tests stdout to file. Not recommended when running in multiple regions.",
        action="store_true",
        default=TEST_DEFAULTS.get("show_output"),
    )
    reports_group.add_argument(
        "--reports",
        help="create tests report files. junitxml creates a junit-xml style report file. html creates an html "
        "style report file. json creates a summary with details for each dimensions. cw publishes tests metrics into "
        "CloudWatch",
        nargs="+",
        choices=["html", "junitxml", "json", "cw"],
        default=TEST_DEFAULTS.get("reports"),
    )
    reports_group.add_argument(
        "--cw-region", help="Region where to publish CloudWatch metrics", default=TEST_DEFAULTS.get("cw_region")
    )
    reports_group.add_argument(
        "--cw-namespace",
        help="CloudWatch namespace where to publish metrics",
        default=TEST_DEFAULTS.get("cw_namespace"),
    )
    reports_group.add_argument(
        "--cw-timestamp-day-start",
        action="store_true",
        help="CloudWatch metrics pushed with at timestamp equal to the start of the current day (midnight)",
        default=TEST_DEFAULTS.get("cw_timestamp_day_start"),
    )
    reports_group.add_argument(
        "--output-dir", help="Directory where tests outputs are generated", default=TEST_DEFAULTS.get("output_dir")
    )
    custom_group = parser.add_argument_group("Custom packages and templates")
    custom_group.add_argument(
        "--custom-node-url",
        help="URL to a custom node package.",
        default=TEST_DEFAULTS.get("custom_node_url"),
        type=_is_url,
    )
    custom_group.add_argument(
        "--custom-cookbook-url",
        help="URL to a custom cookbook package.",
        default=TEST_DEFAULTS.get("custom_cookbook_url"),
        type=_is_url,
    )
    custom_group.add_argument(
        "--createami-custom-cookbook-url",
        help="URL to a custom cookbook package for the createami command.",
        default=TEST_DEFAULTS.get("createami_custom_cookbook_url"),
        type=_is_url,
    )
    custom_group.add_argument(
        "--createami-custom-node-url",
        help="URL to a custom node package for the createami command.",
        default=TEST_DEFAULTS.get("createami_custom_node_url"),
        type=_is_url,
    )
    custom_group.add_argument(
        "--custom-awsbatchcli-url",
        help="URL to a custom awsbatch cli package.",
        default=TEST_DEFAULTS.get("custom_awsbatchcli_url"),
        type=_is_url,
    )
    custom_group.add_argument(
        "--pre-install", help="URL to a pre install script", default=TEST_DEFAULTS.get("pre_install")
    )
    custom_group.add_argument(
        "--post-install", help="URL to a post install script", default=TEST_DEFAULTS.get("post_install")
    )
    custom_group.add_argument(
        "--instance-types-data",
        help="Additional information about instance types used in the tests. The format is a JSON map "
        "instance_type -> data, where data must respect the same structure returned by ec2 "
        "describe-instance-types",
        default=TEST_DEFAULTS.get("instance_types_data"),
    )
    ami_group = parser.add_argument_group("AMI selection parameters")
    ami_group.add_argument(
        "--custom-ami", help="custom AMI to use for all tests.", default=TEST_DEFAULTS.get("custom_ami")
    )
    ami_group.add_argument(
        "--pcluster-git-ref",
        help="Git ref of the custom cli package used to build the AMI.",
        default=TEST_DEFAULTS.get("pcluster_git_ref"),
    )
    ami_group.add_argument(
        "--cookbook-git-ref",
        help="Git ref of the custom cookbook package used to build the AMI.",
        default=TEST_DEFAULTS.get("cookbook_git_ref"),
    )
    ami_group.add_argument(
        "--node-git-ref",
        help="Git ref of the custom node package used to build the AMI.",
        default=TEST_DEFAULTS.get("node_git_ref"),
    )
    ami_group.add_argument(
        "--ami-owner",
        help="Override the owner value when fetching AMIs to use with cluster. By default pcluster uses amazon.",
        default=TEST_DEFAULTS.get("ami_owner"),
    )
    banchmarks_group = parser.add_argument_group("Benchmarks")
    banchmarks_group.add_argument(
        "--benchmarks",
        help="run benchmarks tests. This disables the execution of all tests defined under the tests directory.",
        action="store_true",
        default=TEST_DEFAULTS.get("benchmarks"),
    )
    banchmarks_group.add_argument(
        "--benchmarks-target-capacity",
        help="set the target capacity for benchmarks tests",
        default=TEST_DEFAULTS.get("benchmarks_target_capacity"),
        type=int,
    )
    banchmarks_group.add_argument(
        "--benchmarks-max-time",
        help="set the max waiting time in minutes for benchmarks tests",
        default=TEST_DEFAULTS.get("benchmarks_max_time"),
        type=int,
    )
    api_group = parser.add_argument_group("API options")
    api_group.add_argument(
        "--api-definition-s3-uri",
        # Fixed: this help text was previously swapped with --public-ecr-image-uri's.
        help="S3 URI of the ParallelCluster API spec",
        default=TEST_DEFAULTS.get("api_definition_s3_uri"),
    )
    api_group.add_argument(
        "--api-infrastructure-s3-uri",
        help="URI of the CloudFormation template for the ParallelCluster API",
        # Fixed: default previously looked up the "api_definition_s3_uri" key.
        default=TEST_DEFAULTS.get("api_infrastructure_s3_uri"),
    )
    api_group.add_argument(
        "--public-ecr-image-uri",
        # Fixed: this help text was previously swapped with --api-definition-s3-uri's.
        help="URI of the Docker image for the Lambda of the ParallelCluster API",
        default=TEST_DEFAULTS.get("public_ecr_image_uri"),
    )
    api_group.add_argument(
        "--api-uri",
        help="URI of an existing ParallelCluster API",
        default=TEST_DEFAULTS.get("api_uri"),
    )
    debug_group = parser.add_argument_group("Debugging/Development options")
    debug_group.add_argument(
        "--vpc-stack", help="Name of an existing vpc stack.", default=TEST_DEFAULTS.get("vpc_stack")
    )
    debug_group.add_argument(
        "--cluster", help="Use an existing cluster instead of creating one.", default=TEST_DEFAULTS.get("cluster")
    )
    debug_group.add_argument(
        "--no-delete",
        action="store_true",
        help="Don't delete stacks after tests are complete.",
        default=TEST_DEFAULTS.get("no_delete"),
    )
    debug_group.add_argument(
        "--delete-logs-on-success",
        help="delete CloudWatch logs when a test succeeds",
        action="store_true",
        default=TEST_DEFAULTS.get("delete_logs_on_success"),
    )
    debug_group.add_argument(
        "--stackname-suffix",
        help="set a suffix in the integration tests stack names",
        default=TEST_DEFAULTS.get("stackname_suffix"),
    )
    debug_group.add_argument(
        "--dry-run",
        help="Only show the list of tests that would run with specified options.",
        action="store_true",
        default=TEST_DEFAULTS.get("dry_run"),
    )
    debug_group.add_argument(
        "--iam-user-role-stack-name",
        help="Name of an existing IAM user role stack.",
        default=TEST_DEFAULTS.get("iam_user_role_stack_name"),
    )
    debug_group.add_argument(
        "--directory-stack-name",
        help="Name of CFN stack providing AD domain to be used for testing AD integration feature.",
        default=TEST_DEFAULTS.get("directory_stack_name"),
    )
    debug_group.add_argument(
        "--ldaps-nlb-stack-name",
        help="Name of CFN stack providing NLB to enable use of LDAPS with a Simple AD directory when testing AD "
        "integration feature.",
        default=TEST_DEFAULTS.get("ldaps_nlb_stack_name"),
    )
    return parser
def _is_file(value):
if not os.path.isfile(value):
raise argparse.ArgumentTypeError("'{0}' is not a valid file".format(value))
return value
# Retried because transient network/S3 hiccups should not fail argument parsing:
# up to 10 attempts, 5 seconds apart.
@retry(stop_max_attempt_number=10, wait_fixed=5000)
def _is_url(value):
    """Argparse type validator: return `value` if it is a reachable https/s3/file URL.

    s3:// URLs are validated with a HeadObject call; other accepted schemes by
    opening the URL. Raises argparse.ArgumentTypeError otherwise.
    """
    # NOTE(review): urllib.request re-exports urlparse, but urllib.parse.urlparse
    # is the canonical location -- consider switching.
    scheme = urllib.request.urlparse(value).scheme
    if scheme in ["https", "s3", "file"]:
        try:
            if scheme == "s3":
                match = re.match(r"s3://(.*?)/(.*)", value)
                if not match or len(match.groups()) < 2:
                    raise argparse.ArgumentTypeError(f"'{value}' is not a valid S3url")
                else:
                    bucket_name, object_name = match.group(1), match.group(2)
                    boto3.client("s3").head_object(Bucket=bucket_name, Key=object_name)
            else:
                urllib.request.urlopen(value)
            return value
        except Exception as e:
            # Note: this also re-wraps the ArgumentTypeError raised above for malformed s3 URLs.
            raise argparse.ArgumentTypeError(f"'{value}' is not a valid url:{e}")
    else:
        raise argparse.ArgumentTypeError("'{0}' is not a valid url".format(value))
def _test_config_file(value):
    """Argparse type validator: parse `value` as a tests config file and return the config.

    Raises argparse.ArgumentTypeError when the path is not an existing file or
    the file does not parse as a valid tests configuration.
    """
    _is_file(value)
    try:
        config = read_config_file(value)
        return config
    except Exception as e:
        # Chain the original parsing error so the failure is debuggable
        # (previously the cause was silently dropped).
        raise argparse.ArgumentTypeError("'{0}' is not a valid test config".format(value)) from e
def _join_with_not(args):
"""
Join 'not' with next token, so they
can be used together as single pytest marker
"""
it = iter(args)
while True:
try:
current = next(it)
except StopIteration:
break
if current == "not":
try:
current += " " + next(it)
except StopIteration:
raise Exception("'not' needs to be always followed by an item")
yield current
def _get_pytest_args(args, regions, log_file, out_dir):  # noqa: C901
    """Translate the runner's parsed CLI `args` into the pytest argument list.

    `regions` restricts the run to the given regions; `log_file` and `out_dir`
    are created under args.output_dir.
    """
    pytest_args = ["-s", "-vv", "-l"]
    pytest_args.append("--tests-log-file={0}/{1}".format(args.output_dir, log_file))
    pytest_args.append("--output-dir={0}/{1}".format(args.output_dir, out_dir))
    pytest_args.append(f"--key-name={args.key_name}")
    pytest_args.append(f"--key-path={args.key_path}")
    # NOTE(review): appended unconditionally, so an empty suffix is passed as "" -- confirm intended.
    pytest_args.extend(["--stackname-suffix", args.stackname_suffix])
    if args.benchmarks:
        # Benchmarks mode swaps the test tree: run ./benchmarks, ignore ./tests.
        pytest_args.append("--ignore=./tests")
        pytest_args.append("--rootdir=./benchmarks")
        pytest_args.append("--benchmarks-target-capacity={0}".format(args.benchmarks_target_capacity))
        pytest_args.append("--benchmarks-max-time={0}".format(args.benchmarks_max_time))
    else:
        pytest_args.extend(["--rootdir", args.tests_root_dir])
        pytest_args.append("--ignore=./benchmarks")
    # Show all tests durations
    pytest_args.append("--durations=0")
    # Run only tests with the given markers
    if args.features:
        pytest_args.append("-m")
        pytest_args.append(" or ".join(list(_join_with_not(args.features))))
    if args.tests_config:
        _set_tests_config_args(args, pytest_args, out_dir)
    if args.instance_types_data:
        pytest_args.append("--instance-types-data-file={0}".format(args.instance_types_data))
    if regions:
        pytest_args.append("--regions")
        pytest_args.extend(regions)
    if args.instances:
        pytest_args.append("--instances")
        pytest_args.extend(args.instances)
    if args.oss:
        pytest_args.append("--oss")
        pytest_args.extend(args.oss)
    if args.schedulers:
        pytest_args.append("--schedulers")
        pytest_args.extend(args.schedulers)
    if args.delete_logs_on_success:
        pytest_args.append("--delete-logs-on-success")
    if args.credential:
        pytest_args.append("--credential")
        pytest_args.extend(args.credential)
    if args.use_default_iam_credentials:
        pytest_args.append("--use-default-iam-credentials")
    if args.retry_on_failures:
        # Rerun tests on failures for one more time after 60 seconds delay
        pytest_args.extend(["--reruns", "1", "--reruns-delay", "60"])
    if args.parallelism:
        # pytest-xdist worker count.
        pytest_args.extend(["-n", args.parallelism])
    if args.dry_run:
        pytest_args.append("--collect-only")
    # junitxml output also feeds the json and cw report generators.
    if any(report in ["junitxml", "json", "cw"] for report in args.reports):
        pytest_args.append("--junit-xml={0}/{1}/results.xml".format(args.output_dir, out_dir))
    if "html" in args.reports:
        pytest_args.append("--html={0}/{1}/results.html".format(args.output_dir, out_dir))
    _set_custom_packages_args(args, pytest_args)
    _set_ami_args(args, pytest_args)
    _set_custom_stack_args(args, pytest_args)
    _set_api_args(args, pytest_args)
    return pytest_args
def _set_custom_packages_args(args, pytest_args): # noqa: C901
if args.custom_node_url:
pytest_args.extend(["--custom-node-package", args.custom_node_url])
if args.custom_cookbook_url:
pytest_args.extend(["--custom-chef-cookbook", args.custom_cookbook_url])
if args.createami_custom_cookbook_url:
pytest_args.extend(["--createami-custom-chef-cookbook", args.createami_custom_cookbook_url])
if args.createami_custom_node_url:
pytest_args.extend(["--createami-custom-node-package", args.createami_custom_node_url])
if args.custom_awsbatchcli_url:
pytest_args.extend(["--custom-awsbatchcli-package", args.custom_awsbatchcli_url])
if args.pre_install:
pytest_args.extend(["--pre-install", args.pre_install])
if args.post_install:
pytest_args.extend(["--post-install", args.post_install])
def _set_ami_args(args, pytest_args):
if args.custom_ami:
pytest_args.extend(["--custom-ami", args.custom_ami])
if args.pcluster_git_ref:
pytest_args.extend(["--pcluster-git-ref", args.pcluster_git_ref])
if args.cookbook_git_ref:
pytest_args.extend(["--cookbook-git-ref", args.cookbook_git_ref])
if args.node_git_ref:
pytest_args.extend(["--node-git-ref", args.node_git_ref])
if args.ami_owner:
pytest_args.extend(["--ami-owner", args.ami_owner])
def _set_custom_stack_args(args, pytest_args):
if args.vpc_stack:
pytest_args.extend(["--vpc-stack", args.vpc_stack])
if args.cluster:
pytest_args.extend(["--cluster", args.cluster])
if args.no_delete:
pytest_args.append("--no-delete")
if args.iam_user_role_stack_name:
pytest_args.extend(["--iam-user-role-stack-name", args.iam_user_role_stack_name])
if args.directory_stack_name:
pytest_args.extend(["--directory-stack-name", args.directory_stack_name])
if args.ldaps_nlb_stack_name:
pytest_args.extend(["--ldaps-nlb-stack-name", args.ldaps_nlb_stack_name])
def _set_api_args(args, pytest_args):
if args.api_definition_s3_uri:
pytest_args.extend(["--api-definition-s3-uri", args.api_definition_s3_uri])
if args.public_ecr_image_uri:
pytest_args.extend(["--public-ecr-image-uri", args.public_ecr_image_uri])
if args.api_uri:
pytest_args.extend(["--api-uri", args.api_uri])
if args.api_infrastructure_s3_uri:
pytest_args.extend(["--api-infrastructure-s3-uri", args.api_infrastructure_s3_uri])
def _set_tests_config_args(args, pytest_args, out_dir):
    """Render the tests config once and hand it to pytest via --tests-config-file.

    Mutates ``pytest_args`` in place.  Raises FileExistsError if the rendered
    config was already written for this output dir.
    """
    # Dump the rendered file to avoid re-rendering in pytest processes
    rendered_config_file = f"{args.output_dir}/{out_dir}/tests_config.yaml"
    # "x" mode fails when the file already exists, so a stale rendering is never reused silently.
    with open(rendered_config_file, "x", encoding="utf-8") as text_file:
        text_file.write(dump_rendered_config_file(args.tests_config))
    pytest_args.append(f"--tests-config-file={rendered_config_file}")
def _get_pytest_regionalized_args(region, args, out_dir, logs_dir):
    """Build the pytest args list for a run restricted to a single region.

    Logs go to ``<logs_dir>/<region>.log`` and outputs to ``<out_dir>/<region>``.
    Fixes the misspelled ``our_dir`` parameter name; the only caller passes it
    positionally, so the rename is backward-compatible.
    """
    return _get_pytest_args(
        args=args,
        regions=[region],
        log_file="{0}/{1}.log".format(logs_dir, region),
        out_dir="{0}/{1}".format(out_dir, region),
    )
def _get_pytest_non_regionalized_args(args, out_dir, logs_dir):
    """Build the pytest args list for a single run covering all requested regions.

    All regions share one log file (``all_regions.log``) and one output dir.
    """
    return _get_pytest_args(
        args=args, regions=args.regions, log_file="{0}/all_regions.log".format(logs_dir), out_dir=out_dir
    )
def _run_test_in_region(region, args, out_dir, logs_dir):
    """Run the regionalized test suite for one region (executed as a worker process)."""
    out_dir_region = "{base_dir}/{out_dir}/{region}".format(base_dir=args.output_dir, out_dir=out_dir, region=region)
    os.makedirs(out_dir_region, exist_ok=True)

    # Redirect stdout to file
    # NOTE(review): the file object is never closed explicitly; it lives for the
    # lifetime of this worker process and is released at process exit.
    if not args.show_output:
        sys.stdout = open("{0}/pytest.out".format(out_dir_region), "w")

    pytest_args_regionalized = _get_pytest_regionalized_args(region, args, out_dir, logs_dir)
    # Give pytest a throwaway base temp dir so parallel regions don't collide.
    with TemporaryDirectory() as temp_dir:
        pytest_args_regionalized.extend(["--basetemp", temp_dir])
        logger.info("Starting pytest in region {0} with params {1}".format(region, pytest_args_regionalized))
        pytest.main(pytest_args_regionalized)
def _make_logging_dirs(base_dir):
    """Create (if needed) the logs and tests-output directories under ``base_dir``."""
    for subdir, label in ((LOGS_DIR, "logs"), (OUT_DIR, "tests output")):
        path = "{0}/{1}".format(base_dir, subdir)
        os.makedirs(path, exist_ok=True)
        logger.info("Configured {0} dir: {1}".format(label, path))
def _run_parallel(args):
    """Spawn one worker process per enabled region and wait for all of them."""
    jobs = []
    # Explicit --regions wins; otherwise derive the region set from the tests config.
    if args.regions:
        enabled_regions = args.regions
    else:
        enabled_regions = get_all_regions(args.tests_config)
    for region in enabled_regions:
        p = multiprocessing.Process(target=_run_test_in_region, args=(region, args, OUT_DIR, LOGS_DIR))
        jobs.append(p)
        p.start()

    for job in jobs:
        job.join()
def _check_args(args):
    """Validate cross-argument consistency of the parsed CLI options.

    Exits the process when --cluster is combined with multiple oss/schedulers/
    instances/regions; raises argparse.ArgumentTypeError when the tests config
    file is invalid.  May rewrite ``args.tests_config`` to its parsed form.
    """
    # If --cluster is set only one os, scheduler, instance type and region can be provided
    if args.cluster:
        if len(args.oss) > 1 or len(args.schedulers) > 1 or len(args.instances) > 1 or len(args.regions) > 1:
            logger.error(
                "when cluster option is specified, you can have a single value for oss, regions, instances "
                "and schedulers and you need to make sure they match the cluster specific ones"
            )
            exit(1)

    if not args.tests_config:
        # Without a config file the dimensions must be given explicitly.
        assert_that(args.regions).described_as("--regions cannot be empty").is_not_empty()
        assert_that(args.instances).described_as("--instances cannot be empty").is_not_empty()
        assert_that(args.oss).described_as("--oss cannot be empty").is_not_empty()
        assert_that(args.schedulers).described_as("--schedulers cannot be empty").is_not_empty()
    else:
        try:
            args.tests_config = _test_config_file(args.tests_config)
            assert_valid_config(args.tests_config, args.tests_root_dir)
            logger.info("Found valid config file:\n%s", dump_rendered_config_file(args.tests_config))
        except Exception as exc:
            # Chain the original failure so the root cause stays visible in the traceback.
            raise argparse.ArgumentTypeError(
                "'{0}' is not a valid test config".format(args.tests_config)
            ) from exc
def _run_sequential(args):
    """Run the whole suite in this process: one pytest invocation spanning all regions."""
    # Redirect stdout to file
    # NOTE(review): the file handle is deliberately kept open until process exit.
    if not args.show_output:
        sys.stdout = open("{0}/{1}/pytest.out".format(args.output_dir, OUT_DIR), "w")

    pytest_args_non_regionalized = _get_pytest_non_regionalized_args(args, OUT_DIR, LOGS_DIR)
    logger.info("Starting pytest with params {0}".format(pytest_args_non_regionalized))
    pytest.main(pytest_args_non_regionalized)
def main():
    """Entrypoint for tests executor."""
    if sys.version_info < (3, 7):
        logger.error("test_runner requires python >= 3.7")
        exit(1)

    args = _init_argparser().parse_args()

    # Load additional instance types data, if provided.
    # This step must be done before loading test config files in order to resolve instance type placeholders.
    if args.instance_types_data:
        InstanceTypesData.load_additional_instance_types_data(args.instance_types_data)

    _check_args(args)
    logger.info("Parsed test_runner parameters {0}".format(args))

    _make_logging_dirs(args.output_dir)

    # Sequential mode runs one pytest process; otherwise fan out one process per region.
    if args.sequential:
        _run_sequential(args)
    else:
        _run_parallel(args)

    logger.info("All tests completed!")

    # Post-run report generation, driven by the --reports selection.
    reports_output_dir = "{base_dir}/{out_dir}".format(base_dir=args.output_dir, out_dir=OUT_DIR)
    if "junitxml" in args.reports:
        generate_junitxml_merged_report(reports_output_dir)

    if "json" in args.reports:
        logger.info("Generating tests report")
        generate_json_report(reports_output_dir)

    if "cw" in args.reports:
        logger.info("Publishing CloudWatch metrics")
        generate_cw_report(reports_output_dir, args.cw_namespace, args.cw_region, args.cw_timestamp_day_start)


if __name__ == "__main__":
    main()
|
test_concurrency.py | # Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
"""Tests for concurrency libraries."""
import glob
import os
import random
import re
import sys
import threading
import time
from flaky import flaky
import pytest
import coverage
from coverage import env
from coverage.data import line_counts
from coverage.files import abs_file
from coverage.misc import import_local_file
from tests.coveragetest import CoverageTest
from tests.helpers import remove_files
# These libraries aren't always available, we'll skip tests if they aren't.
try:
import multiprocessing
except ImportError: # pragma: only jython
multiprocessing = None
try:
import eventlet
except ImportError:
eventlet = None
try:
import gevent
except ImportError:
gevent = None
try:
import greenlet
except ImportError: # pragma: only jython
greenlet = None
def measurable_line(l):
    """Is this a line of code coverage will measure?

    Not blank, not a comment, and not "else"
    """
    stripped = l.strip()
    if not stripped:
        return False
    if stripped.startswith(('#', 'else:')):
        return False
    if env.JYTHON and stripped.startswith(('try:', 'except:', 'except ', 'break', 'with ')):
        # Jython doesn't measure these statements.
        return False                    # pragma: only jython
    return True
def line_count(s):
    """How many measurable lines are in `s`?"""
    return sum(1 for line in s.splitlines() if measurable_line(line))
def print_simple_annotation(code, linenos):
    """Print the lines in `code` with X for each line number in `linenos`."""
    for lineno, line in enumerate(code.splitlines(), start=1):
        marker = "X" if lineno in linenos else " "
        print(" {} {}".format(marker, line))
class LineCountTest(CoverageTest):
    """Test the helpers here."""

    run_in_temp_dir = False  # these tests don't touch the filesystem

    def test_line_count(self):
        # 5 measurable lines: the assignment, the `if`, both branch prints and
        # the final print; blank, comment and `else:` lines don't count.
        CODE = """
            # Hey there!
            x = 1
            if x:
                print("hello")
            else:
                print("bye")

            print("done")
            """

        assert line_count(CODE) == 5
# The code common to all the concurrency models.
SUM_RANGE_Q = """
# Above this will be imports defining queue and threading.
class Producer(threading.Thread):
def __init__(self, limit, q):
threading.Thread.__init__(self)
self.limit = limit
self.q = q
def run(self):
for i in range(self.limit):
self.q.put(i)
self.q.put(None)
class Consumer(threading.Thread):
def __init__(self, q, qresult):
threading.Thread.__init__(self)
self.q = q
self.qresult = qresult
def run(self):
sum = 0
while "no peephole".upper():
i = self.q.get()
if i is None:
break
sum += i
self.qresult.put(sum)
def sum_range(limit):
q = queue.Queue()
qresult = queue.Queue()
c = Consumer(q, qresult)
p = Producer(limit, q)
c.start()
p.start()
p.join()
c.join()
return qresult.get()
# Below this will be something using sum_range.
"""
PRINT_SUM_RANGE = """
print(sum_range({QLIMIT}))
"""
# Import the things to use threads.
THREAD = """
import threading
import queue
"""
# Import the things to use eventlet.
EVENTLET = """
import eventlet.green.threading as threading
import eventlet.queue as queue
"""
# Import the things to use gevent.
GEVENT = """
from gevent import monkey
monkey.patch_thread()
import threading
import gevent.queue as queue
"""
# Uncomplicated code that doesn't use any of the concurrency stuff, to test
# the simple case under each of the regimes.
SIMPLE = """
total = 0
for i in range({QLIMIT}):
total += i
print(total)
"""
def cant_trace_msg(concurrency, the_module):
    """What might coverage.py say about a concurrency setting and imported module?"""
    # In the concurrency choices, "multiprocessing" doesn't count, so remove it.
    if "multiprocessing" in concurrency:
        parts = concurrency.split(",")
        parts.remove("multiprocessing")
        concurrency = ",".join(parts)

    if the_module is None:
        # We don't even have the underlying module installed, we expect
        # coverage to alert us to this fact.
        return (
            "Couldn't trace with concurrency=%s, "
            "the module isn't installed.\n" % concurrency
        )

    if env.C_TRACER or concurrency in ("thread", ""):
        # Fully supported: no complaint expected.
        return None

    return (
        "Can't support concurrency=%s with PyTracer, "
        "only threads are supported\n" % concurrency
    )
class ConcurrencyTest(CoverageTest):
    """Tests of the concurrency support in coverage.py."""

    QLIMIT = 1000  # upper bound fed to sum_range / SIMPLE via {QLIMIT}

    def try_some_code(self, code, concurrency, the_module, expected_out=None):
        """Run some concurrency testing code and see that it was all covered.

        `code` is the Python code to execute.  `concurrency` is the name of
        the concurrency regime to test it under.  `the_module` is the imported
        module that must be available for this to work at all.  `expected_out`
        is the text we expect the code to produce.

        """
        self.make_file("try_it.py", code)

        cmd = "coverage run --concurrency=%s try_it.py" % concurrency
        out = self.run_command(cmd)

        expected_cant_trace = cant_trace_msg(concurrency, the_module)

        if expected_cant_trace is not None:
            # The regime is unsupported; only the warning text should appear.
            assert out == expected_cant_trace
        else:
            # We can fully measure the code if we are using the C tracer, which
            # can support all the concurrency, or if we are using threads.
            if expected_out is None:
                expected_out = "%d\n" % (sum(range(self.QLIMIT)))
            print(code)
            assert out == expected_out

            # Read the coverage file and see that try_it.py has all its lines
            # executed.
            data = coverage.CoverageData(".coverage")
            data.read()

            # If the test fails, it's helpful to see this info:
            fname = abs_file("try_it.py")
            linenos = data.lines(fname)
            print(f"{len(linenos)}: {linenos}")
            print_simple_annotation(code, linenos)

            lines = line_count(code)
            assert line_counts(data)['try_it.py'] == lines

    def test_threads(self):
        code = (THREAD + SUM_RANGE_Q + PRINT_SUM_RANGE).format(QLIMIT=self.QLIMIT)
        self.try_some_code(code, "thread", threading)

    def test_threads_simple_code(self):
        code = SIMPLE.format(QLIMIT=self.QLIMIT)
        self.try_some_code(code, "thread", threading)

    def test_eventlet(self):
        code = (EVENTLET + SUM_RANGE_Q + PRINT_SUM_RANGE).format(QLIMIT=self.QLIMIT)
        self.try_some_code(code, "eventlet", eventlet)

    def test_eventlet_simple_code(self):
        code = SIMPLE.format(QLIMIT=self.QLIMIT)
        self.try_some_code(code, "eventlet", eventlet)

    def test_gevent(self):
        code = (GEVENT + SUM_RANGE_Q + PRINT_SUM_RANGE).format(QLIMIT=self.QLIMIT)
        self.try_some_code(code, "gevent", gevent)

    def test_gevent_simple_code(self):
        code = SIMPLE.format(QLIMIT=self.QLIMIT)
        self.try_some_code(code, "gevent", gevent)

    def test_greenlet(self):
        # Two greenlets ping-pong control; expected output is fixed.
        GREENLET = """\
            from greenlet import greenlet

            def test1(x, y):
                z = gr2.switch(x+y)
                print(z)

            def test2(u):
                print(u)
                gr1.switch(42)

            gr1 = greenlet(test1)
            gr2 = greenlet(test2)
            gr1.switch("hello", " world")
            """
        self.try_some_code(GREENLET, "greenlet", greenlet, "hello world\n42\n")

    def test_greenlet_simple_code(self):
        code = SIMPLE.format(QLIMIT=self.QLIMIT)
        self.try_some_code(code, "greenlet", greenlet)

    def test_bug_330(self):
        # Spawned greenlets are only weakly referenced; all should be collected.
        BUG_330 = """\
            from weakref import WeakKeyDictionary
            import eventlet

            def do():
                eventlet.sleep(.01)

            gts = WeakKeyDictionary()
            for _ in range(100):
                gts[eventlet.spawn(do)] = True
                eventlet.sleep(.005)

            eventlet.sleep(.1)
            print(len(gts))
            """
        self.try_some_code(BUG_330, "eventlet", eventlet, "0\n")
SQUARE_OR_CUBE_WORK = """
def work(x):
# Use different lines in different subprocesses.
if x % 2:
y = x*x
else:
y = x*x*x
return y
"""
SUM_RANGE_WORK = """
def work(x):
return sum_range((x+1)*100)
"""
MULTI_CODE = """
# Above this will be a definition of work().
import multiprocessing
import os
import time
import sys
def process_worker_main(args):
# Need to pause, or the tasks go too quickly, and some processes
# in the pool don't get any work, and then don't record data.
time.sleep(0.02)
ret = work(*args)
return os.getpid(), ret
if __name__ == "__main__": # pragma: no branch
# This if is on a single line so we can get 100% coverage
# even if we have no arguments.
if len(sys.argv) > 1: multiprocessing.set_start_method(sys.argv[1])
pool = multiprocessing.Pool({NPROCS})
inputs = [(x,) for x in range({UPTO})]
outputs = pool.imap_unordered(process_worker_main, inputs)
pids = set()
total = 0
for pid, sq in outputs:
pids.add(pid)
total += sq
print("%d pids, total = %d" % (len(pids), total))
pool.close()
pool.join()
"""
@pytest.mark.skipif(not multiprocessing, reason="No multiprocessing in this Python")
@flaky(max_runs=30)         # Sometimes a test fails due to inherent randomness. Try more times.
class MultiprocessingTest(CoverageTest):
    """Test support of the multiprocessing module."""

    def try_multiprocessing_code(
        self, code, expected_out, the_module, nprocs, concurrency="multiprocessing", args=""
    ):
        """Run code using multiprocessing, it should produce `expected_out`.

        The run is repeated for every available start method ("fork"/"spawn");
        `nprocs` is the expected number of worker data files left behind.
        """
        self.make_file("multi.py", code)
        self.make_file(".coveragerc", """\
            [run]
            concurrency = %s
            source = .
            """ % concurrency)

        for start_method in ["fork", "spawn"]:
            if start_method and start_method not in multiprocessing.get_all_start_methods():
                continue

            # Each start method gets a clean slate of data files.
            remove_files(".coverage", ".coverage.*")
            cmd = "coverage run {args} multi.py {start_method}".format(
                args=args, start_method=start_method,
            )
            out = self.run_command(cmd)
            expected_cant_trace = cant_trace_msg(concurrency, the_module)
            if expected_cant_trace is not None:
                assert out == expected_cant_trace
            else:
                assert out.rstrip() == expected_out
                # One data file per worker plus one for the main process.
                assert len(glob.glob(".coverage.*")) == nprocs + 1

                out = self.run_command("coverage combine")
                out_lines = out.splitlines()
                assert len(out_lines) == nprocs + 1
                assert all(
                    re.fullmatch(r"Combined data file \.coverage\..*\.\d+\.\d+", line)
                    for line in out_lines
                )
                out = self.run_command("coverage report -m")

                last_line = self.squeezed_lines(out)[-1]
                assert re.search(r"TOTAL \d+ 0 100%", last_line)

    def test_multiprocessing_simple(self):
        nprocs = 3
        upto = 30
        code = (SQUARE_OR_CUBE_WORK + MULTI_CODE).format(NPROCS=nprocs, UPTO=upto)
        total = sum(x*x if x%2 else x*x*x for x in range(upto))
        expected_out = f"{nprocs} pids, total = {total}"
        self.try_multiprocessing_code(code, expected_out, threading, nprocs)

    def test_multiprocessing_append(self):
        nprocs = 3
        upto = 30
        code = (SQUARE_OR_CUBE_WORK + MULTI_CODE).format(NPROCS=nprocs, UPTO=upto)
        total = sum(x*x if x%2 else x*x*x for x in range(upto))
        expected_out = f"{nprocs} pids, total = {total}"
        self.try_multiprocessing_code(code, expected_out, threading, nprocs, args="--append")

    def test_multiprocessing_and_gevent(self):
        nprocs = 3
        upto = 30
        # NOTE(review): despite the test name, this combines multiprocessing with
        # the *eventlet* imports and module -- confirm whether gevent was intended.
        code = (
            SUM_RANGE_WORK + EVENTLET + SUM_RANGE_Q + MULTI_CODE
        ).format(NPROCS=nprocs, UPTO=upto)
        total = sum(sum(range((x + 1) * 100)) for x in range(upto))
        expected_out = f"{nprocs} pids, total = {total}"
        self.try_multiprocessing_code(
            code, expected_out, eventlet, nprocs, concurrency="multiprocessing,eventlet"
        )

    def test_multiprocessing_with_branching(self):
        nprocs = 3
        upto = 30
        code = (SQUARE_OR_CUBE_WORK + MULTI_CODE).format(NPROCS=nprocs, UPTO=upto)
        total = sum(x*x if x%2 else x*x*x for x in range(upto))
        expected_out = f"{nprocs} pids, total = {total}"
        self.make_file("multi.py", code)
        self.make_file("multi.rc", """\
            [run]
            concurrency = multiprocessing
            branch = True
            omit = */site-packages/*
            """)

        for start_method in ["fork", "spawn"]:
            if start_method and start_method not in multiprocessing.get_all_start_methods():
                continue

            out = self.run_command(f"coverage run --rcfile=multi.rc multi.py {start_method}")
            assert out.rstrip() == expected_out

            out = self.run_command("coverage combine")
            out_lines = out.splitlines()
            assert len(out_lines) == nprocs + 1
            assert all(
                re.fullmatch(r"Combined data file \.coverage\..*\.\d+\.\d+", line)
                for line in out_lines
            )
            out = self.run_command("coverage report -m")

            # Branch reporting adds two columns, hence the wider TOTAL pattern.
            last_line = self.squeezed_lines(out)[-1]
            assert re.search(r"TOTAL \d+ 0 \d+ 0 100%", last_line)

    def test_multiprocessing_bootstrap_error_handling(self):
        # An exception during bootstrapping will be reported.
        self.make_file("multi.py", """\
            import multiprocessing
            if __name__ == "__main__":
                with multiprocessing.Manager():
                    pass
            """)
        self.make_file(".coveragerc", """\
            [run]
            concurrency = multiprocessing
            _crash = _bootstrap
            """)
        out = self.run_command("coverage run multi.py")
        assert "Exception during multiprocessing bootstrap init" in out
        assert "Exception: Crashing because called by _bootstrap" in out

    def test_bug890(self):
        # chdir in multiprocessing shouldn't keep us from finding the
        # .coveragerc file.
        self.make_file("multi.py", """\
            import multiprocessing, os, os.path
            if __name__ == "__main__":
                if not os.path.exists("./tmp"): os.mkdir("./tmp")
                os.chdir("./tmp")
                with multiprocessing.Manager():
                    pass
                print("ok")
            """)
        self.make_file(".coveragerc", """\
            [run]
            concurrency = multiprocessing
            """)
        out = self.run_command("coverage run multi.py")
        assert out.splitlines()[-1] == "ok"
def test_coverage_stop_in_threads():
    """cov.stop() must also stop tracing in threads other than the caller's."""
    has_started_coverage = []
    has_stopped_coverage = []

    def run_thread():           # pragma: nested
        """Check that coverage is stopping properly in threads."""
        deadline = time.time() + 5
        ident = threading.current_thread().ident
        if sys.gettrace() is not None:
            has_started_coverage.append(ident)
        while sys.gettrace() is not None:
            # Wait for coverage to stop
            time.sleep(0.01)
            if time.time() > deadline:
                # Give up after 5s rather than hang the test suite.
                return
        has_stopped_coverage.append(ident)

    cov = coverage.Coverage()
    cov.start()

    t = threading.Thread(target=run_thread)             # pragma: nested
    t.start()                                           # pragma: nested

    time.sleep(0.1)                                     # pragma: nested
    cov.stop()                                          # pragma: nested
    t.join()

    # The thread must have observed tracing both starting and stopping.
    assert has_started_coverage == [t.ident]
    assert has_stopped_coverage == [t.ident]
def test_thread_safe_save_data(tmpdir):
    # Non-regression test for: https://github.com/nedbat/coveragepy/issues/581

    # Create some Python modules and put them in the path
    modules_dir = tmpdir.mkdir('test_modules')
    module_names = [f"m{i:03d}" for i in range(1000)]
    for module_name in module_names:
        modules_dir.join(module_name + ".py").write("def f(): pass\n")

    # Shared variables for threads
    should_run = [True]
    imported = []

    old_dir = os.getcwd()
    os.chdir(modules_dir.strpath)
    try:
        # Make sure that all dummy modules can be imported.
        for module_name in module_names:
            import_local_file(module_name)

        def random_load():                          # pragma: nested
            """Import modules randomly to stress coverage."""
            while should_run[0]:
                module_name = random.choice(module_names)
                mod = import_local_file(module_name)
                mod.f()
                imported.append(mod)

        # Spawn some threads with coverage enabled and attempt to read the
        # results right after stopping coverage collection with the threads
        # still running.
        duration = 0.01
        for _ in range(3):
            cov = coverage.Coverage()
            cov.start()

            threads = [threading.Thread(target=random_load) for _ in range(10)]     # pragma: nested
            should_run[0] = True                    # pragma: nested
            for t in threads:                       # pragma: nested
                t.start()

            time.sleep(duration)                    # pragma: nested
            cov.stop()                              # pragma: nested

            # The following call used to crash with running background threads.
            cov.get_data()

            # Stop the threads
            should_run[0] = False
            for t in threads:
                t.join()

            # If nothing got imported the window was too short; widen it and retry.
            if (not imported) and duration < 10:    # pragma: only failure
                duration *= 2
    finally:
        os.chdir(old_dir)
        should_run[0] = False
|
win32comport_demo.py | # This is a simple serial port terminal demo.
#
# Its primary purpose is to demonstrate the native serial port access offered via
# win32file.
# It uses 3 threads:
# - The main thread, which cranks up the other 2 threads, then simply waits for them to exit.
# - The user-input thread - blocks waiting for a keyboard character, and when found sends it
# out the COM port. If the character is Ctrl+C, it stops, signalling the COM port thread to stop.
# - The COM port thread is simply listening for input on the COM port, and prints it to the screen.
# This demo uses userlapped IO, so that none of the read or write operations actually block (however,
# in this sample, the very next thing we do _is_ block - so it shows off the concepts even though it
# doesnt exploit them.
from win32file import * # The base COM port and file IO functions.
from win32event import * # We use events and the WaitFor[Multiple]Objects functions.
import win32con # constants.
import msvcrt # For the getch() function.
import threading
import sys
def FindModem():
    # Snoop over the comports, seeing if it is likely we have a modem.
    # Returns the port name (eg "COM2") or None when nothing answered.
    for i in range(1,5):
        port = "COM%d" % (i,)
        try:
            handle = CreateFile(port,
                                win32con.GENERIC_READ | win32con.GENERIC_WRITE,
                                0, # exclusive access
                                None, # no security
                                win32con.OPEN_EXISTING,
                                win32con.FILE_ATTRIBUTE_NORMAL,
                                None)
            # It appears that an available COM port will always succeed here,
            # just return 0 for the status flags.  We only care that it has _any_ status
            # flags (and therefore probably a real modem)
            if GetCommModemStatus(handle) != 0:
                return port
        except error:
            pass # No port, or modem status failed.
    return None
# A basic COM port file-like object (reads/writes use overlapped IO internally,
# but each operation waits for completion, so it behaves synchronously).
class SerialTTY:
    def __init__(self, port):
        # Accept either a port number (1 -> "COM1") or a name string.
        if type(port)==type(0):
            port = "COM%d" % (port,)
        self.handle = CreateFile(port,
                                 win32con.GENERIC_READ | win32con.GENERIC_WRITE,
                                 0, # exclusive access
                                 None, # no security
                                 win32con.OPEN_EXISTING,
                                 win32con.FILE_ATTRIBUTE_NORMAL | win32con.FILE_FLAG_OVERLAPPED,
                                 None)
        # Tell the port we want a notification on each char.
        SetCommMask(self.handle, EV_RXCHAR)
        # Setup a 4k buffer
        SetupComm(self.handle, 4096, 4096)
        # Remove anything that was there
        PurgeComm(self.handle, PURGE_TXABORT | PURGE_RXABORT | PURGE_TXCLEAR | PURGE_RXCLEAR )
        # Setup for overlapped IO.
        timeouts = 0xFFFFFFFF, 0, 1000, 0, 1000
        SetCommTimeouts(self.handle, timeouts)
        # Setup the connection info: 115200 baud, 8-N-1.
        dcb = GetCommState( self.handle )
        dcb.BaudRate = CBR_115200
        dcb.ByteSize = 8
        dcb.Parity = NOPARITY
        dcb.StopBits = ONESTOPBIT
        SetCommState(self.handle, dcb)
        print "Connected to %s at %s baud" % (port, dcb.BaudRate)

    def _UserInputReaderThread(self):
        # Reads keystrokes and writes them to the COM port until Ctrl+C (0x03).
        overlapped = OVERLAPPED()
        overlapped.hEvent = CreateEvent(None, 1, 0, None)
        try:
            while 1:
                ch = msvcrt.getch()
                if ord(ch)==3:
                    break
                WriteFile(self.handle, ch, overlapped)
                # Wait for the write to complete.
                WaitForSingleObject(overlapped.hEvent, INFINITE)
        finally:
            # Always signal the COM port thread to stop, even on error.
            SetEvent(self.eventStop)

    def _ComPortThread(self):
        # Echoes COM port input to stdout until eventStop is signalled.
        overlapped = OVERLAPPED()
        overlapped.hEvent = CreateEvent(None, 1, 0, None)
        while 1:
            # XXX - note we could _probably_ just use overlapped IO on the win32file.ReadFile() statement
            # XXX but this tests the COM stuff!
            rc, mask = WaitCommEvent(self.handle, overlapped)
            if rc == 0: # Character already ready!
                SetEvent(overlapped.hEvent)
            rc = WaitForMultipleObjects([overlapped.hEvent, self.eventStop], 0, INFINITE)
            if rc == WAIT_OBJECT_0:
                # Some input - read and print it
                flags, comstat = ClearCommError( self.handle )
                rc, data = ReadFile(self.handle, comstat.cbInQue, overlapped)
                WaitForSingleObject(overlapped.hEvent, INFINITE)
                sys.stdout.write(data)
            else:
                # Stop the thread!
                # Just in case the user input thread is still going, close stdout.
                sys.stdout.close()
                break

    def Run(self):
        # Runs the terminal: one thread per direction, returns when both finish.
        self.eventStop = CreateEvent(None, 0, 0, None)
        # Start the reader and writer threads.
        user_thread = threading.Thread(target = self._UserInputReaderThread)
        user_thread.start()
        com_thread = threading.Thread(target = self._ComPortThread)
        com_thread.start()
        user_thread.join()
        com_thread.join()
if __name__=='__main__':
    print "Serial port terminal demo - press Ctrl+C to exit"
    # Port comes from argv[1] if given, otherwise probe for a modem.
    if len(sys.argv)<=1:
        port = FindModem()
        if port is None:
            print "No COM port specified, and no modem could be found"
            print "Please re-run this script with the name of a COM port (eg COM3)"
            sys.exit(1)
    else:
        port = sys.argv[1]
    tty = SerialTTY(port)
    tty.Run()
|
main_window.py | #!/usr/bin/env python3
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2012 thomasv@gitorious
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import re
import sys, time, threading
import os, json, traceback
import shutil
import csv
from decimal import Decimal as PyDecimal # Qt 5.12 also exports Decimal
import base64
from functools import partial
from collections import OrderedDict
from PyQt5.QtGui import *
from PyQt5.QtCore import *
from PyQt5.QtWidgets import *
from electroncash import keystore, get_config
from electroncash.address import Address, ScriptOutput
from electroncash.bitcoin import COIN, TYPE_ADDRESS, TYPE_SCRIPT
from electroncash import networks
from electroncash.plugins import run_hook
from electroncash.i18n import _, ngettext, pgettext
from electroncash.util import (format_time, format_satoshis, PrintError,
format_satoshis_plain, format_satoshis_plain_nofloat,
NotEnoughFunds, NotEnoughFundsSlp, NotEnoughUnfrozenFundsSlp, ExcessiveFee,
UserCancelled, InvalidPassword, bh2u, bfh,
format_fee_satoshis, Weak, print_error)
import electroncash.web as web
from electroncash import Transaction
from electroncash import util, bitcoin, commands
from electroncash import paymentrequest
from electroncash.wallet import Multisig_Wallet, sweep_preparations
try:
from electroncash.plot import plot_history
except:
plot_history = None
import electroncash.web as web
from .amountedit import AmountEdit, BTCAmountEdit, MyLineEdit, BTCkBEdit, BTCSatsByteEdit
from .qrcodewidget import QRCodeWidget, QRDialog
from .qrtextedit import ShowQRTextEdit, ScanQRTextEdit
from .transaction_dialog import show_transaction
from .fee_slider import FeeSlider
from .popup_widget import ShowPopupLabel, KillPopupLabel, PopupWidget
from .util import *
import electroncash.slp as slp
from electroncash.slp_coinchooser import SlpCoinChooser
from electroncash.slp_checker import SlpTransactionChecker
from .amountedit import SLPAmountEdit
from electroncash.util import format_satoshis_nofloat
from .slp_create_token_genesis_dialog import SlpCreateTokenGenesisDialog
from .bfp_download_file_dialog import BfpDownloadFileDialog
from .bfp_upload_file_dialog import BitcoinFilesUploadDialog
try:
# pre-load QtMultimedia at app start, if possible
# this is because lazy-loading it from within Python
# callbacks led to crashes on Linux, likely due to
# bugs in PyQt5 (crashes wouldn't happen when testing
# with PySide2!).
from PyQt5.QtMultimedia import QCameraInfo
del QCameraInfo # defensive programming: not always available so don't keep name around
except ImportError as e:
pass # we tried to pre-load it, failure is ok; camera just won't be available
class StatusBarButton(QPushButton):
    """Flat, fixed-width icon button for the status bar; calls `func` on click or Return."""
    def __init__(self, icon, tooltip, func):
        QPushButton.__init__(self, icon, '')
        self.setToolTip(tooltip)
        self.setFlat(True)
        self.setMaximumWidth(25)
        self.clicked.connect(self.onPress)
        self.func = func
        self.setIconSize(QSize(25,25))
        self.setCursor(Qt.PointingHandCursor)

    def onPress(self, checked=False):
        '''Drops the unwanted PyQt5 "checked" argument'''
        self.func()

    def keyPressEvent(self, e):
        # Activate on the Return key as well as a mouse click.
        # NOTE(review): the event is not forwarded to QPushButton.keyPressEvent,
        # so default key handling (e.g. Space activation) is bypassed -- confirm intended.
        if e.key() == Qt.Key_Return:
            self.func()
from electroncash.paymentrequest import PR_PAID
class ElectrumWindow(QMainWindow, MessageBoxMixin, PrintError):
# Note: self.clean_up_connections automatically detects signals named XXX_signal and disconnects them on window close.
payment_request_ok_signal = pyqtSignal()
payment_request_error_signal = pyqtSignal()
new_fx_quotes_signal = pyqtSignal()
new_fx_history_signal = pyqtSignal()
network_signal = pyqtSignal(str, object)
alias_received_signal = pyqtSignal()
cashaddr_toggled_signal = pyqtSignal()
history_updated_signal = pyqtSignal()
labels_updated_signal = pyqtSignal() # note this signal occurs when an explicit update_labels() call happens. Interested GUIs should also listen for history_updated_signal as well which also indicates labels may have changed.
on_timer_signal = pyqtSignal() # functions wanting to be executed from timer_actions should connect to this signal, preferably via Qt.DirectConnection
status_icon_dict = dict() # app-globel cache of "status_*" -> QIcon instances (for update_status() speedup)
def __init__(self, gui_object, wallet):
QMainWindow.__init__(self)
self.gui_object = gui_object
self.wallet = wallet
self.config = config = gui_object.config
self.non_slp_wallet_warning_shown = False
self.force_use_single_change_addr = _('Change addresses behavior is not customizable for SLP wallets') if self.is_slp_wallet else False
if self.force_use_single_change_addr and not self.wallet.use_change:
self.wallet.use_change = True
self.wallet.storage.put('use_change', self.wallet.use_change)
self.network = gui_object.daemon.network
self.network.slp_validity_signal = self.gui_object.slp_validity_signal
self.network.slp_validation_fetch_signal = self.gui_object.slp_validation_fetch_signal
self.fx = gui_object.daemon.fx
self.invoices = wallet.invoices
self.contacts = wallet.contacts
self.tray = gui_object.tray
self.app = gui_object.app
self.cleaned_up = False
self.payment_request = None
self.checking_accounts = False
self.qr_window = None
self.not_enough_funds = False
self.not_enough_funds_slp = False
self.not_enough_unfrozen_funds_slp = False
self.op_return_toolong = False
self.internalpluginsdialog = None
self.externalpluginsdialog = None
self.hardwarewalletdialog = None
self.require_fee_update = False
self.tl_windows = []
self.tx_external_keypairs = {}
self._tx_dialogs = Weak.Set()
self._slp_dialogs = Weak.Set()
self.tx_update_mgr = TxUpdateMgr(self) # manages network callbacks for 'new_transaction' and 'verified2', and collates GUI updates from said callbacks as a performance optimization
self.is_schnorr_enabled = self.wallet.is_schnorr_enabled # This is a function -- Support for plugins that may be using the 4.0.3 & 4.0.4 API -- this function used to live in this class, before being moved to Abstract_Wallet.
self.send_tab_opreturn_widgets, self.receive_tab_opreturn_widgets = [], [] # defaults to empty list
self._shortcuts = Weak.Set() # keep track of shortcuts and disable them on close
self.create_status_bar()
self.need_update = threading.Event()
self.labels_need_update = threading.Event()
self.decimal_point = config.get('decimal_point', 8)
self.fee_unit = config.get('fee_unit', 0)
self.num_zeros = int(config.get('num_zeros',0))
self.completions = QStringListModel()
self.tabs = tabs = QTabWidget(self)
self.send_tab = self.create_send_tab()
self.receive_tab = self.create_receive_tab()
self.addresses_tab = self.create_addresses_tab()
self.utxo_tab = self.create_utxo_tab()
self.console_tab = self.create_console_tab()
self.contacts_tab = self.create_contacts_tab()
self.slp_mgt_tab = self.create_slp_mgt_tab()
self.converter_tab = self.create_converter_tab()
self.slp_history_tab = self.create_slp_history_tab()
self.slp_token_id = None
tabs.addTab(self.create_history_tab(), QIcon(":icons/tab_history.png"), _('History'))
tabs.addTab(self.send_tab, QIcon(":icons/tab_send.png"), _('Send'))
tabs.addTab(self.receive_tab, QIcon(":icons/tab_receive.png"), _('Receive'))
# clears/inits the opreturn widgets
self.on_toggled_opreturn(bool(self.config.get('enable_opreturn')))
def add_optional_tab(tabs, tab, icon, description, name, default=False):
tab.tab_icon = icon
tab.tab_description = description
tab.tab_pos = len(tabs)
tab.tab_name = name
if self.config.get('show_{}_tab'.format(name), default):
tabs.addTab(tab, icon, description.replace("&", ""))
add_optional_tab(tabs, self.addresses_tab, QIcon(":icons/tab_addresses.png"), _("&Addresses"), "addresses")
add_optional_tab(tabs, self.utxo_tab, QIcon(":icons/tab_coins.png"), _("Co&ins"), "utxo")
add_optional_tab(tabs, self.contacts_tab, QIcon(":icons/tab_contacts.png"), _("Con&tacts"), "contacts")
add_optional_tab(tabs, self.converter_tab, QIcon(":icons/tab_converter.svg"), _("Address Converter"), "converter", True)
add_optional_tab(tabs, self.console_tab, QIcon(":icons/tab_console.png"), _("Con&sole"), "console")
if self.is_slp_wallet:
add_optional_tab(tabs, self.slp_mgt_tab, QIcon(":icons/tab_slp_icon.png"), _("Tokens"), "tokens")
add_optional_tab(tabs, self.slp_history_tab, QIcon(":icons/tab_slp_icon.png"), _("SLP History"), "slp_history", True)
tabs.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
self.setCentralWidget(tabs)
if self.config.get("is_maximized"):
self.showMaximized()
self.init_menubar()
wrtabs = Weak.ref(tabs) # We use a weak reference here to help along python gc of QShortcut children: prevent the lambdas below from holding a strong ref to self.
self._shortcuts.add( QShortcut(QKeySequence("Ctrl+W"), self, self.close) )
# Below is now addded to the menu as Ctrl+R but we'll also support F5 like browsers do
self._shortcuts.add( QShortcut(QKeySequence("F5"), self, self.update_wallet) )
self._shortcuts.add( QShortcut(QKeySequence("Ctrl+PgUp"), self, lambda: wrtabs() and wrtabs().setCurrentIndex((wrtabs().currentIndex() - 1)%wrtabs().count())) )
self._shortcuts.add( QShortcut(QKeySequence("Ctrl+PgDown"), self, lambda: wrtabs() and wrtabs().setCurrentIndex((wrtabs().currentIndex() + 1)%wrtabs().count())) )
for i in range(tabs.count()):
self._shortcuts.add( QShortcut(QKeySequence("Alt+" + str(i + 1)), self, lambda i=i: wrtabs() and wrtabs().setCurrentIndex(i)) )
self.cashaddr_toggled_signal.connect(self.update_cashaddr_icon)
self.payment_request_ok_signal.connect(self.payment_request_ok)
self.payment_request_error_signal.connect(self.payment_request_error)
self.gui_object.update_available_signal.connect(self.on_update_available) # shows/hides the update_available_button, emitted by update check mechanism when a new version is available
self.history_list.setFocus(True)
self.slp_history_list.setFocus(True)
# update fee slider in case we missed the callback
self.fee_slider.update()
self.load_wallet()
if self.network:
self.network_signal.connect(self.on_network_qt)
interests = ['blockchain_updated', 'wallet_updated',
'new_transaction', 'status', 'banner', 'verified2',
'fee']
# To avoid leaking references to "self" that prevent the
# window from being GC-ed when closed, callbacks should be
# methods of this class only, and specifically not be
# partials, lambdas or methods of subobjects. Hence...
self.network.register_callback(self.on_network, interests)
# set initial message
self.console.showMessage(self.network.banner)
self.network.register_callback(self.on_quotes, ['on_quotes'])
self.network.register_callback(self.on_history, ['on_history'])
self.new_fx_quotes_signal.connect(self.on_fx_quotes)
self.new_fx_history_signal.connect(self.on_fx_history)
self.gui_object.slp_validation_fetch_signal.connect(self.slp_validation_fetch_slot, Qt.QueuedConnection)
gui_object.timer.timeout.connect(self.timer_actions)
self.fetch_alias()
@property
def is_slp_wallet(self):
    """Whether the underlying wallet file is an SLP-aware wallet."""
    return self.wallet.is_slp
_first_shown = True
def showEvent(self, event):
    """Qt show event: on the first accepted show, schedule the Linux/Windows
    high-DPI caveat message check.

    A weak reference is used so the single-shot timer callback cannot keep
    this window alive after it is closed.
    """
    super().showEvent(event)
    if event.isAccepted() and self._first_shown:
        self._first_shown = False
        weakSelf = Weak.ref(self)
        # do this immediately after this event handler finishes -- noop on everything but linux
        QTimer.singleShot(0, lambda: weakSelf() and weakSelf().gui_object.lin_win_maybe_show_highdpi_caveat_msg(weakSelf()))
def update_token_type_combo(self):
    """Rebuild the send/receive token-type combo boxes from the wallet's token registry.

    Both combos always get a leading 'None' entry (plain BCH); tokens whose
    'decimals' field is still the placeholder '?' are skipped.
    """
    combos = (self.token_type_combo, self.receive_token_type_combo)
    for combo in combos:
        combo.clear()
        combo.addItem(QIcon(':icons/tab_coins.png'), 'None', None)
    try:
        token_types = self.wallet.token_types
    except AttributeError:
        # wallet has no token_types attribute (non-SLP wallet) -- nothing to add
        return
    for token_id, info in sorted(token_types.items(), key=lambda kv: kv[1]['name']):
        if info['decimals'] == '?':
            continue  # entry whose 'decimals' field is still unknown
        for combo in combos:
            combo.addItem(QIcon(':icons/tab_slp_icon.png'), info['name'], token_id)
def on_history(self, event, *args):
    """Network callback for the 'on_history' event; marshals to the GUI thread.

    Emits new_fx_history_signal when the event carries no wallet argument,
    or when it concerns this window's wallet.
    """
    # NB: event should always be 'on_history'
    concerns_us = (not args) or (args[0] is self.wallet)
    if concerns_us:
        self.new_fx_history_signal.emit()
@rate_limited(3.0) # Rate limit to no more than once every 3 seconds
def on_fx_history(self):
    """GUI-thread handler for fresh fiat exchange-rate history: refresh the
    history/address views. No-op once the window has been cleaned up."""
    if self.cleaned_up: return
    self.history_list.refresh_headers()
    self.history_list.update()
    self.address_list.update()
    self.history_updated_signal.emit() # inform things like address_dialog that there's a new history
def on_quotes(self, b):
    """Network callback for 'on_quotes'; marshals the update to the GUI thread.

    The payload *b* is unused -- the GUI-thread handler re-reads current state.
    """
    self.new_fx_quotes_signal.emit()
@rate_limited(3.0) # Rate limit to no more than once every 3 seconds
def on_fx_quotes(self):
    """GUI-thread handler for new fiat quotes: refresh the status bar, re-fire
    the last-edited amount fields so they reformat at the new rate, and
    refresh history if it displays spot prices."""
    if self.cleaned_up: return
    self.update_status()
    # Refresh edits with the new rate
    edit = self.fiat_send_e if self.fiat_send_e.is_last_edited else self.amount_e
    edit.textEdited.emit(edit.text())
    edit = self.fiat_receive_e if self.fiat_receive_e.is_last_edited else self.receive_amount_e
    edit.textEdited.emit(edit.text())
    # History tab needs updating if it used spot
    if self.fx.history_used_spot:
        self.history_list.update()
        self.history_updated_signal.emit() # inform things like address_dialog that there's a new history
def toggle_tab(self, tab, forceStatus = 0):
    """Show or hide an optional tab, persist the choice, and update its
    View-menu action text.

    forceStatus: 0 = toggle the stored state, 1 = force show, 2 = force hide.
    """
    key = 'show_{}_tab'.format(tab.tab_name)
    if forceStatus == 1:
        show = True
    elif forceStatus == 2:
        show = False
    else:
        show = not self.config.get(key, False)
    self.config.set_key(key, show)
    fmt = _("Hide {tab_description}") if show else _("Show {tab_description}")
    tab.menu_action.setText(fmt.format(tab_description=tab.tab_description))
    if not show:
        self.tabs.removeTab(self.tabs.indexOf(tab))
        return
    # Insert at the canonical position: before the first tab whose tab_pos
    # is larger, keeping optional tabs in their original relative order.
    index = len(self.tabs)
    for i in range(len(self.tabs)):
        try:
            if tab.tab_pos < self.tabs.widget(i).tab_pos:
                index = i
                break
        except AttributeError:
            pass  # widget has no tab_pos attribute
    self.tabs.insertTab(index, tab, tab.tab_icon, tab.tab_description.replace("&", ""))
def push_top_level_window(self, window):
    """Register *window* as the current top-level parent for new dialogs.

    Used for e.g. the tx dialog box so that new dialogs are appropriately
    parented even when (as with hardware wallet prompts) the caller cannot
    pass an explicit parent window.
    """
    self.tl_windows.append(window)
def pop_top_level_window(self, window, *, raise_if_missing=False):
    """Remove *window* from the top-level window stack.

    A missing window is silently ignored by default, which makes cleanup
    handlers easier to write (fixes #1707); pass raise_if_missing=True to
    propagate the ValueError instead.
    """
    try:
        self.tl_windows.remove(window)
    except ValueError:
        if raise_if_missing:
            raise
def top_level_window(self):
    """Return the window that should parent new dialogs.

    The most recently pushed top-level window (e.g. an open tx dialog)
    overrides this window as the starting point.
    """
    if self.tl_windows:
        return self.top_level_window_recurse(self.tl_windows[-1])
    return self.top_level_window_recurse(None)
def diagnostic_name(self):
    """Name used in debug/log output: the base PrintError name plus the wallet basename."""
    return "%s/%s" % (PrintError.diagnostic_name(self), self.wallet.basename())
def is_hidden(self):
    """True when the window is minimized or not visible at all."""
    return self.isMinimized() or self.isHidden()
def show_or_hide(self):
    """Toggle window visibility: raise it when hidden/minimized, hide it otherwise."""
    if not self.is_hidden():
        self.hide()
    else:
        self.bring_to_top()
def bring_to_top(self):
    """Make the window visible and raise it above other windows."""
    self.show()
    self.raise_()
def on_error(self, exc_info):
    """Generic error handler (e.g. for TaskThread): print the traceback and
    show the error to the user.

    UserCancelled is deliberately ignored -- it signals that the user
    aborted the operation, not a failure.

    exc_info: a (type, value, traceback) triple as from sys.exc_info().
    """
    if not isinstance(exc_info[1], UserCancelled):
        try:
            traceback.print_exception(*exc_info)
        except OSError:
            # Issue #662, user got IO error.
            # We want them to still get the error displayed to them.
            pass
        self.show_error(str(exc_info[1]))
def on_network(self, event, *args):
    """Network callback dispatcher (not called on the GUI thread -- see the
    'Handle in GUI thread' forwarding below).

    Cheap bookkeeping (setting update flags, collating tx updates via
    tx_update_mgr) happens directly; anything requiring the GUI thread is
    forwarded through network_signal to on_network_qt.
    """
    #self.print_error("on_network:", event, *args)
    if event == 'wallet_updated':
        if args[0] is self.wallet:
            self.need_update.set()
    elif event == 'blockchain_updated':
        self.need_update.set()
    elif event == 'new_transaction':
        self.tx_update_mgr.notif_add(args) # added only if this wallet's tx
        if args[1] is self.wallet:
            self.network_signal.emit(event, args)
    elif event == 'verified2':
        self.tx_update_mgr.verif_add(args) # added only if this wallet's tx
    elif event in ['status', 'banner', 'fee']:
        # Handle in GUI thread
        self.network_signal.emit(event, args)
    else:
        self.print_error("unexpected network message:", event, args)
def on_network_qt(self, event, args=None):
    """GUI-thread half of on_network: handle events forwarded via network_signal."""
    if self.cleaned_up: return
    # Handle a network message in the GUI thread
    if event == 'status':
        self.update_status()
    elif event == 'banner':
        self.console.showMessage(args[0])
    elif event == 'fee':
        # fee updates require no GUI action here
        pass
    elif event == 'new_transaction':
        self.check_and_reset_receive_address_if_needed()
    else:
        self.print_error("unexpected network_qt signal:", event, args)
def fetch_alias(self):
    """Resolve the OpenAlias configured under the 'alias' config key.

    Resolution performs a network lookup, so it runs on a background daemon
    thread; the result is stored in self.alias_info and
    alias_received_signal is emitted when done. self.alias_info is reset to
    None immediately, whether or not an alias is configured.
    """
    self.alias_info = None
    alias = self.config.get('alias')
    if not alias:
        return
    alias = str(alias)
    def f():
        self.alias_info = self.contacts.resolve_openalias(alias)
        self.alias_received_signal.emit()
    # daemon=True replaces the deprecated Thread.setDaemon() call
    # (deprecated since Python 3.10).
    threading.Thread(target=f, daemon=True).start()
def _close_wallet(self):
    """Detach the wallet's worker thread and notify plugins via the
    'close_wallet' hook."""
    if self.wallet:
        self.print_error('close_wallet', self.wallet.storage.path)
        self.wallet.thread = None
    run_hook('close_wallet', self.wallet)
# Class-level (app-wide) flags for slp_validation_fetch_slot: each prompt
# below is shown at most once per application run, across all windows.
_gs_option_shown = False
_high_data_limit = 1048576 * 100 # 100 MB limit
_high_data_shown = False
def slp_validation_fetch_slot(self, total_bytes_received):
    """Slot for gui_object.slp_validation_fetch_signal.

    Shows (each at most once per app run, tracked by class-level flags) two
    prompts about the SLP Graph Search facility:

    1. An opt-in question, when the 'enabled' config key was never set, or
       was set to False without the user ever having been nagged.
    2. A high-data-usage warning, once total_bytes_received crosses
       _high_data_limit and the user hasn't opted out of the warning.

    total_bytes_received: cumulative bytes downloaded by the validator.
    """
    key = 'slp_validator_graphsearch_enabled'
    key2 = 'slp_validator_gs_did_nag_once_even_if_was_false'
    key3 = 'slp_never_warn_on_high_data'
    val, val2, val3 = self.config.get(key), self.config.get(key2), self.config.get(key3)
    # This if conditional asks once app-wide. But it only asks if:
    # - "gs enabled" key has never been configured (is None)
    #   *or*
    # - "gs enabled" key is False (indicating user configured it to False)
    #   but it never asked before (we nag the user at least once even if off,
    #   basically).
    if (not ElectrumWindow._gs_option_shown
            and (val is None or (val is False and not val2))):
        del val, val2
        ElectrumWindow._gs_option_shown = True
        self.config.set_key(key2, True) # turn off forced "ask at least once" mechanism
        res, neverask_chk = self.question(
            _("Speed up SLP validation using a Graph Search server?"),
            title=_("SLP Graph Search"),
            detail_text=_(
                "SLP validation can use a Graph Search server, making it"
                " blazingly fast. This does, however, mean that your client"
                " contacts an additional server on the internet, sharing"
                " with it a set of txids you are interested in knowing"
                " more about.\n\n"
                "Some extremely privacy-minded users may opt out of this"
                " speedy facility in light of that fact, and choose to use"
                " the older, slower method of simply relying on the"
                " ElectronX servers to do SLP token validation.\n\n"
                "If unsure what to answer now, you may always toggle this"
                " facility on/off from the Network Dialog later."),
            checkbox_text=_("Don't ask again"))
        if res:
            self.config.set_key(key, True)
        elif neverask_chk:
            # set to actual False rather than None to indicate we never
            # want to be asked.
            self.config.set_key(key, False)
    elif (val3 is None or val3 is False) \
            and total_bytes_received >= ElectrumWindow._high_data_limit \
            and not ElectrumWindow._high_data_shown:
        ElectrumWindow._high_data_shown = True
        res, neverask_chk = self.question(
            # Fixed: there was a missing space between "data." and "Disabling".
            _("SLP Graph Search has downloaded 100 MB in data and will continue to download data. "
              "Disabling Graph Search would slow down the rate of downloading.\n\n"
              "Continue using SLP Graph Search?"),
            title=_("High Data Usage"),
            detail_text=_(
                "SLP validation can use a Graph Search server, making it"
                " blazingly fast. This does, however, mean that your client"
                " uses additional data and bandwidth to download"
                " all of the transactions it needs to validate your tokens.\n\n"
                # Fixed: this sentence previously trailed off after
                # "...reduce the speed of " and ran into the next one.
                "Disabling Graph Search will reduce the speed of validation.\n\n"
                "If unsure what to answer now, you may always toggle this"
                " facility on/off from the Network Dialog later."),
            checkbox_text=_("Don't ask again")
        )
        # TODO: This message should also be displayed based on ElectrumX validation data downloaded
        if res is False:
            self.config.set_key(key, False)
        if neverask_chk:
            # set to actual False rather than None to indicate we never
            # want to be asked.
            self.config.set_key(key3, True)
def load_wallet(self):
    """Hook the freshly-opened wallet into the GUI.

    Starts the wallet's TaskThread, wires SLP validation signal emitters,
    refreshes every list/tab, restores window geometry/visibility, kicks
    off the invalid-testnet-wallet check, and finally runs the
    'load_wallet' plugin hook.
    """
    self.wallet.thread = TaskThread(self, self.on_error, name = self.wallet.diagnostic_name() + '/Wallet')
    self.wallet.ui_emit_validity_updated = self.gui_object.slp_validity_signal.emit
    self.wallet.ui_emit_validation_fetch = self.gui_object.slp_validation_fetch_signal.emit
    self.update_recently_visited(self.wallet.storage.path)
    # address used to create a dummy transaction and estimate transaction fee
    self.history_list.update()
    self.address_list.update()
    self.utxo_list.update()
    self.need_update.set()
    # update menus
    self.seed_menu.setEnabled(self.wallet.has_seed())
    self.update_lock_icon()
    self.update_buttons_on_seed()
    self.update_console()
    self.clear_receive_tab()
    self.request_list.update()
    if self.is_slp_wallet:
        self.slp_history_list.update()
        self.token_list.update()
        self.update_token_type_combo()
    self.tabs.show()
    self.init_geometry()
    if self.config.get('hide_gui') and self.tray.isVisible():
        self.hide()
    else:
        self.show()
    if self._is_invalid_testnet_wallet():
        # bad pre-3.3.6 testnet wallet: stop SPV for it and warn the user
        self.gui_object.daemon.stop_wallet(self.wallet.storage.path)
        self._rebuild_history_action.setEnabled(False)
        self._warn_if_invalid_testnet_wallet()
    self.watching_only_changed()
    self.history_updated_signal.emit() # inform things like address_dialog that there's a new history
    if self.is_slp_wallet:
        # SLP wallets force the SLP address format and always show token tabs
        self.toggle_cashaddr(2, True)
        self.toggle_tab(self.slp_mgt_tab, 1)
        self.toggle_tab(self.slp_history_tab, 1)
    else:
        self.toggle_cashaddr(1, True)
    self.update_receive_address_widget()
    self.address_list.update()
    self.utxo_list.update()
    self.slp_mgt_tab.update()
    self.slp_history_tab.update()
    self.update_cashaddr_icon()
    run_hook('load_wallet', self.wallet, self)
def init_geometry(self):
    """Restore the saved window geometry, falling back to a sane default.

    The stored 'winpos-qt' value may be missing, malformed, or refer to a
    screen that is no longer attached; any of those cases falls back to the
    default 840x400 geometry. (Previously this used a bare `except:` and an
    `assert`, which is stripped under `python -O`.)
    """
    winpos = self.wallet.storage.get("winpos-qt")
    try:
        screen = self.app.desktop().screenGeometry()
        if winpos is None or not screen.contains(QRect(*winpos)):
            raise ValueError("stored geometry missing or off-screen")
        self.setGeometry(*winpos)
    except Exception:
        self.print_error("using default geometry")
        self.setGeometry(100, 100, 840, 400)
def watching_only_changed(self):
    """Refresh the window title and menu enablement/visibility to match the
    wallet's watching-only and capability state."""
    title = '%s %s - %s' % (networks.net.TITLE,
                            self.wallet.electrum_version,
                            self.wallet.basename())
    extra = [self.wallet.storage.get('wallet_type', '?')]
    if self.wallet.is_watching_only():
        self.warn_if_watching_only()
        extra.append(_('watching only'))
    title += ' [%s]'% ', '.join(extra)
    self.setWindowTitle(title)
    self.password_menu.setEnabled(self.wallet.can_change_password())
    self.import_privkey_menu.setVisible(self.wallet.can_import_privkey())
    self.import_address_menu.setVisible(self.wallet.can_import_address())
    self.export_menu.setEnabled(self.wallet.can_export())
def warn_if_watching_only(self):
    """Show an informational popup if this wallet cannot spend (watching-only)."""
    if self.wallet.is_watching_only():
        msg = ' '.join([
            _("This wallet is watching-only."),
            _("This means you will not be able to spend Bitcoin Cash with it."),
            _("Make sure you own the seed phrase or the private keys, before you request Bitcoin Cash to be sent to this wallet.")
        ])
        self.show_warning(msg, title=_('Information'))
def _is_invalid_testnet_wallet(self):
    """Return True when this is a testnet wallet whose master public key is
    in the invalid pre-3.3.6 xpub format (see _warn_if_invalid_testnet_wallet)."""
    if not networks.net.TESTNET:
        return False
    getter = getattr(self.wallet, 'get_master_public_key', None)
    xkey = getter() if getter else None
    if not xkey:
        return False
    from electroncash.bitcoin import deserialize_xpub, InvalidXKeyFormat
    try:
        deserialize_xpub(xkey)
    except InvalidXKeyFormat:
        return True
    return False
def _warn_if_invalid_testnet_wallet(self):
    ''' This was added after the upgrade from the bad xpub testnet wallets
    to the good tpub testnet wallet format in version 3.3.6. See #1164.
    We warn users if they are using the bad wallet format and instruct
    them on how to upgrade their wallets.

    Returns True when the wallet is the bad pre-3.3.6 format. '''
    is_old_bad = self._is_invalid_testnet_wallet()
    if is_old_bad:
        msg = ' '.join([
            _("This testnet wallet has an invalid master key format."),
            _("(Old versions of Electron Cash before 3.3.6 produced invalid testnet wallets)."),
            '<br><br>',
            _("In order to use this wallet without errors with this version of EC, please <b>re-generate this wallet from seed</b>."),
            "<br><br><em><i>~SPV stopped~</i></em>"
        ])
        self.show_critical(msg, title=_('Invalid Master Key'), rich_text=True)
    return is_old_bad
def _warn_slp_prefers_slp_wallets_if_not_slp_wallet(self):
    """Warn (once per window, tracked by non_slp_wallet_warning_shown) when
    SLP tokens were detected in an older non-SLP wallet file."""
    if not self.is_slp_wallet and not self.non_slp_wallet_warning_shown:
        msg = '\n\n'.join([
            _("WARNING: SLP Tokens Disabled."),
            _("SLP tokens were detected in this older style wallet file and this version does not allow use of SLP tokens for your protection."),
            _("Please install version 3.4.6 to create a new SLP wallet file and then transfer the tokens from this wallet file to the new 3.4.6 style wallet file."),
            _("Why? This is because Electron Cash SLP versions 3.4.3 and later all include a significant security improvement for SLP tokens. That is, all standard wallet files created with 3.4.3 and later use BIP-44 key derivation path m/44'/245' to reduce the risk of burning SLP tokens. Taking no action could result in burning your tokens if this wallet's seed is imported into a non-SLP aware wallet."),
            _('''If you're wondering "what do I have to do?":'''),
            _("If you want to recover the SLP tokens in this wallet file you need to install version 3.4.6 of this software and follow the instructions provided above.")
        ])
        self.show_warning(msg, title=_("SLP Tokens Detected in a Non-SLP Wallet"))
        self.non_slp_wallet_warning_shown = True
def open_wallet(self):
    """File->Open handler: prompt for a wallet file and open it in a new window.

    A .txn file selected here is routed to the raw-transaction loader
    instead of being treated as a wallet.
    """
    try:
        wallet_folder = self.get_wallet_folder()
    except FileNotFoundError as e:
        self.show_error(str(e))
        return
    if not os.path.exists(wallet_folder):
        wallet_folder = None
    filename, __ = QFileDialog.getOpenFileName(self, "Select your wallet file", wallet_folder)
    if not filename:
        return
    if filename.lower().endswith('.txn'):
        # they did File -> Open on a .txn, just do that.
        self.do_process_from_file(fileName=filename)
        return
    self.gui_object.new_window(filename)
def backup_wallet(self):
    """File->Save Copy As handler: copy the wallet file to a user-chosen path,
    reporting success or failure via message boxes."""
    path = self.wallet.storage.path
    wallet_folder = os.path.dirname(path)
    filename, __ = QFileDialog.getSaveFileName(self, _('Enter a filename for the copy of your wallet'), wallet_folder)
    if not filename:
        return
    new_path = os.path.join(wallet_folder, filename)
    if new_path != path:
        try:
            # Copy file contents
            shutil.copyfile(path, new_path)
            # Copy file attributes if possible
            # (not supported on targets like Flatpak documents)
            try:
                shutil.copystat(path, new_path)
            except (IOError, os.error):
                pass
            self.show_message(_("A copy of your wallet file was created in")+" '%s'" % str(new_path), title=_("Wallet backup created"))
        except (IOError, os.error) as reason:
            self.show_critical(_("Electron Cash was unable to copy your wallet file to the specified location.") + "\n" + str(reason), title=_("Unable to create backup"))
def update_recently_visited(self, filename):
    """Record *filename* as most-recently-opened and rebuild the
    File->Open Recent menu.

    Keeps at most 5 entries, drops paths that no longer exist, and binds
    Ctrl+1..Ctrl+5 shortcuts to the menu actions.
    """
    recent = self.config.get('recently_open', [])
    try:
        # Sanity-probe the config value: a corrupt entry (non-iterable, or
        # with unorderable contents) makes us start over with a fresh list.
        # (Was a bare `except:`, which also swallowed KeyboardInterrupt.)
        sorted(recent)
    except Exception:
        recent = []
    if filename in recent:
        recent.remove(filename)
    recent.insert(0, filename)
    # Keep only paths that still exist, most recent first, capped at 5.
    recent = [path for path in recent if os.path.exists(path)][:5]
    self.config.set_key('recently_open', recent)
    self.recently_visited_menu.clear()
    gui_object = self.gui_object
    for i, k in enumerate(sorted(recent)):
        b = os.path.basename(k)
        def loader(k):
            # bind k via the default-arg-free closure factory to avoid
            # the late-binding-lambda pitfall
            return lambda: gui_object.new_window(k)
        self.recently_visited_menu.addAction(b, loader(k)).setShortcut(QKeySequence("Ctrl+%d"%(i+1)))
    self.recently_visited_menu.setEnabled(len(recent))
def get_wallet_folder(self):
    """Delegate to the application object for the default wallet directory."""
    return self.gui_object.get_wallet_folder()
def new_wallet(self):
    """File->New/Restore handler: open the wallet-creation wizard at a fresh path."""
    try:
        full_path = self.gui_object.get_new_wallet_path()
    except FileNotFoundError as e:
        # e.g. the wallet directory could not be determined/created
        self.show_error(str(e))
        return
    self.gui_object.start_new_window(full_path, None)
def init_menubar(self):
    """Build the main window menu bar: File, Wallet, View, Tools and Help menus.

    Also stores several QAction/QMenu references on self (password_menu,
    seed_menu, import/export menus, etc.) so other code can enable/disable
    them as wallet capabilities change.
    """
    menubar = self.menuBar()
    menubar.setObjectName(self.diagnostic_name() + ".QMenuBar")
    destroyed_print_error(menubar)
    file_menu = menubar.addMenu(_("&File"))
    self.recently_visited_menu = file_menu.addMenu(_("Open &Recent"))
    file_menu.addAction(_("&Open") + "...", self.open_wallet).setShortcut(QKeySequence.Open)
    file_menu.addAction(_("&New/Restore") + "...", self.new_wallet).setShortcut(QKeySequence.New)
    file_menu.addAction(_("&Save Copy As") + "...", self.backup_wallet).setShortcut(QKeySequence.SaveAs)
    file_menu.addAction(_("&Delete") + "...", self.remove_wallet)
    file_menu.addSeparator()
    file_menu.addAction(_("&Quit"), self.close).setShortcut(QKeySequence.Quit)
    wallet_menu = menubar.addMenu(_("&Wallet"))
    wallet_menu.addAction(_("&Information"), self.show_master_public_keys, QKeySequence("Ctrl+I"))
    wallet_menu.addSeparator()
    self.password_menu = wallet_menu.addAction(_("&Password") + "...", self.change_password_dialog)
    self.seed_menu = wallet_menu.addAction(_("&Seed"), self.show_seed_dialog)
    self.private_keys_menu = wallet_menu.addMenu(_("Private Keys"))
    self.private_keys_menu.addAction(_("&Sweep") + "...", self.sweep_key_dialog).setDisabled(True) # always disable in SLP for now
    self.import_privkey_menu = self.private_keys_menu.addAction(_("&Import") + "...", self.do_import_privkey)
    self.export_menu = self.private_keys_menu.addMenu(_("&Export"))
    self.export_menu.addAction(_("&WIF Plaintext") + "...", self.export_privkeys_dialog)
    self.export_menu.addAction(_("&BIP38 Encrypted") + "...", self.export_bip38_dialog)
    self.import_address_menu = wallet_menu.addAction(_("Import addresses") + "...", self.import_addresses)
    wallet_menu.addSeparator()
    self._rebuild_history_action = wallet_menu.addAction(_("&Rebuild History") + "...", self.rebuild_history)
    self._scan_beyond_gap_action = wallet_menu.addAction(_("Scan &More Addresses..."), self.scan_beyond_gap)
    self._scan_beyond_gap_action.setEnabled(bool(self.wallet.is_deterministic() and self.network))
    wallet_menu.addSeparator()
    labels_menu = wallet_menu.addMenu(_("&Labels"))
    labels_menu.addAction(_("&Import") + "...", self.do_import_labels)
    labels_menu.addAction(_("&Export") + "...", self.do_export_labels)
    contacts_menu = wallet_menu.addMenu(_("&Contacts"))
    contacts_menu.addAction(_("&New") + "...", self.new_contact_dialog)
    contacts_menu.addAction(_("Import") + "...", lambda: self.contact_list.import_contacts())
    contacts_menu.addAction(_("Export") + "...", lambda: self.contact_list.export_contacts())
    invoices_menu = wallet_menu.addMenu(_("Invoices"))
    invoices_menu.addAction(_("Import") + "...", lambda: self.invoice_list.import_invoices())
    hist_menu = wallet_menu.addMenu(_("&History"))
    #hist_menu.addAction(_("Plot"), self.plot_history_dialog).setEnabled(plot_history is not None)
    hist_menu.addAction(_("Export") + "...", self.export_history_dialog)
    wallet_menu.addSeparator()
    wallet_menu.addAction(_("&Find"), self.toggle_search, QKeySequence("Ctrl+F"))
    wallet_menu.addAction(_("Refresh GUI"), self.update_wallet, QKeySequence("Ctrl+R"))
    def add_toggle_action(view_menu, tab):
        # Adds a Show/Hide action for an optional tab; toggle_tab() keeps
        # the action text in sync afterwards.
        is_shown = self.tabs.indexOf(tab) > -1
        item_format = _("Hide {tab_description}") if is_shown else _("Show {tab_description}")
        item_name = item_format.format(tab_description=tab.tab_description)
        tab.menu_action = view_menu.addAction(item_name, lambda: self.toggle_tab(tab))
    view_menu = menubar.addMenu(_("&View"))
    add_toggle_action(view_menu, self.addresses_tab)
    add_toggle_action(view_menu, self.utxo_tab)
    add_toggle_action(view_menu, self.contacts_tab)
    add_toggle_action(view_menu, self.converter_tab)
    add_toggle_action(view_menu, self.console_tab)
    if self.is_slp_wallet:
        add_toggle_action(view_menu, self.slp_mgt_tab)
        add_toggle_action(view_menu, self.slp_history_tab)
    tools_menu = menubar.addMenu(_("&Tools"))
    prefs_tit = _("Preferences") + "..."
    a = tools_menu.addAction(prefs_tit, self.settings_dialog, QKeySequence("Ctrl+,") ) # Note: on macOS this hotkey sequence won't be shown in the menu (since it's reserved by the system), but will still work. :/
    if sys.platform == 'darwin':
        # This turns off the heuristic matching based on name and keeps the
        # "Preferences" action out of the application menu and into the
        # actual menu we specified on macOS.
        a.setMenuRole(QAction.NoRole)
    gui_object = self.gui_object
    weakSelf = Weak.ref(self)
    tools_menu.addAction(_("&Network") + "...", lambda: gui_object.show_network_dialog(weakSelf()), QKeySequence("Ctrl+K"))
    tools_menu.addAction(_("Optional &Features") + "...", self.internal_plugins_dialog, QKeySequence("Shift+Ctrl+P"))
    tools_menu.addAction(_("Installed &Plugins") + "...", self.external_plugins_dialog, QKeySequence("Ctrl+P"))
    if sys.platform in ('linux', 'linux2', 'linux3'):
        tools_menu.addSeparator()
        tools_menu.addAction(_("&Hardware Wallet Support..."), self.hardware_wallet_support)
    tools_menu.addSeparator()
    tools_menu.addAction(_("&Sign/Verify Message") + "...", self.sign_verify_message)
    tools_menu.addAction(_("&Encrypt/Decrypt Message") + "...", self.encrypt_message)
    tools_menu.addSeparator()
    tools_menu.addAction(_("Upload a file using BFP"), lambda: BitcoinFilesUploadDialog(self, None, True, "Upload a File Using BFP"))
    tools_menu.addAction(_("Download a file using BFP"), lambda: BfpDownloadFileDialog(self,))
    tools_menu.addSeparator()
    paytomany_menu = tools_menu.addAction(_("&Pay to Many"), self.paytomany, QKeySequence("Ctrl+M"))
    raw_transaction_menu = tools_menu.addMenu(_("&Load Transaction"))
    raw_transaction_menu.addAction(_("From &File") + "...", self.do_process_from_file)
    raw_transaction_menu.addAction(_("From &Text") + "...", self.do_process_from_text, QKeySequence("Ctrl+T"))
    raw_transaction_menu.addAction(_("From the &Blockchain") + "...", self.do_process_from_txid, QKeySequence("Ctrl+B"))
    raw_transaction_menu.addAction(_("From &QR Code") + "...", self.read_tx_from_qrcode)
    self.raw_transaction_menu = raw_transaction_menu
    run_hook('init_menubar_tools', self, tools_menu)
    help_menu = menubar.addMenu(_("&Help"))
    help_menu.addAction(_("&About"), self.show_about)
    help_menu.addAction(_("About Qt"), self.app.aboutQt)
    help_menu.addAction(_("&Check for Updates"), lambda: self.gui_object.show_update_checker(self))
    help_menu.addAction(_("&Official Website"), lambda: webopen("https://electroncash.org"))
    help_menu.addSeparator()
    help_menu.addAction(_("Documentation"), lambda: webopen("http://electroncash.readthedocs.io/")).setShortcut(QKeySequence.HelpContents)
    help_menu.addAction(_("&Report Bug..."), self.show_report_bug)
    help_menu.addSeparator()
    help_menu.addAction(_("&Donate to Server") + "...", self.donate_to_server)
def donate_to_server(self):
    """Help->Donate handler.

    Collects donation addresses from the connected blockchain server and
    from plugins (via the 'donation_address' hook); pays directly if there
    is exactly one, otherwise asks the user to choose.
    """
    if self.gui_object.warn_if_no_network(self):
        return
    d = {}
    spv_address = self.network.get_donation_address()
    spv_prefix = _("Blockchain Server")
    donation_for = _("Donation for")
    if spv_address:
        host = self.network.get_parameters()[0]
        d[spv_prefix + ": " + host] = spv_address
    plugin_servers = run_hook('donation_address', self, multi=True)
    for tup in plugin_servers:
        # each hook result must be a (description, Address) pair
        if not isinstance(tup, (list, tuple)) or len(tup) != 2:
            continue
        desc, address = tup
        if (desc and address and isinstance(desc, str) and isinstance(address, Address)
                and desc not in d and not desc.lower().startswith(spv_prefix.lower())):
            d[desc] = address.to_ui_string()
    def do_payto(desc):
        # Fill the send tab via a payment URI for the chosen description.
        addr = d[desc]
        # The message is intentionally untranslated, leave it like that
        self.pay_to_URI('{pre}:{addr}?message={donation_for} {desc}'
                        .format(pre = networks.net.CASHADDR_PREFIX,
                                addr = addr,
                                donation_for = donation_for,
                                desc = desc))
    if len(d) == 1:
        do_payto(next(iter(d.keys())))
    elif len(d) > 1:
        choices = tuple(d.keys())
        index = self.query_choice(_('Please select which server you would like to donate to:'), choices, add_cancel_button = True)
        if index is not None:
            do_payto(choices[index])
    else:
        self.show_error(_('No donation address for this server'))
def show_about(self):
    """Display the Help->About dialog."""
    QMessageBox.about(self, "Electron Cash SLP",
        "<p><font size=+3><b>Electron Cash SLP</b></font></p><p>" + _("Version") + f" {self.wallet.electrum_version}" + "</p>" +
        '<span style="font-size:11pt; font-weight:500;"><p>' +
        _("Copyright © {year_start}-{year_end} Electron Cash LLC and the Electron Cash developers.").format(year_start=2017, year_end=2020) +
        "</p><p>" + _("darkdetect for macOS © 2019 Alberto Sottile") + "</p>"
        "</span>" +
        '<span style="font-weight:200;"><p>' +
        _("Electron Cash's focus is speed, with low resource usage and simplifying Bitcoin Cash. You do not need to perform regular backups, because your wallet can be recovered from a secret phrase that you can memorize or write on paper. Startup times are instant because it operates in conjunction with high-performance servers that handle the most complicated parts of the Bitcoin Cash system.") +
        "</p></span>"
    )
def show_report_bug(self):
    """Display the Help->Report Bug dialog with a link to the issue tracker."""
    msg = ' '.join([
        _("Please report any bugs as issues on github:<br/>"),
        "<a href=https://github.com/simpleledger/Electron-Cash-SLP/issues>https://github.com/simpleledger/Electron-Cash-SLP/issues</a><br/><br/>",
        _("Before reporting a bug, upgrade to the most recent version of Electron Cash (latest release or git HEAD), and include the version number in your report."),
        _("Try to explain not only what the bug is, but how it occurs.")
    ])
    self.show_message(msg, title="Electron Cash - " + _("Reporting Bugs"), rich_text = True)
def notify(self, message):
    """Forward a desktop/tray notification request to the application object."""
    self.gui_object.notify(message)
# Custom wrappers for getOpenFileName and getSaveFileName that remember the path selected by the user.
def getOpenFileName(self, title, filter = ""):
    """Open-file dialog wrapper that remembers the last-used directory
    in this window's config (see static_getOpenFileName)."""
    return __class__.static_getOpenFileName(title=title, filter=filter, config=self.config, parent=self)
def getSaveFileName(self, title, filename, filter = ""):
    """Save-file dialog wrapper that remembers the last-used directory
    in this window's config (see static_getSaveFileName)."""
    return __class__.static_getSaveFileName(title=title, filename=filename, filter=filter, config=self.config, parent=self)
@staticmethod
def static_getOpenFileName(*, title, parent=None, config=None, filter=""):
    """Open-file dialog that remembers the last-used directory via the
    'io_dir' config key; falls back to the user's home directory."""
    config = config or get_config()
    home = os.path.expanduser('~')
    directory = config.get('io_dir', home) if config else home
    fileName, __ = QFileDialog.getOpenFileName(parent, title, directory, filter)
    if fileName and config and directory != os.path.dirname(fileName):
        config.set_key('io_dir', os.path.dirname(fileName), True)
    return fileName
@staticmethod
def static_getSaveFileName(*, title, filename, parent=None, config=None, filter=""):
    """Save-file dialog that remembers the last-used directory via the
    'io_dir' config key; the suggested path is directory/filename."""
    config = config or get_config()
    home = os.path.expanduser('~')
    directory = config.get('io_dir', home) if config else home
    path = os.path.join(directory, filename)
    fileName, __ = QFileDialog.getSaveFileName(parent, title, path, filter)
    if fileName and config and directory != os.path.dirname(fileName):
        config.set_key('io_dir', os.path.dirname(fileName), True)
    return fileName
def timer_actions(self):
    """Periodic housekeeping driven by gui_object.timer."""
    # Note this runs in the GUI thread
    if self.need_update.is_set():
        self._update_wallet() # will clear flag when it runs. (also clears labels_need_update as well)
    if self.labels_need_update.is_set():
        self._update_labels() # will clear flag when it runs.
    # resolve aliases
    # FIXME this is a blocking network call that has a timeout of 5 sec
    self.payto_e.resolve()
    # update fee
    if self.require_fee_update:
        self.do_update_fee()
        self.require_fee_update = False
    # hook for other classes to be called here. For example the tx_update_mgr is called here (see TxUpdateMgr.do_check).
    self.on_timer_signal.emit()
def format_amount(self, x, is_diff=False, whitespaces=False):
    """Format the satoshi amount *x* for display, honoring the user's
    num_zeros and decimal_point settings."""
    return format_satoshis(x, self.num_zeros, self.decimal_point, is_diff=is_diff, whitespaces=whitespaces)
def format_amount_and_units(self, amount, is_diff=False):
    """Format *amount* in the base unit, appending the fiat equivalent in
    parentheses when the exchange-rate layer provides one."""
    text = '{} {}'.format(self.format_amount(amount, is_diff=is_diff), self.base_unit())
    fiat = self.fx.format_amount_and_units(amount, is_diff=is_diff)
    if text and fiat:
        text += ' (%s)' % fiat
    return text
def format_fee_rate(self, fee_rate):
    """Format *fee_rate* as a sat/byte string.

    fee_rate is divided by 1000 before display -- presumably it arrives in
    sat/kB (TODO: confirm against callers).
    """
    sats_per_byte = format_fee_satoshis(fee_rate/1000, max(self.num_zeros, 1))
    return _('{sats_per_byte} sat/byte').format(sats_per_byte=sats_per_byte)
def get_decimal_point(self):
    """Return the configured decimal-point setting used for amount display."""
    return self.decimal_point
def base_unit(self):
    """Return the display name of the unit that corresponds to the current
    decimal_point setting.

    Raises Exception if decimal_point doesn't map to a known unit.
    """
    if self.decimal_point in util.inv_base_units:
        return util.inv_base_units[self.decimal_point]
    raise Exception('Unknown base unit')
    def connect_fields(self, window, btc_e, fiat_e, fee_e):
        """Cross-wire a crypto amount edit and a fiat amount edit so editing
        either one recomputes the other from the current exchange rate.

        window: window whose update_fee() is invoked when the BTC side
            changes and a fee widget is present.
        btc_e:  the crypto amount edit.
        fiat_e: the fiat amount edit.
        fee_e:  optional fee widget, cleared when no rate is available.
        """
        def edit_changed(edit):
            # The 'follows' attribute guards against infinite signal
            # recursion: it is set on the peer widget while we update it
            # programmatically below.
            if edit.follows:
                return
            edit.setStyleSheet(ColorScheme.DEFAULT.as_stylesheet())
            fiat_e.is_last_edited = (edit == fiat_e)
            amount = edit.get_amount()
            rate = self.fx.exchange_rate() if self.fx else None
            if rate is None or amount is None:
                # no exchange rate (or empty field): blank the dependent fields
                if edit is fiat_e:
                    btc_e.setText("")
                    if fee_e:
                        fee_e.setText("")
                else:
                    fiat_e.setText("")
            else:
                if edit is fiat_e:
                    btc_e.follows = True
                    btc_e.setAmount(int(amount / PyDecimal(rate) * COIN))
                    btc_e.setStyleSheet(ColorScheme.BLUE.as_stylesheet())
                    btc_e.follows = False
                    if fee_e:
                        window.update_fee()
                else:
                    fiat_e.follows = True
                    fiat_e.setText(self.fx.ccy_amount_str(
                        amount * PyDecimal(rate) / COIN, False))
                    fiat_e.setStyleSheet(ColorScheme.BLUE.as_stylesheet())
                    fiat_e.follows = False
        btc_e.follows = False
        fiat_e.follows = False
        fiat_e.textChanged.connect(partial(edit_changed, fiat_e))
        btc_e.textChanged.connect(partial(edit_changed, btc_e))
        fiat_e.is_last_edited = False
_network_status_tip_dict = dict()
    def update_status(self):
        """Rebuild the status bar from the current wallet & network state.

        Updates the tray tooltip, balance label, network status icon and
        tooltip, address-format indicator, and the seed-backup button.
        Icons and tooltip strings are cached at class level on first use.
        Safe to call repeatedly; no-op if no wallet is loaded.
        """
        if not self.wallet:
            return
        icon_dict = ElectrumWindow.status_icon_dict
        if not icon_dict:
            # cache the icons to save on CPU overhead per update_status call
            icon_dict.update({
                "status_disconnected" : QIcon(":icons/status_disconnected.svg"),
                "status_waiting" : QIcon(":icons/status_waiting.svg"),
                "status_lagging" : QIcon(":icons/status_lagging.svg"),
                "status_lagging_fork" : QIcon(":icons/status_lagging_fork.svg"),
                "status_connected" : QIcon(":icons/status_connected.svg"),
                "status_connected_fork" : QIcon(":icons/status_connected_fork.svg"),
                "status_connected_proxy" : QIcon(":icons/status_connected_proxy.svg"),
                "status_connected_proxy_fork" : QIcon(":icons/status_connected_proxy_fork.svg"),
                "seed_ok" : QIcon(":icons/seed.png"),
                "seed_warning" : QIcon(":icons/seed_warning.png")
            })
        status_tip_dict = ElectrumWindow._network_status_tip_dict
        if not status_tip_dict:
            # Since we're caching stuff, might as well cache this too
            status_tip_dict.update({
                "status_disconnected" : _('Network Status') + " - " + _("Offline"),
                "status_waiting" : _('Network Status') + " - " + _("Updating..."),
                "status_lagging" : _('Network Status') + " - " + '',
                "status_lagging_fork" : _('Network Status') + " - " + _("Chain fork(s) detected"),
                "status_connected" : _('Network Status') + " - " + _("Connected"),
                "status_connected_fork" : _('Network Status') + " - " + _("Chain fork(s) detected"),
                "status_connected_proxy" : _('Network Status') + " - " + _("Connected via proxy"),
                "status_connected_proxy_fork" : _('Network Status') + " - " + _("Connected via proxy") + "; " + _("Chain fork(s) detected"),
            })
        status_tip = ''
        if self.network is None or not self.network.is_running():
            text = _("Offline")
            icon = icon_dict["status_disconnected"]
            status_tip = status_tip_dict['status_disconnected']
        elif self.network.is_connected():
            server_height = self.network.get_server_height()
            server_lag = self.network.get_local_height() - server_height
            num_chains = len(self.network.get_blockchains())
            # Server height can be 0 after switching to a new server
            # until we get a headers subscription request response.
            # Display the synchronizing message in that case.
            if not self.wallet.up_to_date or server_height == 0:
                text = _("Synchronizing...")
                icon = icon_dict["status_waiting"]
                status_tip = status_tip_dict["status_waiting"]
            elif server_lag > 1:
                text = _("Server is lagging ({} blocks)").format(server_lag)
                if num_chains <= 1:
                    icon = icon_dict["status_lagging"]
                    status_tip = status_tip_dict["status_lagging"] + text
                else:
                    icon = icon_dict["status_lagging_fork"]
                    status_tip = status_tip_dict["status_lagging_fork"] + "; " + text
            else:
                # connected and in sync: build the balance summary text
                text = ""
                if not self.is_slp_wallet:
                    text += "Tokens Disabled - "
                else:
                    token_id = self.slp_token_id
                    try:
                        d = self.wallet.token_types[token_id]
                    except (AttributeError, KeyError):
                        # no token selected, or wallet has no token_types
                        pass
                    else:
                        bal = format_satoshis_nofloat(self.wallet.get_slp_token_balance(token_id, { 'user_config': { 'confirmed_only': False } })[0],
                            decimal_point=d['decimals'],)
                        text += "%s Token Balance: %s; "%(d['name'], bal)
                c, u, x = self.wallet.get_balance()
                text += _("BCH Balance" ) + ": %s "%(self.format_amount_and_units(c))
                if u:
                    text += " [%s unconfirmed]"%(self.format_amount(u, True).strip())
                if x:
                    text += " [%s unmatured]"%(self.format_amount(x, True).strip())
                # append fiat balance and price
                if self.fx.is_enabled():
                    text += self.fx.get_fiat_status_text(c + u + x,
                        self.base_unit(), self.get_decimal_point()) or ''
                n_unverif = self.wallet.get_unverified_tx_pending_count()
                if n_unverif >= 10:
                    # if there are lots left to verify, display this informative text
                    text += " " + ( _("[%d unverified TXs]") % n_unverif )
                if not self.network.proxy:
                    icon = icon_dict["status_connected"] if num_chains <= 1 else icon_dict["status_connected_fork"]
                    status_tip = status_tip_dict["status_connected"] if num_chains <= 1 else status_tip_dict["status_connected_fork"]
                else:
                    icon = icon_dict["status_connected_proxy"] if num_chains <= 1 else icon_dict["status_connected_proxy_fork"]
                    status_tip = status_tip_dict["status_connected_proxy"] if num_chains <= 1 else status_tip_dict["status_connected_proxy_fork"]
                # Provide extra warning and instructions to user if he/she has tokens in a non-SLP wallet type.
                if not self.is_slp_wallet:
                    locked_in_slp = self.wallet.get_slp_locked_balance()
                    if locked_in_slp > 0:
                        self._warn_slp_prefers_slp_wallets_if_not_slp_wallet()
        else:
            text = _("Not connected")
            icon = icon_dict["status_disconnected"]
            status_tip = status_tip_dict["status_disconnected"]
        self.tray.setToolTip("%s (%s)" % (text, self.wallet.basename()))
        self.balance_label.setText(text)
        addr_format = self.config.get('addr_format', 1)
        self.setAddrFormatText(addr_format)
        self.status_button.setIcon( icon )
        self.status_button.setStatusTip( status_tip )
        # seed-backup indicator: warning icon until the user backs up the seed
        if self.wallet.has_seed():
            if self.wallet.storage.get('wallet_seed_needs_backup'):
                self.seed_button.setIcon(icon_dict["seed_warning"])
                self.seed_button.setToolTip(_("Seed Requires Backup!"))
                self.seed_button.setStatusTip(self.seed_button.toolTip())
            else:
                self.seed_button.setIcon(icon_dict["seed_ok"])
                self.seed_button.setToolTip(_("Seed"))
                self.seed_button.setStatusTip(None)
def update_wallet(self):
self.need_update.set() # will enqueue an _update_wallet() call in at most 0.5 seconds from now.
def _update_wallet(self):
''' Called by self.timer_actions every 0.5 secs if need_update flag is set.
Note that the flag is actually cleared by update_tabs.'''
self.update_status()
if self.wallet.up_to_date or not self.network or not self.network.is_connected():
self.update_tabs()
@rate_limited(1.0, classlevel=True, ts_after=True) # Limit tab updates to no more than 1 per second, app-wide. Multiple calls across instances will be collated into 1 deferred series of calls (1 call per extant instance)
def update_tabs(self):
if self.cleaned_up: return
self.history_list.update()
self.request_list.update()
self.address_list.update()
self.utxo_list.update()
self.contact_list.update()
self.invoice_list.update()
self.update_completions()
if self.is_slp_wallet:
self.slp_history_list.update()
self.token_list.update()
self.history_updated_signal.emit() # inform things like address_dialog that there's a new history, also clears self.tx_update_mgr.verif_q
self.need_update.clear() # clear flag
if self.labels_need_update.is_set():
# if flag was set, might as well declare the labels updated since they necessarily were due to a full update.
self.labels_updated_signal.emit() # just in case client code was waiting for this signal to proceed.
self.labels_need_update.clear() # clear flag
def update_labels(self):
self.labels_need_update.set() # will enqueue an _update_labels() call in at most 0.5 seconds from now
@rate_limited(1.0)
def _update_labels(self):
''' Called by self.timer_actions every 0.5 secs if labels_need_update flag is set. '''
if self.cleaned_up: return
self.history_list.update_labels()
self.address_list.update_labels()
self.utxo_list.update_labels()
self.update_completions()
self.labels_updated_signal.emit()
self.labels_need_update.clear() # clear flag
def create_history_tab(self):
from .history_list import HistoryList
self.history_list = l = HistoryList(self)
l.searchable_list = l
return l
def create_slp_history_tab(self):
from .slp_history_list import HistoryList
self.slp_history_list = l = HistoryList(self)
return self.create_list_tab(l)
def show_address(self, addr, *, parent=None):
parent = parent or self.top_level_window()
from . import address_dialog
d = address_dialog.AddressDialog(self, addr, windowParent=parent)
d.exec_()
def show_transaction(self, tx, tx_desc = None):
'''tx_desc is set only for txs created in the Send tab'''
d = show_transaction(tx, self, tx_desc)
self._tx_dialogs.add(d)
def addr_toggle_slp(self, force_slp=False):
def present_slp():
self.toggle_cashaddr(2, True)
self.receive_slp_token_type_label.setDisabled(False)
self.receive_slp_amount_e.setDisabled(False)
self.receive_slp_amount_label.setDisabled(False)
if force_slp:
present_slp()
return
if Address.FMT_UI == Address.FMT_SLPADDR:
self.toggle_cashaddr(1, True)
self.receive_token_type_combo.setCurrentIndex(0)
else:
present_slp()
def on_toggled_opreturn(self, b):
''' toggles opreturn-related widgets for both the receive and send
tabs'''
b = bool(b)
self.config.set_key('enable_opreturn', b)
# send tab
if not b:
self.message_opreturn_e.setText("")
self.op_return_toolong = False
for x in self.send_tab_opreturn_widgets:
x.setVisible(b)
# receive tab
for x in self.receive_tab_opreturn_widgets:
x.setVisible(b)
    def create_receive_tab(self):
        """Build and return the 'Receive' tab widget.

        Constructs the request form (address, description, OP_RETURN, SLP
        token selector, BCH/token/fiat amounts, expiry), the clickable QR
        preview with its detached qr-window hook, and the saved-requests
        list. Widgets are stored on self so other methods
        (update_receive_qr, on_slptok_receive, save_payment_request, ...)
        can manipulate them.
        """
        # A 4-column grid layout. All the stretch is in the last column.
        # The exchange rate plugin adds a fiat widget in column 2
        self.receive_grid = grid = QGridLayout()
        grid.setSpacing(8)
        grid.setColumnStretch(3, 1)
        self.receive_address = None
        self.receive_address_e = ButtonsLineEdit()
        self.receive_address_e.addCopyButton()
        self.receive_address_e.setReadOnly(True)
        msg = _('Bitcoin Cash address where the payment should be received. Note that each payment request uses a different Bitcoin Cash address.')
        label = HelpLabel(_('&Receiving address'), msg)
        label.setBuddy(self.receive_address_e)
        self.receive_address_e.textChanged.connect(self.update_receive_qr)
        self.cashaddr_toggled_signal.connect(self.update_receive_address_widget)
        grid.addWidget(label, 0, 0)
        grid.addWidget(self.receive_address_e, 0, 1, 1, -1)
        # SLP wallets get a button toggling between token and BCH address display
        if self.is_slp_wallet:
            self.show_slp_addr_btn = QPushButton(_('Show Token Address'))
            self.show_slp_addr_btn.clicked.connect(self.addr_toggle_slp)
            grid.addWidget(self.show_slp_addr_btn, 1, 1)
        self.receive_message_e = QLineEdit()
        label = QLabel(_('&Description'))
        label.setBuddy(self.receive_message_e)
        grid.addWidget(label, 2, 0)
        grid.addWidget(self.receive_message_e, 2, 1, 1, -1)
        self.receive_message_e.textChanged.connect(self.update_receive_qr)
        # OP_RETURN requests
        self.receive_opreturn_e = QLineEdit()
        msg = _("You may optionally append an OP_RETURN message to the payment URI and/or QR you generate.\n\nNote: Not all wallets yet support OP_RETURN parameters, so make sure the other party's wallet supports OP_RETURN URIs.")
        self.receive_opreturn_label = label = HelpLabel(_('&OP_RETURN'), msg)
        label.setBuddy(self.receive_opreturn_e)
        self.receive_opreturn_rawhex_cb = QCheckBox(_('Raw &hex script'))
        self.receive_opreturn_rawhex_cb.setToolTip(_('If unchecked, the textbox contents are UTF8-encoded into a single-push script: <tt>OP_RETURN PUSH &lt;text&gt;</tt>. If checked, the text contents will be interpreted as a raw hexadecimal script to be appended after the OP_RETURN opcode: <tt>OP_RETURN &lt;script&gt;</tt>.'))
        grid.addWidget(label, 3, 0)
        grid.addWidget(self.receive_opreturn_e, 3, 1, 1, 3)
        grid.addWidget(self.receive_opreturn_rawhex_cb, 3, 4, Qt.AlignLeft)
        self.receive_opreturn_e.textChanged.connect(self.update_receive_qr)
        self.receive_opreturn_rawhex_cb.clicked.connect(self.update_receive_qr)
        self.receive_tab_opreturn_widgets = [
            self.receive_opreturn_e,
            self.receive_opreturn_rawhex_cb,
            self.receive_opreturn_label,
        ]
        # SLP token selector and token-amount field
        msg = _('Select the SLP token to Request.')
        self.receive_token_type_combo = QComboBox()
        if ColorScheme.dark_scheme and sys.platform == 'darwin':
            # Hack/Workaround to QDarkStyle bugs; see https://github.com/ColinDuquesnoy/QDarkStyleSheet/issues/169#issuecomment-494647801
            self.receive_token_type_combo.setItemDelegate(QStyledItemDelegate(self.receive_token_type_combo))
        self.receive_token_type_combo.setFixedWidth(200)
        self.receive_token_type_combo.currentIndexChanged.connect(self.on_slptok_receive)
        #self.receive_token_type_combo.currentIndexChanged.connect(self.update_buttons_on_seed) # update 'CoinText' button, etc
        self.receive_slp_token_type_label = HelpLabel(_('Token Type'), msg)
        grid.addWidget(self.receive_slp_token_type_label, 4, 0)
        grid.addWidget(self.receive_token_type_combo, 4, 1)
        self.receive_slp_amount_e = SLPAmountEdit('tokens', 0)
        self.receive_slp_amount_e.setFixedWidth(self.receive_token_type_combo.width())
        self.receive_slp_amount_label = QLabel(_('Req. token amount'))
        grid.addWidget(self.receive_slp_amount_label, 5, 0)
        grid.addWidget(self.receive_slp_amount_e, 5, 1)
        self.receive_slp_amount_e.textChanged.connect(self.update_receive_qr)
        self.receive_amount_e = BTCAmountEdit(self.get_decimal_point)
        self.receive_amount_e.setFixedWidth(self.receive_token_type_combo.width())
        self.receive_amount_label = QLabel(_('Requested &amount'))
        self.receive_amount_label.setBuddy(self.receive_amount_e)
        grid.addWidget(self.receive_amount_label, 6, 0)
        grid.addWidget(self.receive_amount_e, 6, 1)
        self.receive_amount_e.textChanged.connect(self.update_receive_qr)
        # SLP widgets start enabled only when the UI is in SLP address mode
        if Address.FMT_UI != Address.FMT_SLPADDR:
            self.receive_token_type_combo.setDisabled(True)
            self.receive_slp_token_type_label.setDisabled(True)
            self.receive_slp_amount_e.setDisabled(True)
            self.receive_slp_amount_label.setDisabled(True)
        else:
            self.receive_token_type_combo.setDisabled(False)
            self.receive_slp_token_type_label.setDisabled(False)
            self.receive_slp_amount_e.setDisabled(False)
            self.receive_slp_amount_label.setDisabled(False)
        self.fiat_receive_e = AmountEdit(self.fx.get_currency if self.fx else '')
        if not self.fx or not self.fx.is_enabled():
            self.fiat_receive_e.setVisible(False)
        grid.addWidget(self.fiat_receive_e, 6, 2, Qt.AlignLeft)
        self.connect_fields(self, self.receive_amount_e, self.fiat_receive_e, None)
        self.expires_combo = QComboBox()
        self.expires_combo.addItems([_(i[0]) for i in expiration_values])
        self.expires_combo.setCurrentIndex(3)
        self.expires_combo.setFixedWidth(self.receive_amount_e.width())
        msg = ' '.join([
            _('Expiration date of your request.'),
            _('This information is seen by the recipient if you send them a signed payment request.'),
            _('Expired requests have to be deleted manually from your list, in order to free the corresponding Bitcoin Cash addresses.'),
            _('The Bitcoin Cash address never expires and will always be part of this Electron Cash wallet.'),
        ])
        label = HelpLabel(_('Request &expires'), msg)
        label.setBuddy(self.expires_combo)
        grid.addWidget(label, 7, 0)
        grid.addWidget(self.expires_combo, 7, 1)
        # read-only stand-in for expires_combo, shown when viewing a saved request
        self.expires_label = QLineEdit('')
        self.expires_label.setReadOnly(1)
        self.expires_label.hide()
        grid.addWidget(self.expires_label, 7, 1)
        self.save_request_button = QPushButton(_('&Save'))
        self.save_request_button.clicked.connect(self.save_payment_request)
        self.new_request_button = QPushButton(_('&Clear'))
        self.new_request_button.clicked.connect(self.new_payment_request)
        weakSelf = Weak.ref(self)
        class MyQRCodeWidget(QRCodeWidget):
            def mouseReleaseEvent(self, e):
                ''' to make the QRWidget clickable '''
                weakSelf() and weakSelf().show_qr_window()
        self.receive_qr = MyQRCodeWidget(fixedSize=200)
        self.receive_qr.setCursor(QCursor(Qt.PointingHandCursor))
        self.receive_buttons = buttons = QHBoxLayout()
        buttons.addWidget(self.save_request_button)
        buttons.addWidget(self.new_request_button)
        buttons.addStretch(1)
        grid.addLayout(buttons, 8, 1, 1, -1)
        self.receive_requests_label = QLabel(_('Re&quests'))
        from .request_list import RequestList
        self.request_list = RequestList(self)
        self.request_list.chkVisible()
        self.receive_requests_label.setBuddy(self.request_list)
        # layout
        vbox_g = QVBoxLayout()
        vbox_g.addLayout(grid)
        vbox_g.addStretch()
        hbox = QHBoxLayout()
        hbox.addLayout(vbox_g)
        vbox2 = QVBoxLayout()
        vbox2.setContentsMargins(0,0,0,0)
        vbox2.setSpacing(4)
        vbox2.addWidget(self.receive_qr, Qt.AlignHCenter|Qt.AlignTop)
        self.receive_qr.setToolTip(_('Receive request QR code (click for details)'))
        but = uribut = QPushButton(_('Copy &URI'))
        def on_copy_uri():
            # copy the currently-displayed request URI to the clipboard
            if self.receive_qr.data:
                uri = str(self.receive_qr.data)
                self.copy_to_clipboard(uri, _('Receive request URI copied to clipboard'), uribut)
        but.clicked.connect(on_copy_uri)
        but.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)
        but.setToolTip(_('Click to copy the receive request URI to the clipboard'))
        vbox2.addWidget(but)
        vbox2.setAlignment(but, Qt.AlignHCenter|Qt.AlignVCenter)
        hbox.addLayout(vbox2)
        class ReceiveTab(QWidget):
            def showEvent(self, e):
                # On show: refresh the receive address if it's been used, and
                # keep the SLP/BCH toggle button text in sync with the
                # currently-displayed address format.
                super().showEvent(e)
                if e.isAccepted():
                    slf = weakSelf()
                    if slf:
                        slf.check_and_reset_receive_address_if_needed()
                    if self.main_window.is_slp_wallet:
                        if Address.FMT_UI == Address.FMT_SLPADDR:
                            self.main_window.show_slp_addr_btn.setText("Show BCH Address")
                        else:
                            self.main_window.show_slp_addr_btn.setText("Show Token Address")
                    else:
                        self.main_window.toggle_cashaddr(1, True)
        w = ReceiveTab()
        w.low_balance_warning_shown = False
        w.main_window = self
        w.searchable_list = self.request_list
        vbox = QVBoxLayout(w)
        vbox.addLayout(hbox)
        vbox.addStretch(1)
        vbox.addWidget(self.receive_requests_label)
        vbox.addWidget(self.request_list)
        vbox.setStretchFactor(self.request_list, 1000)
        return w
def delete_payment_request(self, addr):
self.wallet.remove_payment_request(addr, self.config)
self.request_list.update()
self.address_list.update()
self.clear_receive_tab()
def get_request_URI(self, addr):
req = self.wallet.receive_requests[addr]
message = self.wallet.labels.get(addr.to_storage_string(), '')
amount = req['amount']
op_return = req.get('op_return')
op_return_raw = req.get('op_return_raw') if not op_return else None
URI = web.create_URI(addr, amount, message, op_return=op_return, op_return_raw=op_return_raw, token_id=req.get('token_id'))
if req.get('time'):
URI += "&time=%d"%req.get('time')
if req.get('exp'):
URI += "&exp=%d"%req.get('exp')
if req.get('name') and req.get('sig'):
sig = bfh(req.get('sig'))
sig = bitcoin.base_encode(sig, base=58)
URI += "&name=" + req['name'] + "&sig="+sig
return str(URI)
def sign_payment_request(self, addr):
alias = self.config.get('alias')
alias_privkey = None
if alias and self.alias_info:
alias_addr, alias_name, validated = self.alias_info
if alias_addr:
if self.wallet.is_mine(alias_addr):
msg = _('This payment request will be signed.') + '\n' + _('Please enter your password')
password = self.password_dialog(msg)
if password:
try:
self.wallet.sign_payment_request(addr, alias, alias_addr, password)
except Exception as e:
self.show_error(str(e))
return
else:
return
else:
return
def save_payment_request(self):
if not self.receive_address:
self.show_error(_('No receiving address'))
if self.receive_token_type_combo.currentData() is not None:
amount = float(self.receive_slp_amount_e.text())
else:
amount = self.receive_amount_e.get_amount()
message = self.receive_message_e.text()
if not message and not amount:
self.show_error(_('No message or amount'))
return False
i = self.expires_combo.currentIndex()
expiration = list(map(lambda x: x[1], expiration_values))[i]
kwargs = {}
opr = self.receive_opreturn_e.text().strip()
if opr:
# save op_return, if any
arg = 'op_return'
if self.receive_opreturn_rawhex_cb.isChecked():
arg = 'op_return_raw'
kwargs[arg] = opr
if self.receive_token_type_combo.currentData() is not None:
tokenid = self.receive_token_type_combo.currentData()
req = self.wallet.make_payment_request(self.receive_address, amount,
message, expiration, token_id=tokenid, **kwargs)
else:
req = self.wallet.make_payment_request(self.receive_address, amount,
message, expiration, **kwargs)
self.wallet.add_payment_request(req, self.config)
self.sign_payment_request(self.receive_address)
self.request_list.update()
self.request_list.select_item_by_address(req.get('address')) # when adding items to the view the current selection may not reflect what's in the UI. Make sure it's selected.
self.address_list.update()
self.save_request_button.setEnabled(False)
def view_and_paste(self, title, msg, data):
dialog = WindowModalDialog(self.top_level_window(), title)
vbox = QVBoxLayout()
label = QLabel(msg)
label.setWordWrap(True)
vbox.addWidget(label)
pr_e = ShowQRTextEdit(text=data)
vbox.addWidget(pr_e)
vbox.addLayout(Buttons(CopyCloseButton(pr_e.text, self.app, dialog)))
dialog.setLayout(vbox)
dialog.exec_()
def export_payment_request(self, addr):
r = self.wallet.receive_requests[addr]
pr = paymentrequest.serialize_request(r).SerializeToString()
name = r['id'] + '.bip70'
fileName = self.getSaveFileName(_("Select where to save your payment request"), name, "*.bip70")
if fileName:
with open(fileName, "wb+") as f:
f.write(util.to_bytes(pr))
self.show_message(_("Request saved successfully"))
self.saved = True
    def new_payment_request(self):
        """Reset the receive form onto a fresh unused, unfrozen address.

        Non-deterministic wallets that are out of addresses fall back to
        re-using the current receiving address (after informing the user);
        deterministic wallets ask for confirmation before creating an
        address beyond the gap limit.
        """
        self.receive_token_type_combo.setCurrentIndex(0)
        self.receive_slp_amount_e.setText("")
        addr = self.wallet.get_unused_address(frozen_ok=False)
        if addr is None:
            if not self.wallet.is_deterministic():
                msg = [
                    _('No more addresses in your wallet.'),
                    _('You are using a non-deterministic wallet, which cannot create new addresses.'),
                    _('If you want to create new addresses, use a deterministic wallet instead.')
                ]
                self.show_message(' '.join(msg))
                # New! Since the button is called 'Clear' now, we let them proceed with a re-used address
                addr = self.wallet.get_receiving_address()
            else:
                # Warn if past gap limit.
                if not self.question(_("Warning: The next address will not be recovered automatically if you restore your wallet from seed; you may need to add it manually.\n\nThis occurs because you have too many unused addresses in your wallet. To avoid this situation, use the existing addresses first.\n\nCreate anyway?")):
                    return
                addr = self.wallet.create_new_address(False)
        self.set_receive_address(addr)
        self.expires_label.hide()
        self.expires_combo.show()
        self.request_list.setCurrentItem(None) # We want the current item to always reflect what's in the UI. So if new, clear selection.
        self.receive_message_e.setFocus(1)
def set_receive_address(self, addr):
self.receive_address = addr
self.receive_message_e.setText('')
self.receive_opreturn_rawhex_cb.setChecked(False)
self.receive_opreturn_e.setText('')
self.receive_amount_e.setAmount(None)
self.update_receive_address_widget()
def update_receive_address_widget(self):
text = ''
if self.receive_address:
text = self.receive_address.to_full_ui_string()
self.receive_address_e.setText(text)
    @rate_limited(0.250, ts_after=True) # this function potentially re-computes the QR widget, so it's rate limited to once every 250ms
    def check_and_reset_receive_address_if_needed(self):
        ''' Check to make sure the receive tab is kosher and doesn't contain
        an already-used address. This should be called from the showEvent
        for the tab. '''
        if not self.wallet.use_change or self.cleaned_up:
            # if they don't care about change addresses, they are ok
            # with re-using addresses, so skip this check.
            return
        # ok, they care about anonymity, so make sure the receive address
        # is always an unused address.
        if (not self.receive_address # this should always be defined but check anyway
                or self.receive_address in self.wallet.frozen_addresses # make sure it's not frozen
                or (self.wallet.get_address_history(self.receive_address) # make a new address if it has a history
                    and not self.wallet.get_payment_request(self.receive_address, self.config))): # and if they aren't actively editing one in the request_list widget
            addr = self.wallet.get_unused_address(frozen_ok=False) # try unused, not frozen
            if addr is None:
                if self.wallet.is_deterministic():
                    # create a new one if deterministic
                    addr = self.wallet.create_new_address(False)
                else:
                    # otherwise give up and just re-use one.
                    addr = self.wallet.get_receiving_address()
            self.receive_address = addr
            self.update_receive_address_widget()
def clear_receive_tab(self):
self.expires_label.hide()
self.expires_combo.show()
self.request_list.setCurrentItem(None)
self.set_receive_address(self.wallet.get_receiving_address(frozen_ok=False))
    def show_qr_window(self):
        """Show (creating on first use) the detached QR window mirroring the
        receive tab's current request, then raise and focus it."""
        from . import qrwindow
        if not self.qr_window:
            self.qr_window = qrwindow.QR_Window()
            self.qr_window.setAttribute(Qt.WA_DeleteOnClose, True)
            weakSelf = Weak.ref(self)
            def destroyed_clean(x):
                # drop our reference once Qt deletes the window so a fresh
                # one is created next time; weakref avoids keeping this
                # main window alive from the closure
                if weakSelf():
                    weakSelf().qr_window = None
                    weakSelf().print_error("QR Window destroyed.")
            self.qr_window.destroyed.connect(destroyed_clean)
        self.update_receive_qr()
        if self.qr_window.isMinimized():
            self.qr_window.showNormal()
        else:
            self.qr_window.show()
        self.qr_window.raise_()
        self.qr_window.activateWindow()
def show_send_tab(self):
self.tabs.setCurrentIndex(self.tabs.indexOf(self.send_tab))
def show_receive_tab(self):
self.tabs.setCurrentIndex(self.tabs.indexOf(self.receive_tab))
def receive_at(self, addr):
self.receive_address = addr
self.show_receive_tab()
self.update_receive_address_widget()
def update_receive_qr(self):
if self.receive_token_type_combo.currentData() is not None and self.receive_slp_amount_e.text() is not '':
amount = self.receive_slp_amount_e.text() # if self.receive_slp_amount_e.text() is not '' else None
token_id = self.receive_token_type_combo.currentData()
else:
amount = self.receive_amount_e.get_amount()
token_id = None
message = self.receive_message_e.text()
self.save_request_button.setEnabled((amount is not None) or (message != ""))
kwargs = {}
if self.receive_opreturn_e.isVisible():
# set op_return if enabled
arg = 'op_return'
if self.receive_opreturn_rawhex_cb.isChecked():
arg = 'op_return_raw'
opret = self.receive_opreturn_e.text()
if opret:
kwargs[arg] = opret
# Special case hack -- see #1473. Omit bitcoincash: prefix from
# legacy address if no other params present in receive request.
if Address.FMT_UI == Address.FMT_LEGACY and not kwargs and not amount and not message:
uri = self.receive_address.to_ui_string()
elif not token_id:
# Otherwise proceed as normal, prepending bitcoincash: to URI
uri = web.create_URI(self.receive_address, amount, message, **kwargs)
else:
uri = web.create_URI(self.receive_address, amount, message, **kwargs, token_id=token_id)
self.receive_qr.setData(uri)
if self.qr_window:
self.qr_window.set_content(self, self.receive_address_e.text(), amount,
message, uri, **kwargs)
if self.is_slp_wallet:
if Address.FMT_UI == Address.FMT_SLPADDR:
self.show_slp_addr_btn.setText("Show BCH Address")
else:
self.show_slp_addr_btn.setText("Show Token Address")
    def on_slptok(self):
        """React to a change of the send tab's token selector.

        Selecting 'None' re-enables the plain BCH send widgets and OP_RETURN
        editing; selecting a token enables the SLP amount widgets (configured
        with the token's name/decimals), shows the 'extra BCH' checkbox, and
        disables/clears the OP_RETURN fields. Finishes by refreshing status
        and fee.
        """
        self.slp_token_id = self.token_type_combo.currentData()
        self.payto_e.check_text()
        self.slp_amount_e.setText("")
        if self.slp_token_id is None:
            # plain BCH send: enable BCH widgets, hide/disable SLP widgets
            self.amount_e.setDisabled(False)
            self.amount_label.setDisabled(False)
            self.max_button.setDisabled(False)
            self.fiat_send_e.setDisabled(False)
            self.slp_extra_bch_cb.setHidden(True)
            self.slp_amount_e.setDisabled(True)
            self.slp_max_button.setDisabled(True)
            self.slp_amount_label.setDisabled(True)
            self.message_opreturn_e.setEnabled(True)
            self.opreturn_rawhex_cb.setEnabled(True)
            self.opreturn_label.setEnabled(True)
        else:
            # token send: show the extra-BCH option (unchecked) and emit its
            # clicked signal so dependent widgets sync, then enable SLP inputs
            self.slp_extra_bch_cb.setHidden(False)
            self.slp_extra_bch_cb.setChecked(False)
            self.slp_extra_bch_cb.clicked.emit()
            self.slp_amount_e.setDisabled(False)
            self.slp_max_button.setDisabled(False)
            self.slp_amount_label.setDisabled(False)
            tok = self.wallet.token_types[self.slp_token_id]
            self.slp_amount_e.set_token(tok['name'][:6],tok['decimals'])
            # OP_RETURN editing is disabled and cleared while a token is selected
            self.message_opreturn_e.setEnabled(False)
            self.message_opreturn_e.setText('')
            self.opreturn_rawhex_cb.setEnabled(False)
            self.opreturn_label.setEnabled(False)
        self.update_status()
        self.do_update_fee()
    def on_slptok_receive(self):
        """React to a change of the receive tab's token selector.

        Clears both amount fields and enables whichever amount widgets
        (BCH+fiat vs. SLP token) match the new selection, refreshing the QR.
        Selecting a token also forces the UI into SLP address mode.
        """
        self.receive_slp_amount_e.setText("")
        self.receive_amount_e.setText("")
        slp_token_id = self.receive_token_type_combo.currentData()
        self.update_receive_qr()
        if slp_token_id is None:
            self.receive_slp_amount_e.setDisabled(True)
            self.receive_slp_amount_label.setDisabled(True)
            self.receive_amount_e.setDisabled(False)
            self.receive_amount_label.setDisabled(False)
            self.fiat_receive_e.setDisabled(False)
        else:
            # force SLP address display for a token request
            self.addr_toggle_slp(True)
            self.receive_slp_amount_e.setDisabled(False)
            self.receive_slp_amount_label.setDisabled(False)
            self.receive_amount_e.setDisabled(True)
            self.receive_amount_label.setDisabled(True)
            self.fiat_receive_e.setDisabled(True)
            tok = self.wallet.token_types[slp_token_id]
            self.receive_slp_amount_e.set_token(tok['name'][:6],tok['decimals'])
def on_slp_extra_bch(self):
if self.slp_extra_bch_cb.isChecked():
self.amount_e.setDisabled(False)
self.amount_label.setDisabled(False)
self.max_button.setDisabled(False)
self.fiat_send_e.setDisabled(False)
else:
self.amount_e.setText('')
self.max_button.setChecked(False)
self.amount_e.setDisabled(True)
self.amount_label.setDisabled(True)
self.max_button.setDisabled(True)
self.fiat_send_e.setDisabled(True)
def create_send_tab(self):
    """Build and return the Send tab widget.

    Lays out the Pay-To / Description / OP_RETURN / From / amount / fee
    rows in a grid, plus the action buttons and the invoice list.  On SLP
    wallets, additional token-type / token-amount widgets are created and
    wired up.  Also defines and connects the nested closures that recolor
    the amount/fee fields as the user types (entry_changed & friends).

    Returns: QWidget holding the whole tab (with a ``searchable_list``
    attribute pointing at the invoice list, used by the global search).
    """
    # A 4-column grid layout. All the stretch is in the last column.
    # The exchange rate plugin adds a fiat widget in column 2
    self.send_grid = grid = QGridLayout()
    grid.setSpacing(8)
    grid.setColumnStretch(3, 1)
    from .paytoedit import PayToEdit
    self.amount_e = BTCAmountEdit(self.get_decimal_point)
    self.payto_e = PayToEdit(self)
    self.payto_e.parent = self
    # Widgets that are shown/hidden together when SLP UI is toggled.
    self.slp_send_tab_widgets = []
    if self.is_slp_wallet:
        self.slp_amount_e = SLPAmountEdit('tokens', 0)
        self.token_type_combo = QComboBox()
        if ColorScheme.dark_scheme and sys.platform == 'darwin':
            # Hack/Workaround to QDarkStyle bugs; see https://github.com/ColinDuquesnoy/QDarkStyleSheet/issues/169#issuecomment-494647801
            self.token_type_combo.setItemDelegate(QStyledItemDelegate(self.token_type_combo))
        self.token_type_combo.setFixedWidth(200)
        self.token_type_combo.currentIndexChanged.connect(self.on_slptok)
        self.token_type_combo.currentIndexChanged.connect(self.update_buttons_on_seed) # update 'CoinText' button, etc
        self.slp_send_tab_widgets += [
            self.slp_amount_e, self.token_type_combo
        ]
    # NB: the translators hopefully will not have too tough a time with this
    # *fingers crossed* :)
    msg = "<span style=\"font-weight:400;\">" + _('Recipient of the funds.') + " " + \
        _("You may enter:"
          "<ul>"
          "<li> Bitcoin Cash <b>Address</b> <b>★</b>"
          "<li> Bitcoin Legacy <b>Address</b> <b>★</b>"
          "<li> Simple Ledger <b>Address</b> <b>★</b>"
          "<li> <b>Contact name</b> <b>★</b> from the Contacts tab"
          #"<li> <b>CoinText</b> e.g. <i>cointext:+1234567</i>"
          "<li> <b>OpenAlias</b> e.g. <i>satoshi@domain.com</i>"
          "</ul><br>"
          " <b>★</b> = Supports <b>pay-to-many</b>, where"
          " you may optionally enter multiple lines of the form:"
          "</span><br><pre>"
          " recipient1, amount1 \n"
          " recipient2, amount2 \n"
          " etc..."
          "</pre>")
    self.payto_label = payto_label = HelpLabel(_('Pay &to'), msg)
    payto_label.setBuddy(self.payto_e)
    qmark = ":icons/question-mark-dark.svg" if ColorScheme.dark_scheme else ":icons/question-mark-light.svg"
    qmark_help_but = HelpButton(msg, button_text='', fixed_size=False, icon=QIcon(qmark), custom_parent=self)
    self.payto_e.addWidget(qmark_help_but, index=0)
    grid.addWidget(payto_label, 1, 0)
    grid.addWidget(self.payto_e, 1, 1, 1, -1)
    # Autocomplete contacts in the Pay-To field (model filled by
    # update_completions()).
    completer = QCompleter(self.payto_e)
    completer.setCaseSensitivity(False)
    self.payto_e.setCompleter(completer)
    completer.setModel(self.completions)
    msg = _('Description of the transaction (not mandatory).') + '\n\n'\
        + _('The description is not sent to the recipient of the funds. It is stored in your wallet file, and displayed in the \'History\' tab.')
    description_label = HelpLabel(_('&Description'), msg)
    grid.addWidget(description_label, 2, 0)
    self.message_e = MyLineEdit()
    description_label.setBuddy(self.message_e)
    grid.addWidget(self.message_e, 2, 1, 1, -1)
    msg_opreturn = ( _('OP_RETURN data (optional).') + '\n\n'
                    + _('Posts a PERMANENT note to the BCH blockchain as part of this transaction.')
                    + '\n\n' + _('If you specify OP_RETURN text, you may leave the \'Pay to\' field blank.') )
    self.opreturn_label = HelpLabel(_('&OP_RETURN'), msg_opreturn)
    grid.addWidget(self.opreturn_label, 3, 0)
    self.message_opreturn_e = MyLineEdit()
    self.opreturn_label.setBuddy(self.message_opreturn_e)
    hbox = QHBoxLayout()
    hbox.addWidget(self.message_opreturn_e)
    self.opreturn_rawhex_cb = QCheckBox(_('&Raw hex script'))
    self.opreturn_rawhex_cb.setToolTip(_('If unchecked, the textbox contents are UTF8-encoded into a single-push script: <tt>OP_RETURN PUSH <text></tt>. If checked, the text contents will be interpreted as a raw hexadecimal script to be appended after the OP_RETURN opcode: <tt>OP_RETURN <script></tt>.'))
    hbox.addWidget(self.opreturn_rawhex_cb)
    grid.addLayout(hbox, 3 , 1, 1, -1)
    # Grouped so the OP_RETURN row can be shown/hidden together elsewhere.
    self.send_tab_opreturn_widgets = [
        self.message_opreturn_e,
        self.opreturn_rawhex_cb,
        self.opreturn_label,
    ]
    # 'From' row: shows explicitly selected coins (hidden when empty, see
    # redraw_from_list()).
    self.from_label = QLabel(_('&From'))
    grid.addWidget(self.from_label, 4, 0)
    self.from_list = MyTreeWidget(self, self.from_list_menu, ['',''])
    self.from_label.setBuddy(self.from_list)
    self.from_list.setHeaderHidden(True)
    self.from_list.setMaximumHeight(80)
    grid.addWidget(self.from_list, 4, 1, 1, -1)
    self.set_pay_from([])
    if self.is_slp_wallet:
        # SLP-only rows: token type selector (row 5) and token amount (row 6).
        msg = _('Token Amount to be sent.') + '\n\n' \
            + _("To enable make sure 'Address Mode' is set to SLP.") + '\n\n' \
            + _('The amount will be displayed in red if you do not have enough funds in your wallet.') + ' ' \
            + _('Note that if you have frozen some of your addresses, the available funds will be lower than your total balance.') + '\n\n' \
            + _('Keyboard shortcut: type "!" to send all your coins.')
        self.slp_amount_label = HelpLabel(_('Token Amount'), msg)
        msg = _('Select the SLP token to send.')
        self.slp_token_type_label = HelpLabel(_('Token Type'), msg)
        grid.addWidget(self.slp_token_type_label, 5, 0)
        grid.addWidget(self.token_type_combo, 5, 1)
        grid.addWidget(self.slp_amount_label, 6, 0)
        hbox = QHBoxLayout()
        self.amount_e.setMinimumWidth(195)
        self.slp_amount_e.setMinimumWidth(195)
        self.slp_amount_e.textEdited.connect(self.update_fee)
        hbox.addWidget(self.slp_amount_e)
        self.slp_max_button = EnterButton(_("Max"), self.slp_spend_max)
        hbox.addWidget(self.slp_max_button)
        grid.addLayout(hbox, 6, 1)
        self.slp_extra_bch_cb = QCheckBox(_('Also send BCH?'))
        self.slp_extra_bch_cb.clicked.connect(self.on_slp_extra_bch)
        self.slp_extra_bch_cb.setHidden(True)
        grid.addWidget(self.slp_extra_bch_cb, 6, 2)
        self.slp_send_tab_widgets += [
            self.slp_max_button, self.slp_extra_bch_cb
        ]
    msg = _('BCH amount to be sent.') + '\n\n' \
        + _('The amount will be displayed in red if you do not have enough funds in your wallet.') + ' ' \
        + _('Note that if you have frozen some of your addresses, the available funds will be lower than your total balance.') + '\n\n' \
        + _('Keyboard shortcut: type "!" to send all your coins.')
    self.amount_label = HelpLabel(_('BCH &Amount'), msg)
    self.amount_label.setBuddy(self.amount_e)
    grid.addWidget(self.amount_label, 7, 0)
    hbox = QHBoxLayout()
    hbox.addWidget(self.amount_e)
    self.max_button = EnterButton(_("&Max"), self.spend_max)
    self.max_button.setCheckable(True)
    hbox.addWidget(self.max_button)
    grid.addLayout(hbox, 7, 1)
    self.fiat_send_e = AmountEdit(self.fx.get_currency if self.fx else '')
    if not self.fx or not self.fx.is_enabled():
        self.fiat_send_e.setVisible(False)
    grid.addWidget(self.fiat_send_e, 7, 2)
    # Keep the fiat field's frozen state in lockstep with the BCH field's.
    self.amount_e.frozen.connect(
        lambda: self.fiat_send_e.setFrozen(self.amount_e.isReadOnly()))
    msg = _('Bitcoin Cash transactions are in general not free. A transaction fee is paid by the sender of the funds.') + '\n\n'\
        + _('The amount of fee can be decided freely by the sender. However, transactions with low fees take more time to be processed.') + '\n\n'\
        + _('A suggested fee is automatically added to this field. You may override it. The suggested fee increases with the size of the transaction.')
    self.fee_e_label = HelpLabel(_('F&ee'), msg)
    def fee_cb(dyn, pos, fee_rate):
        # Slider callback: persist the chosen level/rate, then recompute.
        if dyn:
            self.config.set_key('fee_level', pos, False)
        else:
            self.config.set_key('fee_per_kb', fee_rate, False)
        self.spend_max() if self.max_button.isChecked() else self.update_fee()
    self.fee_slider = FeeSlider(self, self.config, fee_cb)
    self.fee_e_label.setBuddy(self.fee_slider)
    self.fee_slider.setFixedWidth(140)
    # Static label shown instead of the slider when a custom fee rate is
    # configured (see fee_slider_mogrifier()).
    self.fee_custom_lbl = HelpLabel(self.get_custom_fee_text(),
        _('This is the fee rate that will be used for this transaction.')
        + "\n\n" + _('It is calculated from the Custom Fee Rate in preferences, but can be overridden from the manual fee edit on this form (if enabled).')
        + "\n\n" + _('Generally, a fee of 1.0 sats/B is a good minimal rate to ensure your transaction will make it into the next block.'))
    self.fee_custom_lbl.setFixedWidth(140)
    self.fee_slider_mogrifier()
    self.fee_e = BTCAmountEdit(self.get_decimal_point)
    if not self.config.get('show_fee', False):
        self.fee_e.setVisible(False)
    self.fee_e.textEdited.connect(self.update_fee)
    # This is so that when the user blanks the fee and moves on,
    # we go back to auto-calculate mode and put a fee back.
    self.fee_e.editingFinished.connect(self.update_fee)
    self.connect_fields(self, self.amount_e, self.fiat_send_e, self.fee_e)
    grid.addWidget(self.fee_e_label, 9, 0)
    hbox = QHBoxLayout()
    hbox.addWidget(self.fee_slider)
    hbox.addWidget(self.fee_custom_lbl)
    hbox.addWidget(self.fee_e)
    hbox.addStretch(1)
    grid.addLayout(hbox, 9, 1)
    self.preview_button = EnterButton(_("&Preview"), self.do_preview)
    self.preview_button.setToolTip(_('Display the details of your transactions before signing it.'))
    self.send_button = EnterButton(_("&Send"), self.do_send)
    self.cointext_button = EnterButton(_("Coin&Text"), self.do_cointext)
    self.cointext_button.setToolTip(_('Process CoinText, transforming it into a BIP70 payment request.'))
    self.clear_button = EnterButton(_("&Clear"), self.do_clear)
    buttons = QHBoxLayout()
    buttons.addWidget(self.clear_button)
    buttons.addWidget(self.preview_button)
    buttons.addWidget(self.send_button)
    buttons.addWidget(self.cointext_button)
    buttons.addStretch(1)
    grid.addLayout(buttons, 11, 1, 1, 3)
    self.payto_e.textChanged.connect(self.update_buttons_on_seed) # hide/unhide cointext button, etc
    self.amount_e.shortcut.connect(self.spend_max)
    self.payto_e.textChanged.connect(self.update_fee)
    self.amount_e.textEdited.connect(self.update_fee)
    self.message_opreturn_e.textEdited.connect(self.update_fee)
    self.message_opreturn_e.textChanged.connect(self.update_fee)
    self.message_opreturn_e.editingFinished.connect(self.update_fee)
    self.opreturn_rawhex_cb.stateChanged.connect(self.update_fee)
    def reset_max(text):
        # Any manual edit cancels 'Max' mode; re-enable the Max button
        # only when the field is empty and editable (and no SLP token is
        # currently selected).
        self.max_button.setChecked(False)
        if not self.slp_token_id:
            enabled = not bool(text) and not self.amount_e.isReadOnly()
            self.max_button.setEnabled(enabled)
    self.amount_e.textEdited.connect(reset_max)
    self.fiat_send_e.textEdited.connect(reset_max)
    def entry_changed():
        # Recolor/validate fields.  On SLP wallets the token check runs
        # first; the BCH check only runs if the token side found no error
        # (so the SLP error message keeps the status bar).
        if self.is_slp_wallet:
            hasError = entry_changed_slp()
            if hasError == False:
                entry_changed_bch()
        else:
            entry_changed_bch()
    def entry_changed_bch():
        text = ""
        if self.not_enough_funds:
            amt_color, fee_color = ColorScheme.RED, ColorScheme.RED
            text = _( "Not enough BCH" )
            # Explain why: frozen addresses and BCH locked in token dust
            # both reduce the spendable balance.
            c, u, x = self.wallet.get_frozen_balance()
            if c+u+x:
                text += ' (' + self.format_amount(c+u+x).strip() + ' ' + self.base_unit() + ' ' +_("are frozen") + ')'
            slp = self.wallet.get_slp_locked_balance()
            if slp > 0:
                text += " (" + self.format_amount(slp).strip() + " BCH held in tokens)"
            extra = run_hook("not_enough_funds_extra", self)
            if isinstance(extra, str) and extra:
                text += " ({})".format(extra)
        elif self.fee_e.isModified():
            amt_color, fee_color = ColorScheme.DEFAULT, ColorScheme.DEFAULT
        elif self.amount_e.isModified():
            amt_color, fee_color = ColorScheme.DEFAULT, ColorScheme.BLUE
        else:
            amt_color, fee_color = ColorScheme.BLUE, ColorScheme.BLUE
        opret_color = ColorScheme.DEFAULT
        if self.op_return_toolong:
            opret_color = ColorScheme.RED
            text = _("OP_RETURN message too large, needs to be no longer than 220 bytes") + (", " if text else "") + text
        self.statusBar().showMessage(text)
        self.amount_e.setStyleSheet(amt_color.as_stylesheet())
        self.fee_e.setStyleSheet(fee_color.as_stylesheet())
        self.message_opreturn_e.setStyleSheet(opret_color.as_stylesheet())
    self.amount_e.textChanged.connect(entry_changed)
    self.fee_e.textChanged.connect(entry_changed)
    self.message_opreturn_e.textChanged.connect(entry_changed)
    self.message_opreturn_e.textEdited.connect(entry_changed)
    self.message_opreturn_e.editingFinished.connect(entry_changed)
    self.opreturn_rawhex_cb.stateChanged.connect(entry_changed)
    if self.is_slp_wallet:
        self.slp_amount_e.textChanged.connect(entry_changed)
        self.slp_amount_e.editingFinished.connect(entry_changed)
    def entry_changed_slp():
        # Returns True when an SLP-side error message was shown (the BCH
        # check is then skipped by entry_changed()).
        if self.token_type_combo.currentData():
            text = ""
            name = self.wallet.token_types.get(self.slp_token_id)['name']
            decimals = self.wallet.token_types.get(self.slp_token_id)['decimals']
            if self.not_enough_funds_slp or self.not_enough_unfrozen_funds_slp:
                bal_avail, x, x, x, frozen_amt = self.wallet.get_slp_token_balance(self.slp_token_id, { 'user_config': { 'confirmed_only': False }})
                del x
                if self.not_enough_funds_slp:
                    amt_color = ColorScheme.RED
                    text = "Not enough " + \
                        name + " tokens (" + \
                        format_satoshis_plain_nofloat(bal_avail, decimals) + " valid"
                    if self.config.get('confirmed_only', False):
                        # Mention how much is merely unconfirmed when
                        # 'Spend only confirmed coins' is active.
                        conf_bal_avail = self.wallet.get_slp_token_balance(self.slp_token_id, self.config)[0]
                        unconf_bal = bal_avail - conf_bal_avail
                        if unconf_bal > 0:
                            text += ", " + format_satoshis_plain_nofloat(unconf_bal, decimals) + " unconfirmed)"
                        else:
                            text += ")"
                    else:
                        text += ")"
                elif self.not_enough_unfrozen_funds_slp:
                    amt_color = ColorScheme.RED
                    text = "Not enough unfrozen " + name + " tokens (" + \
                        format_satoshis_plain_nofloat(bal_avail, decimals) + " valid, " + \
                        format_satoshis_plain_nofloat(frozen_amt, decimals) + " frozen)"
            elif self.slp_amount_e.isModified():
                amt_color = ColorScheme.DEFAULT
            else:
                amt_color = ColorScheme.BLUE
            try:
                # SLP token quantities are unsigned 64-bit on-chain.
                if self.slp_amount_e.get_amount() > (2 ** 64) - 1:
                    amt_color = ColorScheme.RED
                    maxqty = format_satoshis_plain_nofloat((2 ** 64) - 1, self.wallet.token_types.get(self.slp_token_id)['decimals'])
                    text = _('Token output quantity is too large. Maximum {maxqty}.').format(maxqty=maxqty)
            except TypeError:
                # get_amount() returned None (blank field) -- nothing to check.
                pass
            self.statusBar().showMessage(text)
            self.slp_amount_e.setStyleSheet(amt_color.as_stylesheet())
            if text != "":
                return True
        return False
    self.invoices_label = QLabel(_('Invoices'))
    from .invoice_list import InvoiceList
    self.invoice_list = InvoiceList(self)
    self.invoice_list.chkVisible()
    vbox0 = QVBoxLayout()
    vbox0.addLayout(grid)
    hbox = QHBoxLayout()
    hbox.addLayout(vbox0)
    w = QWidget()
    vbox = QVBoxLayout(w)
    vbox.addLayout(hbox)
    vbox.addStretch(1)
    vbox.addWidget(self.invoices_label)
    vbox.addWidget(self.invoice_list)
    vbox.setStretchFactor(self.invoice_list, 1000)
    w.searchable_list = self.invoice_list
    run_hook('create_send_tab', grid)
    return w
def spend_max(self):
    """Engage 'Max' mode: check the Max button and recompute the fee,
    which in turn fills the amount field with the spendable balance."""
    self.max_button.setChecked(True)
    self.do_update_fee()
def slp_spend_max(self):
    """Fill the token amount field with the entire spendable balance of
    the currently selected SLP token, then recompute the fee."""
    # Index 3 of the balance tuple is the spendable token amount.
    spendable = self.wallet.get_slp_token_balance(self.slp_token_id, self.config)[3]
    self.slp_amount_e.setAmount(spendable)
    self.do_update_fee()
def update_fee(self):
    """Mark the displayed fee as stale.  The actual recomputation
    (do_update_fee) is performed elsewhere by whatever polls this flag --
    presumably a periodic timer, so rapid keystrokes don't each trigger a
    full rebuild; not visible in this file section, confirm at caller."""
    self.require_fee_update = True
def get_payto_or_dummy(self):
    """Return the (type, address) recipient from the Pay-To field, or a
    placeholder (TYPE_ADDRESS, wallet dummy address) when nothing usable
    has been entered, so fee estimation always has an output to price."""
    return self.payto_e.get_recipient() or (TYPE_ADDRESS, self.wallet.dummy_address())
def get_custom_fee_text(self, fee_rate = None):
    """Render the custom fee rate as e.g. '1.23 sats/B'.

    Returns '' when no custom fee rate is configured.  When *fee_rate*
    is None, the configured rate (stored per kB) is converted to per-byte.
    """
    if not self.config.has_custom_fee_rate():
        return ""
    if fee_rate is None:
        fee_rate = self.config.custom_fee_rate() / 1000.0
    # Round to two decimal places for display.
    return str(round(fee_rate * 100) / 100) + " sats/B"
@staticmethod
def output_for_opreturn_stringdata(op_return):
    """Build a (TYPE_SCRIPT, ScriptOutput, 0) tuple for an OP_RETURN
    output from user text.

    The text is split on '<push>' markers into individual pushes; a piece
    prefixed '<hex>' is taken as raw hex, '<empty>' is passed through
    unchanged, anything else is UTF-8 encoded.  Raises OPReturnError on a
    non-str argument and OPReturnTooLarge when the assembled script
    exceeds 223 bytes (i.e. more than 220 bytes of data).
    """
    if not isinstance(op_return, str):
        raise OPReturnError('OP_RETURN parameter needs to be of type str!')
    pieces = ["OP_RETURN"]
    for data in op_return.split('<push>'):
        if data.startswith("<hex>"):
            pieces.append(data.replace("<hex>", ""))
        elif data.startswith("<empty>"):
            pieces.append(data)
        else:
            pieces.append(data.encode('utf-8').hex())
    script_out = ScriptOutput.from_string(" ".join(pieces))
    if len(script_out.script) > 223:
        raise OPReturnTooLarge(_("OP_RETURN message too large, needs to be no longer than 220 bytes"))
    return (TYPE_SCRIPT, script_out, 0)
@staticmethod
def output_for_opreturn_rawhex(op_return):
    """Build a (TYPE_SCRIPT, ScriptOutput, 0) tuple from a raw hex
    OP_RETURN payload.

    The literal string 'empty' yields a bare OP_RETURN.  Raises
    OPReturnError for non-str input or non-hex text, OPReturnTooLarge
    when the resulting script exceeds 223 bytes.
    """
    if not isinstance(op_return, str):
        raise OPReturnError('OP_RETURN parameter needs to be of type str!')
    hex_text = '' if op_return == 'empty' else op_return
    try:
        # 0x6a is the OP_RETURN opcode; the user supplies the rest verbatim.
        script = b'\x6a' + bytes.fromhex(hex_text.strip())
    except ValueError:
        raise OPReturnError(_('OP_RETURN script expected to be hexadecimal bytes'))
    if len(script) > 223:
        raise OPReturnTooLarge(_("OP_RETURN script too large, needs to be no longer than 223 bytes"))
    return (TYPE_SCRIPT, ScriptOutput(script), 0)
def do_update_fee(self):
    '''Recalculate the fee. If the fee was manually input, retain it, but
    still build the TX to see if there are enough funds.

    Resets the not-enough-funds / OP_RETURN-too-long flags, assembles the
    prospective output list (SLP OP_RETURN + token dust outputs, payto
    outputs, optional OP_RETURN), builds an unsigned transaction and uses
    it to fill the fee (and, in Max mode, the amount) fields.  Any build
    failure sets the corresponding flag and bails out quietly; the
    entry_changed closures display the resulting state.
    '''
    bch_outputs = []
    token_output_amts = []
    self.not_enough_funds = False
    self.op_return_toolong = False
    self.not_enough_funds_slp = False
    self.not_enough_unfrozen_funds_slp = False
    # A user-entered fee is kept ("frozen") rather than recomputed.
    freeze_fee = (self.fee_e.isModified()
                  and (self.fee_e.text() or self.fee_e.hasFocus()))
    amount = '!' if self.max_button.isChecked() else self.amount_e.get_amount()
    fee_rate = None
    if self.is_slp_wallet:
        slp_amount = None
        if self.slp_token_id:
            try:
                if len(self.payto_e.get_outputs(self.max_button.isChecked())) > 1:
                    # Multi-line pay-to-many: token amounts come from the
                    # payto lines, so lock the manual token/BCH widgets.
                    self.slp_extra_bch_cb.setDisabled(True)
                    self.slp_extra_bch_cb.setChecked(False)
                    self.slp_amount_e.setDisabled(True)
                    self.slp_max_button.setDisabled(True)
                    self.amount_e.setText('')
                    self.max_button.setDisabled(True)
                    amt = []
                    precision = self.wallet.token_types.get(self.slp_token_id)['decimals']
                    for _out in self.payto_e.get_outputs(self.max_button.isChecked()):
                        if _out[2] >= 0:
                            # Payto amounts are in satoshi units; convert
                            # to token base units using the token's decimals.
                            amt.append(int(_out[2] / 10**8 * 10**precision))
                    slp_amount = sum(amt)
                    self.slp_amount_e.setAmount(slp_amount)
                else:
                    self.slp_max_button.setDisabled(False)
                    self.slp_amount_e.setDisabled(False)
                    self.slp_extra_bch_cb.setDisabled(False)
                    slp_amount = self.slp_amount_e.get_amount()
            except TypeError:
                # Unparseable payto contents: fall back to the manual field.
                self.slp_max_button.setDisabled(False)
                self.slp_amount_e.setDisabled(False)
                self.slp_extra_bch_cb.setDisabled(False)
                slp_amount = self.slp_amount_e.get_amount()
        if amount is None and slp_amount is None:
            # Nothing entered at all: clear the (non-frozen) fee and stop.
            if not freeze_fee:
                self.fee_e.setAmount(None)
            self.statusBar().showMessage('')
            return
    else:
        if amount is None:
            if not freeze_fee:
                self.fee_e.setAmount(None)
            self.statusBar().showMessage('')
            return
    try:
        selected_slp_coins = []
        if self.slp_token_id:
            amt = []
            try: # first, try to read amount from payto for multi-output
                if len(self.payto_e.get_outputs(self.max_button.isChecked())) > 1:
                    precision = self.wallet.token_types.get(self.slp_token_id)['decimals']
                    for _out in self.payto_e.get_outputs(self.max_button.isChecked()):
                        if _out[2] >= 0:
                            amt.append(int(_out[2] / 10**8 * 10**precision))
                    slp_amount = sum(amt)
                    self.slp_amount_e.setAmount(slp_amount)
                else:
                    if self.payto_e.is_pr:
                        # BIP70 payment request: token amounts come from
                        # the request's SLP OP_RETURN message.
                        slpmsg = slp.SlpMessage.parseSlpOutputScript(self.payment_request.outputs[0][1])
                        amt = list(slpmsg.op_return_fields['token_output'][1:])
                    else:
                        amt = slp_amount or 0
            except TypeError:
                if self.payto_e.is_pr:
                    slpmsg = slp.SlpMessage.parseSlpOutputScript(self.payment_request.outputs[0][1])
                    amt = list(slpmsg.op_return_fields['token_output'][1:])
                else:
                    amt = slp_amount or 0
            # Pick token coins and get the SLP OP_RETURN message to place
            # at output index 0.
            selected_slp_coins, slp_op_return_msg = SlpCoinChooser.select_coins(self.wallet, self.slp_token_id, amt, self.config)
            if slp_op_return_msg:
                bch_outputs = [ slp_op_return_msg ]
                token_output_amts = slp.SlpMessage.parseSlpOutputScript(bch_outputs[0][1]).op_return_fields['token_output']
                for _amt in token_output_amts:
                    # just grab a dummy address for this fee calculation - safe for imported_privkey wallets
                    bch_outputs.append((TYPE_ADDRESS, self.wallet.get_addresses()[0], 546))
        bch_payto_outputs = self.payto_e.get_outputs(self.max_button.isChecked())
        if bch_payto_outputs and bch_payto_outputs[0][2]: # and not self.slp_token_id:
            bch_outputs.extend(bch_payto_outputs)
        elif self.slp_token_id and amount and not bch_payto_outputs:
            # 'Also send BCH' with no explicit recipient yet: price the
            # BCH amount against a dummy recipient.
            _type, addr = self.get_payto_or_dummy()
            bch_outputs.append((_type, addr, amount))
        if not bch_outputs:
            _type, addr = self.get_payto_or_dummy()
            bch_outputs.append((_type, addr, amount))
        if not self.slp_token_id:
            # Plain-BCH sends may carry a user OP_RETURN (mutually
            # exclusive with SLP, which owns output 0).
            opreturn_message = self.message_opreturn_e.text() if self.config.get('enable_opreturn') else None
            if (opreturn_message != '' and opreturn_message is not None):
                if self.opreturn_rawhex_cb.isChecked():
                    bch_outputs.insert(0, self.output_for_opreturn_rawhex(opreturn_message))
                else:
                    bch_outputs.insert(0, self.output_for_opreturn_stringdata(opreturn_message))
        fee = self.fee_e.get_amount() if freeze_fee else None
        tx = self.wallet.make_unsigned_transaction(self.get_coins(isInvoice = False), bch_outputs, self.config, fee, mandatory_coins=selected_slp_coins)
        if self.slp_token_id:
            self.wallet.check_sufficient_slp_balance(slp.SlpMessage.parseSlpOutputScript(slp_op_return_msg[1]), self.config)
    except NotEnoughFunds:
        self.not_enough_funds = True
        if not freeze_fee:
            self.fee_e.setAmount(None)
        return
    except NotEnoughFundsSlp:
        self.not_enough_funds_slp = True
        if not freeze_fee:
            self.fee_e.setAmount(None)
        return
    except NotEnoughUnfrozenFundsSlp:
        self.not_enough_unfrozen_funds_slp = True
        if not freeze_fee:
            self.fee_e.setAmount(None)
        return
    except OPReturnTooLarge:
        self.op_return_toolong = True
        return
    except OPReturnError as e:
        self.statusBar().showMessage(str(e))
        return
    except BaseException:
        # Any other build failure (e.g. incomplete user input) is treated
        # as "nothing to show yet".
        return
    if not freeze_fee:
        fee = None if self.not_enough_funds else tx.get_fee()
        if not self.slp_token_id or len(token_output_amts) > 0:
            self.fee_e.setAmount(fee)
    if self.max_button.isChecked():
        amount = tx.output_value()
        if self.is_slp_wallet and token_output_amts:
            # Exclude the 546-sat dust carried by each token output
            # (beyond the first) from the displayed spendable amount.
            amount = tx.output_value() - (len(token_output_amts)-1) * 546
        self.amount_e.setAmount(amount)
    if fee is not None:
        fee_rate = fee / tx.estimated_size()
    self.fee_slider_mogrifier(self.get_custom_fee_text(fee_rate))
def fee_slider_mogrifier(self, text = None):
    """Show either the fee slider (no custom fee rate configured) or the
    static custom-rate label, optionally updating the label's text."""
    use_custom = self.config.has_custom_fee_rate()
    self.fee_slider.setHidden(use_custom)
    self.fee_custom_lbl.setHidden(not use_custom)
    if text is not None:
        self.fee_custom_lbl.setText(text)
def from_list_delete(self, item):
    """Remove *item*'s coin from the explicit 'pay from' selection, then
    redraw the list and recompute the fee.

    Guards against QTreeWidget.indexOfTopLevelItem() returning -1 (item
    no longer in the view), which would otherwise make list.pop(-1)
    silently drop the wrong (last) coin.
    """
    i = self.from_list.indexOfTopLevelItem(item)
    if i < 0:
        return
    self.pay_from.pop(i)
    self.redraw_from_list()
    self.update_fee()
def from_list_menu(self, position):
    """Context-menu handler for the 'From' coin list: offers to remove
    the entry under the cursor."""
    clicked = self.from_list.itemAt(position)
    if not clicked:
        return
    menu = QMenu()
    menu.addAction(_("Remove"), lambda: self.from_list_delete(clicked))
    menu.exec_(self.from_list.viewport().mapToGlobal(position))
def set_pay_from(self, coins):
    """Replace the explicit coin selection backing the 'From' list and
    refresh its display."""
    self.pay_from = [*coins]
    self.redraw_from_list()
def redraw_from_list(self):
    """Repaint the 'From' coin list; the label and list are hidden
    entirely while no coins have been explicitly selected."""
    self.from_list.clear()
    have_coins = bool(self.pay_from)
    self.from_label.setHidden(not have_coins)
    self.from_list.setHidden(not have_coins)

    def fmt(coin):
        # 'txid[:10]...txid[-10:]:n<TAB>address'
        h = coin['prevout_hash']
        return '{}...{}:{:d}\t{}'.format(h[0:10], h[-10:],
                                         coin['prevout_n'], coin['address'])

    for coin in self.pay_from:
        self.from_list.addTopLevelItem(QTreeWidgetItem([fmt(coin), self.format_amount(coin['value'])]))
def get_contact_payto(self, key):
    """Render a contact as a Pay-To line: 'Label <address>' for address
    contacts, the raw key itself otherwise (e.g. an OpenAlias)."""
    kind, label = self.contacts.get(key)
    if kind == 'address':
        return label + ' <' + key + '>'
    return key
def update_completions(self):
    """Refresh the Pay-To field's autocompletion model from the current
    contact list."""
    entries = [self.get_contact_payto(key) for key in self.contacts.keys()]
    self.completions.setStringList(entries)
def protected(func):
    '''Password request wrapper. The password is passed to the function
    as the 'password' named argument. "None" indicates either an
    unencrypted wallet, or the user cancelled the password request.
    An empty input is passed as the empty string.'''
    def request_password(self, *args, **kwargs):
        parent = self.top_level_window()
        password = None
        # Optional caller-supplied callback; popped so it is not forwarded
        # to the wrapped function.
        on_pw_cancel = kwargs.pop('on_pw_cancel', None)
        # Re-prompt until the wallet accepts the password or the user cancels.
        while self.wallet.has_password():
            password = self.password_dialog(parent=parent)
            if password is None:
                # User cancelled password input
                if callable(on_pw_cancel):
                    on_pw_cancel()
                return
            try:
                self.wallet.check_password(password)
                break
            except Exception as e:
                # Wrong password (or other wallet error): show and retry.
                self.show_error(str(e), parent=parent)
                continue
        kwargs['password'] = password
        return func(self, *args, **kwargs)
    return request_password
def read_send_tab(self, preview=False):
    """Validate the Send tab and collect everything needed to build the
    transaction.

    Returns (bch_outputs, fee, label, coins, selected_slp_coins) on
    success, or None after showing an error/warning to the user.  For SLP
    sends, output 0 is the SLP OP_RETURN message, token recipients get
    546-sat dust outputs, and a token change output is appended when
    needed.  *fee* is None unless the user manually froze the fee field.
    """
    bch_outputs = []
    selected_slp_coins = []
    opreturn_message = self.message_opreturn_e.text() if self.config.get('enable_opreturn') else None
    if self.slp_token_id:
        if self.slp_amount_e.get_amount() == 0 or self.slp_amount_e.get_amount() is None:
            self.show_message(_("No SLP token amount provided."))
            return
        try:
            """ Guard against multiline 'Pay To' field """
            amt = []
            is_paytomany_slp = False
            try: # first, try to read amount from payto for multi-output
                if len(self.payto_e.get_outputs(self.max_button.isChecked())) > 1:
                    precision = self.wallet.token_types.get(self.slp_token_id)['decimals']
                    for _out in self.payto_e.get_outputs(self.max_button.isChecked()):
                        if _out[2] >= 0:
                            # Payto amounts are in satoshis; convert to
                            # token base units via the token's decimals.
                            amt.append(int(_out[2] / 10**8 * 10**precision))
                    is_paytomany_slp = True
                else:
                    if self.payto_e.is_pr:
                        # BIP70 payment request: take token amounts from
                        # the request's SLP OP_RETURN message.
                        slpmsg = slp.SlpMessage.parseSlpOutputScript(self.payment_request.outputs[0][1])
                        amt = list(slpmsg.op_return_fields['token_output'][1:])
                    else:
                        amt = self.slp_amount_e.get_amount()
            except TypeError:
                if self.payto_e.is_pr:
                    slpmsg = slp.SlpMessage.parseSlpOutputScript(self.payment_request.outputs[0][1])
                    amt = list(slpmsg.op_return_fields['token_output'][1:])
                else:
                    amt = self.slp_amount_e.get_amount()
            # Choose token coins and build the SLP OP_RETURN message.
            selected_slp_coins, slp_op_return_msg = SlpCoinChooser.select_coins(self.wallet, self.slp_token_id, amt, self.config)
            if not self.payment_request:
                """ Guard against bad address encoding """
                if not self.payto_e.payto_address and not is_paytomany_slp: # and not self.payto_e.text():
                    self.show_error(_("Receiver SLP address is missing or is not formatted properly."))
                    return
                """ Require SLPADDR prefix in 'Pay To' field. """
                if networks.net.SLPADDR_PREFIX not in self.payto_e.address_string_for_slp_check and not is_paytomany_slp:
                    self.show_error(_("Address provided is not in SLP Address format.\n\nThe address should be encoded using 'simpleledger:' or 'slptest:' URI prefix."))
                    return
            if slp_op_return_msg:
                bch_outputs = [ slp_op_return_msg ]
        except OPReturnTooLarge as e:
            self.show_error(str(e))
            return
        except OPReturnError as e:
            self.show_error(str(e))
            return
        except slp.SlpSerializingError as e:
            self.show_error(str(e))
            return
        except (NotEnoughFundsSlp, NotEnoughUnfrozenFundsSlp) as e:
            self.show_error(str(e))
            return
    isInvoice = False
    if self.payment_request and self.payment_request.has_expired():
        self.show_error(_('Payment request has expired'))
        return
    label = self.message_e.text()
    if self.payment_request:
        isInvoice = True
        bch_outputs.extend(self.payment_request.get_outputs())
        if self.slp_token_id:
            # Replace the request's output 0 with our freshly built SLP
            # OP_RETURN message.
            bch_outputs[0] = slp_op_return_msg
    else:
        errors = self.payto_e.get_errors()
        if errors:
            self.show_warning(_("Invalid lines found:") + "\n\n" + '\n'.join([ _("Line #") + str(x[0]+1) + ": " + x[1] for x in errors]))
            return
        if self.slp_token_id:
            # Each token recipient gets a 546-sat dust output.
            if self.payto_e.payto_address:
                _type, _addr = self.payto_e.payto_address
                bch_outputs.append((_type, _addr, 546))
            else:
                try:
                    for _out in self.payto_e.get_outputs(self.max_button.isChecked()):
                        if _out[2] >= 0:
                            bch_outputs.append((_out[0], _out[1], 546))
                except:
                    pass
        if self.payto_e.is_alias and not self.payto_e.validated:
            alias = self.payto_e.toPlainText()
            msg = _('WARNING: the alias "{}" could not be validated via an additional '
                    'security check, DNSSEC, and thus may not be correct.').format(alias) + '\n'
            msg += _('Do you wish to continue?')
            if not self.question(msg):
                return
    coins = self.get_coins(isInvoice=isInvoice)
    """ SLP: Add an additional token change output """
    if self.slp_token_id:
        change_addr = None
        token_outputs = slp.SlpMessage.parseSlpOutputScript(bch_outputs[0][1]).op_return_fields['token_output']
        # More token amounts in the SLP message than dust outputs so far
        # means the last amount is token change -- add a dust output for it.
        if len(token_outputs) > 1 and len(bch_outputs) < len(token_outputs):
            """ start of logic copied from wallet.py """
            addrs = self.wallet.get_change_addresses()[-self.wallet.gap_limit_for_change:]
            if self.wallet.use_change and addrs:
                # New change addresses are created only after a few
                # confirmations. Select the unused addresses within the
                # gap limit; if none take one at random
                change_addrs = [addr for addr in addrs if
                                self.wallet.get_num_tx(addr) == 0]
                if not change_addrs:
                    import random
                    change_addrs = [random.choice(addrs)]
                    change_addr = change_addrs[0]
                elif len(change_addrs) > 1:
                    # Use the second unused address, presumably to keep
                    # the first free for BCH change -- confirm intent.
                    change_addr = change_addrs[1]
                else:
                    change_addr = change_addrs[0]
            elif coins:
                change_addr = coins[0]['address']
            else:
                change_addr = self.wallet.get_addresses()[0]
            bch_outputs.append((TYPE_ADDRESS, change_addr, 546))
    # add non-SLP related BCH amounts
    if not self.payment_request and self.amount_e.get_amount():
        bch_outputs.extend(self.payto_e.get_outputs(self.max_button.isChecked()))
    """ Only Allow OP_RETURN if SLP is disabled. """
    if not self.slp_token_id:
        try:
            # handle op_return if specified and enabled
            opreturn_message = self.message_opreturn_e.text()
            if opreturn_message:
                if self.opreturn_rawhex_cb.isChecked():
                    bch_outputs.append(self.output_for_opreturn_rawhex(opreturn_message))
                else:
                    bch_outputs.append(self.output_for_opreturn_stringdata(opreturn_message))
        except OPReturnTooLarge as e:
            self.show_error(str(e))
            return
        except OPReturnError as e:
            self.show_error(str(e))
            return
    if not bch_outputs:
        self.show_error(_('Enter receiver address (No BCH outputs).'))
        return
    for _type, addr, amount in bch_outputs:
        if amount is None:
            self.show_error(_('Invalid Amount'))
            return
    # A manually edited, visible fee field "freezes" the fee; otherwise
    # None lets the wallet compute it.
    freeze_fee = self.fee_e.isVisible() and self.fee_e.isModified() and (self.fee_e.text() or self.fee_e.hasFocus())
    fee = self.fee_e.get_amount() if freeze_fee else None
    return bch_outputs, fee, label, coins, selected_slp_coins
# Qt signal-connection handle (or None), set lazily by do_cointext() so
# the "CoinTextPopup" label is killed when the user switches tabs; the
# connection is made at most once per window instance.
_cointext_popup_kill_tab_changed_connection = None
def do_cointext(self):
    ''' This is called by the cointext button 'clicked' signal and it
    initiates the processing of the cointext URL. This should only be
    called if self.payto_e.cointext is not None, otherwise it will do
    nothing.

    Fetches a BIP70 payment request for the entered phone number/amount
    in a background thread (WaitingDialog), then loads it into the send
    form and pops up a "review before sending" hint over the Send button.
    '''
    if self.payto_e.cointext and not self.payment_request:
        if self.gui_object.warn_if_no_network(self):
            return
        phone = self.payto_e.cointext
        sats = self.amount_e.get_amount()
        if sats:
            url = "https://pay.cointext.io/p/{}/{}".format(phone, sats)
            def get_cointext_pr():
                # Runs in thread
                self.print_error("CoinText URL", url)
                pr = paymentrequest.get_payment_request(url) # raises on error
                return pr
            def on_success(pr):
                # Runs in main thread
                if pr:
                    if pr.error:
                        self.print_error("CoinText ERROR", pr.error)
                        self.show_error(_("There was an error processing the CoinText. Please check the phone number and try again."))
                        return
                    self.print_error("CoinText RESULT", repr(pr))
                    self.prepare_for_payment_request()
                    def show_popup():
                        # Invoked once the payment request is loaded into
                        # the form (pr.request_ok_callback, below).
                        if not self.send_button.isVisible():
                            # likely a watching-only wallet, in which case
                            # showing the popup label for the send button
                            # leads to unspecified position for the button
                            return
                        show_it = partial(
                            ShowPopupLabel,
                            text=_("Please review payment before sending CoinText"),
                            target=self.send_button, timeout=15000.0,
                            name="CoinTextPopup",
                            pointer_position=PopupWidget.LeftSide,
                            activation_hides=True, track_target=True,
                            dark_mode = ColorScheme.dark_scheme
                        )
                        if not self._cointext_popup_kill_tab_changed_connection:
                            # this ensures that if user changes tabs, the popup dies
                            # ... it is only connected once per instance lifetime
                            self._cointext_popup_kill_tab_changed_connection = self.tabs.currentChanged.connect(lambda: KillPopupLabel("CoinTextPopup"))
                        QTimer.singleShot(0, show_it)
                    pr.request_ok_callback = show_popup
                    self.on_pr(pr)
            def on_error(exc):
                # Runs in main thread; forwards to the window's error handler.
                self.print_error("CoinText EXCEPTION", repr(exc))
                self.on_error(exc)
            WaitingDialog(self.top_level_window(),
                          _("Retrieving CoinText info, please wait ..."),
                          get_cointext_pr, on_success, on_error)
        else:
            self.show_error(_('CoinText: Please specify an amount'))
def do_preview(self):
    """Build the transaction and show it in a dialog instead of sending."""
    self.do_send(preview=True)
def do_send(self, preview = False):
    """Read the send tab, build an unsigned transaction, confirm it with
    the user (password prompt if the wallet is encrypted) and hand it
    off for signing and broadcast.

    If *preview* is True the transaction is only displayed in a preview
    window; nothing is signed or broadcast.
    """
    # Plugins get a chance to veto the send entirely.
    if run_hook('abort_send', self):
        return
    r = self.read_send_tab(preview=preview)
    if not r:
        return
    outputs, fee, tx_desc, coins, slp_coins = r
    if self.slp_token_id:
        # SLP send: make sure the token balance can cover the SLP
        # OP_RETURN message encoded in outputs[0] before attempting
        # BCH coin selection.
        try:
            self.wallet.check_sufficient_slp_balance(slp.SlpMessage.parseSlpOutputScript(outputs[0][1]), self.config)
        except slp.SlpInvalidOutputMessage:
            self.show_message(_("No token outputs available.\n\nIf you have unconfirmed tokens wait 1 confirmation or turn off 'Spend only confirmed coins' in preferences, and try again."))
            return
        except NotEnoughFundsSlp:
            self.show_message(_("Token balance too low."))
            return
        except NotEnoughUnfrozenFundsSlp:
            self.show_message(_("Unfrozen SLP token balance is too low. Unfreeze some of the token coins associated with with this token."))
            return
    try:
        tx = self.wallet.make_unsigned_transaction(coins, outputs, self.config, fee, mandatory_coins=slp_coins)
    except NotEnoughFunds:
        if self.payment_request:
            self.show_message(_("Insufficient BCH balance.\n\nPayment request requires a balance of confirmed coins."))
        else:
            self.show_message(_("Insufficient BCH balance"))
        return
    except ExcessiveFee:
        self.show_message(_("Your fee is too high. Max is 50 sat/byte."))
        return
    except BaseException as e:
        # Unexpected failure: show it to the user rather than dying.
        traceback.print_exc(file=sys.stderr)
        self.show_message(str(e))
        return

    # When "Max" is checked the sent amount is whatever the tx pays
    # out; otherwise the sum of the requested output amounts (x[2]).
    amount = tx.output_value() if self.max_button.isChecked() else sum(map(lambda x:x[2], outputs))
    fee = tx.get_fee()

    #if fee < self.wallet.relayfee() * tx.estimated_size() / 1000 and tx.requires_fee(self.wallet):
        #self.show_error(_("This transaction requires a higher fee, or it will not be propagated by the network"))
        #return

    if preview:
        self.show_transaction(tx, tx_desc)
        return

    # confirmation dialog
    if self.slp_token_id:
        slp_amt_str = format_satoshis_plain_nofloat(self.slp_amount_e.get_amount(), self.wallet.token_types.get(self.slp_token_id)['decimals'])
        slp_name = self.wallet.token_types[self.slp_token_id]['name']
        msg = [
            _("BCH amount to be sent") + ": " + self.format_amount_and_units(amount),
            "\nToken amount to be sent" + ": " + slp_amt_str + " " + slp_name,
            _("\nMining fee") + ": " + self.format_amount_and_units(fee),
        ]
    else:
        msg = [
            _("\nAmount to be sent") + ": " + self.format_amount_and_units(amount),
            _("\nMining fee") + ": " + self.format_amount_and_units(fee),
        ]

    # Plugins (e.g. hardware wallets) may charge an additional fee.
    x_fee = run_hook('get_tx_extra_fee', self.wallet, tx)
    if x_fee:
        x_fee_address, x_fee_amount = x_fee
        msg.append( _("\nAdditional fees") + ": " + self.format_amount_and_units(x_fee_amount) )

    confirm_rate = 2 * self.config.max_fee_rate()

    # IN THE FUTURE IF WE WANT TO APPEND SOMETHING IN THE MSG ABOUT THE FEE, CODE IS COMMENTED OUT:
    #if fee > confirm_rate * tx.estimated_size() / 1000:
    #    msg.append(_('Warning') + ': ' + _("The fee for this transaction seems unusually high."))

    # Warn when fee (sats) < estimated size (bytes), i.e. < 1.0 sat/B;
    # remember the warning so broadcast_transaction doesn't repeat it.
    if (fee < (tx.estimated_size())):
        msg.append(_('\nWarning') + ': ' + _("You're using a fee of less than 1.0 sats/B. It may take a very long time to confirm."))
        tx.ephemeral['warned_low_fee_already'] = True

    if self.config.get('enable_opreturn') and self.message_opreturn_e.text():
        msg.append(_("\nYou are using an OP_RETURN message. This gets permanently written to the blockchain."))

    if self.wallet.has_password():
        msg.append("")
        msg.append(_("\nEnter your password to proceed"))
        password = self.password_dialog('\n'.join(msg))
        if not password:
            return
    else:
        msg.append(_('\nProceed?'))
        password = None
        if not self.question('\n'.join(msg)):
            return

    def sign_done(success):
        # Called back on the GUI thread after signing completes.
        if success:
            if not tx.is_complete():
                # Partially signed (e.g. multisig): show it instead of
                # broadcasting.
                self.show_transaction(tx, tx_desc)
                self.do_clear()
            else:
                self.broadcast_transaction(tx, tx_desc)
    self.sign_tx_with_password(tx, sign_done, password)
@protected
def sign_tx(self, tx, callback, password, *, slp_coins_to_burn=None, slp_amt_to_burn=None):
    """Sign *tx* and invoke *callback* with a success flag.

    Thin wrapper over sign_tx_with_password().
    NOTE(review): the @protected decorator presumably supplies the
    `password` argument after prompting the user — confirm against the
    decorator's definition.
    """
    self.sign_tx_with_password(tx, callback, password, slp_coins_to_burn=slp_coins_to_burn, slp_amt_to_burn=slp_amt_to_burn)
def sign_tx_with_password(self, tx, callback, password, *, slp_coins_to_burn=None, slp_amt_to_burn=None):
    '''Sign the transaction in a separate thread. When done, calls
    the callback with a success code of True or False.
    '''
    # check transaction SLP validity before signing
    # (the assert also covers a False return from the checker)
    try:
        assert SlpTransactionChecker.check_tx_slp(self.wallet, tx, coins_to_burn=slp_coins_to_burn, amt_to_burn=slp_amt_to_burn)
    except (Exception, AssertionError) as e:
        self.show_warning(str(e))
        return
    # call hook to see if plugin needs gui interaction
    run_hook('sign_tx', self, tx)
    def on_signed(result):
        # GUI thread: signing succeeded.
        callback(True)
    def on_failed(exc_info):
        # GUI thread: show the error, then report failure to the caller.
        self.on_error(exc_info)
        callback(False)
    if self.tx_external_keypairs:
        # Externally supplied keys (e.g. sweep): sign with those instead
        # of the wallet keystore.
        task = partial(Transaction.sign, tx, self.tx_external_keypairs, use_cache=True)
    else:
        task = partial(self.wallet.sign_transaction, tx, password, use_cache=True)
    WaitingDialog(self, _('Signing transaction...'), task,
                  on_signed, on_failed)
def broadcast_transaction(self, tx, tx_desc, *, callback=None):
    """Broadcast *tx* to the network in a background thread.

    Handles BIP70-style payment requests (POSTing the payment to the
    merchant when a url was specified), warns about sub-1-sat/B fees,
    and shows a success/error dialog when done.  *callback*, if given,
    receives True on overall success and False otherwise.
    """
    def broadcast_thread():
        # non-GUI thread
        status = False
        msg = "Failed"
        pr = self.payment_request
        if pr and pr.has_expired():
            self.payment_request = None
            return False, _("Payment request has expired")
        if pr:
            refund_address = self.wallet.get_receiving_addresses()[0]
            # Detect an SLP payment request by trying to parse its first
            # output as an SLP OP_RETURN message.
            is_slp = False
            try:
                slp.SlpMessage.parseSlpOutputScript(self.payment_request.outputs[0][1])
                is_slp = True
            except:
                pass
            ack_status, ack_msg = pr.send_payment(str(tx), refund_address, is_slp=is_slp)
            if not ack_status:
                if ack_msg == "no url":
                    # "no url" hard-coded in send_payment method
                    # it means merchant doesn't need the tx sent to him
                    # since he didn't specify a POST url.
                    # so we just broadcast and rely on that result status.
                    ack_msg = None
                else:
                    return False, ack_msg
            # at this point either ack_status is True or there is "no url"
            # and we proceed anyway with the broadcast
            status, msg = self.network.broadcast_transaction(tx)
            # figure out what to return...
            msg = ack_msg or msg # prefer the merchant's ack_msg over the broadcast msg, but fallback to broadcast msg if no ack_msg.
            status = bool(ack_status or status) # if both broadcast and merchant ACK failed -- it's a failure. if either succeeded -- it's a success
            if status:
                self.invoices.set_paid(pr, tx.txid())
                self.invoices.save()
                self.payment_request = None
        else:
            # Not a PR, just broadcast.
            status, msg = self.network.broadcast_transaction(tx)
        return status, msg

    # Check fee and warn if it's below 1.0 sats/B (and not warned already)
    fee = None
    try: fee = tx.get_fee()
    except: pass # no fee info available for tx
    # Check fee >= size otherwise warn. FIXME: If someday network relay
    # rules change to be other than 1.0 sats/B minimum, this code needs
    # to be changed.
    # (len(str(tx))//2 is the serialized size in bytes: hex string / 2.)
    if (isinstance(fee, int) and tx.is_complete() and fee < len(str(tx))//2
            and not tx.ephemeral.get('warned_low_fee_already')):
        msg = _('Warning') + ': ' + _("You're using a fee of less than 1.0 sats/B. It may take a very long time to confirm.") + "\n\n" + _("Proceed?")
        if not self.question(msg, title = _("Low Fee")):
            return
    # /end fee check

    # Capture current TL window; override might be removed on return
    parent = self.top_level_window()

    if self.gui_object.warn_if_no_network(self):
        # Don't allow a useless broadcast when in offline mode. Previous to this we were getting an exception on broadcast.
        return
    elif not self.network.is_connected():
        # Don't allow a potentially very slow broadcast when obviously not connected.
        parent.show_error(_("Not connected"))
        return

    def broadcast_done(result):
        # GUI thread
        cb_result = False
        if result:
            status, msg = result
            if status:
                cb_result = True
                buttons, copy_index, copy_link = [ _('Ok') ], None, ''
                try: txid = tx.txid() # returns None if not is_complete, but may raise potentially as well
                except: txid = None
                if txid is not None:
                    if tx_desc is not None:
                        self.wallet.set_label(txid, tx_desc)
                    copy_link = web.BE_URL(self.config, 'tx', txid)
                    if copy_link:
                        # tx is complete and there is a copy_link
                        buttons.insert(0, _("Copy link"))
                        copy_index = 0
                if parent.show_message(_('Payment sent.') + '\n' + msg,
                                       buttons = buttons,
                                       defaultButton = buttons[-1],
                                       escapeButton = buttons[-1]) == copy_index:
                    # There WAS a 'Copy link' and they clicked it
                    self.copy_to_clipboard(copy_link, _("Block explorer link copied to clipboard"), self.top_level_window())
                self.invoice_list.update()
                self.do_clear()
            else:
                if msg.startswith("error: "):
                    msg = msg.split(" ", 1)[-1] # take the last part, sans the "error: " prefix
                parent.show_error(msg)
        if callback:
            callback(cb_result)

    WaitingDialog(self, _('Broadcasting transaction...'),
                  broadcast_thread, broadcast_done, self.on_error)
def query_choice(self, msg, choices, *, add_cancel_button=False):
    """Show a modal radio-button choice dialog and return the index of
    the selected choice, or None if the dialog was dismissed.

    Needed by QtHandler for hardware wallets.
    """
    dlg = WindowModalDialog(self.top_level_window())
    choice_layout = ChoicesLayout(msg, choices)
    box = QVBoxLayout(dlg)
    box.addLayout(choice_layout.layout())
    if add_cancel_button:
        button_row = [CancelButton(dlg), OkButton(dlg)]
    else:
        button_row = [OkButton(dlg)]
    box.addLayout(Buttons(*button_row))
    accepted = dlg.exec_()
    dlg.setParent(None)
    return choice_layout.selected_index() if accepted else None
def lock_amount(self, b):
    """No-op. Kept because callers still invoke it; SLP dust amounts are
    now hard coded so freezing the amount field is no longer needed.

    Historically (for SLP) this kept the amount field locked and the Max
    button disabled when the payto field was edited while a token was
    selected:

        # if self.is_slp_wallet and self.token_type_combo.currentData():
        #     self.amount_e.setFrozen(True)
        #     self.max_button.setEnabled(False)
    """
    return None
def prepare_for_payment_request(self):
    """Switch to the send tab and freeze its inputs while a payment
    request is being fetched; payment_request_ok() / _error() later
    completes or unwinds this state.  Always returns True.
    """
    self.show_send_tab()
    self.payto_e.cointext = None
    self.payto_e.is_pr = True
    for e in [ self.payto_e, self.amount_e, self.message_e ]:
        e.setFrozen(True)
    for e in self.slp_send_tab_widgets:
        # Not every SLP widget supports setFrozen(); fall back to
        # disabling the widget instead.
        try:
            e.setFrozen(True)
        except:
            e.setDisabled(True)
    if self.is_slp_wallet:
        # reset SLP token type to 0
        self.token_type_combo.setCurrentIndex(0)
    self.max_button.setDisabled(True)
    self.payto_e.setText(_("please wait..."))
    return True
def delete_invoice(self, key):
    """Remove the invoice identified by *key* and refresh the list UI."""
    self.invoices.remove(key)
    self.invoice_list.update()
def payment_request_ok(self):
    """Slot fired when self.payment_request verified successfully.

    Registers the invoice and populates the send tab from the request;
    when the request's first output parses as an SLP OP_RETURN message,
    selects the token in the combo (offering to add unknown tokens) and
    fills in the token amount instead of a BCH amount.
    """
    pr = self.payment_request
    key = self.invoices.add(pr)
    status = self.invoices.get_status(key)
    self.invoice_list.update()
    if status == PR_PAID:
        self.show_message("invoice already paid")
        self.do_clear()
        self.payment_request = None
        return
    self.payto_e.is_pr = True
    if not pr.has_expired():
        self.payto_e.setGreen()
    else:
        self.payto_e.setExpired()
    # SLP: Check to see if this is an SLP message
    try:
        slpmsg = slp.SlpMessage.parseSlpOutputScript(pr.outputs[0][1])
    except:
        # Plain BCH request: fill in requestor and BCH amount.
        self.payto_e.setText(pr.get_requestor())
        self.amount_e.setText(format_satoshis_plain(pr.get_amount(), self.decimal_point))
    else:
        self.payto_e.setText(pr.get_slp_requestor())
        tokenid = slpmsg.op_return_fields['token_id_hex']
        # Token total skips element 0 (the OP_RETURN output slot).
        amount = sum(slpmsg.op_return_fields['token_output'][1:])
        # Walk the combo looking for the token (index 0 = "no token").
        index = 1
        while index < self.token_type_combo.count():
            self.token_type_combo.setCurrentIndex(index)
            if self.token_type_combo.currentData() == tokenid:
                break
            index+=1
        if index == self.token_type_combo.count():
            # Token unknown to the wallet: prompt to add it; the
            # callback re-selects it and sets the amount afterwards.
            self.token_type_combo.setCurrentIndex(0)
            from .slp_add_token_dialog import SlpAddTokenDialog
            def add_token_callback():
                index = 1
                while index < self.token_type_combo.count():
                    self.token_type_combo.setCurrentIndex(index)
                    if self.token_type_combo.currentData() == tokenid:
                        break
                    index+=1
                self.slp_amount_e.setAmount(amount) # * pow(10, self.slp_amount_e.token_decimals))
                self.slp_amount_e.textEdited.emit("")
                self.slp_max_button.setDisabled(True)
            SlpAddTokenDialog(self, token_id_hex = tokenid, token_name=None, allow_overwrite=None, add_callback=add_token_callback)
            return
        else:
            self.slp_amount_e.setAmount(amount) # * pow(10, self.slp_amount_e.token_decimals))
            self.slp_amount_e.textEdited.emit("")
            self.slp_max_button.setDisabled(True)
    self.message_e.setText(pr.get_memo())
    # signal to set fee
    self.amount_e.textEdited.emit("")
    # New! Payment requests have an optional (may not be there!) attribute
    # 'request_ok_callback' which takes 0 args and is called on request ok
    # This facility was needed to do the CoinTextPopup label properly.
    cb = getattr(self.payment_request, 'request_ok_callback', None)
    if callable(cb):
        cb()
def payment_request_error(self):
    """Handle a failed payment request: log it, tell the user, and reset
    the send tab."""
    pr = self.payment_request
    err = (pr and pr.error) or ''
    self.payment_request = None
    self.print_error("PaymentRequest error:", err)
    self.show_error(_("There was an error processing the payment request"),
                    rich_text=False, detail_text=err)
    self.do_clear()
def on_pr(self, request):
    """Store an incoming payment request and emit the ok/error signal
    depending on whether it verifies against our contacts.  May be
    called from a non-GUI thread; the signals marshal back to the GUI.
    """
    self.payment_request = request
    verified = self.payment_request.verify(self.contacts)
    signal = (self.payment_request_ok_signal if verified
              else self.payment_request_error_signal)
    signal.emit()
def pay_to_URI(self, URI):
    """Populate the send tab from a payment URI.

    Handles plain address/amount/label/message URIs, payment requests
    (the 'r' parameter or signed name/sig parameters, fetched async via
    self.on_pr), SLP token amounts for SLP wallets, and the optional
    op_return / op_return_raw parameters.

    Fix: use ``tokenid is None`` instead of ``tokenid == None``.
    """
    self.do_clear()
    if not URI:
        return
    try:
        # parse_URI kicks off an async payment-request fetch calling
        # self.on_pr when an 'r' parameter is present.
        out = web.parse_URI(URI, self.on_pr)
    except Exception as e:
        if 'ms-python' in URI: # this is needed for visual studio code debugger
            return
        self.show_error(_('Invalid Address URI:') + '\n' + str(e))
        return
    self.show_send_tab()
    r = out.get('r')
    sig = out.get('sig')
    name = out.get('name')
    if r or (name and sig):
        # A payment request is being fetched: freeze the UI and wait
        # for on_pr to fire.
        self.prepare_for_payment_request()
        return
    scheme = out.get('scheme')
    address = out.get('address')
    amounts = out.get('amounts')
    label = out.get('label')
    message = out.get('message')
    op_return = out.get('op_return')
    op_return_raw = out.get('op_return_raw')
    # use label as description (not BIP21 compliant)
    if label and not message:
        message = label
    if address:
        self.payto_e.setText(URI.split('?')[0])
    if message:
        self.message_e.setText(message)
    if amounts:
        if scheme == networks.net.CASHADDR_PREFIX and 'bch' in amounts:
            self.amount_e.setAmount(amounts['bch']['amount'])
            self.amount_e.textEdited.emit("")
        elif self.is_slp_wallet and scheme == networks.net.SLPADDR_PREFIX:
            # pick first token in amounts
            tokenid = None
            for key in amounts:
                if key != 'bch':
                    tokenid = key
                    # Walk the combo looking for the token (index 0 is
                    # the "no token" entry).
                    index = 1
                    while index < self.token_type_combo.count():
                        self.token_type_combo.setCurrentIndex(index)
                        if self.token_type_combo.currentData() == tokenid:
                            break
                        index+=1
                    if index == self.token_type_combo.count():
                        # Unknown token: offer to add it; the callback
                        # re-selects it and sets the token amount.
                        self.token_type_combo.setCurrentIndex(0)
                        from .slp_add_token_dialog import SlpAddTokenDialog
                        def add_token_callback():
                            index = 1
                            while index < self.token_type_combo.count():
                                self.token_type_combo.setCurrentIndex(index)
                                if self.token_type_combo.currentData() == tokenid:
                                    break
                                index+=1
                            self.slp_amount_e.setAmount(amounts[tokenid]['amount'] * pow(10, self.slp_amount_e.token_decimals))
                            self.slp_amount_e.textEdited.emit("")
                        SlpAddTokenDialog(self, token_id_hex = tokenid, token_name=None, allow_overwrite=True, add_callback=add_token_callback)
                        return
                    self.slp_amount_e.setAmount(amounts[tokenid]['amount'] * pow(10, self.slp_amount_e.token_decimals))
                    self.slp_amount_e.textEdited.emit("")
                    break
            # bug fix: identity comparison with None (was '== None')
            if tokenid is None and 'bch' in amounts:
                self.amount_e.setAmount(amounts['bch']['amount'])
                self.amount_e.textEdited.emit("")
        elif 'bch' in amounts:
            # BCH amount on a non-cashaddr scheme in an SLP context:
            # also tick the "extra BCH" checkbox.
            self.amount_e.setAmount(amounts['bch']['amount'])
            self.amount_e.textEdited.emit("")
            self.slp_extra_bch_cb.setChecked(True)
            self.slp_extra_bch_cb.clicked.emit()
        else:
            self.show_error("Unsupported URI prefix: " + scheme)
    if op_return:
        self.message_opreturn_e.setText(op_return)
        self.message_opreturn_e.setHidden(False)
        self.opreturn_rawhex_cb.setHidden(False)
        self.opreturn_rawhex_cb.setChecked(False)
        self.opreturn_label.setHidden(False)
    elif op_return_raw is not None:
        # 'is not None' allows blank value.
        # op_return_raw is secondary precedence to op_return
        if not op_return_raw:
            op_return_raw='empty'
        self.message_opreturn_e.setText(op_return_raw)
        self.message_opreturn_e.setHidden(False)
        self.opreturn_rawhex_cb.setHidden(False)
        self.opreturn_rawhex_cb.setChecked(True)
        self.opreturn_label.setHidden(False)
    elif not self.config.get('enable_opreturn'):
        self.message_opreturn_e.setText('')
        self.message_opreturn_e.setHidden(True)
        self.opreturn_rawhex_cb.setHidden(True)
        self.opreturn_label.setHidden(True)
def do_clear(self):
    ''' Clears the send tab, resetting its UI state to its initial state.'''
    for e in [self.payto_e, self.message_e, self.amount_e, self.fiat_send_e, self.fee_e, self.message_opreturn_e]:
        e.setText('')
        e.setFrozen(False)
    self.max_button.setDisabled(False)
    KillPopupLabel("CoinTextPopup") # just in case it was alive
    self.max_button.setChecked(False)
    self.not_enough_funds = False
    self.op_return_toolong = False
    self.payment_request = None
    self.payto_e.cointext = None
    self.payto_e.is_pr = False
    self.opreturn_rawhex_cb.setChecked(False)
    self.set_pay_from([])
    self.tx_external_keypairs = {}
    # OP_RETURN widgets are only shown when the feature is enabled in config.
    self.message_opreturn_e.setVisible(self.config.get('enable_opreturn', False))
    self.opreturn_rawhex_cb.setVisible(self.config.get('enable_opreturn', False))
    self.opreturn_label.setVisible(self.config.get('enable_opreturn', False))
    self.update_status()
    self.amount_e.setHidden(False)
    self.amount_label.setHidden(False)
    if self.is_slp_wallet:
        # Also reset SLP-specific send state.
        self.not_enough_funds_slp = False
        self.not_enough_unfrozen_funds_slp = False
        for e in self.slp_send_tab_widgets:
            e.setDisabled(False)
        self.slp_amount_e.setText('')
        self.token_type_combo.setCurrentIndex(0)
        self.on_slptok() # resets parts of the send tab to initial state
    run_hook('do_clear', self)
def set_frozen_state(self, addrs, freeze):
    """Freeze or unfreeze the given addresses in the wallet and refresh
    the views that display frozen state."""
    self.wallet.set_frozen_state(addrs, freeze)
    for refresh in (self.address_list.update, self.utxo_list.update, self.update_fee):
        refresh()
def set_frozen_coin_state(self, utxos, freeze):
    """Freeze or unfreeze the given coins (UTXOs) in the wallet and
    refresh the affected views."""
    self.wallet.set_frozen_coin_state(utxos, freeze)
    for refresh in (self.utxo_list.update, self.update_fee):
        refresh()
def create_converter_tab(self):
    """Build the address-converter tab: the user types any address and
    sees it rendered simultaneously in CashAddr, legacy and SLP formats.
    Returns the tab's QWidget."""
    source_address = QLineEdit()
    cash_address = ButtonsLineEdit()
    cash_address.addCopyButton()
    cash_address.setReadOnly(True)
    legacy_address = ButtonsLineEdit()
    legacy_address.addCopyButton()
    legacy_address.setReadOnly(True)
    slp_address = ButtonsLineEdit()
    slp_address.setReadOnly(True)
    slp_address.addCopyButton()
    # (output widget, address format) pairs driven by convert_address().
    widgets = [
        (cash_address, Address.FMT_CASHADDR),
        (legacy_address, Address.FMT_LEGACY),
        (slp_address, Address.FMT_SLPADDR)
    ]
    def convert_address():
        # Re-render every output field; blank them all when the input
        # does not parse as an address.
        try:
            addr = Address.from_string(source_address.text().strip())
        except:
            addr = None
        for widget, fmt in widgets:
            if addr:
                widget.setText(addr.to_full_string(fmt))
            else:
                widget.setText('')
    source_address.textChanged.connect(convert_address)
    w = QWidget()
    grid = QGridLayout()
    grid.setSpacing(15)
    grid.setColumnStretch(1, 2)
    grid.setColumnStretch(2, 1)
    label = QLabel(_('&Address to convert'))
    label.setBuddy(source_address)
    grid.addWidget(label, 0, 0)
    grid.addWidget(source_address, 0, 1)
    label = QLabel(_('&Cash address'))
    label.setBuddy(cash_address)
    grid.addWidget(label, 1, 0)
    grid.addWidget(cash_address, 1, 1)
    label = QLabel(_('&Legacy address'))
    label.setBuddy(legacy_address)
    grid.addWidget(label, 2, 0)
    grid.addWidget(legacy_address, 2, 1)
    grid.addWidget(QLabel(_('SLP address')), 3, 0)
    grid.addWidget(slp_address, 3, 1)
    w.setLayout(grid)
    label = WWLabel(_(
        "This tool helps convert between address formats for Bitcoin "
        "Cash addresses.\nYou are encouraged to use the 'Cash address' "
        "format."
    ))
    vbox = QVBoxLayout()
    vbox.addWidget(label)
    vbox.addWidget(w)
    vbox.addStretch(1)
    w = QWidget()
    w.setLayout(vbox)
    return w
def create_list_tab(self, l, list_header=None):
    """Wrap list widget *l* in a tab widget, optionally topped by a row
    of header buttons, and return the tab."""
    class ListTab(QWidget):
        def showEvent(self, e):
            super().showEvent(e)
    tab = ListTab()
    tab.main_window = self
    tab.searchable_list = l
    layout = QVBoxLayout()
    tab.setLayout(layout)
    layout.setContentsMargins(0, 0, 0, 0)
    layout.setSpacing(0)
    if list_header:
        header_row = QHBoxLayout()
        for header_widget in list_header:
            header_row.addWidget(header_widget)
        header_row.addStretch()
        layout.addLayout(header_row)
    layout.addWidget(l)
    return tab
def create_addresses_tab(self):
    """Build and return the Addresses tab, backed by an AddressList."""
    from .address_list import AddressList
    addr_list = AddressList(self)
    self.address_list = addr_list
    self.cashaddr_toggled_signal.connect(addr_list.update)
    return self.create_list_tab(addr_list)
def create_utxo_tab(self):
    """Build and return the Coins tab, backed by a UTXOList."""
    from .utxo_list import UTXOList
    coin_list = UTXOList(self)
    self.utxo_list = coin_list
    self.cashaddr_toggled_signal.connect(coin_list.update)
    return self.create_list_tab(coin_list)
def create_slp_mgt_tab(self):
    """Build the SLP token management tab: the token list plus a
    'Create New Token' button."""
    self.create_token_dialog = None
    from .slp_mgt import SlpMgt
    token_list = SlpMgt(self)
    self.token_list = token_list
    tab = self.create_list_tab(token_list)
    layout = tab.layout()
    layout.setSpacing(10)
    btn = QPushButton(_("Create New Token"))
    btn.setAutoDefault(False)
    btn.setDefault(False)
    btn.clicked.connect(self.show_create_token_dialog)
    layout.addWidget(btn)
    tab.setLayout(layout)
    return tab
def show_create_token_dialog(self):
    """Open (or re-focus) the token genesis dialog; refuse with a
    warning if the wallet's spendable BCH balance is too low."""
    c, u, x = self.wallet.get_balance()
    # Spendable = confirmed + unconfirmed, minus BCH locked in SLP dust.
    bal = c + u - self.wallet.get_slp_locked_balance()
    if bal < 1000:
        self.receive_tab.low_balance_warning_shown = True
        self.show_warning("Low BCH balance.\n\nBefore creating a new token you must add Bitcoin Cash to this wallet. We recommend a minimum of 0.0001 BCH to get started.\n\nSend BCH to the address displayed in the 'Receive' tab.")
        self.show_receive_tab()
        self.toggle_cashaddr(1, True)
        return
    # EAFP: if a dialog instance already exists, bring it to the front;
    # AttributeError (create_token_dialog is None) means create one.
    try:
        self.create_token_dialog.show()
        self.create_token_dialog.raise_()
        self.create_token_dialog.activateWindow()
    except AttributeError:
        self.create_token_dialog = d = SlpCreateTokenGenesisDialog(self,)
def create_contacts_tab(self):
    """Build and return the Contacts tab, backed by a ContactList."""
    from .contact_list import ContactList
    contacts = ContactList(self)
    self.contact_list = contacts
    self.cashaddr_toggled_signal.connect(contacts.update)
    return self.create_list_tab(contacts)
def remove_address(self, addr):
    """After user confirmation, delete *addr* from the wallet and
    refresh the affected UI."""
    if not self.question(_("Do you want to remove {} from your wallet?"
                           .format(addr.to_ui_string()))):
        return
    self.wallet.delete_address(addr)
    self.update_tabs()
    self.update_status()
    self.clear_receive_tab()
def get_coins(self, isInvoice = False):
    """Return the coins to spend: the explicit 'pay from' selection when
    one is set, otherwise the wallet's spendable coins."""
    if not self.pay_from:
        return self.wallet.get_spendable_coins(None, self.config, isInvoice)
    return self.pay_from
# def get_slp_coins(self, isInvoice = False):
# isInvoice = False
# return self.wallet.get_slp_spendable_coins(self.slp_token_id, None, self.config, isInvoice)
def spend_coins(self, coins):
    """Restrict spending to *coins* (the 'pay from' selection), switch
    to the send tab and recompute the fee."""
    self.set_pay_from(coins)
    self.show_send_tab()
    self.update_fee()
def paytomany(self):
    """Switch the send tab into multi-output ('pay to many') mode and
    show usage instructions."""
    self.show_send_tab()
    self.do_clear()
    self.payto_e.paytomany()
    instructions = '\n'.join([
        _('Enter a list of outputs in the \'Pay to\' field.'),
        _('One output per line.'),
        _('Format: address, amount'),
        _('You may load a CSV file using the file icon.'),
    ])
    self.show_message(instructions, title=_('Pay to many'))
def payto_contacts(self, labels):
    """Fill the send tab's payto field from the given contact labels.
    One contact goes straight into the field; several become a
    pay-to-many list with zero amounts."""
    paytos = [self.get_contact_payto(label) for label in labels]
    self.show_send_tab()
    if len(paytos) != 1:
        self.payto_e.setText("\n".join(p + ", 0" for p in paytos))
        self.payto_e.setFocus()
    else:
        self.payto_e.setText(paytos[0])
        self.amount_e.setFocus()
def set_contact(self, label, address):
    """Create or update the contact entry for *address* with *label*.
    Returns True on success, False when the address is invalid."""
    if not Address.is_valid(address):
        self.show_error(_('Invalid Address'))
        self.contact_list.update() # Displays original unchanged value
        return False
    previous_entry = self.contacts.get(address, None)
    self.contacts[address] = ('address', label)
    self.contact_list.update()
    self.history_list.update()
    self.history_updated_signal.emit() # inform things like address_dialog that there's a new history
    self.update_completions()
    # The contact has changed, update any addresses that are displayed with the old information.
    run_hook('update_contact', address, self.contacts[address], previous_entry)
    return True
def delete_contacts(self, addresses):
    """After confirmation, remove the given contact addresses, refresh
    the UI, and fire the 'delete_contacts' hook with the removed
    (address, entry) pairs."""
    if len(addresses) <= 3:
        contact_str = " + ".join(addresses)
    else:
        contact_str = _("{} contacts").format(len(addresses))
    if not self.question(_("Remove {} from your list of contacts?")
                         .format(contact_str)):
        return
    removed_entries = []
    for address in addresses:
        if address in self.contacts:
            removed_entries.append((address, self.contacts.pop(address)))
    self.history_list.update()
    self.history_updated_signal.emit() # inform things like address_dialog that there's a new history
    self.contact_list.update()
    self.update_completions()
    run_hook('delete_contacts', removed_entries)
def add_token_type(self, token_class, token_id, token_name, decimals_divisibility, *, error_callback=None, show_errors=True, allow_overwrite=False):
    """Validate and add an SLP token type to the wallet, then refresh
    the token-related UI.  Returns True on success, False on failure.

    error_callback: callable taking an error-message string; defaults to
        self.show_error when show_errors is True.
    show_errors: when False, error display is suppressed entirely.
    allow_overwrite: passed through to wallet.add_token_safe.
    """
    # FIXME: are both args error_callback and show_errors both necessary?
    # Maybe so if we want the default to be self.show_error...
    if not show_errors:
        # setting error_callback to None will suppress errors being shown
        # iff show_errors is False
        error_callback = None
    if error_callback is None and show_errors:
        # They asked for errors but supplied no callback. Use the standard
        # one for main_window
        error_callback = self.show_error
    # The below call checks sanity and calls error_callback for us
    # with an error message argument on failure, returning False.
    # On success it will add the token, write to wallet storage,
    # and potentially kick off the verifier.
    if not self.wallet.add_token_safe(
            token_class, token_id, token_name, decimals_divisibility,
            error_callback=error_callback, allow_overwrite=allow_overwrite,
            write_storage=True):
        return False
    # Great success! Update GUI.
    self.token_list.update()
    self.update_token_type_combo()
    self.slp_history_list.update()
    return True
def delete_slp_token(self, token_ids):
    """After confirmation, forget the given SLP token types and refresh
    the token-related UI and wallet storage."""
    if not self.question(_("Remove {} from your list of tokens?")
                         .format(" + ".join(token_ids))):
        return
    for token_id in token_ids:
        self.wallet.token_types.pop(token_id)
    self.token_list.update()
    self.update_token_type_combo()
    self.slp_history_list.update()
    self.wallet.save_transactions(True)
def show_invoice(self, key):
    """Look up the invoice for *key*, refresh its verification status,
    and display its details dialog."""
    invoice = self.invoices.get(key)
    invoice.verify(self.contacts)
    self.show_pr_details(invoice)
def show_pr_details(self, pr):
    """Show a modal dialog with the details of payment request *pr*
    (requestor, outputs, memo, signature status, expiry), plus buttons
    to export the invoice to a file or delete it.

    Fix: the 'Invoice saved as' message previously passed the whole
    concatenated string (fixed text + filename) through ``_()``, which
    defeats gettext lookup; only the fixed text is translated now.
    Also drops an unused ``data =`` binding on the file write.
    """
    key = pr.get_id()
    d = WindowModalDialog(self.top_level_window(), _("Invoice"))
    vbox = QVBoxLayout(d)
    grid = QGridLayout()
    grid.addWidget(QLabel(_("Requestor") + ':'), 0, 0)
    grid.addWidget(QLabel(pr.get_requestor()), 0, 1)
    grid.addWidget(QLabel(_("Amount") + ':'), 1, 0)
    # One line per output: "<amount><unit> @ <address>"
    outputs_str = '\n'.join(map(lambda x: self.format_amount(x[2])+ self.base_unit() + ' @ ' + x[1].to_ui_string(), pr.get_outputs()))
    grid.addWidget(QLabel(outputs_str), 1, 1)
    expires = pr.get_expiration_date()
    grid.addWidget(QLabel(_("Memo") + ':'), 2, 0)
    grid.addWidget(QLabel(pr.get_memo()), 2, 1)
    grid.addWidget(QLabel(_("Signature") + ':'), 3, 0)
    grid.addWidget(QLabel(pr.get_verify_status()), 3, 1)
    if expires:
        grid.addWidget(QLabel(_("Expires") + ':'), 4, 0)
        grid.addWidget(QLabel(format_time(expires)), 4, 1)
    vbox.addLayout(grid)
    # Weak ref so the delete closure doesn't keep the dialog alive.
    weakD = Weak.ref(d)
    def do_export():
        # Save the invoice to a user-chosen file.
        ext = pr.export_file_ext()
        fn = self.getSaveFileName(_("Save invoice to file"), "*." + ext)
        if not fn:
            return
        with open(fn, 'wb') as f:
            f.write(pr.export_file_data())
        self.show_message(_('Invoice saved as') + ' ' + fn)
    exportButton = EnterButton(_('Save'), do_export)
    def do_delete():
        if self.question(_('Delete invoice?')):
            self.invoices.remove(key)
            self.history_list.update()
            self.history_updated_signal.emit() # inform things like address_dialog that there's a new history
            self.invoice_list.update()
            d = weakD()
            if d: d.close()
    deleteButton = EnterButton(_('Delete'), do_delete)
    vbox.addLayout(Buttons(exportButton, deleteButton, CloseButton(d)))
    d.exec_()
    d.setParent(None) # So Python can GC
def do_pay_invoice(self, key):
    """Load invoice *key* into the send tab and start the
    payment-request flow, forcing a fresh verification."""
    request = self.invoices.get(key)
    self.payment_request = request
    self.prepare_for_payment_request()
    request.error = None # this forces verify() to re-run
    if request.verify(self.contacts):
        self.payment_request_ok()
    else:
        self.payment_request_error()
def create_console_tab(self):
    """Build and return the Python console tab."""
    from .console import Console
    console = Console(wallet=self.wallet)
    self.console = console
    return console
def update_console(self):
    """(Re)populate the console tab: restore saved command history and
    inject wallet/network objects plus one wrapper per Commands method
    into the console's namespace."""
    console = self.console
    console.history = self.config.get("console-history",[])
    console.history_index = len(console.history)
    console.updateNamespace({'wallet' : self.wallet,
                             'network' : self.network,
                             'plugins' : self.gui_object.plugins,
                             'window': self})
    console.updateNamespace({'util' : util, 'bitcoin':bitcoin})
    # Weak refs so the console namespace doesn't keep the window alive.
    set_json = Weak(self.console.set_json)
    c = commands.Commands(self.config, self.wallet, self.network, lambda: set_json(True))
    methods = {}
    password_getter = Weak(self.password_dialog)
    def mkfunc(f, method):
        # Binds `method` now, avoiding the late-binding closure pitfall
        # in the loop below.
        return lambda *args, **kwargs: f(method, *args, password_getter=password_getter,
                                         **kwargs)
    for m in dir(c):
        # Skip private names and attributes that aren't commands.
        if m[0]=='_' or m in ['network','wallet','config']: continue
        methods[m] = mkfunc(c._run, m)

    console.updateNamespace(methods)
def create_status_bar(self):
    """Construct the main window's status bar: balance label, hidden
    search box, address-format indicator, and the permanent buttons
    (update-available, password, CashAddr toggle, preferences, seed,
    network status)."""
    sb = QStatusBar()
    sb.setFixedHeight(35)
    qtVersion = qVersion()
    self.balance_label = QLabel("")
    sb.addWidget(self.balance_label)

    self._search_box_spacer = QWidget()
    self._search_box_spacer.setFixedWidth(6) # 6 px spacer
    self.search_box = QLineEdit()
    # Hidden by default; toggled via toggle_search().
    self.search_box.setPlaceholderText(_("Search wallet, {key}F to hide").format(key='Ctrl+' if sys.platform != 'darwin' else '⌘'))
    self.search_box.textChanged.connect(self.do_search)
    self.search_box.hide()
    sb.addPermanentWidget(self.search_box, 1)

    self.addr_format_label = QLabel("")
    sb.addPermanentWidget(self.addr_format_label)

    self.update_available_button = StatusBarButton(QIcon(":icons/electron-cash-update.svg"), _("Update available, click for details"), lambda: self.gui_object.show_update_checker(self, skip_check=True))
    self.update_available_button.setStatusTip(_("An Electron Cash update is available"))
    sb.addPermanentWidget(self.update_available_button)
    self.update_available_button.setVisible(bool(self.gui_object.new_version_available)) # if hidden now gets unhidden by on_update_available when a new version comes in

    self.lock_icon = QIcon()
    self.password_button = StatusBarButton(self.lock_icon, _("Password"), self.change_password_dialog )
    sb.addPermanentWidget(self.password_button)

    self.addr_converter_button = StatusBarButton(
        self.cashaddr_icon(),
        _("Toggle CashAddr Display"),
        self.toggle_cashaddr_status_bar
    )
    sb.addPermanentWidget(self.addr_converter_button)

    sb.addPermanentWidget(StatusBarButton(QIcon(":icons/preferences.svg"), _("Preferences"), self.settings_dialog ) )
    self.seed_button = StatusBarButton(QIcon(":icons/seed.png"), _("Seed"), self.show_seed_dialog )
    sb.addPermanentWidget(self.seed_button)
    # Weak ref so the network-dialog lambda doesn't keep this window alive.
    weakSelf = Weak(self)
    gui_object = self.gui_object
    self.status_button = StatusBarButton(QIcon(":icons/status_disconnected.svg"), _("Network"), lambda: gui_object.show_network_dialog(weakSelf))
    sb.addPermanentWidget(self.status_button)
    run_hook('create_status_bar', sb)
    self.setStatusBar(sb)
def on_update_available(self, b):
    """Show or hide the 'update available' status-bar button plus an
    attention popup label.  *b* is truthy when an update exists."""
    self.update_available_button.setVisible(bool(b))

    # The popup label won't really be shown unless this window is
    # on top.. but regardless we give each label a unique internal name
    # so they dont interfere with each other.
    lblName = "UpdateAvailable_" + self.diagnostic_name()

    if b:
        ShowPopupLabel(name = lblName,
                       text="<center><b>{}</b><br><small>{}</small></center>".format(_("Update Available"),_("Click for details")),
                       target=self.update_available_button,
                       timeout=20000, onClick=self.update_available_button.click,
                       onRightClick=self.update_available_button.click,
                       dark_mode = ColorScheme.dark_scheme)
    else:
        # Immediately kills any extant labels
        KillPopupLabel(lblName)
def update_lock_icon(self):
    """Refresh the status-bar password button icon/tooltip to reflect
    whether the wallet currently has a password set."""
    has_pw = self.wallet.has_password()
    icon = QIcon(":icons/lock.svg") if has_pw else QIcon(":icons/unlock.svg")
    tip = _('Wallet Password') + ' - ' + (_('Enabled') if has_pw else _('Disabled'))
    self.password_button.setIcon(icon)
    self.password_button.setStatusTip(tip)
def update_buttons_on_seed(self):
    """Show/hide the seed, password and send-related buttons based on
    wallet capabilities and whether a CoinText destination is active."""
    self.seed_button.setVisible(self.wallet.has_seed())
    self.password_button.setVisible(self.wallet.can_change_password())
    cointext_active = bool(self.payto_e.cointext)
    if cointext_active and self.slp_token_id:
        # CoinText sends are BCH-only: deselect any chosen token.
        self.token_type_combo.setCurrentIndex(0)
    can_send = not self.wallet.is_watching_only() and not cointext_active
    self.send_button.setVisible(can_send)
    self.preview_button.setVisible(not cointext_active)
    self.cointext_button.setVisible(cointext_active)
def change_password_dialog(self):
    """Run the change-password dialog and apply the result to the wallet.

    Fix: the original had a bare ``except:`` clause *after*
    ``except BaseException``, making it unreachable dead code (in
    Python 3 ``BaseException`` already catches everything).  The two
    handlers are merged: the error is shown to the user, with the
    traceback printed when verbose logging is enabled.
    """
    from .password_dialog import ChangePasswordDialog
    d = ChangePasswordDialog(self.top_level_window(), self.wallet)
    ok, password, new_password, encrypt_file = d.run()
    if not ok:
        return
    try:
        self.wallet.update_password(password, new_password, encrypt_file)
    except BaseException as e:
        if util.is_verbose:
            traceback.print_exc(file=sys.stderr)
        self.show_error(str(e))
        return
    msg = _('Password was updated successfully') if new_password else _('Password is disabled, this wallet is not protected')
    self.show_message(msg, title=_("Success"))
    self.update_lock_icon()
def get_passphrase_dialog(self, msg : str, title : str = None, *, permit_empty = False) -> str:
    """Prompt the user for a passphrase via PassphraseDialog and return
    the result of its run()."""
    from .password_dialog import PassphraseDialog
    dlg = PassphraseDialog(self.wallet, self.top_level_window(), msg, title, permit_empty = permit_empty)
    return dlg.run()
def toggle_search(self):
    """Toggle the status-bar wallet search box, re-applying (or
    clearing) the current filter accordingly."""
    box = self.search_box
    box.setHidden(not box.isHidden())
    if box.isHidden():
        # Hiding: remove the spacer, restore the balance label, clear filter.
        self._search_box_spacer.hide()
        self.statusBar().removeWidget(self._search_box_spacer)
        self.balance_label.setHidden(False)
        self.do_search('')
    else:
        # Showing: make room, focus the box, re-apply any existing text.
        self.balance_label.setHidden(True)
        self.statusBar().insertWidget(0, self._search_box_spacer)
        self._search_box_spacer.show()
        box.setFocus(1)
        if box.text():
            self.do_search(box.text())
def do_search(self, t):
    '''Apply search text to all tabs. FIXME: if a plugin later is loaded
    it will not receive the search filter -- but most plugins I know about
    do not support searchable_list anyway, so hopefully it's a non-issue.'''
    for tab_index in range(self.tabs.count()):
        page = self.tabs.widget(tab_index)
        try:
            page.searchable_list.filter(t)
        except (AttributeError, TypeError):
            # Tab has no searchable_list (or it doesn't filter) -- skip.
            pass
def new_contact_dialog(self):
    """Show a modal 'New Contact' dialog and, if accepted, store the
    entered name/address pair."""
    dlg = WindowModalDialog(self.top_level_window(), _("New Contact"))
    layout = QVBoxLayout(dlg)
    layout.addWidget(QLabel(_('New Contact') + ':'))
    grid = QGridLayout()
    addr_edit = QLineEdit()
    addr_edit.setFixedWidth(280)
    name_edit = QLineEdit()
    name_edit.setFixedWidth(280)
    grid.addWidget(QLabel(_("Address")), 1, 0)
    grid.addWidget(addr_edit, 1, 1)
    grid.addWidget(QLabel(_("Name")), 2, 0)
    grid.addWidget(name_edit, 2, 1)
    layout.addLayout(grid)
    layout.addLayout(Buttons(CancelButton(dlg), OkButton(dlg)))
    if dlg.exec_():
        # set_contact(label, address)
        self.set_contact(name_edit.text(), addr_edit.text())
def show_master_public_keys(self):
dialog = WindowModalDialog(self.top_level_window(), _("Wallet Information"))
dialog.setMinimumSize(500, 100)
mpk_list = self.wallet.get_master_public_keys()
vbox = QVBoxLayout()
wallet_type = self.wallet.storage.get('wallet_type', '')
grid = QGridLayout()
basename = os.path.basename(self.wallet.storage.path)
grid.addWidget(QLabel(_("Wallet name")+ ':'), 0, 0)
grid.addWidget(QLabel(basename), 0, 1)
grid.addWidget(QLabel(_("Wallet type")+ ':'), 1, 0)
grid.addWidget(QLabel(wallet_type), 1, 1)
grid.addWidget(QLabel(_("Script type")+ ':'), 2, 0)
grid.addWidget(QLabel(self.wallet.txin_type), 2, 1)
vbox.addLayout(grid)
if self.wallet.is_deterministic():
mpk_text = ShowQRTextEdit()
mpk_text.setMaximumHeight(150)
mpk_text.addCopyButton()
def show_mpk(index):
mpk_text.setText(mpk_list[index])
# only show the combobox in case multiple accounts are available
if len(mpk_list) > 1:
def label(key):
if isinstance(self.wallet, Multisig_Wallet):
return _("cosigner") + ' ' + str(key+1)
return ''
labels = [label(i) for i in range(len(mpk_list))]
on_click = lambda clayout: show_mpk(clayout.selected_index())
labels_clayout = ChoicesLayout(_("Master Public Keys"), labels, on_click)
vbox.addLayout(labels_clayout.layout())
else:
vbox.addWidget(QLabel(_("Master Public Key")))
show_mpk(0)
vbox.addWidget(mpk_text)
vbox.addStretch(1)
vbox.addLayout(Buttons(CloseButton(dialog)))
dialog.setLayout(vbox)
dialog.exec_()
def remove_wallet(self):
if self.question('\n'.join([
_('Delete wallet file?'),
"%s"%self.wallet.storage.path,
_('If your wallet contains funds, make sure you have saved its seed.')])):
self._delete_wallet()
@protected
def _delete_wallet(self, password):
wallet_path = self.wallet.storage.path
basename = os.path.basename(wallet_path)
r = self.gui_object.daemon.delete_wallet(wallet_path) # implicitly also calls stop_wallet
self.update_recently_visited(wallet_path) # this ensures it's deleted from the menu
if r:
self.show_error(_("Wallet removed: {}").format(basename))
else:
self.show_error(_("Wallet file not found: {}").format(basename))
self.close()
@protected
def show_seed_dialog(self, password):
if not self.wallet.has_seed():
self.show_message(_('This wallet has no seed'))
return
keystore = self.wallet.get_keystore()
try:
seed = keystore.get_seed(password)
passphrase = keystore.get_passphrase(password)
except BaseException as e:
self.show_error(str(e))
return
from .seed_dialog import SeedDialog, SeedBackupDialog
WhichClass = SeedBackupDialog if self.wallet.storage.get('wallet_seed_needs_backup') else SeedDialog
d = WhichClass(self.top_level_window(), seed, passphrase, wallet=self.wallet)
if d.exec_() == QDialog.Accepted:
# This banch is in case they were in the SeedBackupDialog; below
# makes the new non-warning icon (if any) take effect
self.update_status()
d.setParent(None) # gc now rather than later
def show_qrcode(self, data, title = _("QR code"), parent=None):
if not data:
return
d = QRDialog(data, parent or self, title)
d.exec_()
d.setParent(None) # Help Python GC this sooner rather than later
    @protected
    def show_private_key(self, address, password):
        """Show the private key and redeem script for *address* in a modal dialog.

        Includes an optional "Encrypt BIP38" button, enabled only when
        bitcoin.Bip38Key.canEncrypt() is true, which replaces the displayed
        WIF with its BIP38-encrypted form in place.  Decorated with
        @protected, so the wallet password is requested first.
        """
        if not address:
            return
        try:
            pk = self.wallet.export_private_key(address, password)
        except Exception as e:
            if util.is_verbose:
                traceback.print_exc(file=sys.stderr)
            self.show_message(str(e))
            return
        # first element of the deserialized tuple is the script type string
        xtype = bitcoin.deserialize_privkey(pk)[0]
        d = WindowModalDialog(self.top_level_window(), _("Private key"))
        d.setMinimumSize(600, 150)
        vbox = QVBoxLayout()
        vbox.addWidget(QLabel('{}: {}'.format(_("Address"), address)))
        vbox.addWidget(QLabel(_("Script type") + ': ' + xtype))
        pk_lbl = QLabel(_("Private key") + ':')
        vbox.addWidget(pk_lbl)
        keys_e = ShowQRTextEdit(text=pk)
        keys_e.addCopyButton()
        # BIP38 Encrypt Button
        def setup_encrypt_button():
            encrypt_but = QPushButton(_("Encrypt BIP38") + "...")
            f = encrypt_but.font(); f.setPointSize(f.pointSize()-1); encrypt_but.setFont(f) # make font -= 1
            encrypt_but.setEnabled(bool(bitcoin.Bip38Key.canEncrypt()))
            encrypt_but.setToolTip(_("Encrypt this private key using BIP38 encryption")
                                   if encrypt_but.isEnabled() else
                                   _("BIP38 encryption unavailable: install pycryptodomex to enable"))
            # Build an enabled-state stylesheet with a subtle rounded border,
            # derived from the text edit's own stylesheet.
            border_color = ColorScheme.DEFAULT.as_color(False)
            border_color.setAlphaF(0.65)
            encrypt_but_ss_en = (
                keys_e.styleSheet() + (("QPushButton { border: 1px solid %s; border-radius: 6px; padding: 2px; margin: 2px; } "
                                        "QPushButton:hover { border: 1px solid #3daee9; } "
                                        "QPushButton:disabled { border: 1px solid transparent; ") % (border_color.name(QColor.HexArgb)))
            )
            encrypt_but_ss_dis = ( keys_e.styleSheet() )
            encrypt_but.setStyleSheet(encrypt_but_ss_en if encrypt_but.isEnabled() else encrypt_but_ss_dis)
            def on_encrypt():
                # Ask for a BIP38 passphrase, then replace the shown WIF with
                # the encrypted key and disable further encryption.
                passphrase = self.get_passphrase_dialog(
                    msg = (
                        _("Specify a passphrase to use for BIP38 encryption.") + "\n" +
                        _("Save this passphrase if you save the generated key so you may decrypt it later.")
                    )
                )
                if not passphrase:
                    return
                try:
                    bip38 = str(bitcoin.Bip38Key.encrypt(pk, passphrase))
                    keys_e.setText(bip38)
                    encrypt_but.setEnabled(False)
                    encrypt_but.setStyleSheet(encrypt_but_ss_dis)
                    pk_lbl.setText( _("BIP38 Key") + ":" )
                    self.show_message(_("WIF key has been encrypted using BIP38.\n\n"
                                        "You may save this encrypted key to a file or print out its QR code and/or text.\n\n"
                                        "It is strongly encrypted with the passphrase you specified and safe to store electronically. "
                                        "However, the passphrase should be stored securely and not shared with anyone."))
                except Exception as e:
                    if util.is_verbose:
                        traceback.print_exc(file=sys.stderr)
                    self.show_error(str(e))
            encrypt_but.clicked.connect(on_encrypt)
            keys_e.addWidget(encrypt_but, 0)
        setup_encrypt_button()
        # /BIP38 Encrypt Button
        vbox.addWidget(keys_e)
        vbox.addWidget(QLabel(_("Redeem Script") + ':'))
        rds_e = ShowQRTextEdit(text=address.to_script().hex())
        rds_e.addCopyButton()
        vbox.addWidget(rds_e)
        vbox.addLayout(Buttons(CloseButton(d)))
        d.setLayout(vbox)
        d.exec_()
@protected
def do_sign(self, address, message, signature, password):
address = address.text().strip()
message = message.toPlainText().strip()
try:
addr = Address.from_string(address)
except:
self.show_message(_('Invalid Bitcoin Cash address.'))
return
if addr.kind != addr.ADDR_P2PKH:
msg_sign = ( _("Signing with an address actually means signing with the corresponding "
"private key, and verifying with the corresponding public key. The "
"address you have entered does not have a unique public key, so these "
"operations cannot be performed.") + '\n\n' +
_('The operation is undefined. Not just in Electron Cash, but in general.') )
self.show_message(_('Cannot sign messages with this type of address.') + '\n\n' + msg_sign)
return
if self.wallet.is_watching_only():
self.show_message(_('This is a watching-only wallet.'))
return
if not self.wallet.is_mine(addr):
self.show_message(_('Address not in wallet.'))
return
task = partial(self.wallet.sign_message, addr, message, password)
def show_signed_message(sig):
signature.setText(base64.b64encode(sig).decode('ascii'))
self.wallet.thread.add(task, on_success=show_signed_message)
def do_verify(self, address, message, signature):
try:
address = Address.from_string(address.text().strip())
except:
self.show_message(_('Invalid Bitcoin Cash address.'))
return
message = message.toPlainText().strip().encode('utf-8')
try:
# This can throw on invalid base64
sig = base64.b64decode(signature.toPlainText())
verified = bitcoin.verify_message(address, sig, message)
except:
verified = False
if verified:
self.show_message(_("Signature verified"))
else:
self.show_error(_("Wrong signature"))
def sign_verify_message(self, address=None):
d = WindowModalDialog(self.top_level_window(), _('Sign/verify Message'))
d.setMinimumSize(610, 290)
layout = QGridLayout(d)
message_e = QTextEdit()
message_e.setAcceptRichText(False)
layout.addWidget(QLabel(_('Message')), 1, 0)
layout.addWidget(message_e, 1, 1)
layout.setRowStretch(2,3)
address_e = QLineEdit()
address_e.setText(address.to_ui_string() if address else '')
layout.addWidget(QLabel(_('Address')), 2, 0)
layout.addWidget(address_e, 2, 1)
signature_e = QTextEdit()
signature_e.setAcceptRichText(False)
layout.addWidget(QLabel(_('Signature')), 3, 0)
layout.addWidget(signature_e, 3, 1)
layout.setRowStretch(3,1)
hbox = QHBoxLayout()
b = QPushButton(_("Sign"))
b.clicked.connect(lambda: self.do_sign(address_e, message_e, signature_e))
hbox.addWidget(b)
b = QPushButton(_("Verify"))
b.clicked.connect(lambda: self.do_verify(address_e, message_e, signature_e))
hbox.addWidget(b)
b = QPushButton(_("Close"))
b.clicked.connect(d.accept)
hbox.addWidget(b)
layout.addLayout(hbox, 4, 1)
d.exec_()
@protected
def do_decrypt(self, message_e, pubkey_e, encrypted_e, password):
if self.wallet.is_watching_only():
self.show_message(_('This is a watching-only wallet.'))
return
cyphertext = encrypted_e.toPlainText()
task = partial(self.wallet.decrypt_message, pubkey_e.text(), cyphertext, password)
self.wallet.thread.add(task, on_success=lambda text: message_e.setText(text.decode('utf-8')))
def do_encrypt(self, message_e, pubkey_e, encrypted_e):
message = message_e.toPlainText()
message = message.encode('utf-8')
try:
encrypted = bitcoin.encrypt_message(message, pubkey_e.text())
encrypted_e.setText(encrypted.decode('ascii'))
except BaseException as e:
if util.is_verbose:
traceback.print_exc(file=sys.stderr)
self.show_warning(str(e))
def encrypt_message(self, address=None):
d = WindowModalDialog(self.top_level_window(), _('Encrypt/decrypt Message'))
d.setMinimumSize(610, 490)
layout = QGridLayout(d)
message_e = QTextEdit()
message_e.setAcceptRichText(False)
layout.addWidget(QLabel(_('Message')), 1, 0)
layout.addWidget(message_e, 1, 1)
layout.setRowStretch(2,3)
pubkey_e = QLineEdit()
if address:
pubkey = self.wallet.get_public_key(address)
if not isinstance(pubkey, str):
pubkey = pubkey.to_ui_string()
pubkey_e.setText(pubkey)
layout.addWidget(QLabel(_('Public key')), 2, 0)
layout.addWidget(pubkey_e, 2, 1)
encrypted_e = QTextEdit()
encrypted_e.setAcceptRichText(False)
layout.addWidget(QLabel(_('Encrypted')), 3, 0)
layout.addWidget(encrypted_e, 3, 1)
layout.setRowStretch(3,1)
hbox = QHBoxLayout()
b = QPushButton(_("Encrypt"))
b.clicked.connect(lambda: self.do_encrypt(message_e, pubkey_e, encrypted_e))
hbox.addWidget(b)
b = QPushButton(_("Decrypt"))
b.clicked.connect(lambda: self.do_decrypt(message_e, pubkey_e, encrypted_e))
hbox.addWidget(b)
b = QPushButton(_("Close"))
b.clicked.connect(d.accept)
hbox.addWidget(b)
layout.addLayout(hbox, 4, 1)
d.exec_()
def password_dialog(self, msg=None, parent=None):
from .password_dialog import PasswordDialog
parent = parent or self
return PasswordDialog(parent, msg).run()
def tx_from_text(self, txt):
from electroncash.transaction import tx_from_str
try:
txt_tx = tx_from_str(txt)
tx = Transaction(txt_tx, sign_schnorr=self.wallet.is_schnorr_enabled())
tx.deserialize()
if self.wallet:
my_coins = self.wallet.get_spendable_coins(None, self.config)
my_outpoints = [vin['prevout_hash'] + ':' + str(vin['prevout_n']) for vin in my_coins]
for i, txin in enumerate(tx.inputs()):
outpoint = txin['prevout_hash'] + ':' + str(txin['prevout_n'])
if outpoint in my_outpoints:
my_index = my_outpoints.index(outpoint)
tx._inputs[i]['value'] = my_coins[my_index]['value']
return tx
except:
if util.is_verbose:
traceback.print_exc(file=sys.stderr)
self.show_critical(_("Electron Cash was unable to parse your transaction"))
return
    # Due to the asynchronous nature of the qr reader we need to keep the
    # dialog instance as member variable to prevent reentrancy/multiple ones
    # from being presented at once.
    _qr_dialog = None
    def read_tx_from_qrcode(self):
        """Open the camera QR-reader dialog and process whatever is scanned.

        The scan result is handled asynchronously in the qr_finished
        callback: a cashaddr/slpaddr payment URI is routed to pay_to_URI();
        anything else is treated as base43-encoded raw transaction data and
        shown via show_transaction().
        """
        if self._qr_dialog:
            # Re-entrancy prevention -- there is some lag between when the user
            # taps the QR button and the modal dialog appears. We want to
            # prevent multiple instances of the dialog from appearing, so we
            # must do this.
            self.print_error("Warning: QR dialog is already presented, ignoring.")
            return
        if self.gui_object.warn_if_cant_import_qrreader(self):
            return
        from electroncash import get_config
        from .qrreader import QrReaderCameraDialog
        data = ''
        self._qr_dialog = None
        try:
            self._qr_dialog = QrReaderCameraDialog(parent=self.top_level_window())
            def _on_qr_reader_finished(success: bool, error: str, result):
                # Runs once the camera dialog finishes; clean up the dialog
                # first so a new scan can be started.
                if self._qr_dialog:
                    self._qr_dialog.deleteLater(); self._qr_dialog = None
                if not success:
                    if error:
                        self.show_error(error)
                    return
                if not result:
                    return
                # if the user scanned a bitcoincash URI
                if result.lower().startswith(networks.net.CASHADDR_PREFIX + ':') or result.lower().startswith(networks.net.SLPADDR_PREFIX + ':'):
                    self.pay_to_URI(result)
                    return
                # else if the user scanned an offline signed tx
                try:
                    result = bh2u(bitcoin.base_decode(result, length=None, base=43))
                    tx = self.tx_from_text(result) # will show an error dialog on error
                    if not tx:
                        return
                except BaseException as e:
                    self.show_error(str(e))
                    return
                self.show_transaction(tx)
            self._qr_dialog.qr_finished.connect(_on_qr_reader_finished)
            self._qr_dialog.start_scan(get_config().get_video_device())
        except BaseException as e:
            if util.is_verbose:
                traceback.print_exc(file=sys.stderr)
            self._qr_dialog = None
            self.show_error(str(e))
def read_tx_from_file(self, *, fileName = None):
fileName = fileName or self.getOpenFileName(_("Select your transaction file"), "*.txn")
if not fileName:
return
try:
with open(fileName, "r", encoding='utf-8') as f:
file_content = f.read()
file_content = file_content.strip()
tx_file_dict = json.loads(str(file_content))
except (ValueError, IOError, OSError, json.decoder.JSONDecodeError) as reason:
self.show_critical(_("Electron Cash was unable to open your transaction file") + "\n" + str(reason), title=_("Unable to read file or no transaction found"))
return
tx = self.tx_from_text(file_content)
return tx
def do_process_from_text(self):
from electroncash.transaction import SerializationError
text = text_dialog(self.top_level_window(), _('Input raw transaction'), _("Transaction:"), _("Load transaction"))
if not text:
return
try:
tx = self.tx_from_text(text)
if tx:
self.show_transaction(tx)
except SerializationError as e:
self.show_critical(_("Electron Cash was unable to deserialize the transaction:") + "\n" + str(e))
def do_process_from_file(self, *, fileName = None):
from electroncash.transaction import SerializationError
try:
tx = self.read_tx_from_file(fileName=fileName)
if tx:
self.show_transaction(tx)
except SerializationError as e:
self.show_critical(_("Electron Cash was unable to deserialize the transaction:") + "\n" + str(e))
def do_process_from_txid(self, *, txid=None, parent=None):
parent = parent or self
if self.gui_object.warn_if_no_network(parent):
return
from electroncash import transaction
ok = txid is not None
if not ok:
txid, ok = QInputDialog.getText(parent, _('Lookup transaction'), _('Transaction ID') + ':')
if ok and txid:
ok, r = self.network.get_raw_tx_for_txid(txid, timeout=10.0)
if not ok:
parent.show_message(_("Error retrieving transaction") + ":\n" + r)
return
tx = transaction.Transaction(r, sign_schnorr=self.wallet.is_schnorr_enabled()) # note that presumably the tx is already signed if it comes from blockchain so this sign_schnorr parameter is superfluous, but here to satisfy my OCD -Calin
self.show_transaction(tx)
def export_bip38_dialog(self):
''' Convenience method. Simply calls self.export_privkeys_dialog(bip38=True) '''
self.export_privkeys_dialog(bip38 = True)
    @protected
    def export_privkeys_dialog(self, password, *, bip38=False):
        """Interactively export all of the wallet's private keys to a file.

        When *bip38* is True, each key is BIP38-encrypted with a
        user-supplied passphrase before display/export.  Key derivation runs
        on a daemon thread which reports back to the dialog via two custom
        Qt signals; a weakref to the dialog keeps the thread from prolonging
        its lifetime.  Decorated with @protected, so the wallet password is
        requested first.
        """
        if self.wallet.is_watching_only():
            self.show_message(_("This is a watching-only wallet"))
            return
        if isinstance(self.wallet, Multisig_Wallet):
            if bip38:
                self.show_error(_('WARNING: This is a multi-signature wallet.') + '\n' +
                                _("It cannot be used with BIP38 encrypted keys."))
                return
            self.show_message(_('WARNING: This is a multi-signature wallet.') + '\n' +
                              _('It can not be "backed up" by simply exporting these private keys.'))
        if bip38:
            if not bitcoin.Bip38Key.canEncrypt() or not bitcoin.Bip38Key.isFast():
                self.show_error(_("BIP38 Encryption is not available. Please install 'pycryptodomex' and restart Electron Cash to enable BIP38."))
                return
            passphrase = self.get_passphrase_dialog(
                msg = (
                    _("You are exporting your wallet's private keys as BIP38 encrypted keys.") + "\n\n" +
                    _("You must specify a passphrase to use for encryption.") + "\n" +
                    _("Save this passphrase so you may decrypt your BIP38 keys later.")
                )
            )
            if not passphrase:
                # user cancel
                return
            bip38 = passphrase # overwrite arg with passphrase.. for use down below ;)
        # Subclass adds the two signals used by the worker thread below.
        class MyWindowModalDialog(WindowModalDialog):
            computing_privkeys_signal = pyqtSignal()
            show_privkeys_signal = pyqtSignal()
        d = MyWindowModalDialog(self.top_level_window(), _('Private keys'))
        weak_d = Weak.ref(d)
        d.setObjectName('WindowModalDialog - Private Key Export')
        destroyed_print_error(d)  # track object lifecycle
        d.setMinimumSize(850, 300)
        vbox = QVBoxLayout(d)
        lines = [ _("WARNING: ALL your private keys are secret."),
                  _("Exposing a single private key can compromise your entire wallet!"),
                  _("In particular, DO NOT use 'redeem private key' services proposed by third parties.") ]
        if bip38:
            del lines[0] # No need to scream-WARN them since BIP38 *are* encrypted
        msg = '\n'.join(lines)
        vbox.addWidget(QLabel(msg))
        if bip38:
            # Show the passphrase (click-to-reveal) above the key list.
            wwlbl = WWLabel()
            def set_ww_txt(pf_shown=False):
                if pf_shown:
                    pf_text = ( ("<font face='{monoface}' size=+1><b>".format(monoface=MONOSPACE_FONT))
                                + bip38
                                + ('</b></font> <a href="hide">{link}</a>'.format(link=_("Hide"))) )
                else:
                    pf_text = '<a href="show">{link}</a>'.format(link=_("Click to show"))
                wwlbl.setText(
                    _("The below keys are BIP38 <i>encrypted</i> using the passphrase: {passphrase}<br>"
                      "Please <i>write this passphrase down</i> and store it in a secret place, separate from these encrypted keys."
                      ).format(passphrase=pf_text)
                )
            def toggle_ww_txt(link):
                set_ww_txt(link=="show")
            set_ww_txt()
            wwlbl.linkActivated.connect(toggle_ww_txt)
            vbox.addWidget(wwlbl)
        e = QTextEdit()
        e.setFont(QFont(MONOSPACE_FONT))
        e.setWordWrapMode(QTextOption.NoWrap)
        e.setReadOnly(True)
        vbox.addWidget(e)
        defaultname = 'electron-cash-private-keys.csv' if not bip38 else 'electron-cash-bip38-keys.csv'
        select_msg = _('Select file to export your private keys to')
        box, filename_e, csv_button = filename_field(self.config, defaultname, select_msg)
        vbox.addSpacing(12)
        vbox.addWidget(box)
        b = OkButton(d, _('Export'))
        b.setEnabled(False)  # enabled once all keys are computed
        vbox.addLayout(Buttons(CancelButton(d), b))
        private_keys = {}
        addresses = self.wallet.get_addresses()
        stop = False
        def privkeys_thread():
            # Worker: derives (and optionally BIP38-encrypts) one key per
            # address, emitting computing_privkeys_signal for progress and
            # show_privkeys_signal when done.  Honors the shared `stop` flag.
            for addr in addresses:
                if not bip38:
                    # This artificial sleep is likely a security / paranoia measure
                    # to allow user to cancel or to make the process "feel expensive".
                    # In the bip38 case it's already slow enough so this delay
                    # is not needed.
                    time.sleep(0.100)
                if stop:
                    return
                try:
                    privkey = self.wallet.export_private_key(addr, password)
                    if bip38 and privkey:
                        privkey = str(bitcoin.Bip38Key.encrypt(privkey, bip38)) # __str__() -> base58 encoded bip38 key
                except InvalidPassword:
                    # See #921 -- possibly a corrupted wallet or other strangeness
                    privkey = 'INVALID_PASSWORD'
                if self.is_slp_wallet: #TODO: also create special prefix for SLP wallet private keys
                    private_keys[addr.to_full_string(Address.FMT_SLPADDR)] = privkey
                else:
                    private_keys[addr.to_full_string(Address.FMT_CASHADDR)] = privkey
                strong_d = weak_d()
                try:
                    if strong_d and not stop:
                        strong_d.computing_privkeys_signal.emit()
                    else:
                        return
                finally:
                    del strong_d
            if stop:
                return
            strong_d = weak_d()
            if strong_d:
                strong_d.show_privkeys_signal.emit()
        def show_privkeys():
            nonlocal stop
            if stop:
                return
            s = "\n".join('{:45} {}'.format(addr, privkey)
                          for addr, privkey in private_keys.items())
            e.setText(s)
            b.setEnabled(True)
            stop = True
        thr = None
        def on_dialog_closed(*args):
            # Tear down: stop the worker and disconnect all signals so the
            # dialog can be garbage collected.
            nonlocal stop
            stop = True
            try: d.computing_privkeys_signal.disconnect()
            except TypeError: pass
            try: d.show_privkeys_signal.disconnect()
            except TypeError: pass
            try: d.finished.disconnect()
            except TypeError: pass
            if thr and thr.is_alive():
                thr.join(timeout=1.0) # wait for thread to end for maximal GC mojo
        def computing_privkeys_slot():
            if stop:
                return
            e.setText(_("Please wait... {num}/{total}").format(num=len(private_keys),total=len(addresses)))
        d.computing_privkeys_signal.connect(computing_privkeys_slot)
        d.show_privkeys_signal.connect(show_privkeys)
        d.finished.connect(on_dialog_closed)
        thr = threading.Thread(target=privkeys_thread, daemon=True)
        thr.start()
        res = d.exec_()
        if not res:
            stop = True
            return
        filename = filename_e.text()
        if not filename:
            return
        try:
            self.do_export_privkeys(filename, private_keys, csv_button.isChecked())
        except (IOError, os.error) as reason:
            txt = "\n".join([
                _("Electron Cash was unable to produce a private key-export."),
                str(reason)
            ])
            self.show_critical(txt, title=_("Unable to create csv"))
        except Exception as e:
            self.show_message(str(e))
            return
        self.show_message(_("Private keys exported."))
def do_export_privkeys(self, fileName, pklist, is_csv):
with open(fileName, "w+", encoding='utf-8') as f:
if is_csv:
transaction = csv.writer(f)
transaction.writerow(["address", "private_key"])
for addr, pk in pklist.items():
transaction.writerow(["%34s"%addr,pk])
else:
f.write(json.dumps(pklist, indent = 4))
def do_import_labels(self):
labelsFile = self.getOpenFileName(_("Open labels file"), "*.json")
if not labelsFile: return
try:
with open(labelsFile, 'r', encoding='utf-8') as f: # always ensure UTF-8. See issue #1453.
data = f.read()
data = json.loads(data)
if type(data) is not dict or not len(data) or not all(type(v) is str and type(k) is str for k,v in data.items()):
self.show_critical(_("The file you selected does not appear to contain labels."))
return
for key, value in data.items():
self.wallet.set_label(key, value)
self.show_message(_("Your labels were imported from") + " '%s'" % str(labelsFile))
except (IOError, OSError, json.decoder.JSONDecodeError) as reason:
self.show_critical(_("Electron Cash was unable to import your labels.") + "\n" + str(reason))
self.address_list.update()
self.history_list.update()
self.utxo_list.update()
self.history_updated_signal.emit() # inform things like address_dialog that there's a new history
def do_export_labels(self):
labels = self.wallet.labels
try:
fileName = self.getSaveFileName(_("Select file to save your labels"), 'electron-cash_labels.json', "*.json")
if fileName:
with open(fileName, 'w+', encoding='utf-8') as f: # always ensure UTF-8. See issue #1453.
json.dump(labels, f, indent=4, sort_keys=True)
self.show_message(_("Your labels were exported to") + " '%s'" % str(fileName))
except (IOError, os.error) as reason:
self.show_critical(_("Electron Cash was unable to export your labels.") + "\n" + str(reason))
    def export_history_dialog(self):
        """Run the 'Export History' dialog and, on accept, export the history.

        Lets the user pick CSV vs JSON output, whether to include addresses,
        and whether to fetch accurate fee/input data from the network (with
        a configurable timeout).  Delegates the actual export to
        do_export_history().
        """
        d = WindowModalDialog(self.top_level_window(), _('Export History'))
        d.setMinimumSize(400, 200)
        vbox = QVBoxLayout(d)
        defaultname = os.path.expanduser('~/electron-cash-history.csv')
        select_msg = _('Select file to export your wallet transactions to')
        box, filename_e, csv_button = filename_field(self.config, defaultname, select_msg)
        vbox.addWidget(box)
        include_addresses_chk = QCheckBox(_("Include addresses"))
        include_addresses_chk.setChecked(True)
        include_addresses_chk.setToolTip(_("Include input and output addresses in history export"))
        vbox.addWidget(include_addresses_chk)
        fee_dl_chk = QCheckBox(_("Fetch accurate fees from network (slower)"))
        fee_dl_chk.setChecked(self.is_fetch_input_data())
        fee_dl_chk.setEnabled(bool(self.wallet.network))
        fee_dl_chk.setToolTip(_("If this is checked, accurate fee and input value data will be retrieved from the network"))
        vbox.addWidget(fee_dl_chk)
        fee_time_w = QWidget()
        fee_time_w.setToolTip(_("The amount of overall time in seconds to allow for downloading fee data before giving up"))
        hbox = QHBoxLayout(fee_time_w)
        hbox.setContentsMargins(20, 0, 0, 0)
        hbox.addWidget(QLabel(_("Timeout:")), 0, Qt.AlignRight)
        fee_time_sb = QSpinBox()
        fee_time_sb.setMinimum(10)
        fee_time_sb.setMaximum(9999)
        fee_time_sb.setSuffix(" " + _("seconds"))
        fee_time_sb.setValue(30)
        # The timeout spinner is only relevant when fee download is enabled.
        fee_dl_chk.clicked.connect(fee_time_w.setEnabled)
        fee_time_w.setEnabled(fee_dl_chk.isChecked())
        hbox.addWidget(fee_time_sb, 0, Qt.AlignLeft)
        hbox.addStretch(1)
        vbox.addWidget(fee_time_w)
        vbox.addStretch(1)
        hbox = Buttons(CancelButton(d), OkButton(d, _('Export')))
        vbox.addLayout(hbox)
        # NOTE(review): run_hook presumably lets plugin code extend this
        # dialog (it receives the button row) -- confirm against run_hook's
        # definition.
        run_hook('export_history_dialog', self, hbox)
        self.update()
        res = d.exec_()
        d.setParent(None) # for python GC
        if not res:
            return
        filename = filename_e.text()
        if not filename:
            return
        success = False
        try:
            # minimum 10s time for calc. fees, etc
            timeout = max(fee_time_sb.value() if fee_dl_chk.isChecked() else 10.0, 10.0)
            success = self.do_export_history(filename, csv_button.isChecked(),
                                             download_inputs=fee_dl_chk.isChecked(),
                                             timeout=timeout,
                                             include_addresses=include_addresses_chk.isChecked())
        except Exception as reason:
            export_error_label = _("Electron Cash was unable to produce a transaction export.")
            self.show_critical(export_error_label + "\n" + str(reason), title=_("Unable to export history"))
        else:
            if success:
                self.show_message(_("Your wallet history has been successfully exported."))
def plot_history_dialog(self):
if plot_history is None:
return
wallet = self.wallet
history = wallet.get_history()
if len(history) > 0:
plt = plot_history(self.wallet, history)
plt.show()
def is_fetch_input_data(self):
''' default on if network.auto_connect is True, otherwise use config value '''
return bool(self.wallet and self.wallet.network and self.config.get('fetch_input_data', self.wallet.network.auto_connect))
    def set_fetch_input_data(self, b):
        """Persist the 'fetch accurate input/fee data' preference as a bool."""
        self.config.set_key('fetch_input_data', bool(b))
    def do_export_history(self, fileName, is_csv, *, download_inputs=False, timeout=30.0, include_addresses=True):
        """Export the wallet history to *fileName* as CSV or JSON.

        Runs wallet.export_history() inside a WaitingDialog with a progress
        bar; the file is written in the on_success callback.  Returns True
        on success, a falsy value otherwise.
        """
        wallet = self.wallet
        if not wallet:
            return
        dlg = None  # this will be set at the bottom of this function
        def task():
            # Runs off the GUI thread; reports percent progress to the dialog.
            def update_prog(x):
                if dlg: dlg.update_progress(int(x*100))
            return wallet.export_history(fx=self.fx,
                                         show_addresses=include_addresses,
                                         decimal_point=self.decimal_point,
                                         fee_calc_timeout=timeout,
                                         download_inputs=download_inputs,
                                         progress_callback=update_prog)
        success = False
        def on_success(history):
            # Writes the export file; fiat columns are included only if every
            # first-row fiat field is present and fx history display is on.
            nonlocal success
            ccy = (self.fx and self.fx.get_currency()) or ''
            has_fiat_columns = history and self.fx and self.fx.show_history() and 'fiat_value' in history[0] and 'fiat_balance' in history[0] and 'fiat_fee' in history[0]
            lines = []
            for item in history:
                if is_csv:
                    cols = [item['txid'], item.get('label', ''), item['confirmations'], item['value'], item['fee'], item['date']]
                    if has_fiat_columns:
                        cols += [item['fiat_value'], item['fiat_balance'], item['fiat_fee']]
                    if include_addresses:
                        inaddrs_filtered = (x for x in (item.get('input_addresses') or [])
                                            if Address.is_valid(x))
                        outaddrs_filtered = (x for x in (item.get('output_addresses') or [])
                                             if Address.is_valid(x))
                        cols.append( ','.join(inaddrs_filtered) )
                        cols.append( ','.join(outaddrs_filtered) )
                    lines.append(cols)
                else:
                    if has_fiat_columns and ccy:
                        item['fiat_currency'] = ccy # add the currency to each entry in the json. this wastes space but json is bloated anyway so this won't hurt too much, we hope
                    elif not has_fiat_columns:
                        # No need to include these fields as they will always be 'No Data'
                        item.pop('fiat_value', None)
                        item.pop('fiat_balance', None)
                        item.pop('fiat_fee', None)
                    lines.append(item)
            with open(fileName, "w+", encoding="utf-8") as f: # ensure encoding to utf-8. Avoid Windows cp1252. See #1453.
                if is_csv:
                    transaction = csv.writer(f, lineterminator='\n')
                    cols = ["transaction_hash","label", "confirmations", "value", "fee", "timestamp"]
                    if has_fiat_columns:
                        cols += [f"fiat_value_{ccy}", f"fiat_balance_{ccy}", f"fiat_fee_{ccy}"] # in CSV mode, we use column names eg fiat_value_USD, etc
                    if include_addresses:
                        cols += ["input_addresses", "output_addresses"]
                    transaction.writerow(cols)
                    for line in lines:
                        transaction.writerow(line)
                else:
                    f.write(json.dumps(lines, indent=4))
            success = True
        # kick off the waiting dialog to do all of the above
        dlg = WaitingDialog(self.top_level_window(),
                            _("Exporting history, please wait ..."),
                            task, on_success, self.on_error, disable_escape_key=True,
                            auto_exec=False, auto_show=False, progress_bar=True, progress_min=0, progress_max=100)
        dlg.exec_()
        # this will block here in the WaitingDialog event loop... and set success to True if success
        return success
    def sweep_key_dialog(self):
        """Sweep funds from externally-supplied private keys into this wallet.

        Prompts for WIF (and, when available, BIP38) private keys, decrypts
        any BIP38 keys via Bip38Importer, then prepares a max-send of the
        swept coins to one of the wallet's own addresses.
        """
        # Prefer unused addresses; fall back to receiving/all addresses.
        addresses = self.wallet.get_unused_addresses()
        if not addresses:
            try:
                addresses = self.wallet.get_receiving_addresses()
            except AttributeError:
                addresses = self.wallet.get_addresses()
        if not addresses:
            self.show_warning(_('Wallet has no address to sweep to'))
            return
        d = WindowModalDialog(self.top_level_window(), title=_('Sweep private keys'))
        d.setMinimumSize(600, 300)
        vbox = QVBoxLayout(d)
        bip38_warn_label = QLabel(_("<b>BIP38 support is disabled because a requisite library is not installed.</b> Please install 'cryptodomex' or omit BIP38 private keys (private keys starting in 6P...). Decrypt keys to WIF format (starting with 5, K, or L) in order to sweep."))
        bip38_warn_label.setWordWrap(True)
        bip38_warn_label.setHidden(True)  # shown only when BIP38 keys are pasted without support
        vbox.addWidget(bip38_warn_label)
        extra = ""
        if bitcoin.is_bip38_available():
            extra += " " + _('or BIP38 keys')
        vbox.addWidget(QLabel(_("Enter private keys") + extra + " :"))
        keys_e = ScanQRTextEdit(allow_multi=True)
        keys_e.setTabChangesFocus(True)
        vbox.addWidget(keys_e)
        h, addr_combo = address_combo(addresses)
        vbox.addLayout(h)
        vbox.addStretch(1)
        sweep_button = OkButton(d, _('Sweep'))
        vbox.addLayout(Buttons(CancelButton(d), sweep_button))
        def get_address_text():
            return addr_combo.currentText()
        def get_priv_keys():
            return keystore.get_private_keys(keys_e.toPlainText(), allow_bip38=True)
        def has_bip38_keys_but_no_bip38():
            # True iff BIP38 keys were entered but BIP38 support is missing.
            if bitcoin.is_bip38_available():
                return False
            keys = [k for k in keys_e.toPlainText().split() if k]
            return any(bitcoin.is_bip38_key(k) for k in keys)
        def enable_sweep():
            # Sweep requires a destination, parsable keys, and no
            # un-decryptable BIP38 keys.
            bad_bip38 = has_bip38_keys_but_no_bip38()
            sweepok = bool(get_address_text() and not bad_bip38 and get_priv_keys())
            sweep_button.setEnabled(sweepok)
            bip38_warn_label.setHidden(not bad_bip38)
        keys_e.textChanged.connect(enable_sweep)
        enable_sweep()
        res = d.exec_()
        d.setParent(None)
        if not res:
            return
        try:
            self.do_clear()
            keys = get_priv_keys()
            bip38s = {}
            for i, k in enumerate(keys):
                if bitcoin.is_bip38_key(k):
                    bip38s[k] = i
            if bip38s:
                # For all the BIP38s detected, prompt for password
                from .bip38_importer import Bip38Importer
                d2 = Bip38Importer(bip38s.keys(), parent=self.top_level_window())
                d2.exec_()
                d2.setParent(None)
                if d2.decoded_keys:
                    for k,tup in d2.decoded_keys.items():
                        wif, adr = tup
                        # rewrite the keys they specified with the decrypted WIF in the keys list for sweep_preparations to work below...
                        i = bip38s[k]
                        keys[i] = wif
                else:
                    self.show_message(_("User cancelled"))
                    return
            coins, keypairs = sweep_preparations(keys, self.network)
            self.tx_external_keypairs = keypairs
            self.payto_e.setText(get_address_text())
            self.spend_coins(coins)
            self.spend_max()
        except BaseException as e:
            self.show_message(str(e))
            return
        # Freeze the send form so the prepared sweep isn't accidentally edited.
        self.payto_e.setFrozen(True)
        self.amount_e.setFrozen(True)
        self.warn_if_watching_only()
def _do_import(self, title, msg, func):
text = text_dialog(self.top_level_window(), title, msg + ' :', _('Import'),
allow_multi=True)
if not text:
return
bad, bad_info = [], []
good = []
for key in str(text).split():
try:
addr = func(key)
good.append(addr)
except BaseException as e:
bad.append(key)
bad_info.append("{}: {}".format(key, str(e)))
continue
if good:
self.show_message(_("The following addresses were added") + ':\n' + '\n'.join(good))
if bad:
self.show_warning(_("The following could not be imported") + ':\n' + '\n'.join(bad), detail_text='\n\n'.join(bad_info))
self.address_list.update()
self.history_list.update()
self.history_updated_signal.emit() # inform things like address_dialog that there's a new history
def import_addresses(self):
if not self.wallet.can_import_address():
return
title, msg = _('Import addresses'), _("Enter addresses")
def import_addr(addr):
if self.wallet.import_address(Address.from_string(addr)):
return addr
return ''
self._do_import(title, msg, import_addr)
@protected
def do_import_privkey(self, password):
    """Prompt for and import WIF (and, when supported, BIP38) private keys.

    Decorated with @protected, so *password* is the wallet password
    supplied by the password prompt.
    """
    if not self.wallet.can_import_privkey():
        return
    title, msg = _('Import private keys'), _("Enter private keys")
    if bitcoin.is_bip38_available():
        msg += " " + _('or BIP38 keys')
    def import_key(key):
        if not (bitcoin.is_bip38_available() and bitcoin.is_bip38_key(key)):
            # Plain WIF key -- import directly.
            return self.wallet.import_private_key(key, password)
        # BIP38 key: ask the user for its decryption passphrase first.
        from .bip38_importer import Bip38Importer
        d = Bip38Importer([key], parent=self.top_level_window(),
                          message=_('A BIP38 key was specified, please enter a password to decrypt it'),
                          show_count=False)
        d.exec_()
        d.setParent(None)  # python GC quicker if this happens
        if not d.decoded_keys:
            raise util.UserCancelled()
        wif, adr = d.decoded_keys[key]
        return self.wallet.import_private_key(wif, password)
    self._do_import(title, msg, import_key)
def update_fiat(self):
    """Show/hide the fiat entry widgets and refresh fiat-denominated views
    according to whether the FX (exchange-rate) subsystem is enabled."""
    fiat_enabled = self.fx and self.fx.is_enabled()
    # The fiat send field only makes sense while the BCH amount field is shown.
    self.fiat_send_e.setVisible(fiat_enabled if self.amount_e.isVisible() else False)
    self.fiat_receive_e.setVisible(fiat_enabled)
    self.history_list.refresh_headers()
    self.history_list.update()
    self.history_updated_signal.emit()  # inform things like address_dialog that there's a new history
    self.address_list.refresh_headers()
    self.address_list.update()
    self.update_status()
def cashaddr_icon(self):
    """Return the toolbar icon for the currently configured address format
    (0 = legacy b/w, 1 = CashAddr, 2 = SLP)."""
    fmt = self.config.get('addr_format', 0)
    if fmt == 1:
        return QIcon(":icons/tab_converter.svg")
    if fmt == 2:
        return QIcon(":icons/tab_converter_slp.svg")
    return QIcon(":icons/tab_converter_bw.svg")
def update_cashaddr_icon(self):
    """Refresh the address-format toggle button's icon to match config."""
    icon = self.cashaddr_icon()
    self.addr_converter_button.setIcon(icon)
def toggle_cashaddr_status_bar(self):
    """Cycle to the next address format in response to the status-bar button."""
    current_format = self.config.get('addr_format', 2)
    self.toggle_cashaddr(current_format)
def toggle_cashaddr_settings(self, state):
    """Set the address format from the preferences combo; *state* is the combo index."""
    self.toggle_cashaddr(state, specified=True)
def toggle_cashaddr(self, format, specified=False):
    """Switch the global address display format and notify all windows.

    format    -- when *specified* is True, the exact format index to use
                 (set from preferences); otherwise the current index, which
                 is advanced to the next one.
    specified -- True when the value comes from the preferences combo;
                 False (GUI toggle) means "cycle to the next format".
    """
    if not specified:
        # Cycle: SLP wallets have one extra format (2 == SLP), others cap at 1.
        max_format = 2 if self.is_slp_wallet else 1
        format += 1
        if format > max_format:
            format = 0
    self.config.set_key('addr_format', format)
    Address.show_cashaddr(format)
    self.setAddrFormatText(format)
    for window in self.gui_object.windows:
        window.cashaddr_toggled_signal.emit()
def setAddrFormatText(self, format):
    """Update the status-bar label describing the active address format.

    Silently ignores the case where the label widget does not exist
    (it may not have been created yet at startup).
    """
    labels = {0: "Addr Format: Legacy", 1: "Addr Format: Cash"}
    try:
        self.addr_format_label.setText(labels.get(format, "Addr Format: SLP"))
    except AttributeError:
        pass
def settings_dialog(self):
    """Show the modal Preferences dialog.

    Builds four tabs (General, Fees & Misc., Transactions, Fiat) out of
    lists of (widget, widget-or-None) pairs, runs the dialog modally,
    then performs post-close work: FX refresh, alias signal disconnect,
    'close_settings_dialog' hook, and a restart notice if any setting
    that requires a restart was changed.
    """
    # Dialog subclass that emits shown_signal when first shown, so the
    # (potentially slow) camera probe can be deferred until the dialog
    # is actually on screen (see scan_cameras below).
    class SettingsModalDialog(WindowModalDialog):
        shown_signal = pyqtSignal()
        def showEvent(self, e):
            super().showEvent(e)
            self.shown_signal.emit()
    self.need_restart = False
    dialog_finished = False
    d = SettingsModalDialog(self.top_level_window(), _('Preferences'))
    d.setObjectName('WindowModalDialog - Preferences')
    destroyed_print_error(d)
    vbox = QVBoxLayout()
    tabs = QTabWidget()
    # Widget-pair lists; each becomes a tab (or group box) below in tabs_info.
    gui_widgets = []
    misc_widgets = []
    global_tx_widgets, per_wallet_tx_widgets = [], []

    # --- Address format selector ---
    addr_format_choices = ["Legacy Format","CashAddr Format","SLP Format"]
    addr_format_dict={'Legacy Format':0,'CashAddr Format':1,'SLP Format':2}
    msg = _('Choose which format the wallet displays for Bitcoin Cash addresses')
    addr_format_label = HelpLabel(_('Address Format') + ':', msg)
    addr_format_combo = QComboBox()
    addr_format_combo.addItems(addr_format_choices)
    addr_format_combo.setCurrentIndex(self.config.get("addr_format", 0))
    addr_format_combo.currentIndexChanged.connect(self.toggle_cashaddr_settings)
    gui_widgets.append((addr_format_label,addr_format_combo))

    # language
    lang_help = _('Select which language is used in the GUI (after restart).')
    lang_label = HelpLabel(_('Language') + ':', lang_help)
    lang_combo = QComboBox()
    from electroncash.i18n import languages, get_system_language_match, match_language
    language_names = []
    language_keys = []
    for (lang_code, lang_def) in languages.items():
        language_keys.append(lang_code)
        lang_name = []
        lang_name.append(lang_def.name)
        if lang_code == '':
            # System entry in languages list (==''), gets system setting
            sys_lang = get_system_language_match()
            if sys_lang:
                lang_name.append(f' [{languages[sys_lang].name}]')
        language_names.append(''.join(lang_name))
    lang_combo.addItems(language_names)
    conf_lang = self.config.get("language", '')
    if conf_lang:
        # The below code allows us to rename languages in saved config and
        # have them still line up with languages in our languages dict.
        # For example we used to save English as en_UK but now it's en_US
        # and it will still match
        conf_lang = match_language(conf_lang)
    try: index = language_keys.index(conf_lang)
    except ValueError: index = 0
    lang_combo.setCurrentIndex(index)
    if not self.config.is_modifiable('language'):
        for w in [lang_combo, lang_label]:
            w.setEnabled(False)
    def on_lang(x):
        lang_request = language_keys[lang_combo.currentIndex()]
        if lang_request != self.config.get('language'):
            self.config.set_key("language", lang_request, True)
            self.need_restart = True
    lang_combo.currentIndexChanged.connect(on_lang)
    gui_widgets.append((lang_label, lang_combo))

    # --- Decimal-zeros display setting ---
    nz_help = _('Number of zeros displayed after the decimal point. For example, if this is set to 2, "1." will be displayed as "1.00"')
    nz_label = HelpLabel(_('Zeros after decimal point') + ':', nz_help)
    nz = QSpinBox()
    nz.setMinimum(0)
    nz.setMaximum(self.decimal_point)
    nz.setValue(self.num_zeros)
    if not self.config.is_modifiable('num_zeros'):
        for w in [nz, nz_label]: w.setEnabled(False)
    def on_nz():
        value = nz.value()
        if self.num_zeros != value:
            self.num_zeros = value
            self.config.set_key('num_zeros', value, True)
            self.update_tabs()
            self.update_status()
    nz.valueChanged.connect(on_nz)
    gui_widgets.append((nz_label, nz))

    # --- Fees group box (custom fee rate + manual fee edit) ---
    def on_customfee(x):
        amt = customfee_e.get_amount()
        # config stores sats/kB; the edit shows sats/byte
        m = int(amt * 1000.0) if amt is not None else None
        self.config.set_key('customfee', m)
        self.fee_slider.update()
        if self.fee_custom_lbl.text() == '':
            self.fee_slider_mogrifier(self.get_custom_fee_text())
        else:
            self.fee_slider_mogrifier()
    fee_gb = QGroupBox(_('Fees'))
    fee_lo = QGridLayout(fee_gb)
    customfee_e = BTCSatsByteEdit()
    customfee_e.setAmount(self.config.custom_fee_rate() / 1000.0 if self.config.has_custom_fee_rate() else None)
    customfee_e.textChanged.connect(on_customfee)
    customfee_label = HelpLabel(_('Custom fee rate:'), _('Custom Fee Rate in Satoshis per byte'))
    fee_lo.addWidget(customfee_label, 0, 0, 1, 1, Qt.AlignRight)
    fee_lo.addWidget(customfee_e, 0, 1, 1, 1, Qt.AlignLeft)
    feebox_cb = QCheckBox(_('Edit fees manually'))
    feebox_cb.setChecked(self.config.get('show_fee', False))
    feebox_cb.setToolTip(_("Show fee edit box in send tab."))
    def on_feebox(x):
        self.config.set_key('show_fee', x == Qt.Checked)
        self.fee_e.setVisible(bool(x))
    feebox_cb.stateChanged.connect(on_feebox)
    fee_lo.addWidget(feebox_cb, 1, 0, 1, 2, Qt.AlignJustify)
    # Fees box up top
    misc_widgets.append((fee_gb, None))

    # --- Identity group box (OpenAlias + SSL cert) ---
    msg = _('OpenAlias record, used to receive coins and to sign payment requests.') + '\n\n'\
          + _('The following alias providers are available:') + '\n'\
          + '\n'.join(['https://cryptoname.co/', 'http://xmr.link/']) + '\n\n'\
          + _('For more information, see http://openalias.org')
    alias_label = HelpLabel(_('OpenAlias') + ':', msg)
    alias = self.config.get('alias','')
    alias_e = QLineEdit(alias)
    def set_alias_color():
        # Green = validated alias, red = failed/unvalidated, empty = no alias set.
        if not self.config.get('alias'):
            alias_e.setStyleSheet("")
            return
        if self.alias_info:
            alias_addr, alias_name, validated = self.alias_info
            alias_e.setStyleSheet((ColorScheme.GREEN if validated else ColorScheme.RED).as_stylesheet(True))
        else:
            alias_e.setStyleSheet(ColorScheme.RED.as_stylesheet(True))
    def on_alias_edit():
        alias_e.setStyleSheet("")
        alias = str(alias_e.text())
        self.config.set_key('alias', alias, True)
        if alias:
            self.fetch_alias()
    set_alias_color()
    self.alias_received_signal.connect(set_alias_color)
    alias_e.editingFinished.connect(on_alias_edit)
    id_gb = QGroupBox(_("Identity"))
    id_form = QFormLayout(id_gb)
    id_form.addRow(alias_label, alias_e)
    # SSL certificate
    msg = ' '.join([
        _('SSL certificate used to sign payment requests.'),
        _('Use setconfig to set ssl_chain and ssl_privkey.'),
    ])
    if self.config.get('ssl_privkey') or self.config.get('ssl_chain'):
        try:
            SSL_identity = paymentrequest.check_ssl_config(self.config)
            SSL_error = None
        except BaseException as e:
            SSL_identity = "error"
            SSL_error = str(e)
    else:
        SSL_identity = ""
        SSL_error = None
    SSL_id_label = HelpLabel(_('SSL certificate') + ':', msg)
    SSL_id_e = QLineEdit(SSL_identity)
    SSL_id_e.setStyleSheet((ColorScheme.RED if SSL_error else ColorScheme.GREEN).as_stylesheet(True) if SSL_identity else '')
    if SSL_error:
        SSL_id_e.setToolTip(SSL_error)
    SSL_id_e.setReadOnly(True)
    id_form.addRow(SSL_id_label, SSL_id_e)
    # Identity box in middle of this tab
    misc_widgets.append((id_gb, None)) # commit id_form/id_gb to master layout via this data structure

    # --- Crash reporter group box ---
    from . import exception_window as ew
    cr_gb = QGroupBox(_("Crash Reporter"))
    cr_grid = QGridLayout(cr_gb)
    cr_chk = QCheckBox()
    cr_chk.setChecked(ew.is_enabled(self.config))
    cr_chk.clicked.connect(lambda b: ew.set_enabled(self.config, b))
    cr_help = HelpLabel(_("Crash reporter enabled"),
                        _("The crash reporter is the error window which pops-up when Electron Cash encounters an internal error.\n\n"
                          "It is recommended that you leave this option enabled, so that developers can be notified of any internal bugs. "
                          "When a crash is encountered you are asked if you would like to send a report.\n\n"
                          "Private information is never revealed in crash reports to developers."))
    # The below dance ensures the checkbox is horizontally centered in the widget
    cr_grid.addWidget(QWidget(), 0, 0, 1, 1) # dummy spacer
    cr_grid.addWidget(cr_chk, 0, 1, 1, 1, Qt.AlignRight)
    cr_grid.addWidget(cr_help, 0, 2, 1, 1, Qt.AlignLeft)
    cr_grid.addWidget(QWidget(), 0, 3, 1, 1) # dummy spacer
    cr_grid.setColumnStretch(0, 1)
    cr_grid.setColumnStretch(3, 1)
    # Crash reporter box at bottom of this tab
    misc_widgets.append((cr_gb, None)) # commit crash reporter gb to layout

    # --- Base unit selector ---
    units = util.base_unit_labels # ( 'BCH', 'mBCH', 'bits' )
    msg = _('Base unit of your wallet.')\
          + '\n1 BCH = 1,000 mBCH = 1,000,000 bits.\n' \
          + _(' These settings affects the fields in the Send tab')+' '
    unit_label = HelpLabel(_('Base unit') + ':', msg)
    unit_combo = QComboBox()
    unit_combo.addItems(units)
    unit_combo.setCurrentIndex(units.index(self.base_unit()))
    def on_unit(x, nz):
        unit_result = units[unit_combo.currentIndex()]
        if self.base_unit() == unit_result:
            return
        # Preserve the entered amounts across the decimal-point change.
        edits = self.amount_e, self.fee_e, self.receive_amount_e
        amounts = [edit.get_amount() for edit in edits]
        dp = util.base_units.get(unit_result)
        if dp is not None:
            self.decimal_point = dp
        else:
            raise Exception('Unknown base unit')
        self.config.set_key('decimal_point', self.decimal_point, True)
        nz.setMaximum(self.decimal_point)
        for edit, amount in zip(edits, amounts):
            edit.setAmount(amount)
        self.update_tabs()
        self.update_status()
    unit_combo.currentIndexChanged.connect(lambda x: on_unit(x, nz))
    gui_widgets.append((unit_label, unit_combo))

    # --- Block explorer selector ---
    block_explorers = web.BE_sorted_list()
    msg = _('Choose which online block explorer to use for functions that open a web browser')
    block_ex_label = HelpLabel(_('Online block explorer') + ':', msg)
    block_ex_combo = QComboBox()
    block_ex_combo.addItems(block_explorers)
    block_ex_combo.setCurrentIndex(block_ex_combo.findText(web.BE_from_config(self.config)))
    def on_be(x):
        be_result = block_explorers[block_ex_combo.currentIndex()]
        self.config.set_key('block_explorer', be_result, True)
    block_ex_combo.currentIndexChanged.connect(on_be)
    gui_widgets.append((block_ex_label, block_ex_combo))

    # --- Video device (QR scanner camera) selector ---
    qr_combo = QComboBox()
    qr_label = HelpLabel(_('Video device'), '')
    qr_did_scan = False
    def set_no_camera(e=''):
        # Older Qt or missing libs -- disable GUI control and inform user why
        qr_combo.setEnabled(False)
        qr_combo.clear()
        qr_combo.addItem(_("Default"), "default")
        qr_combo.setToolTip(_("Unable to probe for cameras on this system. QtMultimedia is likely missing."))
        qr_label.setText(_('Video device') + ' ' + _('(disabled)') + ':')
        qr_label.help_text = qr_combo.toolTip() + "\n\n" + str(e)
        qr_label.setToolTip(qr_combo.toolTip())
    def scan_cameras():
        nonlocal qr_did_scan
        if qr_did_scan or dialog_finished: # dialog_finished guard needed because QueuedConnection
            # already scanned or dialog finished quickly
            return
        qr_did_scan = True
        system_cameras = []
        try:
            from PyQt5.QtMultimedia import QCameraInfo
        except ImportError as e:
            set_no_camera(e)
            return
        system_cameras = QCameraInfo.availableCameras()
        qr_combo.clear()
        qr_combo.addItem(_("Default"), "default")
        qr_label.setText(_('Video device') + ':')
        qr_label.help_text = _("For scanning QR codes.")
        qr_combo.setToolTip(qr_label.help_text)
        qr_label.setToolTip(qr_label.help_text)
        for cam in system_cameras:
            qr_combo.addItem(cam.description(), cam.deviceName())
        video_device = self.config.get("video_device")
        video_device_index = 0
        if video_device:
            video_device_index = max(0, qr_combo.findData(video_device)) # if not found, default to 0 (the default item)
        qr_combo.setCurrentIndex(video_device_index)
        qr_combo.setEnabled(True)
    def on_video_device(x):
        if qr_combo.isEnabled():
            self.config.set_key("video_device", qr_combo.itemData(x), True)
    set_no_camera() # pre-populate combo box with default so it has a sizeHint
    d.shown_signal.connect(scan_cameras, Qt.QueuedConnection) # do the camera scan once dialog is shown, using QueuedConnection so it's called from top level event loop and not from the showEvent handler
    qr_combo.currentIndexChanged.connect(on_video_device)
    gui_widgets.append((qr_label, qr_combo))

    # --- Color theme selector ---
    colortheme_combo = QComboBox()
    colortheme_combo.addItem(_('Default'), 'default') # We can't name this "light" in the UI as sometimes the default is actually dark-looking eg on Mojave or on some Linux desktops.
    colortheme_combo.addItem(_('Dark'), 'dark')
    theme_name = self.config.get('qt_gui_color_theme', 'default')
    dark_theme_available = self.gui_object.is_dark_theme_available()
    if theme_name == 'dark' and not dark_theme_available:
        theme_name = 'default'
    index = colortheme_combo.findData(theme_name)
    if index < 0: index = 0
    colortheme_combo.setCurrentIndex(index)
    if sys.platform in ('darwin',) and not dark_theme_available:
        msg = _("Color theme support is provided by macOS if using Mojave or above."
                " Use the System Preferences to switch color themes.")
        err_msg = msg
    else:
        msg = ( _("Dark theme support requires the package 'QDarkStyle' (typically installed via the 'pip3' command on Unix & macOS).")
                if not dark_theme_available
                else '' )
        err_msg = _("Dark theme is not available. Please install QDarkStyle to access this feature.")
    lbltxt = _('Color theme') + ':'
    colortheme_label = HelpLabel(lbltxt, msg) if msg else QLabel(lbltxt)
    def on_colortheme(x):
        item_data = colortheme_combo.itemData(x)
        if not dark_theme_available and item_data == 'dark':
            self.show_error(err_msg)
            colortheme_combo.setCurrentIndex(0)
            return
        self.config.set_key('qt_gui_color_theme', item_data, True)
        if theme_name != item_data:
            self.need_restart = True
    colortheme_combo.currentIndexChanged.connect(on_colortheme)
    gui_widgets.append((colortheme_label, colortheme_combo))

    if sys.platform not in ('darwin',):
        # Enable/Disable HighDPI -- this option makes no sense for macOS
        # and thus does not appear on that platform
        hidpi_chk = QCheckBox(_('Automatic high-DPI scaling'))
        if sys.platform in ('linux',):
            hidpi_chk.setToolTip(_("Enable/disable this option if you experience graphical glitches (such as overly large status bar icons)"))
        else: # windows
            hidpi_chk.setToolTip(_("Enable/disable this option if you experience graphical glitches (such as dialog box text being cut off"))
        hidpi_chk.setChecked(bool(self.config.get('qt_enable_highdpi', True)))
        if self.config.get('qt_disable_highdpi'):
            hidpi_chk.setToolTip(_('Automatic high DPI scaling was disabled from the command-line'))
            hidpi_chk.setChecked(False)
            hidpi_chk.setDisabled(True)
        def on_hi_dpi_toggle():
            self.config.set_key('qt_enable_highdpi', hidpi_chk.isChecked())
            self.need_restart = True
        hidpi_chk.stateChanged.connect(on_hi_dpi_toggle)
        gui_widgets.append((hidpi_chk, None))
    if sys.platform in ('win32', 'cygwin'):
        # Enable/Disable the use of the FreeType library on Qt
        # (Windows only)
        freetype_chk = QCheckBox(_('Use FreeType for font rendering'))
        freetype_chk.setChecked(self.gui_object.windows_qt_use_freetype)
        freetype_chk.setEnabled(self.config.is_modifiable('windows_qt_use_freetype'))
        freetype_chk.setToolTip(_("Enable/disable this option if you experience font rendering glitches (such as blurred text or monochrome emoji characters)"))
        def on_freetype_chk():
            self.gui_object.windows_qt_use_freetype = freetype_chk.isChecked() # property has a method backing it
            self.need_restart = True
        freetype_chk.stateChanged.connect(on_freetype_chk)
        gui_widgets.append((freetype_chk, None))
    elif sys.platform in ('linux',):
        # Enable/Disable the use of the fonts.xml FontConfig override
        # (Linux only)
        fontconfig_chk = QCheckBox(_('Use custom fontconfig for emojis'))
        fontconfig_chk.setChecked(self.gui_object.linux_qt_use_custom_fontconfig)
        fontconfig_chk.setEnabled(self.config.is_modifiable('linux_qt_use_custom_fontconfig'))
        fontconfig_chk.setToolTip(_("Enable/disable this option if you experience font rendering glitches (such as blurred text or monochrome emoji characters)"))
        def on_fontconfig_chk():
            self.gui_object.linux_qt_use_custom_fontconfig = fontconfig_chk.isChecked() # property has a method backing it
            self.need_restart = True
        fontconfig_chk.stateChanged.connect(on_fontconfig_chk)
        gui_widgets.append((fontconfig_chk, None))
    gui_widgets.append((None, None)) # spacer

    # --- Update check ---
    updatecheck_cb = QCheckBox(_("Automatically check for updates"))
    updatecheck_cb.setChecked(self.gui_object.has_auto_update_check())
    updatecheck_cb.setToolTip(_("Enable this option if you wish to be notified as soon as a new version of Electron Cash becomes available"))
    def on_set_updatecheck(v):
        self.gui_object.set_auto_update_check(v == Qt.Checked)
    updatecheck_cb.stateChanged.connect(on_set_updatecheck)
    gui_widgets.append((updatecheck_cb, None))

    # --- Per-wallet transaction options ---
    notify_tx_cb = QCheckBox(_('Notify when receiving funds'))
    notify_tx_cb.setToolTip(_('If enabled, a system notification will be presented when you receive funds to this wallet.'))
    notify_tx_cb.setChecked(bool(self.wallet.storage.get('gui_notify_tx', True)))
    def on_notify_tx(b):
        self.wallet.storage.put('gui_notify_tx', bool(b))
    notify_tx_cb.stateChanged.connect(on_notify_tx)
    per_wallet_tx_widgets.append((notify_tx_cb, None))

    usechange_cb = QCheckBox(_('Use change addresses'))
    if self.force_use_single_change_addr:
        usechange_cb.setChecked(True)
        usechange_cb.setEnabled(False)
        if isinstance(self.force_use_single_change_addr, str):
            usechange_cb.setToolTip(self.force_use_single_change_addr)
    else:
        usechange_cb.setChecked(self.wallet.use_change)
        usechange_cb.setToolTip(_('Using change addresses makes it more difficult for other people to track your transactions.'))
    def on_usechange(x):
        usechange_result = x == Qt.Checked
        if self.wallet.use_change != usechange_result:
            self.wallet.use_change = usechange_result
            self.wallet.storage.put('use_change', self.wallet.use_change)
            multiple_cb.setEnabled(self.wallet.use_change)
    usechange_cb.stateChanged.connect(on_usechange)
    per_wallet_tx_widgets.append((usechange_cb, None))

    multiple_change = self.wallet.multiple_change
    multiple_cb = QCheckBox(_('Use multiple change addresses'))
    if self.force_use_single_change_addr:
        multiple_cb.setEnabled(False)
        multiple_cb.setChecked(False)
        if isinstance(self.force_use_single_change_addr, str):
            multiple_cb.setToolTip(self.force_use_single_change_addr)
    else:
        multiple_cb.setEnabled(self.wallet.use_change)
        multiple_cb.setToolTip('\n'.join([
            _('In some cases, use up to 3 change addresses in order to break '
              'up large coin amounts and obfuscate the recipient address.'),
            _('This may result in higher transactions fees.')
        ]))
        multiple_cb.setChecked(multiple_change)
    def on_multiple(x):
        multiple = x == Qt.Checked
        if self.wallet.multiple_change != multiple:
            self.wallet.multiple_change = multiple
            self.wallet.storage.put('multiple_change', multiple)
    multiple_cb.stateChanged.connect(on_multiple)
    per_wallet_tx_widgets.append((multiple_cb, None))

    def fmt_docs(key, klass):
        lines = [ln.lstrip(" ") for ln in klass.__doc__.split("\n")]
        return '\n'.join([key, "", " ".join(lines)])

    def on_unconf(x):
        self.config.set_key('confirmed_only', bool(x))
    conf_only = self.config.get('confirmed_only', False)
    unconf_cb = QCheckBox(_('Spend only confirmed coins'))
    unconf_cb.setToolTip(_('Spend only confirmed inputs.'))
    unconf_cb.setChecked(conf_only)
    unconf_cb.stateChanged.connect(on_unconf)
    global_tx_widgets.append((unconf_cb, None))

    # Fiat Currency
    hist_checkbox = QCheckBox()
    fiat_address_checkbox = QCheckBox()
    ccy_combo = QComboBox()
    ex_combo = QComboBox()

    enable_opreturn = bool(self.config.get('enable_opreturn'))
    opret_cb = QCheckBox(_('Enable OP_RETURN output'))
    opret_cb.setToolTip(_('Enable posting messages with OP_RETURN.'))
    opret_cb.setChecked(enable_opreturn)
    opret_cb.stateChanged.connect(self.on_toggled_opreturn)
    global_tx_widgets.append((opret_cb,None))

    # Schnorr
    use_schnorr_cb = QCheckBox(_("Sign with Schnorr signatures"))
    use_schnorr_cb.setChecked(self.wallet.is_schnorr_enabled())
    use_schnorr_cb.stateChanged.connect(self.wallet.set_schnorr_enabled)
    no_schnorr_reason = []
    if self.wallet.is_schnorr_possible(no_schnorr_reason):
        use_schnorr_cb.setEnabled(True)
        use_schnorr_cb.setToolTip(_("Sign all transactions using Schnorr signatures."))
    else:
        # not possible (wallet type not supported); show reason in tooltip
        use_schnorr_cb.setEnabled(False)
        use_schnorr_cb.setToolTip(no_schnorr_reason[0])
    per_wallet_tx_widgets.append((use_schnorr_cb, None))

    # --- FX (fiat) combo/checkbox refresh helpers and handlers ---
    def update_currencies():
        if not self.fx: return
        currencies = sorted(self.fx.get_currencies(self.fx.get_history_config()))
        ccy_combo.clear()
        ccy_combo.addItems([pgettext('Referencing Fiat currency', 'None')] + currencies)
        if self.fx.is_enabled():
            ccy_combo.setCurrentIndex(ccy_combo.findText(self.fx.get_currency()))
    def update_history_cb():
        if not self.fx: return
        hist_checkbox.setChecked(self.fx.get_history_config())
        hist_checkbox.setEnabled(self.fx.is_enabled())
    def update_fiat_address_cb():
        if not self.fx: return
        fiat_address_checkbox.setChecked(self.fx.get_fiat_address_config())
    def update_exchanges():
        if not self.fx: return
        b = self.fx.is_enabled()
        ex_combo.setEnabled(b)
        if b:
            c = self.fx.get_currency()
            h = self.fx.get_history_config()
        else:
            c, h = self.fx.default_currency, False
        exchanges = self.fx.get_exchanges_by_ccy(c, h)
        conf_exchange = self.fx.config_exchange()
        ex_combo.clear()
        ex_combo.addItems(sorted(exchanges))
        idx = ex_combo.findText(conf_exchange) # try and restore previous exchange if in new list
        if idx < 0:
            # hmm, previous exchange wasn't in new h= setting. Try default exchange.
            idx = ex_combo.findText(self.fx.default_exchange)
        idx = 0 if idx < 0 else idx # if still no success (idx < 0) -> default to the first exchange in combo
        if exchanges: # don't set index if no exchanges, as any index is illegal. this shouldn't happen.
            ex_combo.setCurrentIndex(idx) # note this will emit a currentIndexChanged signal if it's changed
    def on_currency(hh):
        if not self.fx: return
        b = bool(ccy_combo.currentIndex())
        ccy = str(ccy_combo.currentText()) if b else None
        self.fx.set_enabled(b)
        if b and ccy != self.fx.ccy:
            self.fx.set_currency(ccy)
        update_history_cb()
        update_exchanges()
        self.update_fiat()
    def on_exchange(idx):
        exchange = str(ex_combo.currentText())
        if self.fx and self.fx.is_enabled() and exchange and exchange != self.fx.exchange.name():
            self.fx.set_exchange(exchange)
    def on_history(checked):
        if not self.fx: return
        changed = bool(self.fx.get_history_config()) != bool(checked)
        self.fx.set_history_config(checked)
        update_exchanges()
        self.history_list.refresh_headers()
        self.slp_history_list.refresh_headers()
        if self.fx.is_enabled() and checked:
            # reset timeout to get historical rates
            self.fx.timeout = 0
        if changed:
            self.history_list.update() # this won't happen too often as it's rate-limited
    def on_fiat_address(checked):
        if not self.fx: return
        self.fx.set_fiat_address_config(checked)
        self.address_list.refresh_headers()
        self.address_list.update()
    update_currencies()
    update_history_cb()
    update_fiat_address_cb()
    update_exchanges()
    ccy_combo.currentIndexChanged.connect(on_currency)
    hist_checkbox.stateChanged.connect(on_history)
    fiat_address_checkbox.stateChanged.connect(on_fiat_address)
    ex_combo.currentIndexChanged.connect(on_exchange)
    hist_checkbox.setText(_('Show history rates'))
    fiat_address_checkbox.setText(_('Show fiat balance for addresses'))
    fiat_widgets = []
    fiat_widgets.append((QLabel(_('Fiat currency:')), ccy_combo))
    fiat_widgets.append((QLabel(_('Source:')), ex_combo))
    fiat_widgets.append((hist_checkbox, None))
    fiat_widgets.append((fiat_address_checkbox, None))

    # --- Assemble tabs from the widget lists and run the dialog ---
    tabs_info = [
        (gui_widgets, _('General')),
        (misc_widgets, pgettext("The preferences -> Fees,misc tab", 'Fees && Misc.')),
        (OrderedDict([
            ( _("App-Global Options") , global_tx_widgets ),
            ( _("Per-Wallet Options") , per_wallet_tx_widgets),
        ]), _('Transactions')),
        (fiat_widgets, _('Fiat')),
    ]
    def add_tabs_info_to_tabs(tabs, tabs_info):
        def add_widget_pair(a,b,grid):
            i = grid.rowCount()
            if b:
                if a:
                    grid.addWidget(a, i, 0)
                grid.addWidget(b, i, 1)
            else:
                if a:
                    grid.addWidget(a, i, 0, 1, 2)
                else:
                    grid.addItem(QSpacerItem(15, 15), i, 0, 1, 2)
        for thing, name in tabs_info:
            tab = QWidget()
            if isinstance(thing, dict):
                # This Prefs tab is laid out as groupboxes one atop another...
                d = thing
                vbox = QVBoxLayout(tab)
                for groupName, widgets in d.items():
                    gbox = QGroupBox(groupName)
                    grid = QGridLayout(gbox)
                    grid.setColumnStretch(0,1)
                    for a,b in widgets:
                        add_widget_pair(a,b,grid)
                    vbox.addWidget(gbox, len(widgets))
            else:
                # Standard layout.. 1 tab has just a grid of widgets
                widgets = thing
                grid = QGridLayout(tab)
                grid.setColumnStretch(0,1)
                for a,b in widgets:
                    add_widget_pair(a,b,grid)
            tabs.addTab(tab, name)
    # / add_tabs_info_to_tabs
    add_tabs_info_to_tabs(tabs, tabs_info)
    vbox.addWidget(tabs)
    vbox.addStretch(1)
    vbox.addLayout(Buttons(CloseButton(d)))
    d.setLayout(vbox)
    try:
        # run the dialog
        d.exec_()
    finally:
        dialog_finished = True # paranoia for scan_cameras
        d.setParent(None) # for Python GC
    if self.fx:
        self.fx.timeout = 0
    self.alias_received_signal.disconnect(set_alias_color)
    run_hook('close_settings_dialog')
    if self.need_restart:
        self.show_message(_('Please restart Electron Cash to activate the new GUI settings'), title=_('Success'))
def closeEvent(self, event):
    """Qt close handler: run full teardown, then accept the close.

    It seems in some rare cases this closeEvent() is called twice;
    clean_up() guards against that situation (it is idempotent).
    """
    self.clean_up()
    super().closeEvent(event)
    event.accept() # paranoia. be sure it's always accepted.
def is_alive(self):
    """Return True so long as clean_up() has not yet run on this window."""
    return not self.cleaned_up
def clean_up_connections(self):
    """Disconnect this window from the network layer and from all Qt
    signal/slot connections, to break reference cycles before GC."""
    # Network callbacks first, so no more events arrive while we tear down.
    if self.network:
        self.network.unregister_callback(self.on_network)
        self.network.unregister_callback(self.on_quotes)
        self.network.unregister_callback(self.on_history)
    # Then disconnect every *_signal attribute on this object, and kill any
    # rate-limiter timers attached by the util.py rate_limited decorator
    # (the "__RateLimiter" suffix must match that decorator's attribute name).
    for attr_name in dir(self):
        if attr_name.endswith("_signal"):
            sig = getattr(self, attr_name)
            if isinstance(sig, pyqtBoundSignal):
                try:
                    sig.disconnect()
                except TypeError:
                    pass  # no connections
        elif attr_name.endswith("__RateLimiter"):
            rl_obj = getattr(self, attr_name)
            if isinstance(rl_obj, RateLimiter):
                rl_obj.kill_timer()
    try:
        self.disconnect()
    except TypeError:
        pass
    # Work-around to PyQt bugs. See EC issue #1532: explicitly drop the
    # update-available connection (shows/hides the update_available_button).
    try:
        self.gui_object.update_available_signal.disconnect(self.on_update_available)
    except TypeError:
        pass
def clean_up_children(self):
    """Detach child widgets/objects from this window so Python can GC it sooner."""
    # The status bar holds references back to us, so drop it.
    self.setStatusBar(None)
    # NOTE(review): the original comment warned against clearing the menu bar
    # on macOS (shared menu bar quirks -- doing so deletes its actions and
    # hotkeys immediately), yet the menu bar IS cleared here.  Behavior is
    # preserved verbatim; confirm intent against upstream history.
    self.setMenuBar(None)
    # Disable shortcuts right away so they cannot fire after close; Qt will
    # delete them together with this QObject later.
    for shortcut in self._shortcuts:
        shortcut.setEnabled(False)
        del shortcut
    self._shortcuts.clear()
    # Reparent remaining interesting children to None so python GC can clean
    # them up sooner rather than later (and hopefully this window too).
    orphans = [kid for kid in self.children()
               if (isinstance(kid, (QWidget, QAction, TaskThread))
                   and not isinstance(kid, (QStatusBar, QMenuBar, QFocusFrame, QShortcut)))]
    for kid in orphans:
        try:
            kid.disconnect()
        except TypeError:
            pass
        kid.setParent(None)
def clean_up(self):
    """Idempotent window teardown, called from closeEvent().

    Stops the wallet thread, persists window/config state, closes child
    dialogs, detaches the wallet from the daemon, and severs signal
    connections and child-widget references to accelerate GC.  Order is
    significant: file writes happen before dialog/daemon teardown, and
    signal disconnection happens last.
    """
    if self.cleaned_up:
        return
    self.cleaned_up = True
    if self.wallet.thread: # guard against window close before load_wallet was called (#1554)
        self.wallet.thread.stop()
        self.wallet.thread.wait() # Join the thread to make sure it's really dead.
    if self.wallet.ui_emit_validity_updated:
        self.wallet.ui_emit_validity_updated = None # detach callback
    if self.wallet.ui_emit_validation_fetch:
        self.wallet.ui_emit_validation_fetch = None
    self.tx_update_mgr.clean_up() # disconnects some signals
    # We catch these errors with the understanding that there is no recovery at
    # this point, given user has likely performed an action we cannot recover
    # cleanly from. So we attempt to exit as cleanly as possible.
    try:
        self.config.set_key("is_maximized", self.isMaximized())
        self.config.set_key("console-history", self.console.history[-50:], True)
    except (OSError, PermissionError) as e:
        self.print_error("unable to write to config (directory removed?)", e)
    if not self.isMaximized():
        try:
            g = self.geometry()
            self.wallet.storage.put("winpos-qt", [g.left(),g.top(),g.width(),g.height()])
        except (OSError, PermissionError) as e:
            self.print_error("unable to write to wallet storage (directory removed?)", e)
    # Should be no side-effects in this function relating to file access past this point.
    if self.qr_window:
        self.qr_window.close()
        self.qr_window = None # force GC sooner rather than later.
    for d in list(self._tx_dialogs):
        # clean up all extant tx dialogs we opened as they hold references
        # to us that will be invalidated
        d.prompt_if_unsaved = False # make sure to unconditionally close
        d.close()
    for d in list(self._slp_dialogs):
        d.close() # make sure dialogs we created are properly closed!
    self._close_wallet()
    try: self.gui_object.timer.timeout.disconnect(self.timer_actions)
    except TypeError: pass # defensive programming: this can happen if we got an exception before the timer action was connected
    self.gui_object.close_window(self) # implicitly runs the hook: on_close_window
    # Now, actually STOP the wallet's synchronizer and verifiers and remove
    # it from the daemon. Note that its addresses will still stay
    # 'subscribed' to the ElectrumX server until we connect to a new server,
    # (due to ElectrumX protocol limitations).. but this is harmless.
    self.gui_object.daemon.stop_wallet(self.wallet.storage.path)
    # At this point all plugins should have removed any references to this window.
    # Now, just to be paranoid, do some active destruction of signal/slot connections as well as
    # Removing child widgets forcefully to speed up Python's own GC of this window.
    self.clean_up_connections()
    self.clean_up_children()
    # And finally, print when we are destroyed by C++ for debug purposes
    # We must call this here as above calls disconnected all signals
    # involving this widget.
    destroyed_print_error(self)
def internal_plugins_dialog(self):
    """Show the modal 'Optional Features' dialog listing internal plugins.

    Each plugin gets a row with an enable/disable checkbox, an optional
    per-plugin settings widget, and a help button.  Weak references are
    used throughout so closing the dialog does not keep plugin widgets
    (or this window) alive.
    """
    if self.internalpluginsdialog:
        # NB: reentrance here is possible due to the way the window menus work on MacOS.. so guard against it
        self.internalpluginsdialog.raise_()
        return
    d = WindowModalDialog(self.top_level_window(), _('Optional Features'))
    weakD = Weak.ref(d)
    gui_object = self.gui_object
    plugins = gui_object.plugins
    vbox = QVBoxLayout(d)
    # plugins
    scroll = QScrollArea()
    scroll.setEnabled(True)
    scroll.setWidgetResizable(True)
    scroll.setMinimumSize(400,250)
    vbox.addWidget(scroll)
    w = QWidget()
    scroll.setWidget(w)
    w.setMinimumHeight(plugins.get_internal_plugin_count() * 35)
    grid = QGridLayout()
    grid.setColumnStretch(0,1)
    weakGrid = Weak.ref(grid)
    w.setLayout(grid)
    settings_widgets = Weak.ValueDictionary()
    def enable_settings_widget(p, name, i):
        # Create/enable/destroy the per-plugin settings widget for row *i*,
        # depending on whether plugin *p* is loaded and wants settings.
        widget = settings_widgets.get(name)
        grid = weakGrid()
        d = weakD()
        if d and grid and not widget and p and p.requires_settings():
            widget = settings_widgets[name] = p.settings_widget(d)
            grid.addWidget(widget, i, 1)
        if widget:
            widget.setEnabled(bool(p and p.is_enabled()))
            if not p:
                # Need to delete settings widget because keeping it around causes bugs as it points to a now-dead plugin instance
                settings_widgets.pop(name)
                widget.hide(); widget.setParent(None); widget.deleteLater(); widget = None
    def do_toggle(weakCb, name, i):
        # Checkbox click handler: toggle the plugin and sync UI state.
        cb = weakCb()
        if cb:
            p = plugins.toggle_internal_plugin(name)
            cb.setChecked(bool(p))
            enable_settings_widget(p, name, i)
            # All plugins get this whenever one is toggled.
            run_hook('init_qt', gui_object)
    for i, descr in enumerate(plugins.internal_plugin_metadata.values()):
        name = descr['__name__']
        p = plugins.get_internal_plugin(name)
        if descr.get('registers_keystore'):
            continue
        try:
            cb = QCheckBox(descr['fullname'])
            weakCb = Weak.ref(cb)
            plugin_is_loaded = p is not None
            cb_enabled = (not plugin_is_loaded and plugins.is_internal_plugin_available(name, self.wallet)
                          or plugin_is_loaded and p.can_user_disable())
            cb.setEnabled(cb_enabled)
            cb.setChecked(plugin_is_loaded and p.is_enabled())
            grid.addWidget(cb, i, 0)
            enable_settings_widget(p, name, i)
            cb.clicked.connect(partial(do_toggle, weakCb, name, i))
            msg = descr['description']
            if descr.get('requires'):
                msg += '\n\n' + _('Requires') + ':\n' + '\n'.join(map(lambda x: x[1], descr.get('requires')))
            grid.addWidget(HelpButton(msg), i, 2)
        except Exception:
            self.print_msg("error: cannot display plugin", name)
            traceback.print_exc(file=sys.stderr)
    grid.setRowStretch(len(plugins.internal_plugin_metadata.values()), 1)
    vbox.addLayout(Buttons(CloseButton(d)))
    self.internalpluginsdialog = d
    d.exec_()
    self.internalpluginsdialog = None # Python GC please!
def external_plugins_dialog(self):
    """Open the modal external 'Plugin Manager' dialog (one at a time)."""
    existing = self.externalpluginsdialog
    if existing:
        # Re-entrant invocation is possible via the macOS window menus;
        # just bring the already-open dialog to the front.
        existing.raise_()
        return
    from . import external_plugins_window
    dialog = external_plugins_window.ExternalPluginsDialog(self, _('Plugin Manager'))
    self.externalpluginsdialog = dialog
    dialog.exec_()
    self.externalpluginsdialog = None # allow python to GC
def hardware_wallet_support(self):
    """Open the (Linux-only) udev-rules installer dialog for hardware wallets."""
    if not sys.platform.startswith('linux'):
        # The udev installer only applies to Linux systems.
        self.print_error("FIXME! hardware_wallet_support is Linux only!")
        return
    existing = self.hardwarewalletdialog
    if existing:
        # Guard against re-entrance (possible via the macOS window menus).
        existing.raise_()
        return
    from .udev_installer import InstallHardwareWalletSupportDialog
    dialog = InstallHardwareWalletSupportDialog(self.top_level_window(), self.gui_object.plugins)
    self.hardwarewalletdialog = dialog
    dialog.exec_()
    self.hardwarewalletdialog = None # allow python to GC
def cpfp(self, parent_tx, new_tx):
    """Show the Child-Pays-For-Parent dialog for unconfirmed `parent_tx`.

    `new_tx` is a template transaction spending an unconfirmed output of
    `parent_tx` back to ourselves; the user picks the (high) fee to attach.
    On OK, the actual CPFP transaction is built via `wallet.cpfp()` and shown
    in the transaction dialog for signing/broadcast.
    """
    total_size = parent_tx.estimated_size() + new_tx.estimated_size()
    d = WindowModalDialog(self.top_level_window(), _('Child Pays for Parent'))
    vbox = QVBoxLayout(d)
    msg = (
        "A CPFP is a transaction that sends an unconfirmed output back to "
        "yourself, with a high fee. The goal is to have miners confirm "
        "the parent transaction in order to get the fee attached to the "
        "child transaction.")
    vbox.addWidget(WWLabel(_(msg)))
    msg2 = ("The proposed fee is computed using your "
        "fee/kB settings, applied to the total size of both child and "
        "parent transactions. After you broadcast a CPFP transaction, "
        "it is normal to see a new unconfirmed transaction in your history.")
    vbox.addWidget(WWLabel(_(msg2)))
    grid = QGridLayout()
    grid.addWidget(QLabel(_('Total size') + ':'), 0, 0)
    grid.addWidget(QLabel(_('{total_size} bytes').format(total_size=total_size)), 0, 1)
    # The whole child-input amount is the hard upper bound on the fee.
    max_fee = new_tx.output_value()
    grid.addWidget(QLabel(_('Input amount') + ':'), 1, 0)
    grid.addWidget(QLabel(self.format_amount(max_fee) + ' ' + self.base_unit()), 1, 1)
    output_amount = QLabel('')
    grid.addWidget(QLabel(_('Output amount') + ':'), 2, 0)
    grid.addWidget(output_amount, 2, 1)
    fee_e = BTCAmountEdit(self.get_decimal_point)
    def f(x):
        # FIX: get_amount() returns None for an empty/unparseable field;
        # previously `max_fee - None` raised TypeError while typing.
        fee = fee_e.get_amount()
        a = max_fee - fee if fee is not None else None
        output_amount.setText((self.format_amount(a) + ' ' + self.base_unit()) if a else '')
    fee_e.textChanged.connect(f)
    fee = self.config.fee_per_kb() * total_size / 1000
    fee_e.setAmount(fee)
    # FIX: keep ':' outside _() so the translatable msgid is 'Fee',
    # consistent with the other labels in this dialog ("Total size", etc.).
    grid.addWidget(QLabel(_('Fee') + ':'), 3, 0)
    grid.addWidget(fee_e, 3, 1)
    def on_rate(dyn, pos, fee_rate):
        # Fee slider callback: clamp the slider-derived fee to max_fee.
        fee = fee_rate * total_size / 1000
        fee = min(max_fee, fee)
        fee_e.setAmount(fee)
    fee_slider = FeeSlider(self, self.config, on_rate)
    fee_slider.update()
    grid.addWidget(fee_slider, 4, 1)
    vbox.addLayout(grid)
    vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
    result = d.exec_()
    d.setParent(None) # So Python can GC
    if not result:
        return
    fee = fee_e.get_amount()
    if fee is None:
        # FIX: an empty fee field previously raised TypeError on the
        # comparison below; treat it as a cancel.
        return
    if fee > max_fee:
        self.show_error(_('Max fee exceeded'))
        return
    new_tx = self.wallet.cpfp(parent_tx, fee)
    if new_tx is None:
        self.show_error(_('CPFP no longer valid'))
        return
    self.show_transaction(new_tx)
def rebuild_history(self):
    """Confirm with the user, then redownload and reverify this wallet's
    entire transaction history from the server (for corrupted wallets)."""
    if self.gui_object.warn_if_no_network(self):
        # Offline mode: rebuilding requires a server connection.
        return
    prompt = (
        _('This feature is intended to allow you to rebuild a wallet if it has become corrupted.')
        + ' ' + "\n\n"+_('Your entire transaction history will be downloaded again from the server and verified from the blockchain.')
        + ' ' + _('Just to be safe, back up your wallet file first!')
        + ' ' + "\n\n"+_("Rebuild this wallet's history now?")
    )
    if not self.question(prompt, title=_("Rebuild Wallet History")):
        return
    try:
        self.wallet.rebuild_history()
    except RuntimeError as e:
        self.show_error(str(e))
def scan_beyond_gap(self):
    """Open the 'scan beyond gap' dialog; requires a network connection."""
    if self.gui_object.warn_if_no_network(self):
        return
    from .scan_beyond_gap import ScanBeyondGap
    dialog = ScanBeyondGap(self)
    dialog.exec_()
    # Break the parent link so the refcount drops and Python can GC the dialog.
    dialog.setParent(None)
def copy_to_clipboard(self, text, tooltip=None, widget=None):
    """Copy `text` to the system clipboard and flash a confirmation tooltip
    at the current cursor position (defaults: generic message, this window)."""
    if not tooltip:
        tooltip = _("Text copied to clipboard")
    if not widget:
        widget = self
    qApp.clipboard().setText(text)
    QToolTip.showText(QCursor.pos(), tooltip, widget)
class TxUpdateMgr(QObject, PrintError):
    ''' Manages new transaction notifications and transaction verified
    notifications from the network thread. It collates them and sends them to
    the appropriate GUI controls in the main_window in an efficient manner. '''
    def __init__(self, main_window_parent):
        assert isinstance(main_window_parent, ElectrumWindow), "TxUpdateMgr must be constructed with an ElectrumWindow as its parent"
        super().__init__(main_window_parent)
        self.cleaned_up = False
        self.lock = threading.Lock() # used to lock thread-shared attrs below
        # begin thread-shared attributes
        self.notif_q = []
        self.verif_q = []
        self.need_process_v, self.need_process_n = False, False
        # /end thread-shared attributes
        # Weak ref so this manager never keeps its parent window alive.
        self.weakParent = Weak.ref(main_window_parent)
        main_window_parent.history_updated_signal.connect(self.verifs_get_and_clear, Qt.DirectConnection) # immediately clear verif_q on history update because it would be redundant to keep the verify queue around after a history list update
        main_window_parent.on_timer_signal.connect(self.do_check, Qt.DirectConnection) # hook into main_window's timer_actions function
        # 1-second repeating timer used to poll for the "Unknown balance" case.
        self.full_hist_refresh_timer = QTimer(self)
        self.full_hist_refresh_timer.setInterval(1000); self.full_hist_refresh_timer.setSingleShot(False)
        self.full_hist_refresh_timer.timeout.connect(self.schedule_full_hist_refresh_maybe)
    def diagnostic_name(self):
        ''' Log prefix for PrintError, e.g. "WalletName.TxUpdateMgr". '''
        return ((self.weakParent() and self.weakParent().diagnostic_name()) or "???") + "." + __class__.__name__
    def clean_up(self):
        ''' Disconnect from the parent window's signals (if it's still alive)
        and mark this instance as cleaned-up. '''
        self.cleaned_up = True
        main_window_parent = self.weakParent() # weak -> strong ref
        if main_window_parent:
            # TypeError is raised when a connection was never made / already gone.
            try: main_window_parent.history_updated_signal.disconnect(self.verifs_get_and_clear)
            except TypeError: pass
            try: main_window_parent.on_timer_signal.disconnect(self.do_check)
            except TypeError: pass
    def do_check(self):
        ''' Called from timer_actions in main_window to check if notifs or
        verifs need to update the GUI.
          - Checks the need_process_[v|n] flags
          - If either flag is set, call the @rate_limited process_verifs
            and/or process_notifs functions which update GUI parent in a
            rate-limited (collated) fashion (for decent GUI responsiveness). '''
        with self.lock:
            bV, bN = self.need_process_v, self.need_process_n
            self.need_process_v, self.need_process_n = False, False
        if bV: self.process_verifs()  # rate_limited call (1 per second)
        if bN: self.process_notifs()  # rate_limited call (1 per 15 seconds)
    def verifs_get_and_clear(self):
        ''' Clears the verif_q. This is called from the network
        thread for the 'verified2' event as well as from the below
        update_verifs (GUI thread), hence the lock. '''
        with self.lock:
            ret = self.verif_q
            self.verif_q = []
            self.need_process_v = False
            return ret
    def notifs_get_and_clear(self):
        ''' Atomically empty notif_q and return its previous contents. '''
        with self.lock:
            ret = self.notif_q
            self.notif_q = []
            self.need_process_n = False
            return ret
    def verif_add(self, args):
        ''' Network-thread callback: enqueue a 'verified' event for this wallet. '''
        # args: [wallet, tx_hash, height, conf, timestamp]
        # filter out tx's not for this wallet
        parent = self.weakParent()
        if not parent or parent.cleaned_up:
            return
        if args[0] is parent.wallet:
            with self.lock:
                self.verif_q.append(args[1:])
                self.need_process_v = True
    def notif_add(self, args):
        ''' Network-thread callback: enqueue a new-transaction notification. '''
        parent = self.weakParent()
        if not parent or parent.cleaned_up:
            return
        tx, wallet = args
        # filter out tx's not for this wallet
        if wallet is parent.wallet:
            with self.lock:
                self.notif_q.append(tx)
                self.need_process_n = True
    @rate_limited(1.0, ts_after=True)
    def process_verifs(self):
        ''' Update history list with tx's from verifs_q, but limit the
        GUI update rate to once per second. '''
        parent = self.weakParent()
        if not parent or parent.cleaned_up:
            return
        items = self.verifs_get_and_clear()
        if items:
            t0 = time.time()
            # Disable repaints/sorting while we batch-update rows for speed.
            parent.history_list.setUpdatesEnabled(False)
            parent.slp_history_list.setUpdatesEnabled(False)
            had_sorting = [ parent.history_list.isSortingEnabled(),
                            parent.slp_history_list.isSortingEnabled() ]
            if had_sorting[0]:
                parent.history_list.setSortingEnabled(False)
            if had_sorting[1]:
                parent.slp_history_list.setSortingEnabled(False)
            n_updates = 0
            for item in items:
                did_update = parent.history_list.update_item(*item)
                parent.slp_history_list.update_item_netupdate(*item)
                n_updates += 1 if did_update else 0
            self.print_error("Updated {}/{} verified txs in GUI in {:0.2f} ms"
                             .format(n_updates, len(items), (time.time()-t0)*1e3))
            if had_sorting[0]:
                parent.history_list.setSortingEnabled(True)
            if had_sorting[1]:
                parent.slp_history_list.setSortingEnabled(True)
            parent.slp_history_list.setUpdatesEnabled(True)
            parent.history_list.setUpdatesEnabled(True)
            parent.update_status()
            if parent.history_list.has_unknown_balances:
                self.print_error("History tab: 'Unknown' balances detected, will schedule a GUI refresh after wallet settles")
                self._full_refresh_ctr = 0
                self.full_hist_refresh_timer.start()
    # Class-level default for the retry counter used by the poller below;
    # shadowed by an instance attribute once a refresh is scheduled.
    _full_refresh_ctr = 0
    def schedule_full_hist_refresh_maybe(self):
        ''' self.full_hist_refresh_timer timeout slot. May schedule a full
        history refresh after wallet settles if we have "Unknown" balances. '''
        parent = self.weakParent()
        if self._full_refresh_ctr > 60:
            # Too many retries. Give up.
            self.print_error("History tab: Full refresh scheduler timed out.. wallet hasn't settled in 1 minute. Giving up.")
            self.full_hist_refresh_timer.stop()
        elif parent and parent.history_list.has_unknown_balances:
            # Still have 'Unknown' balance. Check if wallet is settled.
            if self.need_process_v or not parent.wallet.is_fully_settled_down():
                # Wallet not fully settled down yet... schedule this function to run later
                self.print_error("History tab: Wallet not yet settled.. will try again in 1 second...")
            else:
                # Wallet has settled. Schedule an update. Note this function may be called again
                # in 1 second to check if the 'Unknown' situation has corrected itself.
                self.print_error("History tab: Wallet has settled down, latching need_update to true")
                parent.need_update.set()
            self._full_refresh_ctr += 1
        else:
            # No more polling is required. 'Unknown' balance disappeared from
            # GUI (or parent window was just closed).
            self.full_hist_refresh_timer.stop()
            self._full_refresh_ctr = 0
    @rate_limited(5.0, classlevel=True)
    def process_notifs(self):
        ''' Pop all queued new-tx notifications and show a single collated
        desktop notification (rate-limited to once per 5s across instances). '''
        parent = self.weakParent()
        if not parent or parent.cleaned_up:
            return
        if parent.network:
            n_ok = 0
            txns = self.notifs_get_and_clear()
            if txns and parent.wallet.storage.get('gui_notify_tx', True):
                # Combine the transactions
                total_amount = 0
                tokens_included = set()
                for tx in txns:
                    if tx:
                        is_relevant, is_mine, v, fee = parent.wallet.get_wallet_delta(tx)
                        if is_relevant:
                            total_amount += v
                            n_ok += 1
                            if parent.is_slp_wallet:
                                # Collect SLP token names for the notification text.
                                try:
                                    tti = parent.wallet.get_slp_token_info(tx.txid())
                                    tokens_included.add(parent.wallet.token_types.get(tti['token_id'],{}).get('name','unknown'))
                                except KeyError:
                                    pass
                if tokens_included:
                    tokstring = _('. Tokens included: ') + ', '.join(sorted(tokens_included))
                else:
                    tokstring = ''
                if total_amount > 0:
                    self.print_error("Notifying GUI %d tx"%(n_ok))
                    if n_ok > 1:
                        parent.notify(_("{} new transactions: {}{}")
                                      .format(n_ok, parent.format_amount_and_units(total_amount, is_diff=True), tokstring))
                    else:
                        parent.notify(_("New transaction: {}{}").format(parent.format_amount_and_units(total_amount, is_diff=True), tokstring))
|
multiprocess_data_loader.py | from collections import deque
import logging
from multiprocessing.process import BaseProcess
import random
import traceback
from typing import List, Iterator, Optional, Iterable, Union, TypeVar
from overrides import overrides
import torch
import torch.multiprocessing as mp
from allennlp.common.util import lazy_groups_of, shuffle_iterable
from allennlp.common.tqdm import Tqdm
from allennlp.data.instance import Instance
from allennlp.data.data_loaders.data_loader import DataLoader, TensorDict
from allennlp.data.data_loaders.data_collator import DataCollator, DefaultDataCollator
from allennlp.data.dataset_readers import DatasetReader, WorkerInfo, DatasetReaderInput
from allennlp.data.fields import TextField
from allennlp.data.samplers import BatchSampler
from allennlp.data.vocabulary import Vocabulary
import allennlp.nn.util as nn_util
logger = logging.getLogger(__name__)
_T = TypeVar("_T")
@DataLoader.register("multiprocess")
class MultiProcessDataLoader(DataLoader):
"""
The `MultiProcessDataLoader` is a [`DataLoader`](../data_loader/#dataloader)
that's optimized for AllenNLP experiments.
See
[Using your reader with multi-process or distributed data loading](/api/data/dataset_readers/dataset_reader/#datasetreader.using_your_reader_with_multi-process_or_distributed_data_loading)
for more information on how to optimize your `DatasetReader` for use with this `DataLoader`.
# Parameters
reader: `DatasetReader`, required
A `DatasetReader` used to load instances from the `data_path`.
data_path: `DatasetReaderInput`, required
Passed to `DatasetReader.read()`.
!!! Note
In a typical AllenNLP configuration file, the `reader` and `data_path` parameters don't
get an entry under the `data_loader`. The `reader` is constructed separately from
the corresponding `dataset_reader` params, and the `data_path` is taken from the
`train_data_path`, `validation_data_path`, or `test_data_path`.
batch_size: `int`, optional (default = `None`)
When `batch_sampler` is unspecified, this option can be combined with `drop_last`
and `shuffle` to control automatic batch sampling.
drop_last: `bool`, optional (default = `False`)
When `batch_sampler` is unspecified, this option can be combined with `batch_size`
and `shuffle` to control automatic batch sampling.
If `True`, the last batch will be dropped if it doesn't contain a full `batch_size`
number of `Instance`s.
shuffle: `bool`, optional (default = `False`)
When `batch_sampler` is unspecified, this option can be combined with `batch_size`
and `drop_last` to control automatic batch sampling.
batch_sampler: `BatchSampler`, optional (default = `None`)
A `BatchSampler` to handle batching. This option is mutually exclusive with
`batch_size`, `drop_last`, and `shuffle`.
batches_per_epoch: `int`, optional (default = `None`)
If specified, exactly `batches_per_epoch` batches will be generated with each call
to `__iter__()`.
num_workers: `int`, optional (default = `0`)
The number of workers to use to read `Instances` in parallel.
If `num_workers = 0`, everything is done in the main process. Otherwise `num_workers`
workers are forked or spawned (depending on the value of `start_method`), each of which
calls `read()` on their copy of the `reader`.
This means that in order for multi-process loading to be efficient when `num_workers > 1`,
the `reader` needs to implement
[`manual_multiprocess_sharding`](/api/data/dataset_readers/dataset_reader/#datasetreader).
!!! Warning
Multi-processing code in Python is complicated! We highly recommend you read the short
[Best practices](#multiprocessdataloader.best_practices) and
[Common issues](#multiprocessdataloader.common_issues) sections below before using this option.
max_instances_in_memory: `int`, optional (default = `None`)
If not specified, all instances will be read and cached in memory for the duration
of the data loader's life. This is generally ideal when your data can fit in memory
during training. However, when your datasets are too big, using this option
will turn on lazy loading, where only `max_instances_in_memory` instances are processed
at a time.
!!! Note
This setting will affect how a `batch_sampler` is applied. If
`max_instances_in_memory` is `None`, the sampler will be applied to all `Instances`.
Otherwise the sampler will be applied to only `max_instances_in_memory` `Instances`
at a time.
Therefore when using this option with a sampler, you should generally set it to a multiple of
the sampler's `batch_size` (if it has one).
start_method: `str`, optional (default = `"fork"`)
The [start method](https://docs.python.org/3.7/library/multiprocessing.html#contexts-and-start-methods)
used to spin up workers.
On Linux or OS X, "fork" usually has the lowest overhead for starting workers
but could potentially lead to dead-locks if you're using lower-level libraries that are not fork-safe.
If you run into these issues, try using "spawn" instead.
cuda_device: `Optional[Union[int, str, torch.device]]`, optional (default = `None`)
If given, batches will automatically be put on this device.
!!! Note
This should typically not be set in an AllenNLP configuration file. The `Trainer`
will automatically call [`set_target_device()`](#set_target_device) before iterating
over batches.
quiet : `bool`, optional (default = `False`)
If `True`, tqdm progress bars will be disabled.
collate_fn : `DataCollator`, optional ( default = `DefaultDataCollator`)
# Best practices
- **Large datasets**
If your dataset is too big to fit into memory (a common problem), you'll need to load it lazily.
This is done by simply setting the `max_instances_in_memory` parameter to a non-zero integer.
The optimal value depends on your use case.
If you're using a `batch_sampler`, you will generally get better samples by setting
`max_instances_in_memory` to a higher number - such as 10 to 100 times your batch size -
since this determines how many `Instances` your `batch_sampler` gets to sample from at a time.
If you're not using a `batch_sampler` then this number is much less important. Setting it to
2 to 10 times your batch size is a reasonable value.
Keep in mind that using `max_instances_in_memory` generally results in a slower
training loop unless you load data in worker processes by setting the `num_workers` option to a
non-zero integer (see below). That way data loading won't block the main process.
- **Performance**
The quickest way to increase the performance of data loading is adjust the `num_workers` parameter.
`num_workers` determines how many workers are used to read `Instances` from your
`DatasetReader`. By default, this is set to `0`, which means everything is done in the main process.
Before trying to set `num_workers` to a non-zero number, you should make sure your `DatasetReader`
is [optimized for use with multi-process data loading]
(/api/data/dataset_readers/dataset_reader/#datasetreader.using_your_reader_with_multi-process_or_distributed_data_loading).
# Common issues
- **Dead-locks**
Multiprocessing code in Python is complicated! Especially code that involves lower-level libraries
which may be spawning their own threads. If you run into dead-locks while
using `num_workers > 0`, luckily there are two simple work-arounds which usually fix the issue.
The first work-around is to disable parallelism for these low-level libraries.
For example, setting the environment variables `OMP_NUM_THREADS=1` and `TOKENIZERS_PARALLELISM=0`
will do so for PyTorch and Numpy (for CPU operations) and HuggingFace Tokenizers, respectively.
Alternatively, changing the `start_method` to "spawn" (when available, depending on your OS)
may fix your issues without disabling parallelism for other libraries.
See [issue #4848](https://github.com/allenai/allennlp/issues/4848) for more info.
Dead-locks could also be caused by running out of shared memory (see below).
- **Shared memory restrictions**
Tensors are passed between processes using shared memory, and some systems impose strict
limits on the allowed size of shared memory.
Luckily this is simple to debug and simple to fix.
First, to verify that this is your issue just watch your shared memory as your data loader runs.
For example, run `watch -n 0.3 'df -h | grep shm'`.
If you're seeing your shared memory blow up until it maxes-out, then you either need to decrease
`max_instances_in_memory` or increase your system's `ulimit`.
If you're using Docker, you can increase the shared memory available on a container by running
it with the option `--ipc=host` or by setting `--shm-size`.
See [issue #4847](https://github.com/allenai/allennlp/issues/4847) for more info.
""" # noqa: E501
def __init__(
    self,
    reader: DatasetReader,
    data_path: DatasetReaderInput,
    *,
    batch_size: int = None,
    drop_last: bool = False,
    shuffle: bool = False,
    batch_sampler: BatchSampler = None,
    batches_per_epoch: int = None,
    num_workers: int = 0,
    max_instances_in_memory: int = None,
    start_method: str = "fork",
    cuda_device: Optional[Union[int, str, torch.device]] = None,
    quiet: bool = False,
    collate_fn: DataCollator = DefaultDataCollator(),
) -> None:
    """Validate mutually-exclusive options and set up loader state.

    See the class docstring for parameter documentation. Note that when
    `max_instances_in_memory` is None, *all* instances are eagerly read and
    cached at the end of construction (the `deque(..., maxlen=0)` idiom just
    drains the iterator for its caching side effect).
    """
    # Do some parameter validation.
    if num_workers is not None and num_workers < 0:
        raise ValueError("num_workers cannot be a negative number")
    if batch_size is not None and batch_size < 1:
        raise ValueError("batch_size must be at least 1")
    if batch_sampler is not None:
        # batch_sampler fully controls batching, so the manual knobs are illegal.
        if batch_size is not None:
            raise ValueError("batch_sampler option is mutually exclusive with batch_size")
        if drop_last:
            raise ValueError("batch_sampler option is mutually exclusive with drop_last")
        if shuffle:
            raise ValueError("batch_sampler option is mutually exclusive with shuffle")
    elif batch_size is None:
        raise ValueError("batch_size is required when batch_sampler is not supplied")
    if batches_per_epoch is not None and batches_per_epoch < 1:
        raise ValueError("batches_per_epoch must be at least 1")
    if max_instances_in_memory is not None:
        if batch_size is not None and max_instances_in_memory < batch_size:
            raise ValueError("max_instances_in_memory must be at least batch_size")
        elif max_instances_in_memory < 1:
            raise ValueError("max_instances_in_memory must be at least 1")
    self.reader = reader
    self.data_path = data_path
    self.batch_size = batch_size
    self.drop_last = drop_last
    self.shuffle = shuffle
    self.batch_sampler = batch_sampler
    self.batches_per_epoch = batches_per_epoch
    self.num_workers = num_workers
    self.collate_fn = collate_fn
    self.max_instances_in_memory = max_instances_in_memory
    self.start_method = start_method
    self.quiet = quiet
    self.cuda_device: Optional[torch.device] = None
    if cuda_device is not None:
        if not isinstance(cuda_device, torch.device):
            self.cuda_device = torch.device(cuda_device)
        else:
            self.cuda_device = cuda_device
    # Can only initialize CUDA in workers when these `start_methods` are used.
    self._worker_cuda_safe = self.start_method in {"spawn", "forkserver"}
    # To make sure we have some backpressure in the worker queues we try to set
    # reasonable defaults for the maximum size of these queues.
    # They have to be big enough that it doesn't hurt performance, but small enough
    # that they don't take up too many resources when there is a bottleneck on the
    # consuming end of a queue.
    effective_batch_size = (
        self.batch_size if self.batch_sampler is None else self.batch_sampler.get_batch_size()
    )
    self._max_instance_queue_size = (
        None
        if max_instances_in_memory is None
        else 2 * self.num_workers * max_instances_in_memory
    )
    self._max_batch_queue_size = (
        None
        if max_instances_in_memory is None
        else 2 * self.num_workers * max_instances_in_memory // (effective_batch_size or 1)
    )
    # If max_instances_in_memory is not given, we'll keep a cache of all instances in this list.
    self._instances: Optional[List[Instance]] = None
    # Keeps track of state when `batches_per_epoch` is used.
    self._batch_generator: Optional[Iterator[TensorDict]] = None
    # For indexing instances.
    self._vocab: Optional[Vocabulary] = None
    if self.max_instances_in_memory is None:
        # Load all instances right away.
        deque(self.iter_instances(), maxlen=0)
@overrides
def index_with(self, vocab: Vocabulary) -> None:
    """Store `vocab` and index any already-cached instances with it."""
    self._vocab = vocab
    if not self._instances:
        return
    for cached_instance in self._instances:
        cached_instance.index_fields(vocab)
@overrides
def __len__(self) -> int:
    """Return the number of batches this loader yields per epoch.

    Raises
    ------
    TypeError
        If this is a lazy loader (`max_instances_in_memory` is set) and
        `batches_per_epoch` was not given, since the number of batches is
        then unknowable without reading the whole dataset.
    """
    if self.batches_per_epoch is not None:
        return self.batches_per_epoch
    elif self.max_instances_in_memory is None:
        # We haven't read the instances yet, so we do so now, caching them as we go.
        if not self._instances:
            deque(self.iter_instances(), maxlen=0)
        if self.batch_sampler is not None:
            return self.batch_sampler.get_num_batches(self._instances)  # type: ignore
        num_instances = len(self._instances)  # type: ignore
        # We know batch_size won't be None here since `batch_sampler` is None.
        batch_size: int = self.batch_size  # type: ignore
        if self.drop_last or num_instances % batch_size == 0:
            return num_instances // batch_size
        else:
            return 1 + num_instances // batch_size
    else:
        # We can't know the number of batches for a lazy loader when batches_per_epoch
        # is not specified.
        # FIX: the bare `raise TypeError` here was opaque; give callers an
        # actionable message (exception type unchanged for compatibility).
        raise TypeError(
            "len() of a lazy MultiProcessDataLoader (max_instances_in_memory is set) "
            "is undefined unless 'batches_per_epoch' is specified"
        )
@overrides
def __iter__(self) -> Iterator[TensorDict]:
    """Yield tensorized batches for one epoch.

    When `batches_per_epoch` is set, a single long-lived batch generator is
    carried across epochs (stashed on `self` between calls) so the stream of
    batches is chopped into fixed-size epochs; otherwise every batch is
    yielded exactly once.
    """
    if self._vocab is None:
        raise ValueError(
            "This DataLoader has not been indexed with a Vocabulary yet. "
            "Did you forget to call DataLoader.index_with(vocab)?"
        )
    if self.batches_per_epoch is None:
        yield from self._iter_batches()
    else:
        if self._batch_generator is not None:
            # Resume the generator left over from the previous epoch.
            batch_generator = self._batch_generator
            # Can't have a pointer to this in `self` when we try to spawn workers.
            self._batch_generator = None
        else:
            batch_generator = self._iter_batches()
        for i in range(self.batches_per_epoch):
            try:
                yield next(batch_generator)
            except StopIteration: # batch_generator is exhausted
                batch_generator = self._iter_batches() # so refresh it
                yield next(batch_generator)
        # Stash the generator for the next epoch to pick up where we left off.
        self._batch_generator = batch_generator
@overrides
def iter_instances(self) -> Iterator[Instance]:
    """Yield `Instance`s, reading them on first use.

    If a complete in-memory cache exists (non-lazy loader after a prior full
    pass), it is simply replayed. Otherwise instances are read from the
    reader — in-process when `num_workers <= 0`, or through a joinable
    multiprocessing queue fed by worker processes — and appended to the
    cache as we go when caching is enabled.
    """
    if self._instances:
        yield from self._instances
    else:
        if self.max_instances_in_memory is None:
            # Non-lazy: this pass will populate the full cache.
            self._instances = []
        if self.num_workers <= 0:
            # Just read all instances in main process.
            for instance in self._maybe_tqdm(
                self.reader.read(self.data_path), desc="loading instances"
            ):
                self.reader.apply_token_indexers(instance)
                if self.max_instances_in_memory is None:
                    self._instances.append(instance) # type: ignore
                if self._vocab is not None:
                    instance.index_fields(self._vocab)
                yield instance
        else:
            ctx = mp.get_context(self.start_method)
            # Bounded queue (when lazy) provides backpressure on the workers.
            queue: mp.JoinableQueue = (
                ctx.JoinableQueue()
                if self._max_instance_queue_size is None
                else ctx.JoinableQueue(maxsize=self._max_instance_queue_size)
            )
            workers = self._start_instance_workers(queue, ctx)
            try:
                # `_gather_instances` applies token indexers / vocab indexing.
                for instance in self._maybe_tqdm(
                    self._gather_instances(queue), desc="loading instances"
                ):
                    if self.max_instances_in_memory is None:
                        self._instances.append(instance) # type: ignore
                    yield instance
            finally:
                if hasattr(queue, "close"): # for compat with different Python versions.
                    queue.close() # type: ignore[attr-defined]
                self._join_workers(workers, queue)
@overrides
def set_target_device(self, device: torch.device) -> None:
    """Record `device` so subsequently-yielded batches are moved onto it."""
    self.cuda_device = device
def _iter_batches(self) -> Iterator[TensorDict]:
    """Yield tensorized batches, using batch worker processes when configured.

    If instances are already cached in memory, or `num_workers <= 0`, batching
    happens in-process. Otherwise each worker reads + batches its shard of
    the data and pushes `(batch, error)` tuples onto a joinable queue, with
    `(None, None)` acting as a per-worker 'done' sentinel.
    """
    if self._instances is not None or self.num_workers <= 0:
        for batch in self._instances_to_batches(self.iter_instances(), move_to_device=True):
            yield batch
    else:
        ctx = mp.get_context(self.start_method)
        # Bounded queue (when lazy) provides backpressure on the workers.
        queue: mp.JoinableQueue = (
            ctx.JoinableQueue()
            if self._max_batch_queue_size is None
            else ctx.JoinableQueue(maxsize=self._max_batch_queue_size)
        )
        workers = self._start_batch_workers(queue, ctx)
        try:
            # We can now start consuming from the `queue` as the batch workers
            # produce batches.
            done_count: int = 0
            while done_count < self.num_workers:
                # `iter(queue.get, (None, None))` stops at each worker's sentinel.
                for batch, worker_error in iter(queue.get, (None, None)):
                    if worker_error is not None:
                        # Re-raise the worker's exception with its traceback text.
                        e, tb = worker_error
                        raise WorkerError(e, tb)
                    if not self._worker_cuda_safe and self.cuda_device is not None:
                        # Need to move batch to target device now.
                        batch = nn_util.move_to_device(batch, self.cuda_device)
                    yield batch
                    queue.task_done()
                done_count += 1
        finally:
            if hasattr(queue, "close"): # for compat with different Python versions.
                queue.close() # type: ignore[attr-defined]
            self._join_workers(workers, queue)
def _start_instance_workers(self, queue: mp.JoinableQueue, ctx) -> List[BaseProcess]:
    """Create and start one daemonized `_instance_worker` process per worker,
    all feeding the shared `queue`. Returns the started processes."""
    processes: List[BaseProcess] = [
        ctx.Process(target=self._instance_worker, args=(wid, queue), daemon=True)
        for wid in range(self.num_workers)
    ]
    for process in processes:
        process.start()
    return processes
def _start_batch_workers(self, queue: mp.JoinableQueue, ctx) -> List[BaseProcess]:
    """Create and start one daemonized `_batch_worker` process per worker,
    all feeding the shared `queue`. Returns the started processes."""
    processes: List[BaseProcess] = [
        ctx.Process(target=self._batch_worker, args=(wid, queue), daemon=True)
        for wid in range(self.num_workers)
    ]
    for process in processes:
        process.start()
    return processes
def _join_workers(self, workers: List[BaseProcess], queue) -> None:
    """Release workers blocked on `queue.join()` and terminate stragglers.

    Every worker finishes by blocking on `queue.join()`. Calling
    `task_done()` once per worker balances the sentinel items they enqueued,
    allowing each `join()` to return so the workers can exit on their own.
    """
    for _ in workers:
        try:
            queue.task_done()
        except ValueError:
            # task_done() called more times than items were queued — a worker
            # died early, so there's nothing left to release.
            break
    # If for some reason any workers didn't exit properly, terminate them anyway.
    for worker in workers:
        if worker.is_alive():
            worker.terminate()
def _instance_worker(self, worker_id: int, queue: mp.JoinableQueue) -> None:
    """Entry point for an instance worker process.

    Reads this worker's shard of the data and puts `(instance, None)` tuples
    onto `queue`. Any exception is shipped back as `(None, (repr, traceback))`,
    and `(None, None)` is always sent last as the 'done' sentinel. The worker
    then blocks on `queue.join()` until the consumer has drained everything.
    """
    try:
        self.reader._set_worker_info(WorkerInfo(self.num_workers, worker_id))
        instances = self.reader.read(self.data_path)
        checked_for_token_indexers: bool = False
        for instance in instances:
            # Check the first instance to make sure it doesn't contain any TextFields with
            # token_indexers because we don't want to be duplicating those by sending
            # them across processes.
            if not checked_for_token_indexers:
                for field_name, field in instance.fields.items():
                    if isinstance(field, TextField) and field._token_indexers is not None:
                        raise ValueError(
                            f"Found a TextField ({field_name}) with token_indexers already "
                            "applied, but you're using num_workers > 0 in your data loader. "
                            "Make sure your dataset reader's text_to_instance() method doesn't "
                            "add any token_indexers to the TextFields it creates. Instead, the token_indexers "
                            "should be added to the instances in the apply_token_indexers() method of your "
                            "dataset reader (which you'll have to implement if you haven't done "
                            "so already)."
                        )
                checked_for_token_indexers = True
            queue.put((instance, None))
    except Exception as e:
        queue.put((None, (repr(e), traceback.format_exc())))
    # Indicate to the consumer that this worker is finished.
    queue.put((None, None))
    # Wait until this process can safely exit.
    queue.join()
def _batch_worker(self, worker_id: int, queue: mp.JoinableQueue) -> None:
    """Worker process target: read instances, batch/tensorize them, feed ``queue``.

    Queue items are ``(batch, error)`` pairs mirroring ``_instance_worker``;
    a trailing ``(None, None)`` marks this worker as finished.
    """
    try:
        self.reader._set_worker_info(WorkerInfo(self.num_workers, worker_id))
        instances = self.reader.read(self.data_path)
        for batch in self._instances_to_batches(
            instances, move_to_device=self._worker_cuda_safe
        ):
            queue.put((batch, None))
    except Exception as e:
        # Forward the failure (repr + formatted traceback) to the main process.
        queue.put((None, (repr(e), traceback.format_exc())))
    # Indicate to the consumer (main thread) that this worker is finished.
    queue.put((None, None))
    # Wait until this process can safely exit.
    queue.join()
def _gather_instances(self, queue: mp.JoinableQueue) -> Iterable[Instance]:
    """Yield instances produced by the instance workers.

    Consumes ``(instance, error)`` pairs until every worker has sent its
    ``(None, None)`` end-marker; worker failures are re-raised as
    ``WorkerError`` in this (the consuming) process.
    """
    done_count: int = 0
    while done_count < self.num_workers:
        # `iter(queue.get, (None, None))` stops at one worker's end-marker.
        for instance, worker_error in iter(queue.get, (None, None)):
            if worker_error is not None:
                e, tb = worker_error
                raise WorkerError(e, tb)
            self.reader.apply_token_indexers(instance)
            if self._vocab is not None:
                instance.index_fields(self._vocab)
            yield instance
            queue.task_done()
        done_count += 1
def _index_instance(self, instance: Instance) -> Instance:
    """Apply token indexers and index ``instance`` against the vocab, in place."""
    self.reader.apply_token_indexers(instance)
    # A vocabulary must already have been set before batches are produced.
    assert self._vocab is not None
    instance.index_fields(self._vocab)
    return instance
def _instances_to_batches(
    self, instance_iterator: Iterable[Instance], move_to_device
) -> Iterator[TensorDict]:
    """Group (lazily indexed) instances into tensorized batches.

    :param instance_iterator: instances to batch; indexed on the fly here.
    :param move_to_device: when true and a CUDA device is configured,
        batches are moved to that device as part of tensorization.
    """
    instance_iterator = (self._index_instance(instance) for instance in instance_iterator)
    if move_to_device and self.cuda_device is not None:
        tensorize = lambda batch: nn_util.move_to_device(  # noqa: E731
            self.collate_fn(batch), self.cuda_device
        )
    else:
        tensorize = self.collate_fn
    if self.batch_sampler is not None:
        # The sampler needs random access, so instances are materialized in
        # chunks (bounded by max_instances_in_memory when set).
        instance_chunks: Iterable[List[Instance]]
        if self.max_instances_in_memory is not None:
            instance_chunks = lazy_groups_of(instance_iterator, self.max_instances_in_memory)
        else:
            instance_chunks = [list(instance_iterator)]
        for instances in instance_chunks:
            batches = (
                [instances[i] for i in batch_indices]
                for batch_indices in self.batch_sampler.get_batch_indices(instances)
            )
            for batch in batches:
                yield tensorize(batch)
    else:
        # Safe to assume this is not `None` when `self.batch_sampler` is `None`.
        assert self.batch_size is not None
        if self.shuffle:
            if self.max_instances_in_memory is not None:
                # Streaming shuffle within a bounded in-memory window.
                instance_iterator = shuffle_iterable(
                    instance_iterator,
                    self.max_instances_in_memory,
                )
            else:
                # At this point we've already loaded the instances in memory and indexed them,
                # so this won't take long.
                instance_iterator = list(instance_iterator)
                random.shuffle(instance_iterator)
        for batch in lazy_groups_of(instance_iterator, self.batch_size):
            if self.drop_last and len(batch) < self.batch_size:
                break
            yield tensorize(batch)
def _maybe_tqdm(self, iterator: Iterable[_T], **tqdm_kwargs) -> Iterable[_T]:
    """Wrap ``iterator`` in a Tqdm progress bar unless quiet mode is on."""
    return iterator if self.quiet else Tqdm.tqdm(iterator, **tqdm_kwargs)
class WorkerError(Exception):
    """
    An error raised when a worker fails.

    The message embeds the worker-side exception repr plus its (indented)
    traceback, so the failure is readable from the consuming process.
    """

    def __init__(self, original_err_repr: str, traceback: str) -> None:
        # `traceback` is the worker's `traceback.format_exc()` output
        # (note: the parameter intentionally shadows the `traceback` module).
        super().__init__(
            f"worker raised {original_err_repr}\n\n"
            " Traceback from worker:\n " + "".join(traceback)
            # Remove the first line of the traceback since it's redundant.
            .replace("Traceback (most recent call last):\n", "")
            # Give a little indentation so it's clear this traceback is separate from the traceback
            # in the main process.
            .replace("\n", "\n ")
        )
|
sourcetrail.py | """Sourcetrail server"""
import socket
import errno
import threading
import os.path
import encodings.idna
import subprocess
import time
import vim
try:
# Python 3
import socketserver
except ImportError:
# Python 2
import SocketServer as socketserver
MESSAGE_SPLIT_STRING = ">>"
class Options:
    """Singleton holding the sourcetrail plugin settings read from vim.

    Port numbers and the ip address come from the user's vim configuration
    via ``sourcetrail#get(...)`` and are cached on a single shared instance.
    """

    # Cached singleton instance; created lazily by inst()/reload().
    instance = None

    def __init__(self):
        self.port_vim_to_sourcetrail = int(vim.eval('sourcetrail#get("vim_to_sourcetrail_port")'))
        self.port_sourcetrail_to_vim = int(vim.eval('sourcetrail#get("sourcetrail_to_vim_port")'))
        self.ip_addr = vim.eval('sourcetrail#get("sourcetrail_ip")')

    @classmethod
    def reload(cls):
        """Re-read the options from vim, replacing the cached instance."""
        cls.instance = Options()

    @classmethod
    def inst(cls):
        """Get the (lazily created) singleton instance of Options."""
        if cls.instance is None:
            cls.reload()
        return cls.instance

    @classmethod
    def get_port_vim_to_sourcetrail(cls):
        """Returns the port sourcetrail listens to."""
        return cls.inst().port_vim_to_sourcetrail

    @classmethod
    def get_port_sourcetrail_to_vim(cls):
        """Returns the port vim listens to."""
        return cls.inst().port_sourcetrail_to_vim

    @classmethod
    def get_ip(cls):
        """Return the ip address."""
        return cls.inst().ip_addr

    @classmethod
    def print_settings(cls):
        """Prints the options."""
        print("Sourcetrail Settings: \n--------------------")
        print("Ports: ")
        print("g:sourcetrail_to_vim_port: " + str(cls.inst().port_sourcetrail_to_vim))
        print("g:vim_to_sourcetrail_port: " + str(cls.inst().port_vim_to_sourcetrail))
        print("Ip: ")
        print("g:sourcetrail_ip : " + cls.inst().ip_addr)
        print("--------------------")
class ConnectionHandler(socketserver.BaseRequestHandler):
    # This class is instantiated once per connection to the server.
    """Handler for incoming messages sent by Sourcetrail.

    Messages look like ``moveCursor>>file>>row>>col<EOM>`` or
    ``ping>>...<EOM>``; fields are separated by MESSAGE_SPLIT_STRING and the
    payload is terminated by the ``<EOM>`` marker.
    """

    # Seconds before an idle connection is dropped.
    timeout = 5

    def handle(self):
        data = self.request.recv(1024).strip()
        text = data.decode('utf-8')
        eom_index = text.find("<EOM>")
        if eom_index > 0:
            # Everything before the end-of-message marker is the payload.
            message_string = text[0:eom_index]
            message_fields = message_string.split(MESSAGE_SPLIT_STRING)
            if message_fields[0] == "moveCursor":
                Sourcetrail.set_new_buffer(message_fields[1], \
                    int(message_fields[2]), int(message_fields[3]))
            if message_fields[0] == "ping":
                Sourcetrail.send_message("ping>>Vim<EOM>".encode())
        else:
            # eom_index == 0 means an empty payload; -1 means the <EOM>
            # marker is missing.  Previously marker-less messages were
            # processed with their last character chopped off, and the
            # empty case printed a leftover debug string ("asdfasfd").
            print("Sourcetrail plugin: received malformed or empty message")
class Sourcetrail:
    """Singleton coordinating communication between vim and Sourcetrail.

    Stores the cursor position most recently received from Sourcetrail and
    owns the TCP server that listens for incoming Sourcetrail messages.
    """

    _instance = None

    def __init__(self):
        # Cursor position/file last received from Sourcetrail.
        self.__col = 0
        self.__row = 0
        self.__file = ""
        # True when a freshly received position still needs to be applied.
        self.__update = False
        # The listening TCP server, or None while not started.
        self.__server = None

    def __del__(self):
        self.stop_server()

    @classmethod
    def inst(cls):
        """Get the singleton instance of Sourcetrail, creating it on demand."""
        if cls._instance is None:
            cls._instance = Sourcetrail()
        return cls._instance

    @classmethod
    def row(cls):
        """returns the current row"""
        return cls.inst().__row

    @classmethod
    def col(cls):
        """returns the current column"""
        return cls.inst().__col

    @classmethod
    def file(cls):
        """returns the current file"""
        return cls.inst().__file

    @classmethod
    def start_server(cls):
        """Start the TCP server listening for Sourcetrail messages (idempotent)."""
        if cls.inst().__server is None:
            try:
                socketserver.ThreadingTCPServer.allow_reuse_address = True
                address = (Options.get_ip(), Options.get_port_sourcetrail_to_vim())
                cls.inst().__server = socketserver.ThreadingTCPServer(address, ConnectionHandler)
                server_thread = threading.Thread(target=cls.inst().__server.serve_forever)
                # Daemon thread: don't keep vim alive on exit.
                server_thread.daemon = True
                server_thread.start()
            except socket.error:
                print("Socket needed for Sourcetrail plugin already in use")

    @classmethod
    def stop_server(cls):
        """stop listening to the port"""
        if cls.inst().__server is not None:
            cls.inst().__server.shutdown()
            cls.inst().__server.server_close()

    @classmethod
    def restart_server(cls):
        """Restart the server, re-reading the options first."""
        cls.inst().stop_server()
        Options.reload()
        cls.inst().start_server()

    @classmethod
    def send_activate_token(cls):
        """Send the current cursor position to Sourcetrail as setActiveToken."""
        cls.inst().start_server()
        filepath = vim.current.buffer.name
        (row, col) = vim.current.window.cursor
        col += 1  # cols returned by rowcol() are 0-based.
        text = "setActiveToken" + MESSAGE_SPLIT_STRING \
            + filepath + MESSAGE_SPLIT_STRING + str(row) \
            + MESSAGE_SPLIT_STRING + str(col) + "<EOM>"
        data = text.encode()
        try:
            cls.inst().send_message(data)
            print("Current Position sent to Sourcetrail")
        except socket.error:
            # Typo fix: was "Counld not send to Sourcetrail".
            print("Could not send to Sourcetrail")

    @classmethod
    def send_message(cls, message):
        """Send raw bytes to Sourcetrail over a fresh TCP connection.

        Raises socket.error on connection/send failure.
        """
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        try:
            sock.connect((Options.get_ip(), Options.get_port_vim_to_sourcetrail()))
            sock.send(message)
        finally:
            # Close in all cases; on failure the original socket.error now
            # propagates intact.  (Previously `raise socket.error` replaced
            # it with an empty, detail-free exception and leaked the socket.)
            sock.close()

    @classmethod
    def set_new_buffer(cls, filepath, row, col):
        """Record a new cursor target received from Sourcetrail."""
        cls.inst().__col = col
        cls.inst().__row = row
        cls.inst().__file = filepath
        cls.inst().__update = True

    @classmethod
    def update_buffer(cls):
        """Apply the last received position to the current vim buffer."""
        if cls.inst().__server is None:
            cls.inst().start_server()
            print("Vim was not listening to Sourcetrail. Vim is listening now.")
            print("Try to send again from Sourcetrail.")
        else:
            if cls.inst().__update:
                # Must clear the __update flag before "e!" due to autocmd nesting
                cls.inst().__update = False
                vim.command("e! " + cls.inst().__file)
                vim.current.window.cursor = (cls.inst().__row, cls.inst().__col)

    @classmethod
    def print_settings(cls):
        """ Prints Settings """
        Options.inst().print_settings()
|
splib.py | # Superparameteriation coupling code for OpenIFS <--> Dales
#
# Fredrik Jansson, Gijs van den Oord
# 2017-2019
#
from __future__ import division
from __future__ import print_function
import json
import logging
import os
import shutil
import threading
from Queue import Queue # note named queue in python 3
import datetime
import numpy
import sys
import time
import modfac
import spcpl
import sputils
import spio
import spmpi
import psutil
from amuse.community import *
from amuse.rfi import channel # to query MPI threading support
#from amuse.rfi.channel import AsyncRequestsPool
from amuse.rfi.async_request import AsyncRequestsPool
# Logger
log = logging.getLogger(__name__)

# Module configuration variables.  Every name below can be overridden from a
# JSON file or dict via read_config().

# --- GCM (OpenIFS) settings ---
gcm_type = "oifs"
gcm_steps = 10  # number of gcm time steps to perform
gcm_exp_name = "TEST"  # openifs experiment name
gcm_input_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "../oifs-input")  # openifs input directory
gcm_run_dir = "oifs-work"  # openifs run directory
gcm_num_procs = 1  # openifs MPI tasks
gcm_redirect = "file"  # redirection for gcm
gcm_forcing_factor = 1  # scale factor for forcings upon openifs

# --- LES (Dales) settings ---
les_type = "dales"
les_dt = 60  # les time step (<0: adaptive)
les_spinup = 0  # les spinup time before the first GCM step (seconds)
les_spinup_steps = 1  # number of intervals the spinup is divided into
les_spinup_forcing_factor = 1.
les_exp_name = "test"  # les experiment name
les_input_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "../dales-input")  # les input directory
les_run_dir = "dales-work"  # les run directory
les_num_procs = 1  # MPI tasks per les instance
les_redirect = "file"  # redirection for les
les_forcing_factor = 1  # scale factor for forcings upon les
les_queue_threads = sys.maxint  # les run scheduling (1: all serial, > 1: nr. of concurrent worker threads)
max_num_les = -1  # Maximal number of LES instances
init_les_state = True  # initialize les instances to the openifs column state

# --- Output and run control ---
output_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "../spifs-output")  # Output folder
output_name = "spifs.nc"  # output netcdf file name
channel_type = "sockets"  # amuse communication type (choose from ["sockets","mpi"])
dryrun = False  # if true, only start the GCM to examine the grid.
async_evolve = True  # time step LES instances using asynchronous amuse calls instead of Python threads (experimental)
restart = False  # restart an old run
cplsurf = False  # couple surface fields
firststep = True  # flag for this being the first step - experimentally used in restarts to not log the weird first step
qt_forcing = "sp"

# Model instances:
# Global circulation model
gcm_model = None
# Local large eddy simulation models
les_models = []
output_column_indices = []
output_columns = []  # tuple (index, lat, lon)
errorFlag = False  # flag raised when a worker thread generates an exception
# Writes gridpoint file for the input geometry
def save_dryrun_info(lons, lats):
    """Write the GCM grid point coordinates to gridpoints.txt, then shut down.

    Used in dry runs where only the grid geometry is of interest; this
    function does not return (it calls sys.exit()).
    """
    log.info("Dry run - saving grid point coordinates in gridpoints.txt.")
    coords = numpy.column_stack((lons, lats))
    numpy.savetxt('gridpoints.txt', coords, fmt='%10.6f')
    log.info("Dry run finished - will exit now.")
    finalize()
    sys.exit()
# Initializes the system
def initialize(config, geometries, output_geometries=None):
    """Set up the GCM and the embedded LES instances.

    :param config: configuration file path or dict (see read_config()).
    :param geometries: masks selecting the GCM columns that get an embedded LES.
    :param output_geometries: optional masks selecting extra columns for
        netCDF output only (no embedded LES).
    :return: (gcm_model, les_models)
    """
    global gcm_model, les_models, output_name, async_evolve, output_column_indices, output_columns
    read_config(config)
    if not restart and os.path.exists(output_dir):
        raise Exception("output dir %s exists"%output_dir)
    # if output_name is a relative path (as by default),
    # treat it as relative to output_dir
    if not os.path.isabs(output_name):
        output_name = os.path.join(output_dir, output_name)
    # TODO: validate input parameters
    run_dir = os.path.join(output_dir, gcm_run_dir)
    if channel_type == "nospawn":
        spmpi.send_model_colors(gcm_num_procs, les_num_procs, max_num_les)
    # TODO: Replace Dales and openifs channel factory methods...
    gcm_model = gcm_init(gcm_type, gcm_input_dir, run_dir, couple_surface=cplsurf)
    les_models = []
    lons = gcm_model.longitudes.value_in(units.deg)
    lats = gcm_model.latitudes.value_in(units.deg)
    # Columns that get an embedded LES instance.
    grid_indices = sputils.get_mask_indices(zip(lons, lats), geometries, max_num_les)
    output_geoms = [] if output_geometries is None else output_geometries
    output_column_indices = sputils.get_mask_indices(zip(lons, lats), output_geoms)
    # exclude columns with embedded LES from the output_column_indices
    output_column_indices = list(set(output_column_indices) - set(grid_indices))
    output_columns = [(i, lats[i], lons[i]) for i in output_column_indices]
    log.info("Creating LES models in grid columns ")  # % str(grid_indices))
    for i in grid_indices:
        log.info("%7d x=%8.3f y=%8.3f" % (i, lons[i], lats[i]))
    log.info("Extra netCDF output for grid columns ")  # % str(grid_indices))
    for c in output_columns:
        log.info("%7d x=%8.3f y=%8.3f" % (c[0], c[2], c[1]))
    if dryrun:
        # Dry run: write the grid point list and exit (does not return).
        save_dryrun_info(lons, lats)
    local_les_input_dir = os.path.join(output_dir, 'les-input')
    if not restart:
        # Copy les input directory into run directory. Pass the local copy to the les model init.
        shutil.copytree(les_input_dir, local_les_input_dir)
    # LES instances start les_spinup seconds before the GCM start time.
    startdate = gcm_model.get_start_datetime() - datetime.timedelta(seconds=les_spinup)
    for i in grid_indices:
        instance_run_dir = os.path.join(output_dir, les_run_dir + '-' + str(i))
        les = les_init(les_type, local_les_input_dir, instance_run_dir, startdate, i)
        gcm_model.set_mask(i)  # tell GCM that a LES instance is present at this point
        les.grid_index = i
        les.lat, les.lon = lats[i], lons[i]
        les_models.append(les)
    spio.init_netcdf(output_name, gcm_model, les_models, startdate, output_columns, append=restart,
                     with_surf_vars=cplsurf)
    log.info("Successfully initialized GCM and %d LES instances" % len(les_models))
    # Switch off async in case any model doesn't support it
    async_evolve = async_evolve and reduce(lambda p, q: p and q,
                                           [getattr(m, "support_async", True) for m in [gcm_model] + les_models])
    if channel_type != "sockets":
        # the actual thread level provided by the MPI library
        log.info(
            "MpiChannel.is_multithreading_supported(): %s" % (str(channel.MpiChannel.is_multithreading_supported())))
        if not channel.MpiChannel.is_multithreading_supported():
            if not async_evolve and les_queue_threads > 1:
                log.info(
                    "Options are set to run Dales instances from separate python threads but the MPI in use does not "
                    "support multithreading. Exit.")
                sys.exit()
    gcm_model.first_half_step_done = False
    if not restart:
        numpy.random.seed(42)  # seed generator the same way every time - for repeatable simulation
        # do first half of first time step in openIFS now, so that U,V,T get initialized
        log.info("gcm.evolve_model_until_cloud_scheme() - first step")
        gcm_model.evolve_model_until_cloud_scheme()
        log.info("gcm.evolve_model_cloud_scheme() - first step")
        gcm_model.evolve_model_cloud_scheme()
        gcm_model.first_half_step_done = True  # set flag here, to avoid repeating the half step
        # spio.update_time(gcm_model.get_model_time())
        spinup_delta_t = les_spinup / les_spinup_steps
        spio.update_time(spinup_delta_t | units.s)  # time stamps are for the LES time at the end of the step
        if init_les_state:
            spcpl.gather_gcm_data(gcm_model, les_models, True)
            # Note: no surface fluxes can be fetched after a full time step
            # But now we experimentally stay in the middle of the first step,
            # and then we can fetch them
            # get the state and apply it on les as initial state
            for les in les_models:
                u, v, thl, qt, ps, ql = spcpl.convert_profiles(les)
                spcpl.set_les_state(les, u, v, thl, qt, ps)
        if les_spinup > 0:
            run_spinup(les_models, gcm_model, les_spinup, les_spinup_steps)
    else:  # we're doing a restart
        pass
    return gcm_model, les_models
# Run loop: executes nsteps time steps of the super-parametrized GCM
def run(nsteps):
    """Execute nsteps time steps of the super-parametrized GCM."""
    # Handle on the current process, for resource-usage reporting.
    master_proc = psutil.Process(os.getpid())
    use_queue = 1 < les_queue_threads < len(les_models)
    # TODO: Check whether the gcm supports another nsteps steps
    work_queue, worker_threads = None, []
    if use_queue:
        work_queue, worker_threads = start_worker_threads(les_queue_threads)
    # timestep models together
    for _ in range(nsteps):
        step(work_queue)
        log.info('python master usage: %s' % str(master_proc.memory_full_info()))
        log.info('System total: %s' % str(psutil.virtual_memory()))
        log.info(' ---- Time step done ---')
    if use_queue:
        stop_worker_threads(work_queue, worker_threads)
# Spinup loop: executes nsteps time steps of the super-parametrized GCM
def run_spinup(les_list, gcm, spinup_length, spinup_steps=1):
    """Spinup loop: run spinup_steps LES steps covering spinup_length seconds."""
    use_queue = 1 < les_queue_threads < len(les_list)
    # TODO: Check whether the gcm supports another nsteps steps
    work_queue, worker_threads = None, []
    if use_queue:
        work_queue, worker_threads = start_worker_threads(les_queue_threads)
    dt = spinup_length / spinup_steps
    for i in range(spinup_steps):
        if i == spinup_steps - 1:
            # The final iteration absorbs any division remainder.
            dt = spinup_length - (spinup_steps - 1) * dt
        step_spinup(les_list, work_queue, gcm, spinup_length=dt)
        log.info('System total: %s' % str(psutil.virtual_memory()))
        log.info(' ---- Spinup done ---')
    if use_queue:
        stop_worker_threads(work_queue, worker_threads)
# File handle for the per-step timing log (opened lazily).
timing_file = None


def open_timing_file():
    """Open the timing log for appending; on fresh runs write the header."""
    global timing_file
    timing_file = open(output_dir + '/timing.txt', 'a')
    # at a fresh start, write a list of the LES grid points to the file
    if not restart:
        header = '# LES grid points\n'
        header += ' '.join([str(les.grid_index) for les in les_models])
        header += '\n# timing data\n'
        timing_file.write(header)
# do one gcm time step
# step les until it catches up
def step(work_queue=None):
    """Perform one coupled time step.

    Sequence: first half of the GCM step (until/including the cloud scheme),
    forcing exchange with the LES models, LES catch-up, then the second half
    of the GCM step.  Wall-clock timings of each phase are appended to the
    timing file.
    """
    global timing_file,firststep
    if not timing_file:
        open_timing_file()
    # don't write to spifs.nc at the first step of a restarted run.
    # the first step seems to repeat the last step of the previous run.
    writeCDF = (not(restart and firststep))
    t = gcm_model.get_model_time()
    delta_t = gcm_model.get_timestep()
    log.info("gcm time at start of timestep is %s" % str(t))
    # want this message before the time stepping
    # until_cloud_scheme and cloud_scheme below do not change the model time
    starttime = time.time()
    gcm_walltime1 = -time.time()
    if writeCDF and not firststep:
        spio.update_time(gcm_model.get_model_time() + (les_spinup | units.s) + delta_t)
    try:
        if gcm_model.first_half_step_done:
            # if we already did the first half step as part of the initialization,
            # don't repeat it now.
            gcm_model.first_half_step_done = False
        else:
            log.info("gcm.evolve_model_until_cloud_scheme()")
            gcm_model.evolve_model_until_cloud_scheme()
            log.info("gcm.evolve_model_cloud_scheme()")
            gcm_model.evolve_model_cloud_scheme()  # note: overwrites set tendencies
    except Exception as e:
        log.error("Exception when time-stepping openIFS: %s Exiting." % e.message)
        log.error(sys.exc_info())
        finalize()
        sys.exit(1)
    gcm_walltime1 += time.time()
    gcm_model.step += 1
    # Pull the GCM column state needed by the LES instances.
    gather_gcm_data_walltime = -time.time()
    spcpl.gather_gcm_data(gcm_model, les_models, cplsurf, output_column_indices, write=writeCDF)
    gather_gcm_data_walltime += time.time()
    set_les_forcings_walltime = -time.time()
    for les in les_models:
        spcpl.set_les_forcings(les, gcm_model, dt_gcm=delta_t, factor=les_forcing_factor,
                               couple_surface=cplsurf, qt_forcing=qt_forcing, write=writeCDF)
    set_les_forcings_walltime += time.time()
    # step les models to the end time of the current GCM step = t + delta_t
    les_wall_times = step_les_models(t + delta_t, work_queue, offset=les_spinup)
    set_gcm_tendencies_walltime = -time.time()
    # get les state - for forcing on OpenIFS and les stats
    for les in les_models:
        spcpl.set_gcm_tendencies(gcm_model, les, factor=gcm_forcing_factor, write=writeCDF)
    set_gcm_tendencies_walltime += time.time()
    gcm_walltime2 = -time.time()
    gcm_model.evolve_model_from_cloud_scheme()
    gcm_walltime2 += time.time()
    log.info("gcm evolved to %s" % str(gcm_model.get_model_time()))
    # One timing line per step: start time, phase wall times, per-LES times.
    s = ('%10.2f %6.2f %6.2f %6.2f %6.2f %6.2f' % (starttime, gcm_walltime1, gather_gcm_data_walltime, set_les_forcings_walltime, set_gcm_tendencies_walltime, gcm_walltime2)
         + ' ' + ' '.join(['%6.2f' % t for t in les_wall_times]) + '\n')
    timing_file.write(s)
    timing_file.flush()
    # sync spifs.nc now, if we have no LES models.
    # if we do have LES models, we sync elsewhere, while the LES models are busy.
    if len(les_models) == 0:
        spio.sync_root()
    firststep = False
# Perform one spinup step: force and advance the LES instances while the GCM stands still
def step_spinup(les_list, work_queue, gcm, spinup_length):
    """Advance the LES instances by one spinup interval under fixed GCM forcing.

    :param spinup_length: length of this spinup interval in seconds (plain number).
    """
    global timing_file, firststep
    if not any(les_list): return
    if not timing_file:
        open_timing_file()
    if not firststep:
        # in the very first step, this has already been done in the initialization
        spio.update_time(les_list[0].get_model_time() + (spinup_length | units.s))
    starttime = time.time()
    t_les = les_list[0].get_model_time()
    set_les_forcings_walltime = -time.time()
    for les in les_list:
        spcpl.set_les_forcings(les, gcm, dt_gcm=spinup_length| units.s, factor=les_spinup_forcing_factor,
                               couple_surface=cplsurf, qt_forcing=qt_forcing)
    set_les_forcings_walltime += time.time()
    # step les models
    les_wall_times = step_les_models(t_les + (spinup_length | units.s), work_queue, offset=0)
    set_gcm_tendencies_walltime = -time.time()  # assign the profile writing time to the same slot as setting gcm tendencies
    for les in les_list:
        spcpl.write_les_profiles(les)
    set_gcm_tendencies_walltime += time.time()
    firststep = False
    # The GCM does not advance during spinup; its timing slots are zero.
    gcm_walltime1 = 0
    gcm_walltime2 = 0
    gather_gcm_data_walltime = 0
    s = ('%10.2f %6.2f %6.2f %6.2f %6.2f %6.2f' % (starttime, gcm_walltime1, gather_gcm_data_walltime, set_les_forcings_walltime, set_gcm_tendencies_walltime, gcm_walltime2)
         + ' ' + ' '.join(['%6.2f' % t for t in les_wall_times]) + '\n')
    timing_file.write(s)
    timing_file.flush()
# Function for stopping gcm and all les instances
# this is called both at a normal exit and when an exception
# is generated in one of the worker threads.
# The goal then is to 1) make all threads quit, so that the job ends instead of hanging
# 2) make all worker_threads quit as nicely as possible so that results are saved
def finalize(save_restart=True):
    """Stop the GCM and all LES instances, optionally writing LES restart files.

    Called both on a normal exit and when a worker thread raises an
    exception.  The goal then is to (1) make all threads quit, so the job
    ends instead of hanging, and (2) stop everything as gracefully as
    possible so that results are saved.
    """
    if save_restart:
        log.info("Asking Dales to save restart files.")
        # save LES restart files
        for les in les_models:
            les.write_restart()
    log.info("spifs cleanup...")
    log.info("Stopping gcm...")
    try:
        gcm_model.cleanup_code()
        gcm_model.stop()
    except Exception as e:
        # Best-effort: a failing GCM stop must not prevent LES cleanup.
        log.error("Exception while stopping gcm: %s" % e.message)
    log.info("Stopping LES instances...")
    for les in les_models:
        try:
            les.cleanup_code()
            les.stop()
        except Exception as e:
            log.error("Exception while stopping LES at index %d: %s" % (les.grid_index, e.message))
    # Flush and close the netcdf output.
    spio.cdf_root.close()
    log.info("spifs cleanup done")
# Reads input parameters from input file or dictionary
def read_config(config):
    """Read module configuration from a JSON file path or a dict.

    Recognized keys overwrite this module's configuration variables; unknown
    keys and callable values are skipped with a log message.
    """
    userconf = {}
    if isinstance(config, str):
        if os.path.isfile(config):
            with open(config) as f:
                userconf = json.load(f)
        else:
            log.error("Could not find input configuration file %s" % config)
    elif isinstance(config, dict):
        userconf = config
    else:
        # Bug fix: this branch used `elif userconf:`, which can never be true
        # here (userconf is still {}), so unsupported config types were
        # silently ignored instead of reported.
        log.error("Could not read configurations from object of type %s" % type(config))
    if userconf:
        log.info("Configuration options read: %s" % str(userconf))
    for key in userconf:
        if key in globals():
            if hasattr(userconf[key], "__call__"):
                log.info("Skipping setting module function %s to value %s..." % (key, str(userconf[key])))
                continue
            log.info("Setting module variable %s to value %s..." % (key, str(userconf[key])))
            globals()[key] = userconf[key]
# Creates and initialized the GCM
def gcm_init(gcmtype, inputdir, workdir, couple_surface):
    """Create and initialize the GCM model instance.

    :param couple_surface: when False, vertical diffusion stays active
        inside the superparameterization mask.
    """
    typekey = gcmtype
    if gcmtype == modfac.dummy_type:
        typekey = modfac.dummy_gcm_type
    if gcmtype == modfac.ncbased_type:
        typekey = modfac.ncfile_gcm_type
    gcm = modfac.create_model(typekey, inputdir, workdir,
                              nprocs=gcm_num_procs,
                              redirect=gcm_redirect,
                              channel_type=channel_type,
                              restart=restart,
                              restart_steps=gcm_steps)
    gcm.initialize_code()
    gcm.exp_name = gcm_exp_name
    gcm.num_steps = gcm_steps
    gcm.step = 0
    gcm.commit_parameters()
    gcm.commit_grid()
    log.info("gcm_init called with couple_surface = " + str(couple_surface))
    gcm.set_vdf_in_sp_mask(not couple_surface)
    return gcm
# Creates and initialized a LES model
def les_init(lestype, inputdir, workdir, starttime, index):
    """Create and initialize a LES model instance for grid column ``index``."""
    typekey = lestype
    if lestype == modfac.dummy_type:
        typekey = modfac.dummy_les_type
    if lestype == modfac.ncbased_type:
        typekey = modfac.ncfile_les_type
    # Periodic restart writing is disabled (trestart=0); a single restart is
    # requested explicitly at the end of the run, in finalize().
    trestart = 0 | units.s
    les = modfac.create_model(typekey, inputdir, workdir,
                              nprocs=les_num_procs,
                              redirect=les_redirect,
                              channel_type=channel_type,
                              restart=restart,
                              trestart=trestart,
                              starttime=starttime,
                              index=index,
                              qt_forcing=qt_forcing)
    # les.initialize_code()
    les.commit_parameters()
    les.commit_grid()
    return les
# Starts a number of threads for running les models
def start_worker_threads(num_threads):
    """Start ``num_threads`` worker threads sharing one work queue.

    Returns (work_queue, worker_threads).
    """
    work_queue = Queue()
    worker_threads = []
    for idx in range(num_threads):
        thread = threading.Thread(target=worker, args=(work_queue, idx), name="worker " + str(idx))
        worker_threads.append(thread)
        thread.start()
    return work_queue, worker_threads
# a worker thread - using a work queue
def worker(work_queue, i):
    """Worker thread body: run queued LES steps until told to quit.

    Regular work items are ``(les, model_time, offset)`` tuples; an item
    whose first field is None is the shared quit signal.
    """
    global errorFlag
    while True:
        item = work_queue.get()
        if item[0] is None:
            # Quit signal.  Inspect before unpacking: the signal may be a
            # two-field (None, None) pair while regular work items carry
            # three fields (the old code unpacked first and crashed with a
            # ValueError on the sentinel).  Re-enqueue it unchanged so the
            # other workers see it too.
            log.info("Worker thread %d exiting" % i)
            work_queue.put(item)
            return  # stop this thread
        # Tolerate legacy two-field items by defaulting offset to 0.
        les, model_time, offset = (tuple(item) + (0,))[:3]
        log.info("Worker thread %d evolves les at index %d to time %s" % (i, les.grid_index, model_time))
        try:
            step_les(les, model_time, offset)
        except Exception as e:
            # Raise the shared error flag so the master aborts the run;
            # previously an exception here killed the thread silently.
            errorFlag = True
            log.error("Worker thread %d: exception while stepping les: %s" % (i, e))
        finally:
            # Always acknowledge the item so work_queue.join() cannot deadlock.
            work_queue.task_done()
# Signals and waits for all worker threads to stop.
def stop_worker_threads(work_queue, worker_threads):
    """Ask all worker threads to quit, then wait until they have."""
    log.info("Signalling worker threads to quit...")
    # A leading-None item is the agreed quit signal; each worker re-enqueues
    # it so a single put reaches every thread.
    work_queue.put((None, None))
    log.info("Waiting for worker threads to quit...")
    for thread in worker_threads:
        thread.join()
# TODO: with the queue, could let dales instances run immediately when their forcings are set
# instead of setting all forcings then running all
# TODO: with the queue, could let dales instances run immediately when their forcings are set
# instead of setting all forcings then running all
def step_les_models(model_time, work_queue, offset=les_spinup):
    """Advance all LES models to ``model_time`` plus ``offset`` seconds.

    Depending on configuration this uses asynchronous AMUSE calls, one Python
    thread per model, the shared work queue, or a sequential loop.  Returns
    the per-model wall times (only filled in the async case).
    """
    global errorFlag
    les_wall_times = []
    if not any(les_models):
        return les_wall_times
    if les_queue_threads >= len(les_models):  # Step all dales models in parallel
        if async_evolve:  # evolve all dales models with asynchronous Amuse calls
            reqs = []
            pool = AsyncRequestsPool()
            for les in les_models:
                req = les.evolve_model.asynchronous(model_time + (offset | units.s), exactEnd=True)
                reqs.append(req)
                pool.add_request(req)
            # now while the dales threads are working, sync the netcdf to disk
            spio.sync_root()
            # wait for all threads
            pool.waitall()
            try:
                les_wall_times = [r.result().value_in(units.s) for r in reqs]
                log.info("async step_les_models() done. Elapsed times:" + str(['%5.1f' % t for t in les_wall_times]))
            except Exception as e:
                log.error("Exception caught while gathering results: %s" % e.message)
        else:  # evolve all dales models using python threads
            threads = []
            for les in les_models:
                t = threading.Thread(target=step_les, args=(les, model_time, offset), name=str(les.grid_index))
                threads.append(t)
                t.start()
            # now while the dales threads are working, sync the netcdf to disk
            spio.sync_root()
            # wait for all threads
            for t in threads:
                t.join()
    elif les_queue_threads > 1:
        for les in les_models:
            # Bug fix: the offset used to be omitted here, so the worker
            # thread's three-field unpack failed on these two-field items.
            work_queue.put((les, model_time, offset))  # enqueue all dales instances
        # now while the dales threads are working, sync the netcdf to disk
        spio.sync_root()
        work_queue.join()  # wait for all dales work to be completed
        if errorFlag:
            log.info("One thread failed - exiting ...")
            # stop_worker_threads(work_queue) # signal worker threads to quit - now an atexit function, should not
            # need it here
            finalize()
            sys.exit(1)
    else:  # sequential version
        for les in les_models:
            step_les(les, model_time, offset)
    return les_wall_times
# step a dales instance to a given Time
def step_les(les, stoptime, offset=0):
    """Advance one Dales instance to ``stoptime`` plus ``offset`` seconds.

    With a positive les_dt the evolution is chopped into fixed-length
    intervals so statistics can be gathered during the run; otherwise the
    model is stepped to the target time in a single call.
    """
    start = time.time()
    # small les time steps for cloud field gathering
    step_dt = les_dt | units.s
    epsilon = 1 | units.s  # tolerance for fp comparison
    if not les_dt > 0:
        # simply step until caught up
        les.evolve_model(stoptime + (offset | units.s), exactEnd=1)
        # Bug fix: `t` was never assigned in this branch, so the log line
        # below raised a NameError whenever les_dt <= 0.
        t = les.get_model_time()
    else:
        # fixed-length stepping intervals to save statistics during the les run
        t = les.get_model_time()
        log.info("Les at point %d starts at %.0f, should reach %.0f with offset %d" % (
            les.grid_index, t.value_in(units.s), stoptime.value_in(units.s), offset))
        while t < stoptime - epsilon + (offset | units.s):
            t += step_dt
            les.evolve_model(t, exactEnd=1)
            t = les.get_model_time()
    walltime = time.time() - start
    log.info("Les at point %d evolved to %.0f s - elapsed %f s" % (les.grid_index, t.value_in(units.s), walltime))
|
_fakefs.py | import atexit
import errno
import inspect
import os
import stat
import subprocess
from threading import Thread
from uuid import uuid4
try:
import fuse # pytype: disable=import-error
except ImportError: # pragma: no cover
raise ImportError(
inspect.cleandoc(
'''
Failed to import fuse, the following steps show you how to install it:
sudo apt install -y fuse libfuse-dev
pip3 install fuse-python --user
'''))
if not hasattr(fuse, '__version__'): # pragma: no cover
raise RuntimeError(
"your fuse-py doesn't know of fuse.__version__, probably it's too old.")
fuse.fuse_python_api = (0, 2)
def translate_error_to_errno(error):  # pragma: no cover
    """Map a Python exception instance to a negative errno code for FUSE.

    Order matters: FileNotFoundError and PermissionError are themselves
    EnvironmentError (OSError) subclasses, so the specific ones are tested
    first; anything else falls back to ENOSYS.
    """
    if isinstance(error, FileNotFoundError):
        return -errno.ENOENT
    if isinstance(error, PermissionError):
        return -errno.EACCES
    if isinstance(error, EnvironmentError):
        return -errno.ENODEV
    return -errno.ENOSYS
def translate_path(path):  # pragma: no cover
    """Turn a mounted path like ``/s3:/bucket/key`` back into ``s3://bucket/key``.

    Plain absolute paths (no ``proto:`` first component) come back unchanged.
    """
    parts = path.split('/')
    assert parts[0] == '', path
    if not parts[1].endswith(':'):
        return path
    return parts[1] + '//' + '/'.join(parts[2:])
class FakeFS(fuse.Fuse):
    """A read-only FUSE filesystem that proxies paths to megfile's smart API.

    Paths below the mountpoint such as ``/s3:/bucket/key`` are translated
    back into ``s3://bucket/key`` and served through smart_open/smart_stat.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Unique per-instance mountpoint.
        self.mountpoint = os.path.join('/tmp/refile/fake', str(uuid4()))
        self.started = False
        # Open file handles, keyed by translated path.
        self.files = {}

    def getattr(self, path):  # pragma: no cover
        """Return a fuse.Stat for ``path``: regular read-only file if it
        exists as a file, otherwise a directory entry."""
        from megfile.smart import smart_isfile, smart_stat
        path = translate_path(path)
        try:
            result = fuse.Stat()
            if smart_isfile(path):
                info = smart_stat(path)
                result.st_mode = stat.S_IFREG | 0o444
                result.st_nlink = 1
                result.st_size = info.size
                result.st_mtime = info.mtime
            else:
                result.st_mode = stat.S_IFDIR | 0o755
                result.st_nlink = 1
        except Exception as error:
            return translate_error_to_errno(error)
        return result

    def open(self, path, flags):  # pragma: no cover
        """Open ``path`` read-only; any write access is refused with EACCES."""
        from megfile.smart import smart_open
        path = translate_path(path)
        accmode = os.O_RDONLY | os.O_WRONLY | os.O_RDWR
        if (flags & accmode) != os.O_RDONLY:
            return -errno.EACCES
        if path not in self.files:
            self.files[path] = smart_open(path, 'rb')

    def release(self, path, flags):  # pragma: no cover
        """Close and forget the handle for ``path`` (no-op if unknown)."""
        path = translate_path(path)
        if path not in self.files:
            return
        file = self.files[path]
        file.close()
        del self.files[path]

    def read(self, path, size, offset):  # pragma: no cover
        """Read ``size`` bytes at ``offset`` from the previously opened handle."""
        path = translate_path(path)
        if path not in self.files:
            return -errno.ENOENT
        file = self.files[path]
        file.seek(offset)
        return file.read(size)

    def start(self):
        """Create the mountpoint and run the FUSE main loop in a daemon thread."""
        if self.started:
            return
        os.makedirs(self.mountpoint)
        self.parse([self.mountpoint, '-f'], errex=1)
        self.daemon = Thread(target=self.main, daemon=True)
        self.daemon.start()
        atexit.register(self.stop)
        self.started = True

    def stop(self):
        """Close all handles and unmount the filesystem."""
        if not self.started:
            return
        for file in self.files.values():
            file.close()
        self.files = {}
        try:
            subprocess.check_call(['fusermount', '-u', self.mountpoint])
        except subprocess.CalledProcessError:
            # Bug fix: check_call signals failure with CalledProcessError,
            # not FileExistsError, so the sudo fallback was unreachable and
            # unmount failures simply propagated.
            subprocess.check_call(['sudo', 'umount', self.mountpoint])
        os.rmdir(self.mountpoint)
        atexit.unregister(self.stop)
        self.started = False
fakefs = FakeFS()
|
utils.py | """
Distributed under the terms of the BSD 3-Clause License.
The full license is in the file LICENSE, distributed with this software.
Author: Jun Zhu <jun.zhu@xfel.eu>
Copyright (C) European X-Ray Free-Electron Laser Facility GmbH.
All rights reserved.
"""
import os
import psutil
import multiprocessing as mp
import functools
import subprocess
from threading import RLock, Thread
import time
from .logger import logger
# profiler will only print out information if the execution of the given
# function takes more than the threshold value.
PROFILER_THREASHOLD = 1.0  # in ms
def profiler(info, *, process_time=False):
    """Decorator that logs how long the wrapped callable took to execute.

    A debug message is emitted only when the call exceeds
    PROFILER_THREASHOLD milliseconds.

    :param str info: label identifying the call in the log message.
    :param bool process_time: measure CPU time instead of wall-clock time.
    """
    def wrap(f):
        @functools.wraps(f)
        def timed_f(*args, **kwargs):
            clock = time.process_time if process_time else time.perf_counter
            started = clock()
            result = f(*args, **kwargs)
            elapsed_ms = 1000 * (clock() - started)
            if elapsed_ms > PROFILER_THREASHOLD:
                logger.debug(f"Process time spent on {info}: {elapsed_ms:.3f} ms")
            return result
        return timed_f
    return wrap
class BlockTimer:
    """Context manager reporting the wall-clock duration of a code block.

    For example::

        >>> with BlockTimer("foo"):
        ...     time.sleep(1)
        ...
        Execution of foo: 1.001s
    """
    def __init__(self, label="block", enabled=True):
        """Create the timer object.

        :param str label: A name to identify the block being timed.
        :param bool enabled: Whether or not to enable this timer.
        """
        self._label = label
        self._enabled = enabled

    def __enter__(self):
        # record the wall-clock start time
        self.start = time.perf_counter()
        return self

    def __exit__(self, *_):
        if not self._enabled:
            return
        elapsed = time.perf_counter() - self.start
        logger.info(f"Execution of {self._label}: {elapsed:.4f}s")
# Sentinel distinguishing "not cached yet" from a cached value of None.
_NOT_FOUND = object()
class cached_property:
    """cached_property since Python3.8

    Descriptor that computes the decorated method once per instance and then
    stores the result in the instance's ``__dict__`` under the same name, so
    subsequent reads bypass the descriptor entirely. A shared RLock guards
    the first computation against concurrent threads.
    """
    def __init__(self, func):
        # the wrapped function; called once per instance
        self.func = func
        # attribute name, filled in by __set_name__ at class creation time
        self.attrname = None
        self.__doc__ = func.__doc__
        self.lock = RLock()
    def __set_name__(self, owner, name):
        # Called by the class machinery; records the attribute name and
        # rejects reuse of one descriptor under two different names.
        if self.attrname is None:
            self.attrname = name
        elif name != self.attrname:
            raise TypeError(
                f"Cannot assign the same cached_property to two different "
                f"names ({self.attrname!r} and {name!r})."
            )
    def __get__(self, instance, owner):
        # Class-level access returns the descriptor itself.
        if instance is None:
            return self
        if self.attrname is None:
            raise TypeError(
                "Cannot use cached_property instance without calling "
                "__set_name__ on it.")
        try:
            cache = instance.__dict__
        except AttributeError:
            # not all objects have __dict__ (e.g. class defines slots)
            msg = (
                f"No '__dict__' attribute on {type(instance).__name__!r} "
                f"instance to cache {self.attrname!r} property."
            )
            raise TypeError(msg) from None
        val = cache.get(self.attrname, _NOT_FOUND)
        if val is _NOT_FOUND:
            with self.lock:
                # check if another thread filled cache while we awaited lock
                # (double-checked locking: only one thread runs self.func)
                val = cache.get(self.attrname, _NOT_FOUND)
                if val is _NOT_FOUND:
                    val = self.func(instance)
                    try:
                        cache[self.attrname] = val
                    except TypeError:
                        # e.g. a mappingproxy or otherwise read-only __dict__
                        msg = (
                            f"The '__dict__' attribute on "
                            f"{type(instance).__name__!r} instance does not "
                            f"support item assignment for caching "
                            f"{self.attrname!r} property."
                        )
                        raise TypeError(msg) from None
        return val
def _get_system_cpu_info():
"""Get the system cpu information."""
class CpuInfo:
def __init__(self, n_cpus=None):
self.n_cpus = n_cpus
def __repr__(self):
return f"[CPU] count: {self.n_cpus}"
return CpuInfo(mp.cpu_count())
def _get_system_memory_info():
    """Get the system memory information.

    :return: an object with ``total_memory`` / ``used_memory`` in bytes.
    """
    class MemoryInfo:
        def __init__(self, total_memory=None, used_memory=None):
            self.total_memory = total_memory
            self.used_memory = used_memory
        def __repr__(self):
            return f"[Memory] " \
                   f"total: {self.total_memory / 1024**3:.1f} GB, " \
                   f"used: {self.used_memory / 1024**3:.1f} GB"
    vm = psutil.virtual_memory()
    return MemoryInfo(total_memory=vm.total, used_memory=vm.used)
def _get_system_gpu_info():
    """Get the system GPU information via ``nvidia-smi``.

    :return: a single GpuInfo when exactly one GPU is found, a list of
        GpuInfo for multiple GPUs, and an empty GpuInfo when no GPU or
        driver is available.
    """
    class GpuInfo:
        def __init__(self,
                     gpu_name=None,
                     total_memory=None,
                     used_memory=None):
            self.name = gpu_name
            self.total_memory = total_memory
            self.used_memory = used_memory
        def __repr__(self):
            if self.name is None:
                return f"[GPU] Not found"
            return f"[GPU] " \
                   f"name: {self.name}, " \
                   f"total: {self.total_memory / 1024**3:.1f} GB, " \
                   f"used: {self.used_memory / 1024**3:.1f} GB"
    command = ["nvidia-smi",
               "--query-gpu=name,memory.total,memory.used",
               "--format=csv,noheader,nounits"]
    try:
        p = psutil.Popen(command, stdout=subprocess.PIPE)
        stdout, _ = p.communicate()
        output = stdout.decode('UTF-8')
        info = []
        # BUG FIX: nvidia-smi separates records with '\n' regardless of
        # platform, but the original split on os.linesep, which is '\r\n'
        # on Windows and broke the parsing there. splitlines() handles
        # every newline convention.
        for line in output.splitlines():
            if line:
                splitted = line.split(',')
                if len(splitted) != 3:
                    logger.error(
                        f"Received unexpected query result for GPU: {line}")
                    info.append(GpuInfo())
                else:
                    name = splitted[0]
                    total = int(splitted[1]) * 1024**2  # MB -> byte
                    used = int(splitted[2]) * 1024**2  # MB -> byte
                    info.append(GpuInfo(name, total, used))
        if len(info) == 1:
            return info[0]
        return info
    except FileNotFoundError as e:
        # raised when 'nvidia-smi' does not exist
        logger.debug(repr(e))
        return GpuInfo()
    except Exception as e:
        # We don't want to prevent the app from starting simply because
        # failing to get the GPU information.
        logger.info(
            f"Unexpected error when querying GPU information: {repr(e)}")
        return GpuInfo()
def check_system_resource():
    """Check the resource of the current system.

    :return tuple: (cpu_info, gpu_info, memory_info) summary objects.
    """
    return (_get_system_cpu_info(),
            _get_system_gpu_info(),
            _get_system_memory_info())
class _MetaSingleton(type):
"""Meta class and bookkeeper for Singletons."""
_instances = dict()
def __call__(cls, *args, **kwargs):
if cls not in cls._instances:
cls._instances[cls] = super().__call__(*args, **kwargs)
return cls._instances[cls]
def query_yes_no(question):
    """Ask a yes/no question and return the answer.

    :param str question: the question string.

    :return bool: True for yes and False for no.
    """
    ans = input(f"{question} (y/n)").lower()
    while True:
        if ans not in ['y', 'yes', 'n', 'no']:
            # BUG FIX: lower-case the re-prompted answer too; the original
            # did not, so entering 'Y' or 'YES' at the second prompt kept
            # looping forever.
            ans = input('please enter yes (y) or no (n): ').lower()
            continue
        if ans == 'y' or ans == 'yes':
            return True
        if ans == 'n' or ans == 'no':
            return False
def run_in_thread(daemon=False):
    """Run a function/method in a thread.

    :param bool daemon: whether the spawned thread is a daemon thread.

    :return: decorator whose wrapper starts the call in a new ``Thread``
        and returns that thread (so callers may ``join()`` it).
    """
    def wrap(f):
        @functools.wraps(f)
        def threaded_f(*args, **kwargs):
            worker = Thread(target=f, args=args, kwargs=kwargs, daemon=daemon)
            worker.start()
            return worker
        return threaded_f
    return wrap
|
socket_handler.py | import socket
import socketserver
import struct
import threading
import json
import logging
from models.user import User
from models.phrasestroke import PhraseStroke
from server import database as db
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)
SOCKET_PORT = 9696
class ThreadedTCPRequestHandler(socketserver.BaseRequestHandler):
    """Handles one keylogger client connection.

    Wire format: a 4-byte big-endian unsigned length prefix, followed by a
    JSON payload with 'keys' (list of phrase strokes) and 'user'.
    """
    def handle(self):
        # Read message length first and unpack it into an integer
        raw_msglen = self.recvall(self.request, 4)
        if not raw_msglen:
            return None
        msglen = struct.unpack('>I', raw_msglen)[0]
        # Read the message data
        logger.debug("Receiving message of length {}".format(msglen))
        message = self.recvall(self.request, msglen)
        if message is None:
            # BUG FIX: recvall returns None when the connection drops
            # mid-message; the original passed that straight to
            # json.loads(None) and crashed with a TypeError.
            logger.debug("Connection closed before full message received")
            return None
        message_json = json.loads(message)
        phrases_list = []
        for k in message_json['keys']:
            phrases_list.append(PhraseStroke.from_json(k))
        user = User.from_json(message_json['user'])
        db_user = db.get_or_create_user(user)
        user.tags = db_user['tags']
        # Run the keras convolutional neural network, append tags
        user.set_tags()
        user.tags = list(set(user.tags))
        db.insert_phrases(user, phrases_list)
    def recvall(self, sock, n):
        """Receive exactly *n* bytes from *sock*, or None if EOF is hit first."""
        data = b''
        while len(data) < n:
            packet = sock.recv(n - len(data))
            if not packet:
                return None
            data += packet
        return data
class KeyLoggerTCPServer(socketserver.ThreadingMixIn, socketserver.TCPServer):
    # ThreadingMixIn dispatches each incoming connection to its own handler
    # thread; no behaviour beyond the stock TCPServer is needed.
    pass
def start_socket_server():
    """Start the threaded keylogger TCP server in a background thread.

    :return: the server instance, so callers can invoke ``shutdown()`` /
        ``server_close()`` later. The original discarded the instance,
        which made a clean shutdown impossible; returning it is
        backward-compatible since existing callers simply ignore it.
    """
    server_address = ('0.0.0.0', SOCKET_PORT)
    server = KeyLoggerTCPServer(server_address, ThreadedTCPRequestHandler)
    # Start a thread with the server; a daemon thread won't block exit.
    server_thread = threading.Thread(target=server.serve_forever, daemon=True)
    server_thread.start()
    logger.info("Starting socket server on address {}".format(server_address))
    return server
|
vad_test.py | #!/usr/bin/env python3
###################################################################################################
#
# Project: Embedded Learning Library (ELL)
# File: vad_test.py
# Authors: Chris Lovett
#
# Requires: Python 3.x, numpy, tkinter, matplotlib
#
###################################################################################################
import argparse
import json
import os
import sys
from threading import Thread, Lock, get_ident
import tkinter as tk
from tkinter import BOTH, RIGHT, TOP, X, END
from tkinter import Text
from tkinter.ttk import Frame, LabelFrame, Button, Label, Entry
import numpy as np
import matplotlib
# Embedding matplotlib plots in tkinter views requires using the "TkAgg" backend
matplotlib.use("TkAgg")
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
from matplotlib.figure import Figure
import matplotlib.pyplot as pyplot
import matplotlib.animation as animation
import featurizer
import wav_reader
import microphone
import vad
script_dir = os.path.dirname(os.path.abspath(__file__))
sys.path += [os.path.join(script_dir, "training")]
import make_vad
class VadTest(Frame):
""" A demo class that provides simple GUI for testing voice activity detection on microphone or wav file input. """
def __init__(self, featurizer_path, input_device, wav_file, sample_rate, auto_scale):
""" Initialize the VadTest object:
featurizer_path - path to the ELL featurizer to use
input_device - id of the microphone to use
wav_file - optional wav_file to use when you click play
sample_rate - the sample rate to resample the incoming audio
auto_scale - auto scale audio input to the range [-1, 1]
"""
super().__init__()
self.FEATURIZER_PATH_KEY = "featurizer_path"
self.WAV_FILE_KEY = "wav_file"
self.main_thread = get_ident()
self.output_clear_time = 5000
self.channels = 1
self.init_ui()
self.auto_scale = auto_scale
self.get_settings_file_name()
self.load_settings()
self.max_spectrogram_width = 120
self.spectrogram_image = None
self.spectrogram_image_data = None
self.show_spectrogram = True
self.colormap_name = "inferno"
self.min_value = 0.0
self.max_value = 1.0
self.update_minmax = True
self.levels = []
self.signals = []
self.featurizer_path = None
self.featurizer = None
self.reading_input = False
# Threads
self.read_input_thread = None
self.lock = Lock()
self.main_thread = get_ident()
self.message_queue = []
self.animation = None
# featurizer
if featurizer_path:
self.featurizer_path = featurizer_path
self.settings[self.FEATURIZER_PATH_KEY] = featurizer_path
elif self.FEATURIZER_PATH_KEY in self.settings:
self.featurizer_path = self.settings[self.FEATURIZER_PATH_KEY]
self.sample_rate = sample_rate
self.input_device = input_device
self.wav_filename = None
self.wav_file = None
if wav_file:
self.wav_filename = wav_file
self.settings[self.WAV_FILE_KEY] = wav_file
if self.wav_filename is None and self.WAV_FILE_KEY in self.settings:
self.wav_filename = self.settings[self.WAV_FILE_KEY]
self.wav_file_list = None
self.speaker = None
self.microphone = None
self.save_settings() # in case we just changed it.
if self.featurizer_path:
self.load_featurizer_model(os.path.abspath(self.featurizer_path))
else:
self.show_output("Please specify and load a feature model")
self.update_ui()
def init_ui(self):
self.master.title("VAD Test")
self.pack(side="top", fill=BOTH, expand=True)
# VAD Controls section for controlling these VAD settings:
controls_frame = LabelFrame(self, text="Controls", height=30)
Label(controls_frame, text="tau_up:").grid(row=0, column=0)
self.tau_up = Entry(controls_frame, width=15)
self.tau_up.grid(row=1, column=0)
Label(controls_frame, text="tau_down:").grid(row=0, column=1)
self.tau_down = Entry(controls_frame, width=15)
self.tau_down.grid(row=1, column=1)
Label(controls_frame, text="threshold_up:").grid(row=0, column=2)
self.threshold_up = Entry(controls_frame, width=15)
self.threshold_up.grid(row=1, column=2)
Label(controls_frame, text="threshold_down:").grid(row=0, column=3)
self.threshold_down = Entry(controls_frame, width=15)
self.threshold_down.grid(row=1, column=3)
Label(controls_frame, text="large_input:").grid(row=0, column=4)
self.large_input = Entry(controls_frame, width=15)
self.large_input.grid(row=1, column=4)
Label(controls_frame, text="gain_att:").grid(row=0, column=5)
self.gain_att = Entry(controls_frame, width=15)
self.gain_att.grid(row=1, column=5)
Label(controls_frame, text="level_threshold:").grid(row=0, column=6)
self.level_threshold = Entry(controls_frame, width=15)
self.level_threshold.grid(row=1, column=6)
controls_frame.pack(side=TOP)
# Input section
input_frame = LabelFrame(self, text="Input")
input_frame.bind("-", self.on_minus_key)
input_frame.bind("+", self.on_plus_key)
input_frame.pack(fill=X)
self.play_button = Button(input_frame, text="Play", command=self.on_play_button_click)
self.play_button.pack(side=RIGHT, padx=4)
self.rec_button = Button(input_frame, text="Rec", command=self.on_rec_button_click)
self.rec_button.pack(side=RIGHT, padx=4)
self.wav_filename_entry = Entry(input_frame, width=24)
self.wav_filename_entry.pack(fill=X)
self.wav_filename_entry.delete(0, END)
# Feature section
features_frame = LabelFrame(self, text="Features")
features_frame.pack(fill=X)
features_control_frame = Frame(features_frame)
features_control_frame.pack(fill=X)
load_features_button = Button(features_control_frame, text="Load", command=self.on_load_featurizer_model)
load_features_button.pack(side=RIGHT)
self.features_entry = Entry(features_control_frame, width=8)
self.features_entry.pack(fill=X)
self.features_entry.delete(0, END)
viz_frame = Frame(features_frame)
viz_frame.bind("%w", self.on_resized)
viz_frame.pack(fill=X)
self.features_figure = Figure(figsize=(5, 4), dpi=96)
self.subplot = self.features_figure.add_subplot(211)
self.subplot2 = self.features_figure.add_subplot(212)
self.canvas = FigureCanvasTkAgg(self.features_figure, master=viz_frame)
self.canvas.draw()
self.canvas.get_tk_widget().pack(side=TOP, fill=BOTH, expand=True)
# Output section
output_frame = LabelFrame(self, text="Output")
output_frame.pack(fill=BOTH, expand=True)
self.bind("<Configure>", self.on_resized)
self.output_text = Text(output_frame)
self.output_text.pack(fill=BOTH, padx=4, expand=True)
def on_resized(self, event):
    """ called on window resize; rescale the spectrogram width to match """
    if self.spectrogram_image is None:
        # BUG FIX: resize events can fire before a featurizer is loaded
        # (spectrogram_image starts as None); the original crashed with an
        # AttributeError in that case.
        return
    window_size = event.width
    box = self.spectrogram_image.get_window_extent()
    scale = (box.x1 - box.x0) / self.max_spectrogram_width
    self.max_spectrogram_width = int((window_size * 0.8) / scale)
    self.setup_spectrogram_image()
def load_featurizer_model(self, featurizer_path):
""" load the given compiled ELL featurizer for use in processing subsequent audio input """
if featurizer_path:
self.featurizer = featurizer.AudioTransform(featurizer_path, 40)
self.setup_spectrogram_image()
self.show_output("Feature input size: {}, output size: {}".format(
self.featurizer.input_size,
self.featurizer.output_size))
self.init_data()
def setup_spectrogram_image(self):
""" this need to be called if you load a new feature model, because the featurizer output size might have
changed. """
if self.featurizer:
dim = (self.featurizer.output_size, self.max_spectrogram_width)
self.spectrogram_image_data = np.zeros(dim, dtype=float)
self.subplot.clear()
self.spectrogram_image = self.subplot.imshow(self.spectrogram_image_data, vmin=self.min_value,
vmax=self.max_value, origin="lower", animated=True,
cmap=pyplot.get_cmap(self.colormap_name))
def accumulate_spectrogram_image(self, feature_data):
""" accumulate the feature data into the spectrogram image """
image_data = self.spectrogram_image_data
feature_data = np.reshape(feature_data, [-1, 1])
new_image = np.hstack((image_data, feature_data))[:, -image_data.shape[1]:]
image_data[:, :] = new_image
def set_spectrogram_image(self):
    """ update the spectrogram image and the min/max values """
    # 'with' guarantees the lock is released even if an exception occurs
    # while touching the shared image data; the original acquire/release
    # pair would leave the lock held on error, deadlocking the reader
    # thread (on_read_features).
    with self.lock:
        if self.update_minmax and self.show_spectrogram:
            min_value = np.min(self.spectrogram_image_data)
            max_value = np.max(self.spectrogram_image_data)
            if np.isfinite(min_value) and np.isfinite(max_value):
                self.min_value = min_value
                self.max_value = max_value
                eps = 0.1
                # keep a minimum color range so a flat image still renders
                if self.max_value - self.min_value < eps:
                    self.max_value = self.min_value + eps
            self.spectrogram_image.set_clim(self.min_value, self.max_value)
        self.spectrogram_image.set_data(self.spectrogram_image_data)
def on_load_featurizer_model(self):
""" called when user clicks the Load button for the feature model """
filename = self.features_entry.get()
filename = filename.strip('"')
self.featurizer_path = filename
self.get_sample_rate()
self.settings[self.FEATURIZER_PATH_KEY] = filename
self.save_settings()
self.stop()
self.load_featurizer_model(filename)
def set_entry(self, e, value):
s = str(value)
if e.get() != s:
e.delete(0, END)
e.insert(0, s)
def get_entry(self, e):
v = e.get()
return float(v)
def update_ui(self):
self.set_entry(self.wav_filename_entry, self.wav_filename)
self.set_entry(self.features_entry, self.featurizer_path)
self.set_entry(self.tau_up, make_vad.VAD_DEFAULTS["tau_up"])
self.set_entry(self.tau_down, make_vad.VAD_DEFAULTS["tau_down"])
self.set_entry(self.threshold_up, make_vad.VAD_DEFAULTS["threshold_up"])
self.set_entry(self.threshold_down, make_vad.VAD_DEFAULTS["threshold_down"])
self.set_entry(self.large_input, make_vad.VAD_DEFAULTS["large_input"])
self.set_entry(self.gain_att, make_vad.VAD_DEFAULTS["gain_att"])
self.set_entry(self.level_threshold, make_vad.VAD_DEFAULTS["level_threshold"])
def create_vad(self):
vad_options = {
"tau_up": self.get_entry(self.tau_up),
"tau_down": self.get_entry(self.tau_down),
"threshold_up": self.get_entry(self.threshold_up),
"threshold_down": self.get_entry(self.threshold_down),
"large_input": self.get_entry(self.large_input),
"gain_att": self.get_entry(self.gain_att),
"level_threshold": self.get_entry(self.level_threshold)
}
model = make_vad.make_vad("vad.ell", self.sample_rate, self.featurizer.input_size,
self.featurizer.output_size, vad_options)
self.vad = vad.VoiceActivityDetector(model)
def init_data(self):
""" initialize the spectrogram_image_data based on the newly loaded model info """
if self.featurizer:
dim = (self.featurizer.output_size, self.max_spectrogram_width)
self.spectrogram_image_data = np.zeros(dim, dtype=float)
if self.spectrogram_image is not None:
self.spectrogram_image.set_data(self.spectrogram_image_data)
def get_settings_file_name(self):
""" this app stores the various UI field values in a settings file in your temp folder
so you don't always have to specify the full command line options """
import tempfile
temp = tempfile.gettempdir()
self.settings_file_name = os.path.join(temp, "ELL", "Audio", "vad_test.json")
def load_settings(self):
""" load the previously saved settings from disk, if any """
self.settings = {}
try:
if os.path.isfile(self.settings_file_name):
with open(self.settings_file_name, "r") as f:
self.settings = json.load(f)
except:
self.show_output("error loading settings: {}".format(self.settings_file_name))
self.settings = {}
def save_settings(self):
""" save the current settings to disk """
settings_dir = os.path.dirname(self.settings_file_name)
if not os.path.isdir(settings_dir):
os.makedirs(settings_dir)
with open(self.settings_file_name, "w") as f:
json.dump(self.settings, f, indent=2)
def on_rec_button_click(self):
""" called when user clicks the record button, same button is used to "stop" recording. """
if self.rec_button["text"] == "Rec":
self.rec_button["text"] = "Stop"
self.play_button["text"] = "Play"
self.start_recording()
else:
self.rec_button["text"] = "Rec"
self.on_stopped()
def on_play_button_click(self):
""" called when user clicks the record button, same button is used to "stop" playback """
if self.play_button["text"] == "Play":
self.play_button["text"] = "Stop"
self.rec_button["text"] = "Rec"
self.on_play()
else:
self.play_button["text"] = "Play"
self.on_stopped()
def on_play(self):
""" called when user clicks the Play button """
filename = self.wav_filename_entry.get()
filename = filename.strip('"')
self.wav_filename = filename
self.settings[self.WAV_FILE_KEY] = filename
self.save_settings()
self.start_playing(filename)
def on_stop(self):
""" called when user clicks the Stop button """
self.reading_input = False
if self.wav_file:
self.wav_file.close()
self.wav_file = None
if self.read_input_thread:
self.read_input_thread.join()
self.read_input_thread = None
self.stop()
def on_stopped(self):
""" called when we reach the end of the wav file playback """
self.play_button["text"] = "Play"
self.on_stop()
self.subplot2.clear()
if (len(self.levels) > 0):
levels = np.array(self.levels)
levels /= np.max(levels)
signals = np.array(self.signals)
self.subplot2.plot(levels)
self.subplot2.plot(signals)
self.canvas.draw()
self.levels = []
self.signals = []
def stop(self):
    """ called when user clicks the stop button, or we reach the end of a wav file input """
    # close streams
    if self.animation:
        # halt the matplotlib UI-update timer first so it stops redrawing
        self.animation.event_source.stop()
        self.animation = None
    if self.microphone:
        self.microphone.close()
    if self.speaker:
        self.speaker.close()
    if self.wav_file:
        self.wav_file.close()
        self.wav_file = None
    # signals the background read loop (on_read_features) to exit
    self.reading_input = False
def get_wav_list(self):
if self.wav_filename and os.path.isfile(self.wav_filename):
full_path = os.path.abspath(self.wav_filename)
dir_name = os.path.dirname(full_path)
if not self.wav_file_list:
print("wav file name: {}".format(full_path))
print("looking for wav files in: {}".format(dir_name))
self.wav_file_list = [x for x in os.listdir(dir_name) if os.path.splitext(x)[1] == ".wav"]
self.wav_file_list.sort()
return self.wav_file_list
def select_wav_file(self, filename):
self.wav_filename = filename
# show the file in the UI
self.wav_filename_entry.delete(0, END)
if self.wav_filename:
self.wav_filename_entry.insert(0, self.wav_filename)
# and automatically play the file.
self.on_play()
def on_minus_key(self, event):
""" When user presses the plus button we reverse to the previous wav file in the current folder.
This way you can easily step through all the training wav files """
if self.get_wav_list():
i = self.wav_file_list.index(os.path.basename(self.wav_filename))
if i - 1 >= 0:
next_wav_file = self.wav_file_list[i - 1]
dir_name = os.path.dirname(self.wav_filename)
self.select_wav_file(os.path.join(dir_name, next_wav_file))
def on_plus_key(self, event):
""" When user presses the plus button we advance to the next wav file in the current folder.
This way you can easily step through all the training wav files """
if self.get_wav_list():
i = self.wav_file_list.index(os.path.basename(self.wav_filename))
if i + 1 < len(self.wav_file_list):
next_wav_file = self.wav_file_list[i + 1]
dir_name = os.path.dirname(self.wav_filename)
self.select_wav_file(os.path.join(dir_name, next_wav_file))
def clear_output(self):
    """ remove the oldest line of output; driven by the output-clear timeout callback """
    # delete only the first text line, so old messages age out gradually
    self.output_text.delete(1.0, 2.0)
def process_output(self):
    """ show output that was queued by background thread """
    # Swap the queue out under the lock; 'with' ensures the lock is
    # released even if an exception occurs (a raw acquire/release pair
    # would leave it held and deadlock the producer thread).
    with self.lock:
        messages = self.message_queue
        self.message_queue = []
    for msg in messages:
        self.show_output(msg)
def show_output(self, message):
""" show output message, or queue it if we are on a background thread """
if self.main_thread != get_ident():
self.message_queue += [message]
return
for line in str(message).split('\n'):
self.output_text.insert(END, "{}\n".format(line))
self.output_text.see("end") # scroll to end
self.after(self.output_clear_time, self.clear_output)
def start_playing(self, filename):
"""
Play a wav file, and classify the audio. Note we use a background thread to read the
wav file and we setup a UI animation function to draw the sliding spectrogram image, this way
the UI update doesn't interfere with the smoothness of the audio playback
"""
self.stop()
self.create_vad()
self.reading_input = False
self.wav_file = wav_reader.WavReader(self.sample_rate, self.channels, auto_scale=self.auto_scale)
self.wav_file.open(filename, self.featurizer.input_size, self.speaker)
self.setup_spectrogram_image()
def update_func(frame_index):
self.process_output()
if not self.reading_input:
self.after(1, self.on_stopped)
self.set_spectrogram_image()
return (self.spectrogram_image,)
if self.animation:
self.animation.event_source.stop()
self.reading_input = True
# Start animation timer for updating the UI (e.g. spectrogram image) (30 fps is usually fine)
self.animation = animation.FuncAnimation(self.features_figure, update_func, interval=33, blit=True)
# start background thread to read and classify the audio.
self.featurizer.open(self.wav_file)
self.read_input_thread = Thread(target=self.on_read_features, args=())
self.read_input_thread.daemon = True
self.read_input_thread.start()
def start_recording(self):
""" Start recording audio from the microphone and classify the audio. Note we use a background thread to
process the audio and we setup a UI animation function to draw the sliding spectrogram image, this way
the UI update doesn't interfere with the smoothness of the microphone readings """
if self.microphone is None:
self.microphone = microphone.Microphone(True, False)
self.stop()
self.create_vad()
num_channels = 1
self.microphone.open(self.featurizer.input_size, self.sample_rate, num_channels, self.input_device)
def update_func(frame_index):
# this is an animation callback to update the UI every 33 milliseconds.
self.process_output()
self.set_spectrogram_image()
if not self.reading_input:
self.after(1, self.on_stopped)
return (self.spectrogram_image,)
if self.animation:
self.animation.event_source.stop()
self.reading_input = True
# Start animation timer for updating the UI (e.g. spectrogram image) (30 fps is usually fine)
self.animation = animation.FuncAnimation(self.features_figure, update_func, interval=33, blit=True)
# start background thread to read and classify the recorded audio.
self.featurizer.open(self.microphone)
self.read_input_thread = Thread(target=self.on_read_features, args=())
self.read_input_thread.daemon = True
self.read_input_thread.start()
def on_read_features(self):
    """ background thread entry point: read feature rows in a loop, run the
    VAD on each one and accumulate levels/signals for later plotting """
    try:
        while self.reading_input and self.featurizer:
            feature_data = self.featurizer.read()
            if feature_data is None:
                break  # eof
            signal = self.vad.predict(feature_data)
            level = np.sum(feature_data)
            self.levels += [level]
            self.signals += [signal]
            if self.show_spectrogram:
                # only take the lock when we actually touch shared state;
                # 'with' guarantees release even on error.
                with self.lock:
                    self.accumulate_spectrogram_image(feature_data)
    except Exception:
        # 'except Exception' (not bare except) lets KeyboardInterrupt /
        # SystemExit propagate; the original also shadowed the 'traceback'
        # module name with its local variable.
        error_type, value, tb = sys.exc_info()
        print("### Exception reading input: " + str(error_type) + ": " + str(value) + " " + str(tb))
        while tb:
            print(tb.tb_frame.f_code)
            tb = tb.tb_next
    self.reading_input = False
def main(featurizer, input_device, wav_file, sample_rate, auto_scale):
    """ Main function to create root UI and AudioDemo object, then run the main UI loop """
    root = tk.Tk()
    root.geometry("800x800")
    app = VadTest(featurizer, input_device, wav_file, sample_rate, auto_scale)
    # mirror the app's wav-file navigation keys at the top-level window
    root.bind("+", app.on_plus_key)
    root.bind("-", app.on_minus_key)
    while True:
        try:
            root.mainloop()
            break
        except UnicodeDecodeError:
            # NOTE(review): presumably a workaround for tkinter raising
            # UnicodeDecodeError from mainloop on some platforms/events;
            # the loop restarts the event loop instead of exiting — confirm.
            pass
if __name__ == "__main__":
    arg_parser = argparse.ArgumentParser(description="Test a feature model and optional classifier in a handy GUI app")
    # options
    arg_parser.add_argument("--featurizer", "-m", help="Compiled ELL model to use for generating features",
                            default=None)
    arg_parser.add_argument("--input_device", "-d", help="Index of input device (see --list_devices)",
                            default=1, type=int)
    arg_parser.add_argument("--list_devices", help="List available input devices", action="store_true")
    arg_parser.add_argument("--wav_file", help="Provide an input wav file to test", default=None)
    # typo fix in user-facing help text: "autio" -> "audio"
    arg_parser.add_argument("--auto_scale", help="Auto-scale audio input to range [-1,1]", action="store_true")
    arg_parser.add_argument("--sample_rate", type=int, help="The sample rate that featurizer is setup to use",
                            default=16000)
    args = arg_parser.parse_args()
    if args.list_devices:
        microphone.list_devices()
    else:
        main(args.featurizer, args.input_device, args.wav_file, args.sample_rate, args.auto_scale)
|
runner.py | #!/usr/bin/env python
'''
Simple test runner
See settings.py file for options/params. Edit as needed.
These tests can be run in parallel using nose, for example
nosetests --processes=4 -v -s tests/runner.py
will use 4 processes. To install nose do something like
|pip install nose| or |sudo apt-get install python-nose|.
'''
from subprocess import Popen, PIPE, STDOUT
import os, unittest, tempfile, shutil, time, inspect, sys, math, glob, tempfile, re, difflib, webbrowser, hashlib, threading, platform, BaseHTTPServer, multiprocessing, functools
if len(sys.argv) == 1:
print '''
==============================================================================
Running the main part of the test suite. Don't forget to run the other parts!
sanity - tests for first run, etc., modifies ~/.emscripten
benchmark - run before and after each set of changes before pushing to
master, verify no regressions
browser - runs pages in a web browser
To run one of those parts, do something like
python tests/runner.py sanity
To run a specific set of tests, you can do things like
python tests/runner.py o1
(that runs the o1 (-O1) tests). You can run individual tests with
python tests/runner.py test_hello_world
Combinations work too, for example
python tests/runner.py browser.test_sdl_image
==============================================================================
'''
time.sleep(2)
# Setup
__rootpath__ = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
def path_from_root(*pathelems):
    # build an absolute path rooted at the emscripten repository directory
    return os.path.join(__rootpath__, *pathelems)
sys.path += [path_from_root('')]
import tools.shared
from tools.shared import *
# Sanity check for config
try:
assert COMPILER_OPTS != None
except:
raise Exception('Cannot find "COMPILER_OPTS" definition. Is %s set up properly? You may need to copy the template from settings.py into it.' % EM_CONFIG)
# Core test runner class, shared between normal tests and benchmarks
class RunnerCore(unittest.TestCase):
  """Base harness for Emscripten tests.

  Provides helpers to compile C/C++ to LLVM bitcode, translate the bitcode
  to JavaScript, run the result in the configured JS engines, and assert on
  the captured output. Subclasses (the normal suite and the benchmark
  runner) are expected to provide `emcc_args`.
  """

  # When EM_SAVE_DIR is set, tests run in (and leave artifacts in)
  # CANONICAL_TEMP_DIR instead of a fresh per-test temporary directory.
  save_dir = os.environ.get('EM_SAVE_DIR')
  # Set to 1 to copy each test's generated .js into TEMP_DIR for inspection.
  save_JS = 0
  stderr_redirect = STDOUT # This avoids cluttering the test runner output, which is stderr too, with compiler warnings etc.
                           # Change this to None to get stderr reporting, for debugging purposes

  def setUp(self):
    # Each test starts from default Settings; rebind the module-level global
    # so later reads see the tools.shared.Settings object after reset().
    global Settings
    Settings.reset()
    Settings = tools.shared.Settings

    self.banned_js_engines = []
    if not self.save_dir:
      dirname = tempfile.mkdtemp(prefix='emscripten_test_' + self.__class__.__name__ + '_', dir=TEMP_DIR)
    else:
      dirname = CANONICAL_TEMP_DIR
    if not os.path.exists(dirname):
      os.makedirs(dirname)
    self.working_dir = dirname
    os.chdir(dirname)

  def tearDown(self):
    if self.save_JS:
      # Keep generated JS around for post-mortem inspection, renamed after
      # the test id so different tests do not overwrite each other.
      for name in os.listdir(self.get_dir()):
        if name.endswith(('.o.js', '.cc.js')):
          suff = '.'.join(name.split('.')[-2:])
          shutil.copy(os.path.join(self.get_dir(), name),
                      os.path.join(TEMP_DIR, self.id().replace('__main__.', '').replace('.test_', '.')+'.'+suff))
    if not self.save_dir:
      # rmtree() fails on Windows if the current working directory is inside the tree.
      os.chdir(os.path.join(self.get_dir(), '..'))
      shutil.rmtree(self.get_dir())

  def skip(self, why):
    # Report-and-continue skip: prints a marker to stderr instead of using
    # unittest skip machinery. Callers typically `return self.skip(...)`.
    print >> sys.stderr, '<skipping: %s> ' % why,

  def get_dir(self):
    # Per-test working directory created in setUp().
    return self.working_dir

  def get_shared_library_name(self, linux_name):
    # Translate a Linux shared-object filename to the host platform's
    # convention (falls back to the Linux name with a warning elsewhere).
    if platform.system() == 'Linux':
      return linux_name
    elif platform.system() == 'Darwin':
      return linux_name.replace('.so', '') + '.dylib'
    else:
      print >> sys.stderr, 'get_shared_library_name needs to be implemented on %s' % platform.system()
      return linux_name

  def get_stdout_path(self):
    # Path of the stdout capture file used by run_generated_code().
    return os.path.join(self.get_dir(), 'stdout')

  def prep_ll_run(self, filename, ll_file, force_recompile=False, build_ll_hook=None):
    # Place ll_file (bitcode .bc/.o, or textual .ll) at filename + '.o' /
    # '.o.ll', then optionally re-run LLVM opts and/or a caller-supplied
    # hook over it. The hook may return truthy to be invoked a second time
    # after the optimization passes ("post" phase).
    if ll_file.endswith(('.bc', '.o')):
      if ll_file != filename + '.o':
        shutil.copy(ll_file, filename + '.o')
      Building.llvm_dis(filename)
    else:
      shutil.copy(ll_file, filename + '.o.ll')

    #force_recompile = force_recompile or os.stat(filename + '.o.ll').st_size > 50000 # if the file is big, recompile just to get ll_opts # Recompiling just for dfe in ll_opts is too costly

    if Building.LLVM_OPTS or force_recompile or build_ll_hook:
      Building.ll_opts(filename)
      if build_ll_hook:
        need_post = build_ll_hook(filename)
      Building.llvm_as(filename)
      shutil.move(filename + '.o.ll', filename + '.o.ll.pre') # for comparisons later
      if Building.LLVM_OPTS:
        Building.llvm_opts(filename)
      Building.llvm_dis(filename)
      if build_ll_hook and need_post:
        build_ll_hook(filename)
        Building.llvm_as(filename)
        shutil.move(filename + '.o.ll', filename + '.o.ll.post') # for comparisons later
        Building.llvm_dis(filename)

  # Generate JS from ll, and optionally modify the generated JS with a post_build function. Note
  # that post_build is called on unoptimized JS, so we send it to emcc (otherwise, if run after
  # emcc, it would not apply on the optimized/minified JS)
  def ll_to_js(self, filename, extra_emscripten_args, post_build):
    # post_build is either a single string of Python source defining a
    # process(filename) function, or a (pre, post) pair of such strings.
    if type(post_build) in (list, tuple):
      post1, post2 = post_build
    else:
      post1 = post_build
      post2 = None

    def run_post(post):
      if not post: return
      # Py2 exec statement: `post` defines process() in this local scope.
      exec post in locals()
      shutil.copyfile(filename + '.o.js', filename + '.o.js.prepost.js')
      process(filename + '.o.js')

    if self.emcc_args is None:
      # Direct emscripten.py path (no emcc driver).
      Building.emscripten(filename, append_ext=True, extra_args=extra_emscripten_args)
      run_post(post1)
      run_post(post2)
    else:
      # emcc path: the "pre" transform is handed to emcc as a --js-transform
      # script so it runs on the unoptimized JS.
      transform_args = []
      if post1:
        transform_filename = os.path.join(self.get_dir(), 'transform.py')
        transform = open(transform_filename, 'w')
        transform.write('''
import sys
sys.path += [%r]
''' % path_from_root(''))
        transform.write(post1)
        transform.write('''
process(sys.argv[1])
''')
        transform.close()
        transform_args = ['--js-transform', "python %s" % transform_filename]
      Building.emcc(filename + '.o.ll', Settings.serialize() + self.emcc_args + transform_args, filename + '.o.js')
      run_post(post2)

  # Build JavaScript code from source code
  def build(self, src, dirname, filename, output_processor=None, main_file=None, additional_files=[], libraries=[], includes=[], build_ll_hook=None, extra_emscripten_args=[], post_build=None):
    # Compiles `src` (a source string, or a source directory when main_file
    # is given) all the way to JavaScript at filename + '.o.js'.
    # NOTE(review): the list defaults are shared mutable objects; they are
    # only rebound here, never mutated in place, so this is safe but fragile.
    Building.pick_llvm_opts(3) # pick llvm opts here, so we include changes to Settings in the test case code

    # Copy over necessary files for compiling the source
    if main_file is None:
      f = open(filename, 'w')
      f.write(src)
      f.close()
      final_additional_files = []
      for f in additional_files:
        final_additional_files.append(os.path.join(dirname, os.path.basename(f)))
        shutil.copy(f, final_additional_files[-1])
      additional_files = final_additional_files
    else:
      # copy whole directory, and use a specific main .cpp file
      # (rmtree() fails on Windows if the current working directory is inside the tree.)
      if os.getcwd().startswith(os.path.abspath(dirname)):
        os.chdir(os.path.join(dirname, '..'))
      shutil.rmtree(dirname)
      shutil.copytree(src, dirname)
      shutil.move(os.path.join(dirname, main_file), filename)
      # the additional files were copied; alter additional_files to point to their full paths now
      additional_files = map(lambda f: os.path.join(dirname, f), additional_files)
      os.chdir(self.get_dir())

    # C++ => LLVM binary
    for f in [filename] + additional_files:
      try:
        # Make sure we notice if compilation steps failed
        os.remove(f + '.o')
      except:
        pass
      args = [Building.COMPILER, '-emit-llvm'] + COMPILER_OPTS + Building.COMPILER_TEST_OPTS + \
             ['-I', dirname, '-I', os.path.join(dirname, 'include')] + \
             map(lambda include: '-I' + include, includes) + \
             ['-c', f, '-o', f + '.o']
      output = Popen(args, stdout=PIPE, stderr=self.stderr_redirect).communicate()[0]
      assert os.path.exists(f + '.o'), 'Source compilation error: ' + output

    # Link all files
    if len(additional_files) + len(libraries) > 0:
      shutil.move(filename + '.o', filename + '.o.alone')
      Building.link([filename + '.o.alone'] + map(lambda f: f + '.o', additional_files) + libraries,
               filename + '.o')
      if not os.path.exists(filename + '.o'):
        # `output` here is the last compile's output, shown for context.
        print "Failed to link LLVM binaries:\n\n", output
        raise Exception("Linkage error");

    # Finalize
    self.prep_ll_run(filename, filename + '.o', build_ll_hook=build_ll_hook)

    # BC => JS
    self.ll_to_js(filename, extra_emscripten_args, post_build)

    if output_processor is not None:
      output_processor(open(filename + '.o.js').read())

  def run_generated_code(self, engine, filename, args=[], check_timeout=True):
    # Runs the generated JS in `engine`, capturing stdout/stderr to files
    # (PIPE could fill up and deadlock on large outputs), and returns the
    # combined text. Also enforces that no JS strict-mode warnings appeared.
    stdout = os.path.join(self.get_dir(), 'stdout') # use files, as PIPE can get too full and hang us
    stderr = os.path.join(self.get_dir(), 'stderr')
    try:
      cwd = os.getcwd()
    except:
      cwd = None
    os.chdir(self.get_dir())
    run_js(filename, engine, args, check_timeout, stdout=open(stdout, 'w'), stderr=open(stderr, 'w'))
    if cwd is not None:
      os.chdir(cwd)
    ret = open(stdout, 'r').read() + open(stderr, 'r').read()
    assert 'strict warning:' not in ret, 'We should pass all strict mode checks: ' + ret
    return ret

  def build_native(self, filename):
    # Native (clang) build of the same source, for comparison runs.
    Popen([CLANG, '-O2', filename, '-o', filename+'.native'], stdout=PIPE).communicate()[0]

  def run_native(self, filename, args):
    Popen([filename+'.native'] + args, stdout=PIPE).communicate()[0]

  def assertIdentical(self, x, y):
    # Exact string equality, with a unified diff in the failure message.
    if x != y:
      raise Exception("Expected to have '%s' == '%s', diff:\n\n%s" % (
        limit_size(x), limit_size(y),
        limit_size(''.join([a.rstrip()+'\n' for a in difflib.unified_diff(x.split('\n'), y.split('\n'), fromfile='expected', tofile='actual')]))
      ))

  def assertContained(self, values, string, additional_info=''):
    # Passes if ANY of `values` is a substring of `string`. Both may be
    # lazily supplied as callables.
    if type(values) not in [list, tuple]: values = [values]
    for value in values:
      if type(string) is not str: string = string()
      if value in string: return # success
    raise Exception("Expected to find '%s' in '%s', diff:\n\n%s\n%s" % (
      limit_size(values[0]), limit_size(string),
      limit_size(''.join([a.rstrip()+'\n' for a in difflib.unified_diff(values[0].split('\n'), string.split('\n'), fromfile='expected', tofile='actual')])),
      additional_info
    ))

  def assertNotContained(self, value, string):
    # Fails if `value` occurs in `string`; both may be lazily supplied.
    if type(value) is not str: value = value() # lazy loading
    if type(string) is not str: string = string()
    if value in string:
      raise Exception("Expected to NOT find '%s' in '%s', diff:\n\n%s" % (
        limit_size(value), limit_size(string),
        limit_size(''.join([a.rstrip()+'\n' for a in difflib.unified_diff(value.split('\n'), string.split('\n'), fromfile='expected', tofile='actual')]))
      ))

  # Class-level cache of built support libraries, keyed by name|compiler.
  # Shared across all tests in the process; None disables caching entirely.
  library_cache = {}

  def get_build_dir(self):
    # Lazily-created 'building' subdirectory of the test's working dir.
    ret = os.path.join(self.get_dir(), 'building')
    if not os.path.exists(ret):
      os.makedirs(ret)
    return ret

  def get_library(self, name, generated_libs, configure=['sh', './configure'], configure_args=[], make=['make'], make_args=['-j', '2'], cache=True):
    # Builds (or restores from library_cache) a third-party support library
    # and returns the list of generated bitcode files.
    build_dir = self.get_build_dir()
    output_dir = self.get_dir()

    cache_name = name + '|' + Building.COMPILER
    if self.library_cache is not None:
      if cache and self.library_cache.get(cache_name):
        print >> sys.stderr, '<load build from cache> ',
        generated_libs = []
        for basename, contents in self.library_cache[cache_name]:
          bc_file = os.path.join(build_dir, basename)
          f = open(bc_file, 'wb')
          f.write(contents)
          f.close()
          generated_libs.append(bc_file)
        return generated_libs

    print >> sys.stderr, '<building and saving into cache> ',

    return Building.build_library(name, build_dir, output_dir, generated_libs, configure, configure_args, make, make_args, self.library_cache, cache_name,
                                  copy_project=True)

  def clear(self):
    # Remove everything from the working directory, and (when EMCC_DEBUG is
    # set) also clear the emscripten temp directory used for debug output.
    for name in os.listdir(self.get_dir()):
      try_delete(name)
    emcc_debug = os.environ.get('EMCC_DEBUG')
    if emcc_debug:
      for name in os.listdir(EMSCRIPTEN_TEMP_DIR):
        try_delete(os.path.join(EMSCRIPTEN_TEMP_DIR, name))
###################################################################################################
# Map bare 'test_xyz' arguments to 'default.test_xyz' so unittest finds them
# in the default suite. Idiomatic list comprehension instead of map+lambda
# (also keeps sys.argv a real list under Python 3).
sys.argv = ['default.' + arg if arg.startswith('test_') else arg for arg in sys.argv]
if 'benchmark' not in str(sys.argv) and 'sanity' not in str(sys.argv) and 'browser' not in str(sys.argv):
# Tests
print "Running Emscripten tests..."
class T(RunnerCore): # Short name, to make it more fun to use manually on the commandline
## Does a complete test - builds, runs, checks output, etc.
def do_run(self, src, expected_output, args=[], output_nicerizer=None, output_processor=None, no_build=False, main_file=None, additional_files=[], js_engines=None, post_build=None, basename='src.cpp', libraries=[], includes=[], force_c=False, build_ll_hook=None, extra_emscripten_args=[]):
  """Build `src` and run it in every usable JS engine.

  Asserts that `expected_output` (a string or list of candidate strings)
  is contained in the program's combined stdout+stderr, and that no
  'ERROR' line appears. `args` are the program's command-line arguments;
  `output_nicerizer` may normalize engine output before the assertions;
  `force_c` (or a '.c' main_file suffix) switches to the C compiler.
  All other parameters are forwarded to build().
  """
  # FIX: the original condition was
  #   force_c or (main_file is not None and main_file[-2:]) == '.c'
  # which compared the result of the `and` expression against '.c'. It
  # happened to behave correctly only because False == '.c' is False; the
  # parenthesization below is what was actually intended.
  if force_c or (main_file is not None and main_file[-2:] == '.c'):
    basename = 'src.c'
    Building.COMPILER = to_cc(Building.COMPILER)

  dirname = self.get_dir()
  filename = os.path.join(dirname, basename)
  if not no_build:
    self.build(src, dirname, filename, main_file=main_file, additional_files=additional_files, libraries=libraries, includes=includes,
               build_ll_hook=build_ll_hook, extra_emscripten_args=extra_emscripten_args, post_build=post_build)

  # Run in both JavaScript engines, if optimizing - significant differences there (typed arrays)
  if js_engines is None:
    js_engines = JS_ENGINES
  if Settings.USE_TYPED_ARRAYS:
    js_engines = filter(lambda engine: engine != V8_ENGINE, js_engines) # V8 issue 1822
  js_engines = filter(lambda engine: engine not in self.banned_js_engines, js_engines)
  if len(js_engines) == 0: return self.skip('No JS engine present to run this test with. Check %s and settings.py and the paths therein.' % EM_CONFIG)
  for engine in js_engines:
    js_output = self.run_generated_code(engine, filename + '.o.js', args)
    if output_nicerizer is not None:
      js_output = output_nicerizer(js_output)
    self.assertContained(expected_output, js_output)
    self.assertNotContained('ERROR', js_output)

  #shutil.rmtree(dirname) # TODO: leave no trace in memory. But for now nice for debugging
# No building - just process an existing .ll file (or .bc, which we turn into .ll)
def do_ll_run(self, ll_file, expected_output=None, args=[], js_engines=None, output_nicerizer=None, post_build=None, force_recompile=False, build_ll_hook=None, extra_emscripten_args=[]):
  """Run a pre-built LLVM IR/bitcode file through the JS pipeline and
  verify its output, skipping the source-compilation stage entirely."""
  target = os.path.join(self.get_dir(), 'src.cpp')
  # Stage the IR at the canonical location, translate it to JS, then reuse
  # do_run purely for the execute-and-assert half (no_build=True).
  self.prep_ll_run(target, ll_file, force_recompile, build_ll_hook)
  self.ll_to_js(target, extra_emscripten_args, post_build)
  # post_build was already applied in ll_to_js above, so it is deliberately
  # not passed through again here.
  self.do_run(None, expected_output, args,
              no_build=True,
              js_engines=js_engines,
              output_nicerizer=output_nicerizer,
              post_build=None)
def test_hello_world(self):
  """Smoke test: the canonical hello-world program prints its greeting."""
  self.do_run('''
    #include <stdio.h>
    int main()
    {
      printf("hello, world!\\n");
      return 0;
    }
  ''', 'hello, world!')
def test_intvars(self):
  """Integer locals: arithmetic, shifts, bitwise ops, comparisons and
  printf formatting must all match native output exactly."""
  src = '''
    #include <stdio.h>
    int global = 20;
    int *far;
    int main()
    {
      int x = 5;
      int y = x+17;
      int z = (y-1)/2; // Should stay an integer after division!
      y += 1;
      int w = x*3+4;
      int k = w < 15 ? 99 : 101;
      far = &k;
      *far += global;
      int i = k > 100; // Should be an int, not a bool!
      int j = i << 6;
      j >>= 1;
      j = j ^ 5;
      int h = 1;
      h |= 0;
      int p = h;
      p &= 0;
      printf("*%d,%d,%d,%d,%d,%d,%d,%d,%d*\\n", x, y, z, w, k, i, j, h, p);

      long hash = -1;
      size_t perturb;
      int ii = 0;
      for (perturb = hash; ; perturb >>= 5) {
        printf("%d:%d", ii, perturb);
        ii++;
        if (ii == 9) break;
        printf(",");
      }
      printf("*\\n");
      printf("*%.1d,%.2d*\\n", 56, 9);

      // Fixed-point math on 64-bit ints. Tricky to support since we have no 64-bit shifts in JS
      {
        struct Fixed {
          static int Mult(int a, int b) {
            return ((long long)a * (long long)b) >> 16;
          }
        };
        printf("fixed:%d\\n", Fixed::Mult(150000, 140000));
      }

      printf("*%ld*%p\\n", (long)21, &hash); // The %p should not enter an infinite loop!
      return 0;
    }
  '''
  self.do_run(src, '*5,23,10,19,121,1,37,1,0*\n0:-1,1:134217727,2:4194303,3:131071,4:4095,5:127,6:3,7:0,8:0*\n*56,09*\nfixed:320434\n*21*')
def test_sintvars(self):
  """Signed/unsigned pointer-difference arithmetic: with CORRECT_SIGNS the
  unsigned-short truncation must match native C behaviour (no overflow
  correction should be needed)."""
  Settings.CORRECT_SIGNS = 1 # Relevant to this test
  src = '''
    #include <stdio.h>
    struct S {
      char *match_start;
      char *strstart;
    };
    int main()
    {
      struct S _s;
      struct S *s = &_s;
      unsigned short int sh;

      s->match_start = (char*)32522;
      s->strstart = (char*)(32780);
      printf("*%d,%d,%d*\\n", (int)s->strstart, (int)s->match_start, (int)(s->strstart - s->match_start));

      sh = s->strstart - s->match_start;
      printf("*%d,%d*\\n", sh, sh>>7);

      s->match_start = (char*)32999;
      s->strstart = (char*)(32780);
      printf("*%d,%d,%d*\\n", (int)s->strstart, (int)s->match_start, (int)(s->strstart - s->match_start));

      sh = s->strstart - s->match_start;
      printf("*%d,%d*\\n", sh, sh>>7);
    }
  '''
  output = '*32780,32522,258*\n*258,2*\n*32780,32999,-219*\n*65317,510*'
  Settings.CORRECT_OVERFLOWS = 0 # We should not need overflow correction to get this right
  self.do_run(src, output, force_c=True)
def test_i64(self):
  """64-bit integer support (typed-arrays mode 2 only): shifts, bitwise
  ops, comparisons, globals, parameter passing, double mixing, and
  i32/i64 comparison semantics across four separate programs."""
  if Settings.USE_TYPED_ARRAYS != 2: return self.skip('i64 mode 1 requires ta2')

  # Program 1: constant and arithmetic shifts on signed/unsigned i64.
  src = '''
    #include <stdio.h>
    int main()
    {
      long long a = 0x2b00505c10;
      long long b = a >> 29;
      long long c = a >> 32;
      long long d = a >> 34;
      printf("*%Ld,%Ld,%Ld,%Ld*\\n", a, b, c, d);
      unsigned long long ua = 0x2b00505c10;
      unsigned long long ub = ua >> 29;
      unsigned long long uc = ua >> 32;
      unsigned long long ud = ua >> 34;
      printf("*%Ld,%Ld,%Ld,%Ld*\\n", ua, ub, uc, ud);

      long long x = 0x0000def123450789ULL; // any bigger than this, and we
      long long y = 0x00020ef123456089ULL; // start to run into the double precision limit!
      printf("*%Ld,%Ld,%Ld,%Ld,%Ld*\\n", x, y, x | y, x & y, x ^ y, x >> 2, y << 2);

      printf("*");
      long long z = 13;
      int n = 0;
      while (z > 1) {
        printf("%.2f,", (float)z); // these must be integers!
        z = z >> 1;
        n++;
      }
      printf("*%d*\\n", n);
      return 0;
    }
  '''
  self.do_run(src, '*184688860176,344,43,10*\n*184688860176,344,43,10*\n*245127260211081,579378795077769,808077213656969,16428841631881,791648372025088*\n*13.00,6.00,3.00,*3*')

  # Program 2: i64 comparisons, returns, reference params, global structs
  # containing i64, constant and non-constant shifts, and i64*double math.
  src = r'''
    #include <time.h>
    #include <stdio.h>
    #include <stdint.h>

    int64_t returner1() { return 0x0000def123450789ULL; }
    int64_t returner2(int test) {
      while (test > 10) test /= 2; // confuse the compiler so it doesn't eliminate this function
      return test > 5 ? 0x0000def123450123ULL : 0ULL;
    }

    void modifier1(int64_t t) {
      t |= 12;
      printf("m1: %Ld\n", t);
    }
    void modifier2(int64_t &t) {
      t |= 12;
    }

    int truthy() {
      int x = time(0);
      while (x > 10) {
        x |= 7;
        x /= 2;
      }
      return x < 3;
    }

    struct IUB {
       int c;
       long long d;
    };

    IUB iub[] = {
       { 55, 17179869201 },
       { 122, 25769803837 },
    };

    int main(int argc, char **argv)
    {
      int64_t x1 = 0x1234def123450789ULL;
      int64_t x2 = 0x1234def123450788ULL;
      int64_t x3 = 0x1234def123450789ULL;
      printf("*%Ld\n%d,%d,%d,%d,%d\n%d,%d,%d,%d,%d*\n", x1, x1==x2, x1<x2, x1<=x2, x1>x2, x1>=x2, // note: some rounding in the printing!
                                                            x1==x3, x1<x3, x1<=x3, x1>x3, x1>=x3);
      printf("*%Ld*\n", returner1());
      printf("*%Ld*\n", returner2(30));

      uint64_t maxx = -1ULL;
      printf("*%Lu*\n*%Lu*\n", maxx, maxx >> 5);

      // Make sure params are not modified if they shouldn't be
      int64_t t = 123;
      modifier1(t);
      printf("*%Ld*\n", t);
      modifier2(t);
      printf("*%Ld*\n", t);

      // global structs with i64s
      printf("*%d,%Ld*\n*%d,%Ld*\n", iub[0].c, iub[0].d, iub[1].c, iub[1].d);

      // Bitshifts
      {
        int64_t a = -1;
        int64_t b = a >> 29;
        int64_t c = a >> 32;
        int64_t d = a >> 34;
        printf("*%Ld,%Ld,%Ld,%Ld*\n", a, b, c, d);
        uint64_t ua = -1;
        int64_t ub = ua >> 29;
        int64_t uc = ua >> 32;
        int64_t ud = ua >> 34;
        printf("*%Ld,%Ld,%Ld,%Ld*\n", ua, ub, uc, ud);
      }

      // Nonconstant bitshifts
      {
        int64_t a = -1;
        int64_t b = a >> (29 - argc + 1);
        int64_t c = a >> (32 - argc + 1);
        int64_t d = a >> (34 - argc + 1);
        printf("*%Ld,%Ld,%Ld,%Ld*\n", a, b, c, d);
        uint64_t ua = -1;
        int64_t ub = ua >> (29 - argc + 1);
        int64_t uc = ua >> (32 - argc + 1);
        int64_t ud = ua >> (34 - argc + 1);
        printf("*%Ld,%Ld,%Ld,%Ld*\n", ua, ub, uc, ud);
      }

      // Math mixtures with doubles
      {
        uint64_t a = 5;
        double b = 6.8;
        uint64_t c = a * b;
        printf("*prod:%llu*\n*%d,%d,%d*\n", c, (int)&a, (int)&b, (int)&c); // printing addresses prevents optimizations
      }

      // Basic (rounded, for now) math. Just check compilation.
      int64_t a = 0x1234def123450789ULL;
      a--; if (truthy()) a--; // confuse optimizer
      int64_t b = 0x1234000000450789ULL;
      b++; if (truthy()) b--; // confuse optimizer
      printf("*%Ld,%Ld,%Ld,%Ld*\n", (a+b)/5000, (a-b)/5000, (a*3)/5000, (a/5)/5000);

      return 0;
    }
  '''
  self.do_run(src, '*1311918518731868041\n' +
                   '0,0,0,1,1\n' +
                   '1,0,1,0,1*\n' +
                   '*245127260211081*\n' +
                   '*245127260209443*\n' +
                   '*18446744073709551615*\n' +
                   '*576460752303423487*\n' +
                   'm1: 127\n' +
                   '*123*\n' +
                   '*127*\n' +
                   '*55,17179869201*\n' +
                   '*122,25769803837*\n' +
                   '*-1,-1,-1,-1*\n' +
                   '*-1,34359738367,4294967295,1073741823*\n' +
                   '*-1,-1,-1,-1*\n' +
                   '*-1,34359738367,4294967295,1073741823*\n' +
                   '*prod:34*')

  # Program 3: ordering operators against numeric_limits<long long>.
  src = r'''
    #include <stdio.h>
    #include <limits>

    int main()
    {
        long long i,j,k;

        i = 0;
        j = -1,
        k = 1;

        printf( "*\n" );
        printf( "%s\n", i > j ? "Ok": "Fail" );
        printf( "%s\n", k > i ? "Ok": "Fail" );
        printf( "%s\n", k > j ? "Ok": "Fail" );
        printf( "%s\n", i < j ? "Fail": "Ok" );
        printf( "%s\n", k < i ? "Fail": "Ok" );
        printf( "%s\n", k < j ? "Fail": "Ok" );
        printf( "%s\n", (i-j) >= k ? "Ok": "Fail" );
        printf( "%s\n", (i-j) <= k ? "Ok": "Fail" );
        printf( "%s\n", i > std::numeric_limits<long long>::min() ? "Ok": "Fail" );
        printf( "%s\n", i < std::numeric_limits<long long>::max() ? "Ok": "Fail" );
        printf( "*\n" );
    }
  '''
  self.do_run(src, '*\nOk\nOk\nOk\nOk\nOk\nOk\nOk\nOk\nOk\nOk\n*')

  # stuff that also needs sign corrections

  Settings.CORRECT_SIGNS = 1

  # Program 4: i32 vs i64 equality with mixed signedness.
  src = r'''
    #include <stdio.h>
    #include <stdint.h>

    int main()
    {
      // i32 vs i64
      int32_t small = -1;
      int64_t large = -1;
      printf("*%d*\n", small == large);
      small++;
      printf("*%d*\n", small == large);
      uint32_t usmall = -1;
      uint64_t ularge = -1;
      printf("*%d*\n", usmall == ularge);
      return 0;
    }
  '''
  self.do_run(src, '*1*\n*0*\n*0*\n')
def test_i64_b(self):
  """i64 multiply/add of second-resolution timestamps (the SpiderMonkey
  PRMJ time path), checking low/high 32-bit word extraction."""
  if Settings.USE_TYPED_ARRAYS != 2: return self.skip('full i64 stuff only in ta2')

  src = r'''
    #include <stdio.h>
    #include <sys/time.h>

    typedef long long int64;
    #define PRMJ_USEC_PER_SEC       1000000L

    int main(int argc, char * argv[]) {
        int64 sec = 1329409675 + argc;
        int64 usec = 2329509675;
        int64 mul = int64(sec) * PRMJ_USEC_PER_SEC;
        int64 add = mul + int64(usec);
        int add_low = add;
        int add_high = add >> 32;
        printf("*%lld,%lld,%u,%u*\n", mul, add, add_low, add_high);
        return 0;
    }
  '''
  self.do_run(src, '*1329409676000000,1329412005509675,3663280683,309527*\n')
def test_i64_cmp(self):
  """i64 equality and less-than against negative constants, with values
  derived from argc so the compiler cannot fold the comparisons."""
  if Settings.USE_TYPED_ARRAYS != 2: return self.skip('full i64 stuff only in ta2')

  src = r'''
    #include <stdio.h>

    typedef long long int64;

    bool compare(int64 val) {
      return val == -12;
    }

    bool compare2(int64 val) {
      return val < -12;
    }

    int main(int argc, char * argv[]) {
        printf("*%d,%d,%d,%d,%d,%d*\n", argc, compare(argc-1-12), compare(1000+argc), compare2(argc-1-10), compare2(argc-1-14), compare2(argc+1000));
        return 0;
    }
  '''
  self.do_run(src, '*1,1,0,0,1,0*\n')
def test_i64_cmp2(self):
  """i64 >= against a 1UL<<31 constant inside a shift loop (clock-divisor
  calculation extracted from MAME); each iteration's values must print
  exactly."""
  if Settings.USE_TYPED_ARRAYS != 2: return self.skip('full i64 stuff only in ta2')

  src = r'''
    #include <inttypes.h>
    #include <stdio.h>

    typedef int32_t INT32;
    typedef int64_t INT64;
    typedef uint8_t UINT8;

    void interface_clock_changed()
    {
      UINT8 m_divshift;
      INT32 m_divisor;

      //INT64 attos = m_attoseconds_per_cycle;
      INT64 attos = 279365114840;
      m_divshift = 0;
      while (attos >= (1UL << 31))
      {
        m_divshift++;
        printf("m_divshift is %i, on %Ld >?= %lu\n", m_divshift, attos, 1UL << 31);
        attos >>= 1;
      }
      m_divisor = attos;

      printf("m_divisor is %i\n",m_divisor);
    }

    int main() {
      interface_clock_changed();
      return 0;
    }
  '''
  self.do_run(src, '''m_divshift is 1, on 279365114840 >?= 2147483648
m_divshift is 2, on 139682557420 >?= 2147483648
m_divshift is 3, on 69841278710 >?= 2147483648
m_divshift is 4, on 34920639355 >?= 2147483648
m_divshift is 5, on 17460319677 >?= 2147483648
m_divshift is 6, on 8730159838 >?= 2147483648
m_divshift is 7, on 4365079919 >?= 2147483648
m_divshift is 8, on 2182539959 >?= 2147483648
m_divisor is 1091269979
''')
def test_i64_double(self):
  """Type-punning an i64 with a double through a union (the SpiderMonkey
  negative-zero checks); output must match native gcc."""
  if Settings.USE_TYPED_ARRAYS != 2: return self.skip('full i64 stuff only in ta2')

  src = r'''
    #include <stdio.h>

    typedef long long int64;
    #define JSDOUBLE_HI32_SIGNBIT   0x80000000

    bool JSDOUBLE_IS_NEGZERO(double d)
    {
      union {
        struct {
          unsigned int lo, hi;
        } s;
        double d;
      } x;
      if (d != 0)
        return false;
      x.d = d;
      return (x.s.hi & JSDOUBLE_HI32_SIGNBIT) != 0;
    }

    bool JSINT64_IS_NEGZERO(int64 l)
    {
      union {
        int64 i;
        double d;
      } x;
      if (l != 0)
        return false;
      x.i = l;
      return x.d == -0;
    }

    int main(int argc, char * argv[]) {
      printf("*%d,%d,%d,%d*\n", JSDOUBLE_IS_NEGZERO(0), JSDOUBLE_IS_NEGZERO(-0), JSDOUBLE_IS_NEGZERO(-1), JSDOUBLE_IS_NEGZERO(+1));
      printf("*%d,%d,%d,%d*\n", JSINT64_IS_NEGZERO(0), JSINT64_IS_NEGZERO(-0), JSINT64_IS_NEGZERO(-1), JSINT64_IS_NEGZERO(+1));
      return 0;
    }
  '''
  self.do_run(src, '*0,0,0,0*\n*1,1,0,0*\n') # same as gcc
def test_i64_umul(self):
  """Unsigned 64-bit multiply compared against a u64: the u32*u32 product
  must not lose precision (loop body must never execute)."""
  if Settings.USE_TYPED_ARRAYS != 2: return self.skip('full i64 stuff only in ta2')

  src = r'''
    #include <inttypes.h>
    #include <stdio.h>

    typedef uint32_t UINT32;
    typedef uint64_t UINT64;

    int main() {
      volatile UINT32 testu32a = 2375724032U;
      UINT32 bigu32 = 0xffffffffU;
      volatile UINT64 testu64a = 14746250828952703000U;

      while ((UINT64)testu32a * (UINT64)bigu32 < testu64a) {
        printf("testu64a is %llu\n", testu64a);
        testu64a /= 2;
      }

      return 0;
    }
  '''
  self.do_run(src, 'testu64a is 14746250828952703000\n')
def test_i64_precise(self):
  """Precise i64 math: full-width add/sub/mul/div/mod compared against a
  recorded expected-output file, then verification that the big-int
  support code is only linked in when actually needed."""
  if Settings.USE_TYPED_ARRAYS != 2: return self.skip('full i64 stuff only in ta2')

  src = r'''
    #include <inttypes.h>
    #include <stdio.h>

    int main() {
      uint64_t x = 0, y = 0;
      for (int i = 0; i < 64; i++) {
        x += 1ULL << i;
        y += x;
        x /= 3;
        y *= 5;
        printf("unsigned %d: %llu,%llu,%llu,%llu,%llu,%llu,%llu,%llu,%llu\n", i, x, y, x+y, x-y, x*y, y ? x/y : 0, x ? y/x : 0, y ? x%y : 0, x ? y%x : 0);
      }
      int64_t x2 = 0, y2 = 0;
      for (int i = 0; i < 64; i++) {
        x2 += 1LL << i;
        y2 += x2;
        x2 /= 3 * (i % 7 ? -1 : 1);
        y2 *= 5 * (i % 2 ? -1 : 1);
        printf("signed %d: %lld,%lld,%lld,%lld,%lld,%lld,%lld,%lld,%lld\n", i, x2, y2, x2+y2, x2-y2, x2*y2, y2 ? x2/y2 : 0, x2 ? y2/x2 : 0, y2 ? x2%y2 : 0, x2 ? y2%x2 : 0);
      }
      return 0;
    }
  '''
  self.do_run(src, open(path_from_root('tests', 'i64_precise.txt')).read())

  # Verify that without precision, we do not include the precision code
  Settings.PRECISE_I64_MATH = 0
  self.do_run(src, 'unsigned')
  code = open(os.path.join(self.get_dir(), 'src.cpp.o.js')).read()
  assert 'goog.math.Long' not in code and 'jsbn' not in code, 'i64 precise math should not have been included if not asked for'

  # Verify that even if we ask for precision, if it is not needed it is not included
  Settings.PRECISE_I64_MATH = 1
  src = '''
    #include <inttypes.h>
    #include <stdio.h>

    int main(int argc, char **argv) {
      uint64_t x = 2125299906845564, y = 1225891506842664;
      if (argc == 12) {
        x = x >> 1;
        y = y >> 1;
      }
      x = x & 12ULL;
      y = y | 12ULL;
      x = x ^ y;
      x <<= 2;
      y >>= 3;
      printf("*%llu, %llu*\\n", x, y);
    }
  '''
  self.do_run(src, '*4903566027370624, 153236438355333*')
  code = open(os.path.join(self.get_dir(), 'src.cpp.o.js')).read()
  assert 'goog.math.Long' not in code and 'jsbn' not in code, 'i64 precise math should not have been included if not actually used'
def test_i64_zextneg(self):
  """Zero-extension of a high-bit-set u8 into wider unsigned types must
  not sign-extend (all widths print 128)."""
  if Settings.USE_TYPED_ARRAYS != 2: return self.skip('full i64 stuff only in ta2')

  src = r'''
    #include <stdint.h>
    #include <stdio.h>

    int main(int argc, char *argv[])
    {
        uint8_t byte = 0x80;
        uint16_t two = byte;
        uint32_t four = byte;
        uint64_t eight = byte;

        printf("value: %d,%d,%d,%lld.\n", byte, two, four, eight);

        return 0;
    }
  '''
  self.do_run(src, 'value: 128,128,128,128.')
def test_i64_7z(self):
  """Unsigned i64 overflow comparison pattern from 7-zip; with runtime
  inputs the 'zero' branch must be taken."""
  if Settings.USE_TYPED_ARRAYS != 2: return self.skip('full i64 stuff only in ta2')

  src = r'''
    #include <stdint.h>
    #include <stdio.h>
    uint64_t a, b;
    int main(int argc, char *argv[])
    {
        a = argc;
        b = argv[1][0];
        if (a > a + b || a > a + b + 1) {
            printf("one %lld, %lld", a, b);
            return 0;
        }
        printf("zero %lld, %lld", a, b);
        return 0;
    }
  '''
  self.do_run(src, 'zero 2, 104', ['hallo'])
def test_cube2hash(self):
  """Build the cube2hash library and check its 192-bit hashes for several
  seed strings (a good stress test of i64 math)."""
  # A good test of i64 math
  if Settings.USE_TYPED_ARRAYS != 2: return self.skip('requires ta2 C-style memory aliasing')
  # First run with no arguments just prints usage.
  self.do_run('', 'Usage: hashstring <seed>',
              libraries=self.get_library('cube2hash', ['cube2hash.bc'], configure=None),
              includes=[path_from_root('tests', 'cube2hash')])

  # Known-good hash values for each input; reuse the already-built JS.
  for text, output in [('fleefl', '892BDB6FD3F62E863D63DA55851700FDE3ACF30204798CE9'),
                       ('fleefl2', 'AA2CC5F96FC9D540CA24FDAF1F71E2942753DB83E8A81B61'),
                       ('64bitisslow', '64D8470573635EC354FEE7B7F87C566FCAF1EFB491041670')]:
    self.do_run('', 'hash value: ' + output, [text], no_build=True)
def test_unaligned(self):
  """Structs whose 64-bit members are not 8-byte aligned must still read
  and write correctly. A second part exercising deliberately unaligned
  loads/stores (undefined behaviour in C) is currently disabled."""
  if Settings.QUANTUM_SIZE == 1: return self.skip('No meaning to unaligned addresses in q1')

  src = r'''
    #include<stdio.h>

    struct S {
      double x;
      int y;
    };

    int main() {
      // the 64-bit value here will not always be 8-byte aligned
      S s[3] = { {0x12a751f430142, 22}, {0x17a5c85bad144, 98}, {1, 1}};
      printf("*%d : %d : %d\n", sizeof(S), ((unsigned int)&s[0]) % 8 != ((unsigned int)&s[1]) % 8,
                                           ((unsigned int)&s[1]) - ((unsigned int)&s[0]));
      s[0].x++;
      s[0].y++;
      s[1].x++;
      s[1].y++;
      printf("%.1f,%d,%.1f,%d\n", s[0].x, s[0].y, s[1].x, s[1].y);
      return 0;
    }
  '''

  # TODO: A version of this with int64s as well

  self.do_run(src, '*12 : 1 : 12\n328157500735811.0,23,416012775903557.0,99\n')

  return # TODO: continue to the next part here
  # --- Everything below is intentionally unreachable for now. ---

  # Test for undefined behavior in C. This is not legitimate code, but does exist

  if Settings.USE_TYPED_ARRAYS != 2: return self.skip('No meaning to unaligned addresses without t2')

  src = r'''
    #include <stdio.h>

    int main()
    {
      int x[10];
      char *p = (char*)&x[0];
      p++;
      short *q = (short*)p;
      *q = 300;
      printf("*%d:%d*\n", *q, ((int)q)%2);
      int *r = (int*)p;
      *r = 515559;
      printf("*%d*\n", *r);
      long long *t = (long long*)p;
      *t = 42949672960;
      printf("*%Ld*\n", *t);
      return 0;
    }
  '''

  # Without unaligned-access emulation, the run is expected to fail with an
  # alignment error (Py2 except syntax kept as in the original file).
  Settings.EMULATE_UNALIGNED_ACCESSES = 0

  try:
    self.do_run(src, '*300:1*\n*515559*\n*42949672960*\n')
  except Exception, e:
    assert 'must be aligned' in str(e), e # expected to fail without emulation

  # XXX TODO Settings.EMULATE_UNALIGNED_ACCESSES = 1
  #self.do_run(src, '*300:1*\n*515559*\n*42949672960*\n') # but succeeds with it
def test_unsigned(self):
  """Sign-correction behaviour: unsigned chars/ints stored as negative
  values, re-signing of constant arrays, zext issues, and comparisons;
  second program checks raw stores read back per ta2 aliasing rules."""
  Settings.CORRECT_SIGNS = 1 # We test for exactly this sort of thing here
  Settings.CHECK_SIGNS = 0
  src = '''
    #include <stdio.h>
    const signed char cvals[2] = { -1, -2 }; // compiler can store this is a string, so -1 becomes \FF, and needs re-signing
    int main()
    {
      {
        unsigned char x = 200;
        printf("*%d*\\n", x);
        unsigned char y = -22;
        printf("*%d*\\n", y);
      }

      int varey = 100;
      unsigned int MAXEY = -1, MAXEY2 = -77;
      printf("*%u,%d,%u*\\n", MAXEY, varey >= MAXEY, MAXEY2); // 100 >= -1? not in unsigned!

      int y = cvals[0];
      printf("*%d,%d,%d,%d*\\n", cvals[0], cvals[0] < 0, y, y < 0);
      y = cvals[1];
      printf("*%d,%d,%d,%d*\\n", cvals[1], cvals[1] < 0, y, y < 0);

      // zext issue - see mathop in jsifier
      unsigned char x8 = -10;
      unsigned long hold = 0;
      hold += x8;
      int y32 = hold+50;
      printf("*%u,%u*\\n", hold, y32);

      // Comparisons
      x8 = 0;
      for (int i = 0; i < 254; i++) x8++; // make it an actual 254 in JS - not a -2
      printf("*%d,%d*\\n", x8+1 == 0xff, x8+1 != 0xff); // 0xff may be '-1' in the bitcode

      return 0;
    }
  '''
  self.do_run(src, '*4294967295,0,4294967219*\n*-1,1,-1,1*\n*-2,1,-2,1*\n*246,296*\n*1,0*')

  # Now let's see some code that should just work in USE_TYPED_ARRAYS == 2, but requires
  # corrections otherwise
  if Settings.USE_TYPED_ARRAYS == 2:
    Settings.CORRECT_SIGNS = 0
    Settings.CHECK_SIGNS = 1
  else:
    Settings.CORRECT_SIGNS = 1
    Settings.CHECK_SIGNS = 0

  src = '''
    #include <stdio.h>
    int main()
    {
      {
        unsigned char x;
        unsigned char *y = &x;
        *y = -1;
        printf("*%d*\\n", x);
      }
      {
        unsigned short x;
        unsigned short *y = &x;
        *y = -1;
        printf("*%d*\\n", x);
      }
      /*{ // This case is not checked. The hint for unsignedness is just the %u in printf, and we do not analyze that
        unsigned int x;
        unsigned int *y = &x;
        *y = -1;
        printf("*%u*\\n", x);
      }*/
      {
        char x;
        char *y = &x;
        *y = 255;
        printf("*%d*\\n", x);
      }
      {
        char x;
        char *y = &x;
        *y = 65535;
        printf("*%d*\\n", x);
      }
      {
        char x;
        char *y = &x;
        *y = 0xffffffff;
        printf("*%d*\\n", x);
      }
      return 0;
    }
  '''
  self.do_run(src, '*255*\n*65535*\n*-1*\n*-1*\n*-1*')
def test_bitfields(self):
  """Single-bit bitfield members: all 8 combinations of 3 bits must read
  back exactly as written."""
  if self.emcc_args is None: Settings.SAFE_HEAP = 0 # bitfields do loads on invalid areas, by design
  src = '''
    #include <stdio.h>
    struct bitty {
      unsigned x : 1;
      unsigned y : 1;
      unsigned z : 1;
    };
    int main()
    {
      bitty b;
      printf("*");
      for (int i = 0; i <= 1; i++)
        for (int j = 0; j <= 1; j++)
          for (int k = 0; k <= 1; k++) {
            b.x = i;
            b.y = j;
            b.z = k;
            printf("%d,%d,%d,", b.x, b.y, b.z);
          }
      printf("*\\n");
      return 0;
    }
  '''
  self.do_run(src, '*0,0,0,0,0,1,0,1,0,0,1,1,1,0,0,1,0,1,1,1,0,1,1,1,*')
def test_floatvars(self):
  """Float locals: comparison, multiplication and float->int conversion
  must print with the exact requested precision."""
  src = '''
    #include <stdio.h>
    int main()
    {
      float x = 1.234, y = 3.5, q = 0.00000001;
      y *= 3;
      int z = x < y;
      printf("*%d,%d,%.1f,%d,%.4f,%.2f*\\n", z, int(y), y, (int)x, x, q);

      /*
      // Rounding behavior
      float fs[6] = { -2.75, -2.50, -2.25, 2.25, 2.50, 2.75 };
      double ds[6] = { -2.75, -2.50, -2.25, 2.25, 2.50, 2.75 };
      for (int i = 0; i < 6; i++)
        printf("*int(%.2f)=%d,%d*\\n", fs[i], int(fs[i]), int(ds[i]));
      */

      return 0;
    }
  '''
  self.do_run(src, '*1,10,10.5,1,1.2340,0.00*')
def test_math(self):
  """libc math: M_PI, infinity/NaN classification (finite/isinf), div(),
  and the sincos/sincosf extensions."""
  src = '''
    #include <stdio.h>
    #include <stdlib.h>
    #include <cmath>
    int main()
    {
      printf("*%.2f,%.2f,%d", M_PI, -M_PI, (1/0.0) > 1e300); // could end up as infinity, or just a very very big number
      printf(",%d", finite(NAN) != 0);
      printf(",%d", finite(INFINITY) != 0);
      printf(",%d", finite(-INFINITY) != 0);
      printf(",%d", finite(12.3) != 0);
      printf(",%d", isinf(NAN) != 0);
      printf(",%d", isinf(INFINITY) != 0);
      printf(",%d", isinf(-INFINITY) != 0);
      printf(",%d", isinf(12.3) != 0);
      div_t div_result = div(23, 10);
      printf(",%d", div_result.quot);
      printf(",%d", div_result.rem);
      double sine = -1.0, cosine = -1.0;
      sincos(0.0, &sine, &cosine);
      printf(",%1.1lf", sine);
      printf(",%1.1lf", cosine);
      float fsine = -1.0f, fcosine = -1.0f;
      sincosf(0.0, &fsine, &fcosine);
      printf(",%1.1f", fsine);
      printf(",%1.1f", fcosine);
      printf("*\\n");
      return 0;
    }
  '''
  self.do_run(src, '*3.14,-3.14,1,0,0,0,1,0,1,1,0,2,3,0.0,1.0,0.0,1.0*')
def test_math_hyperbolic(self):
  """Run the hyperbolic-math test program against its recorded output."""
  test_dir = path_from_root('tests', 'hyperbolic')
  source = open(os.path.join(test_dir, 'src.c'), 'r').read()
  expected = open(os.path.join(test_dir, 'output.txt'), 'r').read()
  self.do_run(source, expected)
def test_getgep(self):
  """GEP-of-GEP: generated code includes getelementptr (getelementptr, 0, 1),
  i.e. a GEP as the first param to another GEP; fields of an anonymous
  global struct must still land in the right slots."""
  self.do_run('''
    #include <stdio.h>
    struct {
      int y[10];
      int z[10];
    } commonblock;

    int main()
    {
      for (int i = 0; i < 10; ++i) {
        commonblock.y[i] = 1;
        commonblock.z[i] = 2;
      }
      printf("*%d %d*\\n", commonblock.y[0], commonblock.z[0]);
      return 0;
    }
  ''', '*1 2*')
def test_multiply_defined_symbols(self):
  """When two archives both define f(), link order decides: the copy in
  liba (return 1) must win over the one in libb (return 2)."""
  def emit(basename, code):
    # Write a small source file into the test dir and return its path.
    path = os.path.join(self.get_dir(), basename)
    f = open(path, 'w')
    f.write(code)
    f.close()
    return path

  a1_name = emit('a1.c', "int f() { return 1; }")
  a2_name = emit('a2.c', "void x() {}")
  b1_name = emit('b1.c', "int f() { return 2; }")
  b2_name = emit('b2.c', "void y() {}")
  main_name = emit('main.c', r'''
    #include <stdio.h>
    int f();
    int main() {
      printf("result: %d\n", f());
      return 0;
    }
  ''')

  # Compile each translation unit, then archive the a* and b* objects.
  for name in [a1_name, a2_name, b1_name, b2_name, main_name]:
    Building.emcc(name)

  liba_name = os.path.join(self.get_dir(), 'liba.a')
  Building.emar('cr', liba_name, [a1_name + '.o', a2_name + '.o'])
  libb_name = os.path.join(self.get_dir(), 'libb.a')
  Building.emar('cr', libb_name, [b1_name + '.o', b2_name + '.o'])

  all_name = os.path.join(self.get_dir(), 'all.bc')
  Building.link([main_name + '.o', liba_name, libb_name], all_name)

  self.do_ll_run(all_name, 'result: 1')
def test_if(self):
  """A simple taken if-branch prints its body."""
  self.do_run('''
    #include <stdio.h>
    int main()
    {
      int x = 5;
      if (x > 3) {
        printf("*yes*\\n");
      }
      return 0;
    }
  ''', '*yes*')
def test_if_else(self):
  """An if/else where the else branch is the one taken."""
  self.do_run('''
    #include <stdio.h>
    int main()
    {
      int x = 5;
      if (x > 10) {
        printf("*yes*\\n");
      } else {
        printf("*no*\\n");
      }
      return 0;
    }
  ''', '*no*')
def test_loop(self):
  """A loop with an early break; also inspects the generated JS to check
  that label tests were hoisted into the loop."""
  src = '''
    #include <stdio.h>
    int main()
    {
      int x = 5;
      for (int i = 0; i < 6; i++) {
        x += x*i;
        if (x > 1000) {
          if (x % 7 == 0) printf("cheez\\n");
          x /= 2;
          break;
        }
      }
      printf("*%d*\\n", x);
      return 0;
    }
  '''
  self.do_run(src, '*1800*')

  # Check a property of the generated code, not just its output.
  generated = open('src.cpp.o.js', 'r').read()
  assert '__label__ ==' not in generated, 'We should hoist into the loop'
def test_stack(self):
    """Compile and run a C program that checks the stack pointer unwinds:
    the address of a local must be identical before and after deep recursion."""
    src = '''
#include <stdio.h>
int test(int i) {
int x = 10;
if (i > 0) {
return test(i-1);
}
return int(&x); // both for the number, and forces x to not be nativized
}
int main()
{
// We should get the same value for the first and last - stack has unwound
int x1 = test(0);
int x2 = test(100);
int x3 = test(0);
printf("*%d,%d*\\n", x3-x1, x2 != x1);
return 0;
}
'''
    self.do_run(src, '*0,1*')
def test_strings(self):
    """Compile and run a C program exercising string/stdlib functions:
    memmove with zero size, sscanf with mixed formats, argv access, atoi,
    strlen on a string with an escape prefix, printf of NULL, comment-like
    format strings, and strdup/free. Command-line args are passed via do_run."""
    src = '''
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
int main(int argc, char **argv)
{
int x = 5, y = 9, magic = 7; // fool compiler with magic
memmove(&x, &y, magic-7); // 0 should not crash us
int xx, yy, zz;
char s[32];
int cc = sscanf("abc_10.b1_xyz_543_defg", "abc_%d.%2x_xyz_%3d_%3s", &xx, &yy, &zz, s);
printf("%d:%d,%d,%d,%s\\n", cc, xx, yy, zz, s);
printf("%d\\n", argc);
puts(argv[1]);
puts(argv[2]);
printf("%d\\n", atoi(argv[3])+2);
const char *foolingthecompiler = "\\rabcd";
printf("%d\\n", strlen(foolingthecompiler)); // Tests parsing /0D in llvm - should not be a 0 (end string) then a D!
printf("%s\\n", NULL); // Should print '(null)', not the string at address 0, which is a real address for us!
printf("/* a comment */\\n"); // Should not break the generated code!
printf("// another\\n"); // Should not break the generated code!
char* strdup_val = strdup("test");
printf("%s\\n", strdup_val);
free(strdup_val);
return 0;
}
'''
    self.do_run(src, '4:10,177,543,def\n4\nwowie\ntoo\n76\n5\n(null)\n/* a comment */\n// another\ntest\n', ['wowie', 'too', '74'])
def test_errar(self):
    """Compile and run a C program exercising strerror, strerror_r (including
    the ERANGE=34 failure case with a zero-size buffer), and errno assignment.
    Leading whitespace in the expected text is stripped before comparison."""
    src = r'''
#include <stdio.h>
#include <errno.h>
#include <string.h>
int main() {
char* err;
char buffer[200];
err = strerror(EDOM);
strerror_r(EWOULDBLOCK, buffer, 200);
printf("<%s>\n", err);
printf("<%s>\n", buffer);
printf("<%d>\n", strerror_r(EWOULDBLOCK, buffer, 0));
errno = 123;
printf("<%d>\n", errno);
return 0;
}
'''
    expected = '''
<Numerical argument out of domain>
<Resource temporarily unavailable>
<34>
<123>
'''
    # Normalize: drop any whitespace that follows each newline in the expected text.
    self.do_run(src, re.sub('(^|\n)\s+', '\\1', expected))
def test_mainenv(self):
    """Compile and run a C program checking that main's envp argument prints
    as a nil pointer."""
    src = '''
#include <stdio.h>
int main(int argc, char **argv, char **envp)
{
printf("*%p*\\n", envp);
return 0;
}
'''
    self.do_run(src, '*(nil)*')
def test_funcs(self):
    """Compile and run a C program with a simple function call."""
    src = '''
#include <stdio.h>
int funcy(int x)
{
return x*9;
}
int main()
{
printf("*%d,%d*\\n", funcy(8), funcy(10));
return 0;
}
'''
    self.do_run(src, '*72,90*')
def test_structs(self):
    """Compile and run a C++ program mutating struct fields directly and
    through pointers, checking all values afterwards."""
    src = '''
#include <stdio.h>
struct S
{
int x, y;
};
int main()
{
S a, b;
a.x = 5; a.y = 6;
b.x = 101; b.y = 7009;
S *c, *d;
c = &a;
c->x *= 2;
c = &b;
c->y -= 1;
d = c;
d->y += 10;
printf("*%d,%d,%d,%d,%d,%d,%d,%d*\\n", a.x, a.y, b.x, b.y, c->x, c->y, d->x, d->y);
return 0;
}
'''
    self.do_run(src, '*10,6,101,7018,101,7018,101,7018*')
# Template C++ source shared by test_mallocstruct and test_newstruct: the
# {{gen_struct}} / {{del_struct}} placeholders are replaced with the
# allocation/deallocation pair under test (malloc/free or new/delete).
gen_struct_src = '''
#include <stdio.h>
#include <stdlib.h>
#include "emscripten.h"
struct S
{
int x, y;
};
int main()
{
S* a = {{gen_struct}};
a->x = 51; a->y = 62;
printf("*%d,%d*\\n", a->x, a->y);
{{del_struct}}(a);
return 0;
}
'''
def test_mallocstruct(self):
    """Run the shared struct-allocation template using malloc/free."""
    self.do_run(self.gen_struct_src.replace('{{gen_struct}}', '(S*)malloc(sizeof(S))').replace('{{del_struct}}', 'free'), '*51,62*')
def test_newstruct(self):
    """Run the shared struct-allocation template using new/delete."""
    self.do_run(self.gen_struct_src.replace('{{gen_struct}}', 'new S').replace('{{del_struct}}', 'delete'), '*51,62*')
def test_addr_of_stacked(self):
    """Compile and run a C program that passes the address of a stack variable
    to a function that mutates it."""
    src = '''
#include <stdio.h>
void alter(int *y)
{
*y += 5;
}
int main()
{
int x = 2;
alter(&x);
printf("*%d*\\n", x);
return 0;
}
'''
    self.do_run(src, '*7*')
def test_globals(self):
    """Compile and run a C program where a global array is accessed both
    directly and through a global pointer aliasing it."""
    src = '''
#include <stdio.h>
char cache[256], *next = cache;
int main()
{
cache[10] = 25;
next[20] = 51;
printf("*%d,%d*\\n", next[10], cache[20]);
return 0;
}
'''
    self.do_run(src, '*25,51*')
def test_linked_list(self):
    """Compile and run a C program walking self-referential structs: a
    two-node list, then a circular list built from an array, summing values.
    Also verifies a NULL next pointer prints as 0."""
    src = '''
#include <stdio.h>
struct worker_args {
int value;
struct worker_args *next;
};
int main()
{
worker_args a;
worker_args b;
a.value = 60;
a.next = &b;
b.value = 900;
b.next = NULL;
worker_args* c = &a;
int total = 0;
while (c) {
total += c->value;
c = c->next;
}
// Chunk of em
worker_args chunk[10];
for (int i = 0; i < 9; i++) {
chunk[i].value = i*10;
chunk[i].next = &chunk[i+1];
}
chunk[9].value = 90;
chunk[9].next = &chunk[0];
c = chunk;
do {
total += c->value;
c = c->next;
} while (c != chunk);
printf("*%d,%d*\\n", total, b.next);
// NULL *is* 0, in C/C++. No JS null! (null == 0 is false, etc.)
return 0;
}
'''
    self.do_run(src, '*1410,0*')
def test_sup(self):
    """Compile and run a C++ program printing field offsets and sizeof for many
    struct layouts, checking alignment/padding rules. Expected offsets differ
    between QUANTUM_SIZE == 1 (compressed memory) and the normal layout."""
    src = '''
#include <stdio.h>
struct S4 { int x; }; // size: 4
struct S4_2 { short x, y; }; // size: 4, but for alignment purposes, 2
struct S6 { short x, y, z; }; // size: 6
struct S6w { char x[6]; }; // size: 6 also
struct S6z { int x; short y; }; // size: 8, since we align to a multiple of the biggest - 4
struct C___ { S6 a, b, c; int later; };
struct Carr { S6 a[3]; int later; }; // essentially the same, but differently defined
struct C__w { S6 a; S6w b; S6 c; int later; }; // same size, different struct
struct Cp1_ { int pre; short a; S6 b, c; int later; }; // fillers for a
struct Cp2_ { int a; short pre; S6 b, c; int later; }; // fillers for a (get addr of the other filler)
struct Cint { S6 a; int b; S6 c; int later; }; // An int (different size) for b
struct C4__ { S6 a; S4 b; S6 c; int later; }; // Same size as int from before, but a struct
struct C4_2 { S6 a; S4_2 b; S6 c; int later; }; // Same size as int from before, but a struct with max element size 2
struct C__z { S6 a; S6z b; S6 c; int later; }; // different size, 8 instead of 6
int main()
{
#define TEST(struc) \\
{ \\
struc *s = 0; \\
printf("*%s: %d,%d,%d,%d<%d*\\n", #struc, (int)&(s->a), (int)&(s->b), (int)&(s->c), (int)&(s->later), sizeof(struc)); \\
}
#define TEST_ARR(struc) \\
{ \\
struc *s = 0; \\
printf("*%s: %d,%d,%d,%d<%d*\\n", #struc, (int)&(s->a[0]), (int)&(s->a[1]), (int)&(s->a[2]), (int)&(s->later), sizeof(struc)); \\
}
printf("sizeofs:%d,%d\\n", sizeof(S6), sizeof(S6z));
TEST(C___);
TEST_ARR(Carr);
TEST(C__w);
TEST(Cp1_);
TEST(Cp2_);
TEST(Cint);
TEST(C4__);
TEST(C4_2);
TEST(C__z);
return 1;
}
'''
    if Settings.QUANTUM_SIZE == 1:
        self.do_run(src, 'sizeofs:6,8\n*C___: 0,3,6,9<24*\n*Carr: 0,3,6,9<24*\n*C__w: 0,3,9,12<24*\n*Cp1_: 1,2,5,8<24*\n*Cp2_: 0,2,5,8<24*\n*Cint: 0,3,4,7<24*\n*C4__: 0,3,4,7<24*\n*C4_2: 0,3,5,8<20*\n*C__z: 0,3,5,8<28*')
    else:
        self.do_run(src, 'sizeofs:6,8\n*C___: 0,6,12,20<24*\n*Carr: 0,6,12,20<24*\n*C__w: 0,6,12,20<24*\n*Cp1_: 4,6,12,20<24*\n*Cp2_: 0,6,12,20<24*\n*Cint: 0,8,12,20<24*\n*C4__: 0,8,12,20<24*\n*C4_2: 0,6,10,16<20*\n*C__z: 0,8,16,24<28*')
def test_assert(self):
    """Compile and run a C program whose second assert fails, checking the
    assertion-failure message is produced."""
    src = '''
#include <stdio.h>
#include <assert.h>
int main() {
assert(1 == true); // pass
assert(1 == false); // fail
return 1;
}
'''
    self.do_run(src, 'Assertion failed: 1 == false')
def test_longjmp(self):
    """Compile and run a C program using setjmp/longjmp. The value of a local
    modified between setjmp and longjmp depends on optimization level, so the
    expected output differs with -O1/-O2 (matching gcc behavior)."""
    src = r'''
#include <stdio.h>
#include <setjmp.h>
static jmp_buf buf;
void second(void) {
printf("second\n"); // prints
longjmp(buf,1); // jumps back to where setjmp was called - making setjmp now return 1
}
void first(void) {
second();
printf("first\n"); // does not print
}
int main() {
int x = 0;
if ( ! setjmp(buf) ) {
x++;
first(); // when executed, setjmp returns 0
} else { // when longjmp jumps back, setjmp returns 1
printf("main: %d\n", x); // prints
}
return 0;
}
'''
    # gcc -O0 and -O2 differ in what they do with the saved state of local vars - and we match that
    if self.emcc_args is None or ('-O1' not in self.emcc_args and '-O2' not in self.emcc_args):
        self.do_run(src, 'second\nmain: 1\n')
    else:
        self.do_run(src, 'second\nmain: 0\n')
def test_exceptions(self):
    """Compile and run C++ exception tests: catch-all handlers with exception
    catching enabled, the abort message when catching is disabled, and
    construct/copy/destruct ordering when catching by reference vs by value.
    Mutates several global Settings; restyling is avoided because the exact
    sequence of Settings changes between do_run calls is order-sensitive."""
    if Settings.QUANTUM_SIZE == 1: return self.skip("we don't support libcxx in q1")
    # node issue 1669, exception causes stdout not to be flushed
    self.banned_js_engines = [NODE_JS]
    Settings.DISABLE_EXCEPTION_CATCHING = 0
    if self.emcc_args is None:
        if Building.LLVM_OPTS: return self.skip('optimizing bitcode before emcc can confuse libcxx inclusion')
        self.emcc_args = [] # libc++ auto-inclusion is only done if we use emcc
    else:
        if '-O2' in self.emcc_args:
            self.emcc_args += ['--closure', '1'] # Use closure here for some additional coverage
    src = '''
#include <stdio.h>
void thrower() {
printf("infunc...");
throw(99);
printf("FAIL");
}
int main() {
try {
printf("*throw...");
throw(1);
printf("FAIL");
} catch(...) {
printf("caught!");
}
try {
thrower();
} catch(...) {
printf("done!*\\n");
}
return 1;
}
'''
    self.do_run(src, '*throw...caught!infunc...done!*')
    # Same program with catching disabled must report the abort message instead.
    Settings.DISABLE_EXCEPTION_CATCHING = 1
    self.do_run(src, 'Compiled code throwing an exception')
    src = '''
#include <iostream>
class MyException
{
public:
MyException(){ std::cout << "Construct..."; }
MyException( const MyException & ) { std::cout << "Copy..."; }
~MyException(){ std::cout << "Destruct..."; }
};
int function()
{
std::cout << "Throw...";
throw MyException();
}
int function2()
{
return function();
}
int main()
{
try
{
function2();
}
catch (MyException & e)
{
std::cout << "Catched...";
}
try
{
function2();
}
catch (MyException e)
{
std::cout << "Catched...";
}
return 0;
}
'''
    Settings.DISABLE_EXCEPTION_CATCHING = 0
    self.do_run(src, 'Throw...Construct...Catched...Destruct...Throw...Construct...Copy...Catched...Destruct...Destruct...')
def test_uncaught_exception(self):
    """Compile and run C++ programs checking std::uncaught_exception():
    true only during stack unwinding in a destructor, and callable from
    std::basic_ostream's sentry without crashing."""
    if self.emcc_args is None: return self.skip('no libcxx inclusion without emcc')
    if '-O2' in self.emcc_args:
        self.emcc_args += ['--closure', '1'] # Use closure here for some additional coverage
    Settings.EXCEPTION_DEBUG = 0 # Messes up expected output.
    Settings.DISABLE_EXCEPTION_CATCHING = 0
    src = r'''
#include <stdio.h>
#include <exception>
struct X {
~X() {
printf("exception? %s\n", std::uncaught_exception() ? "yes" : "no");
}
};
int main() {
printf("exception? %s\n", std::uncaught_exception() ? "yes" : "no");
try {
X x;
throw 1;
} catch(...) {
printf("exception? %s\n", std::uncaught_exception() ? "yes" : "no");
}
printf("exception? %s\n", std::uncaught_exception() ? "yes" : "no");
return 0;
}
'''
    self.do_run(src, 'exception? no\nexception? yes\nexception? no\nexception? no\n')
    src = r'''
#include <fstream>
#include <iostream>
int main() {
std::ofstream os("test");
os << std::unitbuf << "foo"; // trigger a call to std::uncaught_exception from
// std::basic_ostream::sentry::~sentry
std::cout << "success";
}
'''
    self.do_run(src, 'success')
def test_typed_exceptions(self):
    """Run the typed-exceptions fixture (tests/exceptions/typed.cpp) and
    compare against its recorded expected output."""
    Settings.DISABLE_EXCEPTION_CATCHING = 0
    Settings.SAFE_HEAP = 0 # Throwing null will cause an ignorable null pointer access.
    Settings.EXCEPTION_DEBUG = 0 # Messes up expected output.
    # with-blocks close the fixture files promptly instead of leaking the
    # handles until garbage collection.
    with open(path_from_root('tests', 'exceptions', 'typed.cpp'), 'r') as f:
        src = f.read()
    with open(path_from_root('tests', 'exceptions', 'output.txt'), 'r') as f:
        expected = f.read()
    self.do_run(src, expected)
def test_class(self):
    """Compile and run a C++ program where a global and a local instance of the
    same PRNG class must produce identical sequences (count of mismatches is 0)."""
    src = '''
#include <stdio.h>
struct Random {
enum { IM = 139968, IA = 3877, IC = 29573 };
Random() : last(42) {}
float get( float max = 1.0f ) {
last = ( last * IA + IC ) % IM;
return max * last / IM;
}
protected:
unsigned int last;
} rng1;
int main()
{
Random rng2;
int count = 0;
for (int i = 0; i < 100; i++) {
float x1 = rng1.get();
float x2 = rng2.get();
printf("%f, %f\\n", x1, x2);
if (x1 != x2) count += 1;
}
printf("*%d*\\n", count);
return 0;
}
'''
    self.do_run(src, '*0*')
def test_inherit(self):
    """Compile and run a C++ program with single inheritance, including a
    (deliberate) downcast of a base instance to the derived type."""
    src = '''
#include <stdio.h>
struct Parent {
int x1, x2;
};
struct Child : Parent {
int y;
};
int main()
{
Parent a;
a.x1 = 50;
a.x2 = 87;
Child b;
b.x1 = 78;
b.x2 = 550;
b.y = 101;
Child* c = (Child*)&a;
c->x1 ++;
c = &b;
c->y --;
printf("*%d,%d,%d,%d,%d,%d,%d*\\n", a.x1, a.x2, b.x1, b.x2, b.y, c->x1, c->x2);
return 0;
}
'''
    self.do_run(src, '*51,87,78,550,100,78,550*')
def test_isdigit_l(self):
    """Compile and run a C++ program using the num_put locale facet to format
    a double (requires libcxx, hence the emcc-only guard)."""
    if self.emcc_args is None: return self.skip('no libcxx inclusion without emcc')
    src = '''
#include <iostream>
int main() {
using namespace std;
use_facet<num_put<char> >(cout.getloc()).put(cout, cout, '0', 3.14159265);
}
'''
    self.do_run(src, '3.14159')
def test_polymorph(self):
    """Compile and run a C++ program exercising virtual dispatch (pure virtual,
    overrides) and pointer-to-member-function calls."""
    src = '''
#include <stdio.h>
struct Pure {
virtual int implme() = 0;
};
struct Parent : Pure {
virtual int getit() { return 11; };
int implme() { return 32; }
};
struct Child : Parent {
int getit() { return 74; }
int implme() { return 1012; }
};
struct Other {
int one() { return 11; }
int two() { return 22; }
};
int main()
{
Parent *x = new Parent();
Parent *y = new Child();
printf("*%d,%d,%d,%d*\\n", x->getit(), y->getit(), x->implme(), y->implme());
Other *o = new Other;
int (Other::*Ls)() = &Other::one;
printf("*%d*\\n", (o->*(Ls))());
Ls = &Other::two;
printf("*%d*\\n", (o->*(Ls))());
return 0;
}
'''
    self.do_run(src, '*11,74,32,1012*\n*11*\n*22*')
def test_dynamic_cast(self):
    """Compile and run two C++ programs using dynamic_cast: a simple downcast
    call, and a full matrix of up/down casts across a three-level hierarchy
    (requires libcxxabi, hence the emcc-only guard)."""
    if self.emcc_args is None: return self.skip('need libcxxabi')
    src = r'''
#include <stdio.h>
struct Support {
virtual void f() {
printf("f()\n");
}
};
struct Derived : Support {
};
int main() {
Support * p = new Derived;
dynamic_cast<Derived*>(p)->f();
}
'''
    self.do_run(src, 'f()\n')
    src = '''
#include <stdio.h>
class CBase { virtual void dummy() {} };
class CDerived : public CBase { int a; };
class CDerivedest : public CDerived { float b; };
int main ()
{
CBase *pa = new CBase;
CBase *pb = new CDerived;
CBase *pc = new CDerivedest;
printf("a1: %d\\n", dynamic_cast<CDerivedest*>(pa) != NULL);
printf("a2: %d\\n", dynamic_cast<CDerived*>(pa) != NULL);
printf("a3: %d\\n", dynamic_cast<CBase*>(pa) != NULL);
printf("b1: %d\\n", dynamic_cast<CDerivedest*>(pb) != NULL);
printf("b2: %d\\n", dynamic_cast<CDerived*>(pb) != NULL);
printf("b3: %d\\n", dynamic_cast<CBase*>(pb) != NULL);
printf("c1: %d\\n", dynamic_cast<CDerivedest*>(pc) != NULL);
printf("c2: %d\\n", dynamic_cast<CDerived*>(pc) != NULL);
printf("c3: %d\\n", dynamic_cast<CBase*>(pc) != NULL);
return 0;
}
'''
    self.do_run(src, 'a1: 0\na2: 0\na3: 1\nb1: 0\nb2: 1\nb3: 1\nc1: 1\nc2: 1\nc3: 1\n')
def test_funcptr(self):
    """Compile and run a C program exercising function pointers: calls through
    locals/globals, round-trips through void*, pointer equality, and calling a
    libc function (puts) through a pointer."""
    src = '''
#include <stdio.h>
int calc1() { return 26; }
int calc2() { return 90; }
typedef int (*fp_t)();
fp_t globally1 = calc1;
fp_t globally2 = calc2;
int nothing(const char *str) { return 0; }
int main()
{
fp_t fp = calc1;
void *vp = (void*)fp;
fp_t fpb = (fp_t)vp;
fp_t fp2 = calc2;
void *vp2 = (void*)fp2;
fp_t fpb2 = (fp_t)vp2;
printf("*%d,%d,%d,%d,%d,%d*\\n", fp(), fpb(), fp2(), fpb2(), globally1(), globally2());
fp_t t = calc1;
printf("*%d,%d", t == calc1, t == calc2);
t = calc2;
printf(",%d,%d*\\n", t == calc1, t == calc2);
int (*other)(const char *str);
other = nothing;
other("*hello!*");
other = puts;
other("*goodbye!*");
return 0;
}
'''
    self.do_run(src, '*26,26,90,90,26,90*\n*1,0,0,1*\n*goodbye!*')
def test_mathfuncptr(self):
    """Compile and run a C program calling libm functions (sqrtf, fabsf)
    through function pointers."""
    src = '''
#include <math.h>
#include <stdio.h>
int
main(void) {
float (*fn)(float) = &sqrtf;
float (*fn2)(float) = &fabsf;
printf("fn2(-5) = %d, fn(10) = %f\\n", (int)fn2(-5), fn(10));
return 0;
}
'''
    self.do_run(src, 'fn2(-5) = 5, fn(10) = 3.16')
def test_emptyclass(self):
    """Compile and run a C++ program constructing a class with no data members,
    checking its constructor runs."""
    src = '''
#include <stdio.h>
struct Randomized {
Randomized(int x) {
printf("*zzcheezzz*\\n");
}
};
int main( int argc, const char *argv[] ) {
new Randomized(55);
return 0;
}
'''
    self.do_run(src, '*zzcheezzz*')
def test_alloca(self):
    """Compile (as C) and run a program using alloca, checking the returned
    pointer is nonzero."""
    src = '''
#include <stdio.h>
#include <stdlib.h>
int main() {
char *pc;
pc = (char *)alloca(5);
printf("z:%d*%d*\\n", pc > 0, (int)pc);
return 0;
}
'''
    self.do_run(src, 'z:1*', force_c=True)
def test_alloca_stack(self):
    """Compile (as C) and run a program that performs ~1M allocas in a loop,
    verifying the stack is not exhausted."""
    if self.emcc_args is None: return # too slow in other modes
    # We should not blow up the stack with numerous allocas
    src = '''
#include <stdio.h>
#include <stdlib.h>
func(int i) {
char *pc = (char *)alloca(100);
*pc = i;
(*pc)++;
return (*pc) % 10;
}
int main() {
int total = 0;
for (int i = 0; i < 1024*1024; i++)
total += func(i);
printf("ok:%d*\\n", total);
return 0;
}
'''
    self.do_run(src, 'ok:-32768*', force_c=True)
def test_stack_byval(self):
    """Compile and run a C++ program that passes structs by value a million
    times, verifying byval arguments do not exhaust the stack."""
    if self.emcc_args is None: return # too slow in other modes
    # We should also not blow up the stack with byval arguments
    src = r'''
#include<stdio.h>
struct vec {
int x, y, z;
vec(int x_, int y_, int z_) : x(x_), y(y_), z(z_) {}
static vec add(vec a, vec b) {
return vec(a.x+b.x, a.y+b.y, a.z+b.z);
}
};
int main() {
int total = 0;
for (int i = 0; i < 1000; i++) {
for (int j = 0; j < 1000; j++) {
vec c(i+i%10, j*2, i%255);
vec d(j*2, j%255, i%120);
vec f = vec::add(c, d);
total += (f.x + f.y + f.z) % 100;
total %= 10240;
}
}
printf("sum:%d*\n", total);
return 1;
}
'''
    self.do_run(src, 'sum:9780*')
def test_stack_varargs(self):
    """Compile and run a C program making many varargs calls with a tiny
    TOTAL_STACK, verifying varargs do not exhaust the stack."""
    if self.emcc_args is None: return # too slow in other modes
    # We should not blow up the stack with numerous varargs
    src = r'''
#include <stdio.h>
#include <stdlib.h>
void func(int i) {
printf("%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d\n",
i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i);
}
int main() {
for (int i = 0; i < 1024; i++)
func(i);
printf("ok!\n");
return 0;
}
'''
    Settings.TOTAL_STACK = 1024
    self.do_run(src, 'ok!')
def test_array2(self):
    """Compile and run a C program reading a static 2D array of doubles
    initialized with constant expressions."""
    src = '''
#include <stdio.h>
static const double grid[4][2] = {
{-3/3.,-1/3.},{+1/3.,-3/3.},
{-1/3.,+3/3.},{+3/3.,+1/3.}
};
int main() {
for (int i = 0; i < 4; i++)
printf("%d:%.2f,%.2f ", i, grid[i][0], grid[i][1]);
printf("\\n");
return 0;
}
'''
    self.do_run(src, '0:-1.00,-0.33 1:0.33,-1.00 2:-0.33,1.00 3:1.00,0.33')
def test_array2b(self):
    """Compile and run a C program reading a static array of small structs of
    unsigned chars."""
    src = '''
#include <stdio.h>
static const struct {
unsigned char left;
unsigned char right;
} prioritah[] = {
{6, 6}, {6, 6}, {7, 95}, {7, 7}
};
int main() {
printf("*%d,%d\\n", prioritah[1].left, prioritah[1].right);
printf("%d,%d*\\n", prioritah[2].left, prioritah[2].right);
return 0;
}
'''
    self.do_run(src, '*6,6\n7,95*')
def test_constglobalstructs(self):
    """Compile and run a C++ program reading globally-initialized struct arrays
    and a const 2D char array."""
    src = '''
#include <stdio.h>
struct IUB {
int c;
double p;
unsigned int pi;
};
IUB iub[] = {
{ 'a', 0.27, 5 },
{ 'c', 0.15, 4 },
{ 'g', 0.12, 3 },
{ 't', 0.27, 2 },
};
const unsigned char faceedgesidx[6][4] =
{
{ 4, 5, 8, 10 },
{ 6, 7, 9, 11 },
{ 0, 2, 8, 9 },
{ 1, 3, 10,11 },
{ 0, 1, 4, 6 },
{ 2, 3, 5, 7 },
};
int main( int argc, const char *argv[] ) {
printf("*%d,%d,%d,%d*\\n", iub[0].c, int(iub[1].p*100), iub[2].pi, faceedgesidx[3][2]);
return 0;
}
'''
    self.do_run(src, '*97,15,3,10*')
def test_conststructs(self):
    """Compile and run a C++ program initializing a struct array on the stack
    between two sentinel locals, checking values and the sentinels survive."""
    src = '''
#include <stdio.h>
struct IUB {
int c;
double p;
unsigned int pi;
};
int main( int argc, const char *argv[] ) {
int before = 70;
IUB iub[] = {
{ 'a', 0.3029549426680, 5 },
{ 'c', 0.15, 4 },
{ 'g', 0.12, 3 },
{ 't', 0.27, 2 },
};
int after = 90;
printf("*%d,%d,%d,%d,%d,%d*\\n", before, iub[0].c, int(iub[1].p*100), iub[2].pi, int(iub[0].p*10000), after);
return 0;
}
'''
    self.do_run(src, '*70,97,15,3,3029,90*')
def test_mod_globalstruct(self):
    """Compile and run a C++ program mutating a global struct and using its
    field in macro-based page-alignment arithmetic."""
    src = '''
#include <stdio.h>
struct malloc_params {
size_t magic, page_size;
};
malloc_params mparams;
#define SIZE_T_ONE ((size_t)1)
#define page_align(S) (((S) + (mparams.page_size - SIZE_T_ONE)) & ~(mparams.page_size - SIZE_T_ONE))
int main()
{
mparams.page_size = 4096;
printf("*%d,%d,%d,%d*\\n", mparams.page_size, page_align(1000), page_align(6000), page_align(66474));
return 0;
}
'''
    self.do_run(src, '*4096,4096,8192,69632*')
def test_pystruct(self):
    """Compile and run a CPython-derived C program with self-referential,
    union-containing global struct initializers (PyGC_Head), printing offsets
    and sizes. Expected values differ under QUANTUM_SIZE == 1 (compressed
    memory), where sizeof() still reports the fat sizes."""
    src = '''
#include <stdio.h>
// Based on CPython code
union PyGC_Head {
struct {
union PyGC_Head *gc_next;
union PyGC_Head *gc_prev;
size_t gc_refs;
} gc;
long double dummy; /* force worst-case alignment */
} ;
struct gc_generation {
PyGC_Head head;
int threshold; /* collection threshold */
int count; /* count of allocations or collections of younger
generations */
};
#define NUM_GENERATIONS 3
#define GEN_HEAD(n) (&generations[n].head)
/* linked lists of container objects */
static struct gc_generation generations[NUM_GENERATIONS] = {
/* PyGC_Head, threshold, count */
{{{GEN_HEAD(0), GEN_HEAD(0), 0}}, 700, 0},
{{{GEN_HEAD(1), GEN_HEAD(1), 0}}, 10, 0},
{{{GEN_HEAD(2), GEN_HEAD(2), 0}}, 10, 0},
};
int main()
{
gc_generation *n = NULL;
printf("*%d,%d,%d,%d,%d,%d,%d,%d*\\n",
(int)(&n[0]),
(int)(&n[0].head),
(int)(&n[0].head.gc.gc_next),
(int)(&n[0].head.gc.gc_prev),
(int)(&n[0].head.gc.gc_refs),
(int)(&n[0].threshold), (int)(&n[0].count), (int)(&n[1])
);
printf("*%d,%d,%d*\\n",
(int)(&generations[0]) ==
(int)(&generations[0].head.gc.gc_next),
(int)(&generations[0]) ==
(int)(&generations[0].head.gc.gc_prev),
(int)(&generations[0]) ==
(int)(&generations[1])
);
int x1 = (int)(&generations[0]);
int x2 = (int)(&generations[1]);
printf("*%d*\\n", x1 == x2);
for (int i = 0; i < NUM_GENERATIONS; i++) {
PyGC_Head *list = GEN_HEAD(i);
printf("%d:%d,%d\\n", i, (int)list == (int)(list->gc.gc_prev), (int)list ==(int)(list->gc.gc_next));
}
printf("*%d,%d,%d*\\n", sizeof(PyGC_Head), sizeof(gc_generation), int(GEN_HEAD(2)) - int(GEN_HEAD(1)));
}
'''
    if Settings.QUANTUM_SIZE == 1:
        # Compressed memory. Note that sizeof() does give the fat sizes, however!
        self.do_run(src, '*0,0,0,1,2,3,4,5*\n*1,0,0*\n*0*\n0:1,1\n1:1,1\n2:1,1\n*12,20,5*')
    else:
        self.do_run(src, '*0,0,0,4,8,12,16,20*\n*1,0,0*\n*0*\n0:1,1\n1:1,1\n2:1,1\n*12,20,20*')
def test_ptrtoint(self):
    """Compile and run a C++ program doing pointer-to-int arithmetic, checking
    the numeric result and that the compiler emitted exactly four warnings
    about it."""
    src = '''
#include <stdio.h>
int main( int argc, const char *argv[] ) {
char *a = new char[10];
char *a0 = a+0;
char *a5 = a+5;
int *b = new int[10];
int *b0 = b+0;
int *b5 = b+5;
int c = (int)b5-(int)b0; // Emscripten should warn!
int d = (int)b5-(int)b0; // Emscripten should warn!
printf("*%d*\\n", (int)a5-(int)a0);
return 0;
}
'''
    def check_warnings(output):
        # Count the warning lines with len() on a list comprehension:
        # filter(...).__len__() only works on Python 2's eager filter and is
        # unidiomatic in any case.
        warnings = [line for line in output.split('\n') if 'Warning' in line]
        self.assertEqual(len(warnings), 4)
    self.do_run(src, '*5*', output_processor=check_warnings)
def test_sizeof(self):
    """Compile and run a C++ program using sizeof with memcpy over arrays and
    structs, checking neighbors are untouched. Output newlines are mapped to
    '*' by the output processor before comparison."""
    # Has invalid writes between printouts
    Settings.SAFE_HEAP = 0
    src = '''
#include <stdio.h>
#include <string.h>
#include "emscripten.h"
struct A { int x, y; };
int main( int argc, const char *argv[] ) {
int *a = new int[10];
int *b = new int[1];
int *c = new int[10];
for (int i = 0; i < 10; i++)
a[i] = 2;
*b = 5;
for (int i = 0; i < 10; i++)
c[i] = 8;
printf("*%d,%d,%d,%d,%d*\\n", a[0], a[9], *b, c[0], c[9]);
// Should overwrite a, but not touch b!
memcpy(a, c, 10*sizeof(int));
printf("*%d,%d,%d,%d,%d*\\n", a[0], a[9], *b, c[0], c[9]);
// Part 2
A as[3] = { { 5, 12 }, { 6, 990 }, { 7, 2 } };
memcpy(&as[0], &as[2], sizeof(A));
printf("*%d,%d,%d,%d,%d,%d*\\n", as[0].x, as[0].y, as[1].x, as[1].y, as[2].x, as[2].y);
return 0;
}
'''
    self.do_run(src, '*2,2,5,8,8***8,8,5,8,8***7,2,6,990,7,2*', [], lambda x: x.replace('\n', '*'))
def test_emscripten_api(self):
    """Compile and run a C program calling emscripten_run_script and
    emscripten_run_script_int; `check` is a post-build Python snippet passed
    to do_run (its embedded assertion is currently disabled)."""
    #if Settings.MICRO_OPTS or Settings.RELOOP or Building.LLVM_OPTS: return self.skip('FIXME')
    src = r'''
#include <stdio.h>
#include "emscripten.h"
int main() {
// EMSCRIPTEN_COMMENT("hello from the source");
emscripten_run_script("Module.print('hello world' + '!')");
printf("*%d*\n", emscripten_run_script_int("5*20"));
return 0;
}
'''
    check = '''
def process(filename):
src = open(filename, 'r').read()
# TODO: restore this (see comment in emscripten.h) assert '// hello from the source' in src
'''
    self.do_run(src, 'hello world!\n*100*', post_build=check)
def test_inlinejs(self):
    """Compile and run a C program embedding inline JavaScript via asm()."""
    src = r'''
#include <stdio.h>
int main() {
asm("Module.print('Inline JS is very cool')");
return 0;
}
'''
    self.do_run(src, 'Inline JS is very cool')
def test_memorygrowth(self):
    """Compile and run a C program that allocates beyond TOTAL_MEMORY,
    verifying pre-growth heap data survives and post-growth allocations work."""
    # With typed arrays in particular, it is dangerous to use more memory than TOTAL_MEMORY,
    # since we then need to enlarge the heap(s).
    src = r'''
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>
#include "emscripten.h"
int main()
{
char *buf1 = (char*)malloc(100);
char *data1 = "hello";
memcpy(buf1, data1, strlen(data1)+1);
float *buf2 = (float*)malloc(100);
float pie = 4.955;
memcpy(buf2, &pie, sizeof(float));
printf("*pre: %s,%.3f*\n", buf1, buf2[0]);
int totalMemory = emscripten_run_script_int("TOTAL_MEMORY");
char *buf3 = (char*)malloc(totalMemory+1);
char *buf4 = (char*)malloc(100);
float *buf5 = (float*)malloc(100);
//printf("totalMemory: %d bufs: %d,%d,%d,%d,%d\n", totalMemory, buf1, buf2, buf3, buf4, buf5);
assert((int)buf4 > (int)totalMemory && (int)buf5 > (int)totalMemory);
printf("*%s,%.3f*\n", buf1, buf2[0]); // the old heap data should still be there
memcpy(buf4, buf1, strlen(data1)+1);
memcpy(buf5, buf2, sizeof(float));
printf("*%s,%.3f*\n", buf4, buf5[0]); // and the new heap space should work too
return 0;
}
'''
    self.do_run(src, '*pre: hello,4.955*\n*hello,4.955*\n*hello,4.955*')
def test_ssr(self): # struct self-ref
    """Compile and run a C program (modeled on openjpeg) with a static array of
    self-referential structs, printing element stride and pointer offsets.
    Expected values differ under QUANTUM_SIZE == 1."""
    src = '''
#include <stdio.h>
// see related things in openjpeg
typedef struct opj_mqc_state {
unsigned int qeval;
int mps;
struct opj_mqc_state *nmps;
struct opj_mqc_state *nlps;
} opj_mqc_state_t;
static opj_mqc_state_t mqc_states[2] = {
{0x5600, 0, &mqc_states[2], &mqc_states[3]},
{0x5602, 1, &mqc_states[3], &mqc_states[2]},
};
int main() {
printf("*%d*\\n", (int)(mqc_states+1)-(int)mqc_states);
for (int i = 0; i < 2; i++)
printf("%d:%d,%d,%d,%d\\n", i, mqc_states[i].qeval, mqc_states[i].mps,
(int)mqc_states[i].nmps-(int)mqc_states, (int)mqc_states[i].nlps-(int)mqc_states);
return 0;
}
'''
    if Settings.QUANTUM_SIZE == 1:
        self.do_run(src, '''*4*\n0:22016,0,8,12\n1:22018,1,12,8\n''')
    else:
        self.do_run(src, '''*16*\n0:22016,0,32,48\n1:22018,1,48,32\n''')
def test_tinyfuncstr(self):
    """Compile and run a C++ program returning string literals from a static
    and an instance method."""
    src = '''
#include <stdio.h>
struct Class {
static char *name1() { return "nameA"; }
char *name2() { return "nameB"; }
};
int main() {
printf("*%s,%s*\\n", Class::name1(), (new Class())->name2());
return 0;
}
'''
    self.do_run(src, '*nameA,nameB*')
def test_llvmswitch(self):
    """Compile and run a C program with a switch whose cases include a large
    unsigned constant, checking signed/unsigned comparison in the generated
    switch (CORRECT_SIGNS enabled)."""
    Settings.CORRECT_SIGNS = 1
    src = '''
#include <stdio.h>
#include <string.h>
int switcher(int p)
{
switch(p) {
case 'a':
case 'b':
case 'c':
return p-1;
case 0xfffffff1:
return p+1;
}
return p;
}
int main( int argc, const char *argv[] ) {
unsigned int x = 0xfffffff1;
x >>= 0; // force it to be unsigned for purpose of checking our switch comparison in signed/unsigned
printf("*%d,%d,%d,%d,%d,%d*\\n", switcher('a'), switcher('b'), switcher('c'), switcher(x), switcher(-15), switcher('e'));
return 0;
}
'''
    self.do_run(src, '*96,97,98,-14,-14,101*')
def test_indirectbr(self):
    """Compile and run a C program using GCC computed gotos (&&label /
    goto *addr), which lower to LLVM indirectbr; the loop obscures the target
    so the optimizer cannot hardcode the jump."""
    src = '''
#include <stdio.h>
int main(void) {
const void *addrs[2] = { &&FOO, &&BAR };
// confuse the optimizer so it doesn't hardcode the jump and avoid generating an |indirectbr| instruction
int which = 0;
for (int x = 0; x < 1000; x++) which = (which + x*x) % 7;
which = (which % 2) + 1;
goto *addrs[which];
FOO:
printf("bad\\n");
return 1;
BAR:
printf("good\\n");
const void *addr = &&FOO;
goto *addr;
}
'''
    self.do_run(src, 'good\nbad')
def test_pack(self):
    """Compile and run a C program comparing a #pragma pack(1) struct against
    its unpacked twin, printing sizes, field offsets, and element strides.
    Expected values differ under QUANTUM_SIZE == 1."""
    src = '''
#include <stdio.h>
#include <string.h>
#pragma pack(push,1)
typedef struct header
{
unsigned char id;
unsigned short colour;
unsigned char desc;
} header;
#pragma pack(pop)
typedef struct fatheader
{
unsigned char id;
unsigned short colour;
unsigned char desc;
} fatheader;
int main( int argc, const char *argv[] ) {
header h, *ph = 0;
fatheader fh, *pfh = 0;
printf("*%d,%d,%d*\\n", sizeof(header), (int)((int)&h.desc - (int)&h.id), (int)(&ph[1])-(int)(&ph[0]));
printf("*%d,%d,%d*\\n", sizeof(fatheader), (int)((int)&fh.desc - (int)&fh.id), (int)(&pfh[1])-(int)(&pfh[0]));
return 0;
}
'''
    if Settings.QUANTUM_SIZE == 1:
        self.do_run(src, '*4,2,3*\n*6,2,3*')
    else:
        self.do_run(src, '*4,3,4*\n*6,4,6*')
def test_varargs(self):
    """Compile and run a C program exercising varargs: vsnprintf, __va_copy,
    zero-vararg calls, a va_arg macro over int and double, and calling a
    varargs function through a function pointer."""
    if Settings.QUANTUM_SIZE == 1: return self.skip('FIXME: Add support for this')
    src = '''
#include <stdio.h>
#include <stdarg.h>
void vary(const char *s, ...)
{
va_list v;
va_start(v, s);
char d[20];
vsnprintf(d, 20, s, v);
puts(d);
// Try it with copying
va_list tempva;
__va_copy(tempva, v);
vsnprintf(d, 20, s, tempva);
puts(d);
va_end(v);
}
void vary2(char color, const char *s, ...)
{
va_list v;
va_start(v, s);
char d[21];
d[0] = color;
vsnprintf(d+1, 20, s, v);
puts(d);
va_end(v);
}
#define GETMAX(pref, type) \
type getMax##pref(int num, ...) \
{ \
va_list vv; \
va_start(vv, num); \
type maxx = va_arg(vv, type); \
for (int i = 1; i < num; i++) \
{ \
type curr = va_arg(vv, type); \
maxx = curr > maxx ? curr : maxx; \
} \
va_end(vv); \
return maxx; \
}
GETMAX(i, int);
GETMAX(D, double);
int main() {
vary("*cheez: %d+%d*", 0, 24); // Also tests that '0' is not special as an array ender
vary("*albeit*"); // Should not fail with no var args in vararg function
vary2('Q', "%d*", 85);
int maxxi = getMaxi(6, 2, 5, 21, 4, -10, 19);
printf("maxxi:%d*\\n", maxxi);
double maxxD = getMaxD(6, (double)2.1, (double)5.1, (double)22.1, (double)4.1, (double)-10.1, (double)19.1);
printf("maxxD:%.2f*\\n", (float)maxxD);
// And, as a function pointer
void (*vfp)(const char *s, ...) = vary;
vfp("*vfp:%d,%d*", 22, 199);
return 0;
}
'''
    self.do_run(src, '*cheez: 0+24*\n*cheez: 0+24*\n*albeit*\n*albeit*\nQ85*\nmaxxi:21*\nmaxxD:22.10*\n*vfp:22,199*\n*vfp:22,199*\n')
def test_structbyval(self):
    """Part 1: compile and run a C++ program passing structs by value
    (including through a function pointer) and by pointer, checking callee
    mutations do/don't leak back, and that the generated JS has no
    pointer-cast warning. Part 2 (unoptimized modes only): compile a C
    translation unit and a C++ main that pass a struct by value across the
    C/C++ boundary, and check the expected function-pointer-cast warning is
    emitted on stderr."""
    # part 1: make sure that normally, passing structs by value works
    src = r'''
#include <stdio.h>
struct point
{
int x, y;
};
void dump(struct point p) {
p.x++; // should not modify
p.y++; // anything in the caller!
printf("dump: %d,%d\n", p.x, p.y);
}
void dumpmod(struct point *p) {
p->x++; // should not modify
p->y++; // anything in the caller!
printf("dump: %d,%d\n", p->x, p->y);
}
int main( int argc, const char *argv[] ) {
point p = { 54, 2 };
printf("pre: %d,%d\n", p.x, p.y);
dump(p);
void (*dp)(point p) = dump; // And, as a function pointer
dp(p);
printf("post: %d,%d\n", p.x, p.y);
dumpmod(&p);
dumpmod(&p);
printf("last: %d,%d\n", p.x, p.y);
return 0;
}
'''
    self.do_run(src, 'pre: 54,2\ndump: 55,3\ndump: 55,3\npost: 54,2\ndump: 55,3\ndump: 56,4\nlast: 56,4')
    # Check for lack of warning in the generated code (they should appear in part 2).
    # with-blocks throughout this test close file handles deterministically
    # instead of leaking them.
    with open(os.path.join(self.get_dir(), 'src.cpp.o.js')) as f:
        generated = f.read()
    assert 'Casting a function pointer type to another with a different number of arguments.' not in generated, 'Unexpected warning'

    # part 2: make sure we warn about mixing c and c++ calling conventions here
    if not (self.emcc_args is None or self.emcc_args == []): return # Optimized code is missing the warning comments
    header = r'''
struct point
{
int x, y;
};
'''
    with open(os.path.join(self.get_dir(), 'header.h'), 'w') as f:
        f.write(header)
    supp = r'''
#include <stdio.h>
#include "header.h"
void dump(struct point p) {
p.x++; // should not modify
p.y++; // anything in the caller!
printf("dump: %d,%d\n", p.x, p.y);
}
'''
    supp_name = os.path.join(self.get_dir(), 'supp.c')
    with open(supp_name, 'w') as f:
        f.write(supp)
    main = r'''
#include <stdio.h>
#include "header.h"
#ifdef __cplusplus
extern "C" {
#endif
void dump(struct point p);
#ifdef __cplusplus
}
#endif
int main( int argc, const char *argv[] ) {
struct point p = { 54, 2 };
printf("pre: %d,%d\n", p.x, p.y);
dump(p);
void (*dp)(struct point p) = dump; // And, as a function pointer
dp(p);
printf("post: %d,%d\n", p.x, p.y);
return 0;
}
'''
    main_name = os.path.join(self.get_dir(), 'main.cpp')
    with open(main_name, 'w') as f:
        f.write(main)
    Building.emcc(supp_name)
    Building.emcc(main_name)
    all_name = os.path.join(self.get_dir(), 'all.bc')
    Building.link([supp_name + '.o', main_name + '.o'], all_name)
    # This will fail! See explanation near the warning we check for, in the compiler source code
    output = Popen(['python', EMCC, all_name], stderr=PIPE).communicate()
    # Check for warning in the generated code
    # NOTE(review): `generated` is never used below — the warning is asserted
    # on stderr (output[1]); the read is kept only to preserve the original
    # side effect of failing if the file is missing.
    with open(os.path.join(self.get_dir(), 'src.cpp.o.js')) as f:
        generated = f.read()
    assert 'Casting a function pointer type to another with a different number of arguments' in output[1], 'Missing expected warning'
def test_stdlibs(self):
    """Compile and run C/C++ programs exercising assorted libc features:
    gettimeofday, atexit, qsort, printf format edge cases, strtod/strtol/
    strtoul, malloc(0), and <stdbool.h> in C mode."""
    if Settings.USE_TYPED_ARRAYS == 2:
        # Typed arrays = 2 + safe heap prints a warning that messes up our output.
        Settings.SAFE_HEAP = 0
    src = '''
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
void clean()
{
printf("*cleaned*\\n");
}
int comparer(const void *a, const void *b) {
int aa = *((int*)a);
int bb = *((int*)b);
return aa - bb;
}
int main() {
// timeofday
timeval t;
gettimeofday(&t, NULL);
printf("*%d,%d\\n", int(t.tv_sec), int(t.tv_usec)); // should not crash
// atexit
atexit(clean);
// qsort
int values[6] = { 3, 2, 5, 1, 5, 6 };
qsort(values, 5, sizeof(int), comparer);
printf("*%d,%d,%d,%d,%d,%d*\\n", values[0], values[1], values[2], values[3], values[4], values[5]);
printf("*stdin==0:%d*\\n", stdin == 0); // check that external values are at least not NULL
printf("*%%*\\n");
printf("*%.1ld*\\n", 5);
printf("*%.1f*\\n", strtod("66", NULL)); // checks dependency system, as our strtod needs _isspace etc.
printf("*%ld*\\n", strtol("10", NULL, 0));
printf("*%ld*\\n", strtol("0", NULL, 0));
printf("*%ld*\\n", strtol("-10", NULL, 0));
printf("*%ld*\\n", strtol("12", NULL, 16));
printf("*%lu*\\n", strtoul("10", NULL, 0));
printf("*%lu*\\n", strtoul("0", NULL, 0));
printf("*%lu*\\n", strtoul("-10", NULL, 0));
printf("*malloc(0)!=0:%d*\\n", malloc(0) != 0); // We should not fail horribly
return 0;
}
'''
    self.do_run(src, '*1,2,3,5,5,6*\n*stdin==0:0*\n*%*\n*5*\n*66.0*\n*10*\n*0*\n*-10*\n*18*\n*10*\n*0*\n*4294967286*\n*malloc(0)!=0:1*\n*cleaned*')
    src = r'''
#include <stdio.h>
#include <stdbool.h>
int main() {
bool x = true;
bool y = false;
printf("*%d*\n", x != y);
return 0;
}
'''
    self.do_run(src, '*1*', force_c=True)
def test_atexit(self):
    """Verify that atexit() handlers run in reverse registration order."""
    # Confirms they are called in reverse order
    src = r'''
#include <stdio.h>
#include <stdlib.h>
static void cleanA() {
printf("A");
}
static void cleanB() {
printf("B");
}
int main() {
atexit(cleanA);
atexit(cleanB);
return 0;
}
'''
    self.do_run(src, 'BA')
def test_time(self):
    """Run the libc <time.h> test from tests/time against its recorded output."""
    # XXX Not sure what the right output is here. Looks like the test started failing with daylight savings changes. Modified it to pass again.
    # Use context managers so the file handles are closed deterministically.
    with open(path_from_root('tests', 'time', 'src.c'), 'r') as f:
        src = f.read()
    with open(path_from_root('tests', 'time', 'output.txt'), 'r') as f:
        expected = f.read()
    self.do_run(src, expected,
                extra_emscripten_args=['-H', 'libc/time.h'])
    #extra_emscripten_args=['-H', 'libc/fcntl.h,libc/sys/unistd.h,poll.h,libc/math.h,libc/langinfo.h,libc/time.h'])
def test_statics(self):
    """Check function-local static variables, including C++ static objects
    with constructors (and a static returned by reference)."""
    # static initializers save i16 but load i8 for some reason
    if Settings.SAFE_HEAP:
        # Restrict safe-heap checking to the known-noisy lines only.
        Settings.SAFE_HEAP = 3
        Settings.SAFE_HEAP_LINES = ['src.cpp:19', 'src.cpp:26']
    src = '''
#include <stdio.h>
#include <string.h>
#define CONSTRLEN 32
void conoutfv(const char *fmt)
{
static char buf[CONSTRLEN];
strcpy(buf, fmt);
puts(buf);
}
struct XYZ {
float x, y, z;
XYZ(float a, float b, float c) : x(a), y(b), z(c) { }
static const XYZ& getIdentity()
{
static XYZ iT(1,2,3);
return iT;
}
};
struct S {
static const XYZ& getIdentity()
{
static const XYZ iT(XYZ::getIdentity());
return iT;
}
};
int main() {
conoutfv("*staticccz*");
printf("*%.2f,%.2f,%.2f*\\n", S::getIdentity().x, S::getIdentity().y, S::getIdentity().z);
return 0;
}
'''
    self.do_run(src, '*staticccz*\n*1.00,2.00,3.00*')
def test_copyop(self):
    """Check struct copy assignment plus memset/memcpy/memmove on
    stack variables, which stress hardcoded-byte-count copies."""
    # clang generated code is vulnerable to this, as it uses
    # memcpy for assignments, with hardcoded numbers of bytes
    # (llvm-gcc copies items one by one). See QUANTUM_SIZE in
    # settings.js.
    src = '''
#include <stdio.h>
#include <math.h>
#include <string.h>
struct vec {
double x,y,z;
vec() : x(0), y(0), z(0) { };
vec(const double a, const double b, const double c) : x(a), y(b), z(c) { };
};
struct basis {
vec a, b, c;
basis(const vec& v) {
a=v; // should not touch b!
printf("*%.2f,%.2f,%.2f*\\n", b.x, b.y, b.z);
}
};
int main() {
basis B(vec(1,0,0));
// Part 2: similar problem with memset and memmove
int x = 1, y = 77, z = 2;
memset((void*)&x, 0, sizeof(int));
memset((void*)&z, 0, sizeof(int));
printf("*%d,%d,%d*\\n", x, y, z);
memcpy((void*)&x, (void*)&z, sizeof(int));
memcpy((void*)&z, (void*)&x, sizeof(int));
printf("*%d,%d,%d*\\n", x, y, z);
memmove((void*)&x, (void*)&z, sizeof(int));
memmove((void*)&z, (void*)&x, sizeof(int));
printf("*%d,%d,%d*\\n", x, y, z);
return 0;
}
'''
    self.do_run(src, '*0.00,0.00,0.00*\n*0,77,0*\n*0,77,0*\n*0,77,0*')
def test_memcpy(self):
    """Exhaustively memcpy overlapping/non-overlapping ranges of a buffer;
    the (large) output is compared via a SHA-1 digest rather than verbatim."""
    src = '''
#include <stdio.h>
#include <string.h>
#define MAXX 48
void reset(unsigned char *buffer) {
for (int i = 0; i < MAXX; i++) buffer[i] = i+1;
}
void dump(unsigned char *buffer) {
for (int i = 0; i < MAXX-1; i++) printf("%2d,", buffer[i]);
printf("%d\\n", buffer[MAXX-1]);
}
int main() {
unsigned char buffer[MAXX];
for (int i = MAXX/4; i < MAXX-MAXX/4; i++) {
for (int j = MAXX/4; j < MAXX-MAXX/4; j++) {
for (int k = 1; k < MAXX/4; k++) {
if (i == j) continue;
if (i < j && i+k > j) continue;
if (j < i && j+k > i) continue;
printf("[%d,%d,%d] ", i, j, k);
reset(buffer);
memcpy(buffer+i, buffer+j, k);
dump(buffer);
}
}
}
return 0;
}
'''
    # Hash the output so the expected value stays small.
    def check(result):
        return hashlib.sha1(result).hexdigest()
    self.do_run(src, '6c9cdfe937383b79e52ca7a2cce83a21d9f5422c',
                output_nicerizer = check)
def test_memmove(self):
    """Check memmove with overlapping source and destination ranges."""
    src = '''
#include <stdio.h>
#include <string.h>
int main() {
char str[] = "memmove can be very useful....!";
memmove (str+20, str+15, 11);
puts(str);
return 0;
}
'''
    self.do_run(src, 'memmove can be very very useful')
def test_bsearch(self):
    """Check bsearch() for present keys, absent in-range keys, and
    out-of-range keys."""
    if Settings.QUANTUM_SIZE == 1: return self.skip('Test cannot work with q1')
    src = '''
#include <stdlib.h>
#include <stdio.h>
int cmp(const void* key, const void* member) {
return *(int *)key - *(int *)member;
}
void printResult(int* needle, int* haystack, unsigned int len) {
void *result = bsearch(needle, haystack, len, sizeof(unsigned int), cmp);
if (result == NULL) {
printf("null\\n");
} else {
printf("%d\\n", *(unsigned int *)result);
}
}
int main() {
int a[] = { -2, -1, 0, 6, 7, 9 };
int b[] = { 0, 1 };
/* Find all keys that exist. */
for(int i = 0; i < 6; i++) {
int val = a[i];
printResult(&val, a, 6);
}
/* Keys that are covered by the range of the array but aren't in
* the array cannot be found.
*/
int v1 = 3;
int v2 = 8;
printResult(&v1, a, 6);
printResult(&v2, a, 6);
/* Keys outside the range of the array cannot be found. */
int v3 = -1;
int v4 = 2;
printResult(&v3, b, 2);
printResult(&v4, b, 2);
return 0;
}
'''
    self.do_run(src, '-2\n-1\n0\n6\n7\n9\nnull\nnull\nnull\nnull')
def test_nestedstructs(self):
    """Check struct layout: nested structs/unions, char-array padding, and
    bitfields. Expected offsets differ by QUANTUM_SIZE (compressed vs
    native memory layout)."""
    src = '''
#include <stdio.h>
#include "emscripten.h"
struct base {
int x;
float y;
union {
int a;
float b;
};
char c;
};
struct hashtableentry {
int key;
base data;
};
struct hashset {
typedef hashtableentry entry;
struct chain { entry elem; chain *next; };
// struct chainchunk { chain chains[100]; chainchunk *next; };
};
struct hashtable : hashset {
hashtable() {
base *b = NULL;
entry *e = NULL;
chain *c = NULL;
printf("*%d,%d,%d,%d,%d,%d|%d,%d,%d,%d,%d,%d,%d,%d|%d,%d,%d,%d,%d,%d,%d,%d,%d,%d*\\n",
sizeof(base),
int(&(b->x)), int(&(b->y)), int(&(b->a)), int(&(b->b)), int(&(b->c)),
sizeof(hashtableentry),
int(&(e->key)), int(&(e->data)), int(&(e->data.x)), int(&(e->data.y)), int(&(e->data.a)), int(&(e->data.b)), int(&(e->data.c)),
sizeof(hashset::chain),
int(&(c->elem)), int(&(c->next)), int(&(c->elem.key)), int(&(c->elem.data)), int(&(c->elem.data.x)), int(&(c->elem.data.y)), int(&(c->elem.data.a)), int(&(c->elem.data.b)), int(&(c->elem.data.c))
);
}
};
struct B { char buffer[62]; int last; char laster; char laster2; };
struct Bits {
unsigned short A : 1;
unsigned short B : 1;
unsigned short C : 1;
unsigned short D : 1;
unsigned short x1 : 1;
unsigned short x2 : 1;
unsigned short x3 : 1;
unsigned short x4 : 1;
};
int main() {
hashtable t;
// Part 2 - the char[] should be compressed, BUT have a padding space at the end so the next
// one is aligned properly. Also handle char; char; etc. properly.
B *b = NULL;
printf("*%d,%d,%d,%d,%d,%d,%d,%d,%d*\\n", int(b), int(&(b->buffer)), int(&(b->buffer[0])), int(&(b->buffer[1])), int(&(b->buffer[2])),
int(&(b->last)), int(&(b->laster)), int(&(b->laster2)), sizeof(B));
// Part 3 - bitfields, and small structures
Bits *b2 = NULL;
printf("*%d*\\n", sizeof(Bits));
return 0;
}
'''
    if Settings.QUANTUM_SIZE == 1:
        # Compressed memory. Note that sizeof() does give the fat sizes, however!
        self.do_run(src, '*16,0,1,2,2,3|20,0,1,1,2,3,3,4|24,0,5,0,1,1,2,3,3,4*\n*0,0,0,1,2,62,63,64,72*\n*2*')
    else:
        # Bloated memory; same layout as C/C++
        self.do_run(src, '*16,0,4,8,8,12|20,0,4,4,8,12,12,16|24,0,20,0,4,4,8,12,12,16*\n*0,0,0,1,2,64,68,69,72*\n*2*')
def test_runtimelink(self):
    """Build a 'shared library' JS module and a main module that link at
    runtime, with symbols (functions and globals) referenced both ways."""
    if Building.LLVM_OPTS: return self.skip('LLVM opts will optimize printf into puts in the parent, and the child will still look for puts')
    Settings.LINKABLE = 1
    self.banned_js_engines = [NODE_JS] # node's global scope behaves differently than everything else, needs investigation FIXME
    header = r'''
struct point
{
int x, y;
};
'''
    # Use a context manager so the handle is closed before the compiler reads the file.
    with open(os.path.join(self.get_dir(), 'header.h'), 'w') as f:
        f.write(header)
    supp = r'''
#include <stdio.h>
#include "header.h"
extern void mainFunc(int x);
extern int mainInt;
void suppFunc(struct point &p) {
printf("supp: %d,%d\n", p.x, p.y);
mainFunc(p.x+p.y);
printf("supp see: %d\n", mainInt);
}
int suppInt = 76;
'''
    supp_name = os.path.join(self.get_dir(), 'supp.c')
    with open(supp_name, 'w') as f:
        f.write(supp)
    main = r'''
#include <stdio.h>
#include "header.h"
extern void suppFunc(struct point &p);
extern int suppInt;
void mainFunc(int x) {
printf("main: %d\n", x);
}
int mainInt = 543;
int main( int argc, const char *argv[] ) {
struct point p = { 54, 2 };
suppFunc(p);
printf("main see: %d\nok.\n", suppInt);
return 0;
}
'''
    # Build the library as a shared lib, then the main program against it.
    Settings.BUILD_AS_SHARED_LIB = 2
    dirname = self.get_dir()
    self.build(supp, dirname, supp_name)
    shutil.move(supp_name + '.o.js', os.path.join(dirname, 'liblib.so'))
    Settings.BUILD_AS_SHARED_LIB = 0
    Settings.RUNTIME_LINKED_LIBS = ['liblib.so']
    self.do_run(main, 'supp: 54,2\nmain: 56\nsupp see: 543\nmain see: 76\nok.')
def test_dlfcn_basic(self):
    """Check dlopen() of a shared library runs the library's global
    constructors (after the main module's own)."""
    Settings.LINKABLE = 1
    lib_src = '''
#include <cstdio>
class Foo {
public:
Foo() {
printf("Constructing lib object.\\n");
}
};
Foo global;
'''
    dirname = self.get_dir()
    filename = os.path.join(dirname, 'liblib.cpp')
    Settings.BUILD_AS_SHARED_LIB = 1
    self.build(lib_src, dirname, filename)
    shutil.move(filename + '.o.js', os.path.join(dirname, 'liblib.so'))
    src = '''
#include <cstdio>
#include <dlfcn.h>
class Bar {
public:
Bar() {
printf("Constructing main object.\\n");
}
};
Bar global;
int main() {
dlopen("liblib.so", RTLD_NOW);
return 0;
}
'''
    Settings.BUILD_AS_SHARED_LIB = 0
    # Make the generated JS pre-load liblib.so into the virtual FS.
    add_pre_run_and_checks = '''
def process(filename):
src = open(filename, 'r').read().replace(
'// {{PRE_RUN_ADDITIONS}}',
"FS.createLazyFile('/', 'liblib.so', 'liblib.so', true, false);"
)
open(filename, 'w').write(src)
'''
    self.do_run(src, 'Constructing main object.\nConstructing lib object.\n',
                post_build=add_pre_run_and_checks)
def test_dlfcn_qsort(self):
    """Check calling a function pointer obtained via dlsym() from a
    dlopen()'d library (sorting with main's and the lib's comparators)."""
    Settings.LINKABLE = 1
    if Settings.USE_TYPED_ARRAYS == 2:
        Settings.CORRECT_SIGNS = 1 # Needed for unsafe optimizations
    lib_src = '''
int lib_cmp(const void* left, const void* right) {
const int* a = (const int*) left;
const int* b = (const int*) right;
if(*a > *b) return 1;
else if(*a == *b) return 0;
else return -1;
}
typedef int (*CMP_TYPE)(const void*, const void*);
extern "C" CMP_TYPE get_cmp() {
return lib_cmp;
}
'''
    dirname = self.get_dir()
    filename = os.path.join(dirname, 'liblib.cpp')
    Settings.BUILD_AS_SHARED_LIB = 1
    Settings.EXPORTED_FUNCTIONS = ['_get_cmp']
    self.build(lib_src, dirname, filename)
    shutil.move(filename + '.o.js', os.path.join(dirname, 'liblib.so'))
    src = '''
#include <stdio.h>
#include <stdlib.h>
#include <dlfcn.h>
typedef int (*CMP_TYPE)(const void*, const void*);
int main_cmp(const void* left, const void* right) {
const int* a = (const int*) left;
const int* b = (const int*) right;
if(*a < *b) return 1;
else if(*a == *b) return 0;
else return -1;
}
int main() {
void* lib_handle;
CMP_TYPE (*getter_ptr)();
CMP_TYPE lib_cmp_ptr;
int arr[5] = {4, 2, 5, 1, 3};
lib_handle = dlopen("liblib.so", RTLD_NOW);
if (lib_handle == NULL) {
printf("Could not load lib.\\n");
return 1;
}
getter_ptr = (CMP_TYPE (*)()) dlsym(lib_handle, "get_cmp");
if (getter_ptr == NULL) {
printf("Could not find func.\\n");
return 1;
}
lib_cmp_ptr = getter_ptr();
qsort((void*)arr, 5, sizeof(int), main_cmp);
printf("Sort with main comparison: ");
for (int i = 0; i < 5; i++) {
printf("%d ", arr[i]);
}
printf("\\n");
qsort((void*)arr, 5, sizeof(int), lib_cmp_ptr);
printf("Sort with lib comparison: ");
for (int i = 0; i < 5; i++) {
printf("%d ", arr[i]);
}
printf("\\n");
return 0;
}
'''
    Settings.BUILD_AS_SHARED_LIB = 0
    Settings.EXPORTED_FUNCTIONS = ['_main']
    # Make the generated JS pre-load liblib.so into the virtual FS.
    add_pre_run_and_checks = '''
def process(filename):
src = open(filename, 'r').read().replace(
'// {{PRE_RUN_ADDITIONS}}',
"FS.createLazyFile('/', 'liblib.so', 'liblib.so', true, false);"
)
open(filename, 'w').write(src)
'''
    self.do_run(src, 'Sort with main comparison: 5 4 3 2 1 *Sort with lib comparison: 1 2 3 4 5 *',
                output_nicerizer=lambda x: x.replace('\n', '*'),
                post_build=add_pre_run_and_checks)
def test_dlfcn_data_and_fptr(self):
    """Check dlsym() on both data ('global') and function symbols, and
    passing function pointers across the main/lib module boundary."""
    if Building.LLVM_OPTS: return self.skip('LLVM opts will optimize out parent_func')
    Settings.LINKABLE = 1
    lib_src = '''
#include <stdio.h>
int global = 42;
extern void parent_func(); // a function that is defined in the parent
void lib_fptr() {
printf("Second calling lib_fptr from main.\\n");
parent_func();
// call it also through a pointer, to check indexizing
void (*p_f)();
p_f = parent_func;
p_f();
}
extern "C" void (*func(int x, void(*fptr)()))() {
printf("In func: %d\\n", x);
fptr();
return lib_fptr;
}
'''
    dirname = self.get_dir()
    filename = os.path.join(dirname, 'liblib.cpp')
    Settings.BUILD_AS_SHARED_LIB = 1
    Settings.EXPORTED_FUNCTIONS = ['_func']
    Settings.EXPORTED_GLOBALS = ['_global']
    self.build(lib_src, dirname, filename)
    shutil.move(filename + '.o.js', os.path.join(dirname, 'liblib.so'))
    src = '''
#include <stdio.h>
#include <dlfcn.h>
typedef void (*FUNCTYPE(int, void(*)()))();
FUNCTYPE func;
void parent_func() {
printf("parent_func called from child\\n");
}
void main_fptr() {
printf("First calling main_fptr from lib.\\n");
}
int main() {
void* lib_handle;
FUNCTYPE* func_fptr;
// Test basic lib loading.
lib_handle = dlopen("liblib.so", RTLD_NOW);
if (lib_handle == NULL) {
printf("Could not load lib.\\n");
return 1;
}
// Test looked up function.
func_fptr = (FUNCTYPE*) dlsym(lib_handle, "func");
// Load twice to test cache.
func_fptr = (FUNCTYPE*) dlsym(lib_handle, "func");
if (func_fptr == NULL) {
printf("Could not find func.\\n");
return 1;
}
// Test passing function pointers across module bounds.
void (*fptr)() = func_fptr(13, main_fptr);
fptr();
// Test global data.
int* global = (int*) dlsym(lib_handle, "global");
if (global == NULL) {
printf("Could not find global.\\n");
return 1;
}
printf("Var: %d\\n", *global);
return 0;
}
'''
    Settings.BUILD_AS_SHARED_LIB = 0
    Settings.EXPORTED_FUNCTIONS = ['_main']
    Settings.EXPORTED_GLOBALS = []
    # Make the generated JS pre-load liblib.so into the virtual FS.
    add_pre_run_and_checks = '''
def process(filename):
src = open(filename, 'r').read().replace(
'// {{PRE_RUN_ADDITIONS}}',
"FS.createLazyFile('/', 'liblib.so', 'liblib.so', true, false);"
)
open(filename, 'w').write(src)
'''
    self.do_run(src, 'In func: 13*First calling main_fptr from lib.*Second calling lib_fptr from main.*parent_func called from child*parent_func called from child*Var: 42*',
                output_nicerizer=lambda x: x.replace('\n', '*'),
                post_build=add_pre_run_and_checks)
def test_dlfcn_alias(self):
    """Check that a dlopen()'d library sees updates to a global variable
    defined in the parent module."""
    Settings.LINKABLE = 1
    if Building.LLVM_OPTS == 2: return self.skip('LLVM LTO will optimize away stuff we expect from the shared library')
    lib_src = r'''
#include <stdio.h>
extern int parent_global;
extern "C" void func() {
printf("Parent global: %d.\n", parent_global);
}
'''
    dirname = self.get_dir()
    filename = os.path.join(dirname, 'liblib.cpp')
    Settings.BUILD_AS_SHARED_LIB = 1
    Settings.EXPORTED_FUNCTIONS = ['_func']
    self.build(lib_src, dirname, filename)
    shutil.move(filename + '.o.js', os.path.join(dirname, 'liblib.so'))
    src = r'''
#include <dlfcn.h>
int parent_global = 123;
int main() {
void* lib_handle;
void (*fptr)();
lib_handle = dlopen("liblib.so", RTLD_NOW);
fptr = (void (*)())dlsym(lib_handle, "func");
fptr();
parent_global = 456;
fptr();
return 0;
}
'''
    Settings.BUILD_AS_SHARED_LIB = 0
    Settings.INCLUDE_FULL_LIBRARY = 1
    Settings.EXPORTED_FUNCTIONS = ['_main']
    # Make the generated JS pre-load liblib.so into the virtual FS.
    add_pre_run_and_checks = '''
def process(filename):
src = open(filename, 'r').read().replace(
'// {{PRE_RUN_ADDITIONS}}',
"FS.createLazyFile('/', 'liblib.so', 'liblib.so', true, false);"
)
open(filename, 'w').write(src)
'''
    self.do_run(src, 'Parent global: 123.*Parent global: 456.*',
                output_nicerizer=lambda x: x.replace('\n', '*'),
                post_build=add_pre_run_and_checks,
                extra_emscripten_args=['-H', 'libc/fcntl.h,libc/sys/unistd.h,poll.h,libc/math.h,libc/time.h,libc/langinfo.h'])
    # Restore the setting so later tests are unaffected.
    Settings.INCLUDE_FULL_LIBRARY = 0
def test_dlfcn_varargs(self):
    """Check that a dlopen()'d library can call a varargs function defined
    in the parent module."""
    Settings.LINKABLE = 1
    if Building.LLVM_OPTS == 2: return self.skip('LLVM LTO will optimize things that prevent shared objects from working')
    if Settings.QUANTUM_SIZE == 1: return self.skip('FIXME: Add support for this')
    lib_src = r'''
void print_ints(int n, ...);
extern "C" void func() {
print_ints(2, 13, 42);
}
'''
    dirname = self.get_dir()
    filename = os.path.join(dirname, 'liblib.cpp')
    Settings.BUILD_AS_SHARED_LIB = 1
    Settings.EXPORTED_FUNCTIONS = ['_func']
    self.build(lib_src, dirname, filename)
    shutil.move(filename + '.o.js', os.path.join(dirname, 'liblib.so'))
    src = r'''
#include <stdarg.h>
#include <stdio.h>
#include <dlfcn.h>
void print_ints(int n, ...) {
va_list args;
va_start(args, n);
for (int i = 0; i < n; i++) {
printf("%d\n", va_arg(args, int));
}
va_end(args);
}
int main() {
void* lib_handle;
void (*fptr)();
print_ints(2, 100, 200);
lib_handle = dlopen("liblib.so", RTLD_NOW);
fptr = (void (*)())dlsym(lib_handle, "func");
fptr();
return 0;
}
'''
    Settings.BUILD_AS_SHARED_LIB = 0
    Settings.EXPORTED_FUNCTIONS = ['_main']
    # Make the generated JS pre-load liblib.so into the virtual FS.
    add_pre_run_and_checks = '''
def process(filename):
src = open(filename, 'r').read().replace(
'// {{PRE_RUN_ADDITIONS}}',
"FS.createLazyFile('/', 'liblib.so', 'liblib.so', true, false);"
)
open(filename, 'w').write(src)
'''
    self.do_run(src, '100\n200\n13\n42\n',
                post_build=add_pre_run_and_checks)
def test_rand(self):
    """Check rand()/srand() determinism and the reentrant rand_r()."""
    src = r'''
#include <stdio.h>
#include <stdlib.h>
int main() {
printf("%d\n", rand());
printf("%d\n", rand());
srand(123);
printf("%d\n", rand());
printf("%d\n", rand());
srand(123);
printf("%d\n", rand());
printf("%d\n", rand());
unsigned state = 0;
int r;
r = rand_r(&state);
printf("%d, %u\n", r, state);
r = rand_r(&state);
printf("%d, %u\n", r, state);
state = 0;
r = rand_r(&state);
printf("%d, %u\n", r, state);
return 0;
}
'''
    expected = '''
1250496027
1116302336
440917656
1476150784
440917656
1476150784
12345, 12345
1406932606, 3554416254
12345, 12345
'''
    # Strip the indentation/blank leading line from the expected text.
    self.do_run(src, re.sub(r'(^|\n)\s+', r'\1', expected))
def test_strtod(self):
    """Check strtod() (and strtold() as an alias) over integer, fractional,
    exponent, underflow and overflow inputs, plus endptr positioning."""
    src = r'''
#include <stdio.h>
#include <stdlib.h>
int main() {
char* endptr;
printf("\n");
printf("%g\n", strtod("0", &endptr));
printf("%g\n", strtod("0.", &endptr));
printf("%g\n", strtod("0.0", &endptr));
printf("%g\n", strtod("-0.0", &endptr));
printf("%g\n", strtod("1", &endptr));
printf("%g\n", strtod("1.", &endptr));
printf("%g\n", strtod("1.0", &endptr));
printf("%g\n", strtod("123", &endptr));
printf("%g\n", strtod("123.456", &endptr));
printf("%g\n", strtod("-123.456", &endptr));
printf("%g\n", strtod("1234567891234567890", &endptr));
printf("%g\n", strtod("1234567891234567890e+50", &endptr));
printf("%g\n", strtod("84e+220", &endptr));
printf("%g\n", strtod("123e-50", &endptr));
printf("%g\n", strtod("123e-250", &endptr));
printf("%g\n", strtod("123e-450", &endptr));
char str[] = " 12.34e56end";
printf("%g\n", strtod(str, &endptr));
printf("%d\n", endptr - str);
printf("%g\n", strtod("84e+420", &endptr));
return 0;
}
'''
    expected = '''
0
0
0
0
1
1
1
123
123.456
-123.456
1.23457e+18
1.23457e+68
8.4e+221
1.23e-48
1.23e-248
0
1.234e+57
10
inf
'''
    self.do_run(src, re.sub(r'\n\s+', '\n', expected))
    self.do_run(src.replace('strtod', 'strtold'), re.sub(r'\n\s+', '\n', expected)) # XXX add real support for long double
def test_strtok(self):
    """Check the reentrant strtok_r() with two interleaved tokenizations."""
    src = r'''
#include<stdio.h>
#include<string.h>
int main() {
char test[80], blah[80];
char *sep = "\\/:;=-";
char *word, *phrase, *brkt, *brkb;
strcpy(test, "This;is.a:test:of=the/string\\tokenizer-function.");
for (word = strtok_r(test, sep, &brkt); word; word = strtok_r(NULL, sep, &brkt)) {
strcpy(blah, "blah:blat:blab:blag");
for (phrase = strtok_r(blah, sep, &brkb); phrase; phrase = strtok_r(NULL, sep, &brkb)) {
printf("at %s:%s\n", word, phrase);
}
}
return 1;
}
'''
    expected = '''at This:blah
at This:blat
at This:blab
at This:blag
at is.a:blah
at is.a:blat
at is.a:blab
at is.a:blag
at test:blah
at test:blat
at test:blab
at test:blag
at of:blah
at of:blat
at of:blab
at of:blag
at the:blah
at the:blat
at the:blab
at the:blag
at string:blah
at string:blat
at string:blab
at string:blag
at tokenizer:blah
at tokenizer:blat
at tokenizer:blab
at tokenizer:blag
at function.:blah
at function.:blat
at function.:blab
at function.:blag
'''
    self.do_run(src, expected)
def test_parseInt(self):
    """Run the integer-parsing test from tests/parseInt against its
    recorded output (requires typed arrays mode 2)."""
    if Settings.USE_TYPED_ARRAYS != 2: return self.skip('i64 mode 1 requires ta2')
    if Settings.QUANTUM_SIZE == 1: return self.skip('Q1 and I64_1 do not mix well yet')
    # Use context managers so the file handles are closed deterministically.
    with open(path_from_root('tests', 'parseInt', 'src.c'), 'r') as f:
        src = f.read()
    with open(path_from_root('tests', 'parseInt', 'output.txt'), 'r') as f:
        expected = f.read()
    self.do_run(src, expected)
def test_printf(self):
    """Run the printf test from tests/printf; either of two recorded
    outputs is accepted (requires typed arrays mode 2)."""
    if Settings.USE_TYPED_ARRAYS != 2: return self.skip('i64 mode 1 requires ta2')
    self.banned_js_engines = [NODE_JS, V8_ENGINE] # SpiderMonkey and V8 do different things to float64 typed arrays, un-NaNing, etc.
    # Use context managers so the file handles are closed deterministically.
    with open(path_from_root('tests', 'printf', 'test.c'), 'r') as f:
        src = f.read()
    with open(path_from_root('tests', 'printf', 'output.txt'), 'r') as f:
        expected_plain = f.read()
    with open(path_from_root('tests', 'printf', 'output_i64_1.txt'), 'r') as f:
        expected_i64 = f.read()
    expected = [expected_plain, expected_i64]
    self.do_run(src, expected)
def test_printf_types(self):
    """Check printf format specifiers for char, short, int, long long,
    float and double in a single call."""
    src = r'''
#include <stdio.h>
int main() {
char c = '1';
short s = 2;
int i = 3;
long long l = 4;
float f = 5.5;
double d = 6.6;
printf("%c,%hd,%d,%lld,%.1f,%.1llf\n", c, s, i, l, f, d);
return 0;
}
'''
    self.do_run(src, '1,2,3,4,5.5,6.6\n')
def test_vprintf(self):
    """Check vprintf() via a varargs wrapper function."""
    src = r'''
#include <stdio.h>
#include <stdarg.h>
void print(char* format, ...) {
va_list args;
va_start (args, format);
vprintf (format, args);
va_end (args);
}
int main () {
print("Call with %d variable argument.\n", 1);
print("Call with %d variable %s.\n", 2, "arguments");
return 0;
}
'''
    expected = '''
Call with 1 variable argument.
Call with 2 variable arguments.
'''
    # Raw strings for the regex: '\s' in a plain literal is an invalid
    # escape sequence in Python 3. Strips indentation from the expected text.
    self.do_run(src, re.sub(r'(^|\n)\s+', r'\1', expected))
def test_atoi(self):
    """Check atoi() on empty, non-numeric, padded and mixed inputs."""
    src = r'''
#include <stdio.h>
#include <stdlib.h>
int main () {
printf("%d*", atoi(""));
printf("%d*", atoi("a"));
printf("%d*", atoi(" b"));
printf("%d*", atoi(" c "));
printf("%d*", atoi("6"));
printf("%d*", atoi(" 5"));
printf("%d*", atoi("4 "));
printf("%d*", atoi("3 6"));
printf("%d*", atoi(" 3 7"));
printf("%d*", atoi("9 d"));
printf("%d\n", atoi(" 8 e"));
return 0;
}
'''
    self.do_run(src, '0*0*0*0*6*5*4*3*3*9*8')
def test_sscanf(self):
    """Check sscanf() with %s/%d/%f, partial matches, and whitespace
    handling; also exercises atof()."""
    src = r'''
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
int main () {
#define CHECK(str) \
{ \
char name[1000]; \
memset(name, 0, 1000); \
int prio = 99; \
sscanf(str, "%s %d", name, &prio); \
printf("%s : %d\n", name, prio); \
}
CHECK("en-us 2");
CHECK("en-r");
CHECK("en 3");
printf("%f, %f\n", atof("1.234567"), atof("cheez"));
float a = -1;
sscanf("-3.03", "%f", &a);
printf("%.4f\n", a);
char buffy[100];
sscanf("cheez some thing moar 123\nyet more\n", "cheez %s", buffy);
printf("|%s|\n", buffy);
sscanf("cheez something\nmoar 123\nyet more\n", "cheez %s", buffy);
printf("|%s|\n", buffy);
sscanf("cheez somethingmoar\tyet more\n", "cheez %s", buffy);
printf("|%s|\n", buffy);
return 0;
}
'''
    self.do_run(src, 'en-us : 2\nen-r : 99\nen : 3\n1.234567, 0.000000\n-3.0300\n|some|\n|something|\n|somethingmoar|')
def test_sscanf_2(self):
    """Check sscanf() of floating-point values in plain, e/E-exponent and
    negative-exponent notation, in both float and double variants."""
    # doubles
    if Settings.USE_TYPED_ARRAYS == 2:
        for ftype in ['float', 'double']:
            src = r'''
#include <stdio.h>
int main(){
char strval1[] = "1.2345678901";
char strval2[] = "1.23456789e5";
char strval3[] = "1.23456789E5";
char strval4[] = "1.2345678e-5";
char strval5[] = "1.2345678E-5";
double dblval = 1.2345678901;
double tstval;
sscanf(strval1, "%lf", &tstval);
if(dblval != tstval) printf("FAIL: Values are not equal: %lf %lf\n", dblval, tstval);
else printf("Pass: %lf %lf\n", tstval, dblval);
sscanf(strval2, "%lf", &tstval);
dblval = 123456.789;
if(dblval != tstval) printf("FAIL: Values are not equal: %lf %lf\n", dblval, tstval);
else printf("Pass: %lf %lf\n", tstval, dblval);
sscanf(strval3, "%lf", &tstval);
dblval = 123456.789;
if(dblval != tstval) printf("FAIL: Values are not equal: %lf %lf\n", dblval, tstval);
else printf("Pass: %lf %lf\n", tstval, dblval);
sscanf(strval4, "%lf", &tstval);
dblval = 0.000012345678;
if(dblval != tstval) printf("FAIL: Values are not equal: %lf %lf\n", dblval, tstval);
else printf("Pass: %lf %lf\n", tstval, dblval);
sscanf(strval5, "%lf", &tstval);
dblval = 0.000012345678;
if(dblval != tstval) printf("FAIL: Values are not equal: %lf %lf\n", dblval, tstval);
else printf("Pass: %lf %lf\n", tstval, dblval);
return 0;
}
'''
            if ftype == 'float':
                # The float variant rewrites the source and expects float precision.
                self.do_run(src.replace('%lf', '%f').replace('double', 'float'), '''Pass: 1.234568 1.234568
Pass: 123456.789063 123456.789063
Pass: 123456.789063 123456.789063
Pass: 0.000012 0.000012
Pass: 0.000012 0.000012''')
            else:
                self.do_run(src, '''Pass: 1.234568 1.234568
Pass: 123456.789000 123456.789000
Pass: 123456.789000 123456.789000
Pass: 0.000012 0.000012
Pass: 0.000012 0.000012''')
def test_langinfo(self):
    """Run the nl_langinfo test from tests/langinfo against its recorded output."""
    # Use context managers so the file handles are closed deterministically.
    with open(path_from_root('tests', 'langinfo', 'test.c'), 'r') as f:
        src = f.read()
    with open(path_from_root('tests', 'langinfo', 'output.txt'), 'r') as f:
        expected = f.read()
    self.do_run(src, expected, extra_emscripten_args=['-H', 'libc/langinfo.h'])
def test_files(self):
    """Check general file I/O: a pre-created data file, a lazy file,
    custom stdin input, and fscanf, via tests/files.cpp."""
    if self.emcc_args is not None and '-O2' in self.emcc_args:
        self.emcc_args += ['--closure', '1'] # Use closure here, to test we don't break FS stuff
    Settings.CORRECT_SIGNS = 1 # Just so our output is what we expect. Can flip them both.
    # Injected before the generated code: disables automatic FS init and
    # sets up the input files and a custom stdin provider.
    post = '''
def process(filename):
src = \'\'\'
var Module = {
'noFSInit': true,
'preRun': function() {
FS.createDataFile('/', 'somefile.binary', [100, 200, 50, 25, 10, 77, 123], true, false); // 200 becomes -56, since signed chars are used in memory
FS.createLazyFile('/', 'test.file', 'test.file', true, false);
var test_files_input = 'hi there!';
var test_files_input_index = 0;
FS.init(function() {
return test_files_input.charCodeAt(test_files_input_index++) || null;
});
}
};
\'\'\' + open(filename, 'r').read()
open(filename, 'w').write(src)
'''
    # Use a context manager so the handle is closed before the test runs.
    with open(os.path.join(self.get_dir(), 'test.file'), 'w') as other:
        other.write('some data')
    with open(path_from_root('tests', 'files.cpp'), 'r') as f:
        src = f.read()
    self.do_run(src, 'size: 7\ndata: 100,-56,50,25,10,77,123\nloop: 100 -56 50 25 10 77 123 \ninput:hi there!\ntexto\ntexte\n$\n5 : 10,30,20,11,88\nother=some data.\nseeked=me da.\nseeked=ata.\nseeked=ta.\nfscanfed: 10 - hello\n',
                post_build=post, extra_emscripten_args=['-H', 'libc/fcntl.h'])
def test_files_m(self):
    """Check Module.stdin/Module.stdout overrides for standard streams."""
    # Test for Module.stdin etc.
    Settings.CORRECT_SIGNS = 1
    # Injected before the generated code: custom stdin pops from a JS array,
    # custom stdout echoes via Module.print.
    post = '''
def process(filename):
src = \'\'\'
var data = [10, 20, 40, 30];
var Module = {
stdin: function() { return data.pop() || null },
stdout: function(x) { Module.print('got: ' + x) }
};
\'\'\' + open(filename, 'r').read()
open(filename, 'w').write(src)
'''
    src = r'''
#include <stdio.h>
#include <unistd.h>
int main () {
char c;
fprintf(stderr, "isatty? %d,%d,%d\n", isatty(fileno(stdin)), isatty(fileno(stdout)), isatty(fileno(stderr)));
while ((c = fgetc(stdin)) != EOF) {
putc(c+5, stdout);
}
return 0;
}
'''
    self.do_run(src, 'isatty? 0,0,1\ngot: 35\ngot: 45\ngot: 25\ngot: 15\n', post_build=post)
def test_folders(self):
    """Check directory operations: opendir/readdir/telldir/seekdir and the
    errno values for permission, missing-path and not-a-directory errors."""
    # Pre-populate the virtual FS with folders, files and a no-entry dir.
    add_pre_run = '''
def process(filename):
src = open(filename, 'r').read().replace(
'// {{PRE_RUN_ADDITIONS}}',
\'\'\'
FS.createFolder('/', 'test', true, false);
FS.createPath('/', 'test/hello/world/', true, false);
FS.createPath('/test', 'goodbye/world/', true, false);
FS.createPath('/test/goodbye', 'noentry', false, false);
FS.createDataFile('/test', 'freeforall.ext', 'abc', true, true);
FS.createDataFile('/test', 'restricted.ext', 'def', false, false);
\'\'\'
)
open(filename, 'w').write(src)
'''
    src = r'''
#include <stdio.h>
#include <dirent.h>
#include <errno.h>
int main() {
struct dirent *e;
// Basic correct behaviour.
DIR* d = opendir("/test");
printf("--E: %d\n", errno);
while ((e = readdir(d))) puts(e->d_name);
printf("--E: %d\n", errno);
// Empty folder; tell/seek.
puts("****");
d = opendir("/test/hello/world/");
e = readdir(d);
puts(e->d_name);
int pos = telldir(d);
e = readdir(d);
puts(e->d_name);
seekdir(d, pos);
e = readdir(d);
puts(e->d_name);
// Errors.
puts("****");
printf("--E: %d\n", errno);
d = opendir("/test/goodbye/noentry");
printf("--E: %d, D: %d\n", errno, d);
d = opendir("/i/dont/exist");
printf("--E: %d, D: %d\n", errno, d);
d = opendir("/test/freeforall.ext");
printf("--E: %d, D: %d\n", errno, d);
while ((e = readdir(d))) puts(e->d_name);
printf("--E: %d\n", errno);
return 0;
}
'''
    expected = '''
--E: 0
.
..
hello
goodbye
freeforall.ext
restricted.ext
--E: 0
****
.
..
..
****
--E: 0
--E: 13, D: 0
--E: 2, D: 0
--E: 20, D: 0
--E: 9
'''
    # Raw strings for the regex: '\s' in a plain literal is an invalid
    # escape sequence in Python 3. Strips indentation from the expected text.
    self.do_run(src, re.sub(r'(^|\n)\s+', r'\1', expected), post_build=add_pre_run)
def test_stat(self):
    """Check stat() on a folder, data file, link and device, all with a
    fixed timestamp, via tests/stat."""
    # Pre-populate the virtual FS and pin every node's timestamp.
    add_pre_run = '''
def process(filename):
src = open(filename, 'r').read().replace(
'// {{PRE_RUN_ADDITIONS}}',
\'\'\'
var f1 = FS.createFolder('/', 'test', true, true);
var f2 = FS.createDataFile(f1, 'file', 'abcdef', true, true);
var f3 = FS.createLink(f1, 'link', 'file', true, true);
var f4 = FS.createDevice(f1, 'device', function(){}, function(){});
f1.timestamp = f2.timestamp = f3.timestamp = f4.timestamp = new Date(1200000000000);
\'\'\'
)
open(filename, 'w').write(src)
'''
    src = open(path_from_root('tests', 'stat', 'src.c'), 'r').read()
    expected = open(path_from_root('tests', 'stat', 'output.txt'), 'r').read()
    self.do_run(src, expected, post_build=add_pre_run, extra_emscripten_args=['-H', 'libc/fcntl.h'])
def test_fcntl(self):
    """Run the fcntl() test from tests/fcntl against its recorded output."""
    # Pre-populate the virtual FS with the file the test operates on.
    add_pre_run = '''
def process(filename):
src = open(filename, 'r').read().replace(
'// {{PRE_RUN_ADDITIONS}}',
"FS.createDataFile('/', 'test', 'abcdef', true, true);"
)
open(filename, 'w').write(src)
'''
    # Use context managers so the file handles are closed deterministically.
    with open(path_from_root('tests', 'fcntl', 'src.c'), 'r') as f:
        src = f.read()
    with open(path_from_root('tests', 'fcntl', 'output.txt'), 'r') as f:
        expected = f.read()
    self.do_run(src, expected, post_build=add_pre_run, extra_emscripten_args=['-H', 'libc/fcntl.h'])
def test_fcntl_open(self):
    """Run the open()-flags test from tests/fcntl-open against its recorded output."""
    # Pre-populate the virtual FS with a file and a folder to open.
    add_pre_run = '''
def process(filename):
src = open(filename, 'r').read().replace(
'// {{PRE_RUN_ADDITIONS}}',
\'\'\'
FS.createDataFile('/', 'test-file', 'abcdef', true, true);
FS.createFolder('/', 'test-folder', true, true);
\'\'\'
)
open(filename, 'w').write(src)
'''
    # Use context managers so the file handles are closed deterministically.
    with open(path_from_root('tests', 'fcntl-open', 'src.c'), 'r') as f:
        src = f.read()
    with open(path_from_root('tests', 'fcntl-open', 'output.txt'), 'r') as f:
        expected = f.read()
    self.do_run(src, expected, post_build=add_pre_run, extra_emscripten_args=['-H', 'libc/fcntl.h'])
def test_fcntl_misc(self):
    """Run the miscellaneous-fcntl test from tests/fcntl-misc against its
    recorded output."""
    # Pre-populate the virtual FS with the file the test operates on.
    add_pre_run = '''
def process(filename):
src = open(filename, 'r').read().replace(
'// {{PRE_RUN_ADDITIONS}}',
"FS.createDataFile('/', 'test', 'abcdef', true, true);"
)
open(filename, 'w').write(src)
'''
    # Use context managers so the file handles are closed deterministically.
    with open(path_from_root('tests', 'fcntl-misc', 'src.c'), 'r') as f:
        src = f.read()
    with open(path_from_root('tests', 'fcntl-misc', 'output.txt'), 'r') as f:
        expected = f.read()
    self.do_run(src, expected, post_build=add_pre_run, extra_emscripten_args=['-H', 'libc/fcntl.h'])
def test_poll(self):
    """Check poll() over a file, a device, an invalid fd, and fds with
    empty or partial event masks."""
    # Pre-populate the virtual FS with a data file and a device.
    add_pre_run = '''
def process(filename):
src = open(filename, 'r').read().replace(
'// {{PRE_RUN_ADDITIONS}}',
\'\'\'
FS.createDataFile('/', 'file', 'abcdef', true, true);
FS.createDevice('/', 'device', function() {}, function() {});
\'\'\'
)
open(filename, 'w').write(src)
'''
    src = r'''
#include <stdio.h>
#include <errno.h>
#include <fcntl.h>
#include <poll.h>
int main() {
struct pollfd multi[5];
multi[0].fd = open("/file", O_RDONLY, 0777);
multi[1].fd = open("/device", O_RDONLY, 0777);
multi[2].fd = 123;
multi[3].fd = open("/file", O_RDONLY, 0777);
multi[4].fd = open("/file", O_RDONLY, 0777);
multi[0].events = POLLIN | POLLOUT | POLLNVAL | POLLERR;
multi[1].events = POLLIN | POLLOUT | POLLNVAL | POLLERR;
multi[2].events = POLLIN | POLLOUT | POLLNVAL | POLLERR;
multi[3].events = 0x00;
multi[4].events = POLLOUT | POLLNVAL | POLLERR;
printf("ret: %d\n", poll(multi, 5, 123));
printf("errno: %d\n", errno);
printf("multi[0].revents: %d\n", multi[0].revents == (POLLIN | POLLOUT));
printf("multi[1].revents: %d\n", multi[1].revents == (POLLIN | POLLOUT));
printf("multi[2].revents: %d\n", multi[2].revents == POLLNVAL);
printf("multi[3].revents: %d\n", multi[3].revents == 0);
printf("multi[4].revents: %d\n", multi[4].revents == POLLOUT);
return 0;
}
'''
    expected = r'''
ret: 4
errno: 0
multi[0].revents: 1
multi[1].revents: 1
multi[2].revents: 1
multi[3].revents: 1
multi[4].revents: 1
'''
    # Raw strings for the regex: '\s' in a plain literal is an invalid
    # escape sequence in Python 3. Strips indentation from the expected text.
    self.do_run(src, re.sub(r'(^|\n)\s+', r'\1', expected), post_build=add_pre_run, extra_emscripten_args=['-H', 'libc/fcntl.h,poll.h'])
def test_statvfs(self):
    """statvfs() on the virtual filesystem.

    The expected output is a hard-coded table of the fixed, synthetic
    filesystem statistics that the JS FS layer reports.
    """
    src = r'''
      #include <stdio.h>
      #include <errno.h>
      #include <sys/statvfs.h>

      int main() {
        struct statvfs s;

        printf("result: %d\n", statvfs("/test", &s));
        printf("errno: %d\n", errno);
        printf("f_bsize: %lu\n", s.f_bsize);
        printf("f_frsize: %lu\n", s.f_frsize);
        printf("f_blocks: %lu\n", s.f_blocks);
        printf("f_bfree: %lu\n", s.f_bfree);
        printf("f_bavail: %lu\n", s.f_bavail);
        printf("f_files: %lu\n", s.f_files);
        printf("f_ffree: %lu\n", s.f_ffree);
        printf("f_favail: %lu\n", s.f_favail);
        printf("f_fsid: %lu\n", s.f_fsid);
        printf("f_flag: %lu\n", s.f_flag);
        printf("f_namemax: %lu\n", s.f_namemax);
        return 0;
      }
      '''
    expected = r'''
      result: 0
      errno: 0
      f_bsize: 4096
      f_frsize: 4096
      f_blocks: 1000000
      f_bfree: 500000
      f_bavail: 500000
      f_files: 10
      f_ffree: 1000000
      f_favail: 1000000
      f_fsid: 42
      f_flag: 2
      f_namemax: 255
      '''
    # Leading whitespace is stripped from each expected line before comparison.
    self.do_run(src, re.sub('(^|\n)\s+', '\\1', expected))
def test_libgen(self):
    """dirname()/basename() from <libgen.h> across edge-case paths.

    Each path is duplicated (pN / pNx) because dirname() and basename()
    may modify their argument in place, so each call gets its own copy.
    Covers trailing slashes, root, repeated slashes, '..' components,
    the empty string, and NULL.
    """
    src = r'''
      #include <stdio.h>
      #include <libgen.h>

      int main() {
        char p1[16] = "/usr/lib", p1x[16] = "/usr/lib";
        printf("%s -> ", p1);
        printf("%s : %s\n", dirname(p1x), basename(p1));

        char p2[16] = "/usr", p2x[16] = "/usr";
        printf("%s -> ", p2);
        printf("%s : %s\n", dirname(p2x), basename(p2));

        char p3[16] = "/usr/", p3x[16] = "/usr/";
        printf("%s -> ", p3);
        printf("%s : %s\n", dirname(p3x), basename(p3));

        char p4[16] = "/usr/lib///", p4x[16] = "/usr/lib///";
        printf("%s -> ", p4);
        printf("%s : %s\n", dirname(p4x), basename(p4));

        char p5[16] = "/", p5x[16] = "/";
        printf("%s -> ", p5);
        printf("%s : %s\n", dirname(p5x), basename(p5));

        char p6[16] = "///", p6x[16] = "///";
        printf("%s -> ", p6);
        printf("%s : %s\n", dirname(p6x), basename(p6));

        char p7[16] = "/usr/../lib/..", p7x[16] = "/usr/../lib/..";
        printf("%s -> ", p7);
        printf("%s : %s\n", dirname(p7x), basename(p7));

        char p8[16] = "", p8x[16] = "";
        printf("(empty) -> %s : %s\n", dirname(p8x), basename(p8));
        printf("(null) -> %s : %s\n", dirname(0), basename(0));
        return 0;
      }
      '''
    expected = '''
      /usr/lib -> /usr : lib
      /usr -> / : usr
      /usr/ -> / : usr
      /usr/lib/// -> /usr : lib
      / -> / : /
      /// -> / : /
      /usr/../lib/.. -> /usr/../lib : ..
      (empty) -> . : .
      (null) -> . : .
      '''
    # Leading whitespace is stripped from each expected line before comparison.
    self.do_run(src, re.sub('(^|\n)\s+', '\\1', expected))
def test_utime(self):
    """utime() on a writeable vs. an unwriteable folder.

    A pre-run hook creates the two folders (keeping JS references to
    them); a post-run hook then checks, from the JS side, whether each
    folder's timestamp was actually updated to the requested value.
    """
    # Hooks run before and after main(): create the folders, then verify
    # their timestamps against t.modtime (1200000000 s -> ms).
    add_pre_run_and_checks = '''
def process(filename):
  src = open(filename, 'r').read().replace(
    '// {{PRE_RUN_ADDITIONS}}',
    \'\'\'
      var TEST_F1 = FS.createFolder('/', 'writeable', true, true);
      var TEST_F2 = FS.createFolder('/', 'unwriteable', true, false);
    \'\'\'
  ).replace(
    '// {{POST_RUN_ADDITIONS}}',
    \'\'\'
      Module.print('first changed: ' + (TEST_F1.timestamp == 1200000000000));
      Module.print('second changed: ' + (TEST_F2.timestamp == 1200000000000));
    \'\'\'
  )
  open(filename, 'w').write(src)
'''
    src = r'''
      #include <stdio.h>
      #include <errno.h>
      #include <utime.h>

      int main() {
        struct utimbuf t = {1000000000, 1200000000};
        char* writeable = "/writeable";
        char* unwriteable = "/unwriteable";

        utime(writeable, &t);
        printf("writeable errno: %d\n", errno);

        utime(unwriteable, &t);
        printf("unwriteable errno: %d\n", errno);

        return 0;
      }
      '''
    expected = '''
      writeable errno: 0
      unwriteable errno: 1
      first changed: true
      second changed: false
      '''
    # Leading whitespace is stripped from each expected line before comparison.
    self.do_run(src, re.sub('(^|\n)\s+', '\\1', expected), post_build=add_pre_run_and_checks)
def test_direct_string_constant_usage(self):
    """Pass a string literal to a template taking a sized char-array reference."""
    # Needs iostream, which is only available via libcxx under emcc.
    if self.emcc_args is None:
        return self.skip('requires libcxx')
    program = '''
      #include <iostream>
      template<int i>
      void printText( const char (&text)[ i ] )
      {
         std::cout << text;
      }
      int main()
      {
        printText( "some string constant" );
        return 0;
      }
      '''
    self.do_run(program, "some string constant")
def test_istream(self):
    """Parse three ints from an istringstream; repeated with LINKABLE on/off."""
    # Needs sstream/iostream, which come from libcxx under emcc only.
    if self.emcc_args is None:
        return self.skip('requires libcxx')
    program = '''
      #include <string>
      #include <sstream>
      #include <iostream>

      int main()
      {
          std::string mystring("1 2 3");
          std::istringstream is(mystring);
          int one, two, three;

          is >> one >> two >> three;

          printf( "%i %i %i", one, two, three );
      }
      '''
    for linkable in (0, 1):
        # regression check for issue #273
        Settings.LINKABLE = linkable
        self.do_run(program, "1 2 3")
def test_readdir(self):
    """readdir() over a folder containing a lazy file and a subdirectory.

    The pre-run hook builds '/test' with one (lazy, HTTP-backed) file and
    one directory; the C program then lists the entries and classifies
    each via d_type.
    """
    # Post-build hook: create the directory tree to iterate over.
    add_pre_run = '''
def process(filename):
  src = open(filename, 'r').read().replace(
    '// {{PRE_RUN_ADDITIONS}}',
    "FS.createFolder('', 'test', true, true);\\nFS.createLazyFile( 'test', 'some_file', 'http://localhost/some_file', true, false);\\nFS.createFolder('test', 'some_directory', true, true);"
  )
  open(filename, 'w').write(src)
'''
    src = '''
      #include <dirent.h>
      #include <stdio.h>

      int main()
      {
          DIR * dir;
          dirent * entity;

          dir = opendir( "test" );

          while( ( entity = readdir( dir ) ) )
          {
              printf( "%s is a %s\\n", entity->d_name, entity->d_type & DT_DIR ? "directory" : "file" );
          }

          return 0;
      }

      '''
    # '.' and '..' are always reported first, then the created entries.
    self.do_run(src, ". is a directory\n.. is a directory\nsome_file is a file\nsome_directory is a directory", post_build=add_pre_run)
def test_fs_base(self):
    """Drive the FS layer directly from tests/filesystem/src.js.

    INCLUDE_FULL_LIBRARY pulls the whole JS library into the build (the C
    program is an empty main, so nothing would be referenced otherwise);
    it is reset in the finally block so other tests are unaffected.
    """
    Settings.INCLUDE_FULL_LIBRARY = 1
    try:
        # Post-build hook: disable normal FS initialization and splice in
        # the test's own JS driver at the pre-run marker.
        addJS = '''
def process(filename):
  import tools.shared as shared
  src = open(filename, 'r').read().replace('FS.init();', '').replace( # Disable normal initialization, replace with ours
    '// {{PRE_RUN_ADDITIONS}}',
    open(shared.path_from_root('tests', 'filesystem', 'src.js'), 'r').read())
  open(filename, 'w').write(src)
'''
        src = 'int main() {return 0;}\n'
        expected = open(path_from_root('tests', 'filesystem', 'output.txt'), 'r').read()
        self.do_run(src, expected, post_build=addJS, extra_emscripten_args=['-H', 'libc/fcntl.h,libc/sys/unistd.h,poll.h,libc/math.h,libc/langinfo.h,libc/time.h'])
    finally:
        Settings.INCLUDE_FULL_LIBRARY = 0
def test_unistd_access(self):
    """Run tests/unistd/access.c with its JS setup and compare to access.out."""
    # Post-build hook: splice the test's JS pre-run setup into the output.
    setup_hook = '''
def process(filename):
  import tools.shared as shared
  src = open(filename, 'r').read().replace(
    '// {{PRE_RUN_ADDITIONS}}',
    open(shared.path_from_root('tests', 'unistd', 'access.js'), 'r').read()
  )
  open(filename, 'w').write(src)
'''
    with open(path_from_root('tests', 'unistd', 'access.c'), 'r') as source_file:
        source = source_file.read()
    with open(path_from_root('tests', 'unistd', 'access.out'), 'r') as reference_file:
        reference = reference_file.read()
    self.do_run(source, reference, post_build=setup_hook)
def test_unistd_curdir(self):
    """Run tests/unistd/curdir.c with its JS setup and compare to curdir.out."""
    # Post-build hook: splice the test's JS pre-run setup into the output.
    setup_hook = '''
def process(filename):
  import tools.shared as shared
  src = open(filename, 'r').read().replace(
    '// {{PRE_RUN_ADDITIONS}}',
    open(shared.path_from_root('tests', 'unistd', 'curdir.js'), 'r').read()
  )
  open(filename, 'w').write(src)
'''
    with open(path_from_root('tests', 'unistd', 'curdir.c'), 'r') as source_file:
        source = source_file.read()
    with open(path_from_root('tests', 'unistd', 'curdir.out'), 'r') as reference_file:
        reference = reference_file.read()
    self.do_run(source, reference, post_build=setup_hook)
def test_unistd_close(self):
    """Run tests/unistd/close.c against its reference output."""
    with open(path_from_root('tests', 'unistd', 'close.c'), 'r') as source_file:
        source = source_file.read()
    with open(path_from_root('tests', 'unistd', 'close.out'), 'r') as reference_file:
        reference = reference_file.read()
    self.do_run(source, reference)
def test_unistd_confstr(self):
    """Run tests/unistd/confstr.c against its reference output.

    Passes -H so the confstr constants from unistd.h are available.
    """
    with open(path_from_root('tests', 'unistd', 'confstr.c'), 'r') as source_file:
        source = source_file.read()
    with open(path_from_root('tests', 'unistd', 'confstr.out'), 'r') as reference_file:
        reference = reference_file.read()
    self.do_run(source, reference, extra_emscripten_args=['-H', 'libc/unistd.h'])
def test_unistd_ttyname(self):
    """Run tests/unistd/ttyname.c with its JS setup and compare to ttyname.out."""
    # Post-build hook: splice the test's JS pre-run setup into the output.
    setup_hook = '''
def process(filename):
  import tools.shared as shared
  src = open(filename, 'r').read().replace(
    '// {{PRE_RUN_ADDITIONS}}',
    open(shared.path_from_root('tests', 'unistd', 'ttyname.js'), 'r').read()
  )
  open(filename, 'w').write(src)
'''
    with open(path_from_root('tests', 'unistd', 'ttyname.c'), 'r') as source_file:
        source = source_file.read()
    with open(path_from_root('tests', 'unistd', 'ttyname.out'), 'r') as reference_file:
        reference = reference_file.read()
    self.do_run(source, reference, post_build=setup_hook)
def test_unistd_dup(self):
    """Run tests/unistd/dup.c against its reference output."""
    with open(path_from_root('tests', 'unistd', 'dup.c'), 'r') as source_file:
        source = source_file.read()
    with open(path_from_root('tests', 'unistd', 'dup.out'), 'r') as reference_file:
        reference = reference_file.read()
    self.do_run(source, reference)
def test_unistd_pathconf(self):
    """Run tests/unistd/pathconf.c against its reference output."""
    with open(path_from_root('tests', 'unistd', 'pathconf.c'), 'r') as source_file:
        source = source_file.read()
    with open(path_from_root('tests', 'unistd', 'pathconf.out'), 'r') as reference_file:
        reference = reference_file.read()
    self.do_run(source, reference)
def test_unistd_truncate(self):
    """Run tests/unistd/truncate.c with its JS setup and compare to truncate.out."""
    # Post-build hook: splice the test's JS pre-run setup into the output.
    setup_hook = '''
def process(filename):
  import tools.shared as shared
  src = open(filename, 'r').read().replace(
    '// {{PRE_RUN_ADDITIONS}}',
    open(shared.path_from_root('tests', 'unistd', 'truncate.js'), 'r').read()
  )
  open(filename, 'w').write(src)
'''
    with open(path_from_root('tests', 'unistd', 'truncate.c'), 'r') as source_file:
        source = source_file.read()
    with open(path_from_root('tests', 'unistd', 'truncate.out'), 'r') as reference_file:
        reference = reference_file.read()
    self.do_run(source, reference, post_build=setup_hook)
def test_unistd_swab(self):
    """Run tests/unistd/swab.c against its reference output."""
    with open(path_from_root('tests', 'unistd', 'swab.c'), 'r') as source_file:
        source = source_file.read()
    with open(path_from_root('tests', 'unistd', 'swab.out'), 'r') as reference_file:
        reference = reference_file.read()
    self.do_run(source, reference)
def test_unistd_isatty(self):
    """Run tests/unistd/isatty.c with its JS setup and compare to isatty.out."""
    # Post-build hook: splice the test's JS pre-run setup into the output.
    setup_hook = '''
def process(filename):
  import tools.shared as shared
  src = open(filename, 'r').read().replace(
    '// {{PRE_RUN_ADDITIONS}}',
    open(shared.path_from_root('tests', 'unistd', 'isatty.js'), 'r').read()
  )
  open(filename, 'w').write(src)
'''
    with open(path_from_root('tests', 'unistd', 'isatty.c'), 'r') as source_file:
        source = source_file.read()
    with open(path_from_root('tests', 'unistd', 'isatty.out'), 'r') as reference_file:
        reference = reference_file.read()
    self.do_run(source, reference, post_build=setup_hook)
def test_unistd_sysconf(self):
    """Run tests/unistd/sysconf.c against its reference output."""
    with open(path_from_root('tests', 'unistd', 'sysconf.c'), 'r') as source_file:
        source = source_file.read()
    with open(path_from_root('tests', 'unistd', 'sysconf.out'), 'r') as reference_file:
        reference = reference_file.read()
    self.do_run(source, reference)
def test_unistd_login(self):
    """Run tests/unistd/login.c against its reference output."""
    with open(path_from_root('tests', 'unistd', 'login.c'), 'r') as source_file:
        source = source_file.read()
    with open(path_from_root('tests', 'unistd', 'login.out'), 'r') as reference_file:
        reference = reference_file.read()
    self.do_run(source, reference)
def test_unistd_unlink(self):
    """Run tests/unistd/unlink.c with its JS setup and compare to unlink.out."""
    # Post-build hook: splice the test's JS pre-run setup into the output.
    setup_hook = '''
def process(filename):
  import tools.shared as shared
  src = open(filename, 'r').read().replace(
    '// {{PRE_RUN_ADDITIONS}}',
    open(shared.path_from_root('tests', 'unistd', 'unlink.js'), 'r').read()
  )
  open(filename, 'w').write(src)
'''
    with open(path_from_root('tests', 'unistd', 'unlink.c'), 'r') as source_file:
        source = source_file.read()
    with open(path_from_root('tests', 'unistd', 'unlink.out'), 'r') as reference_file:
        reference = reference_file.read()
    self.do_run(source, reference, post_build=setup_hook)
def test_unistd_links(self):
    """Run tests/unistd/links.c with its JS setup and compare to links.out."""
    # Post-build hook: splice the test's JS pre-run setup into the output.
    setup_hook = '''
def process(filename):
  import tools.shared as shared
  src = open(filename, 'r').read().replace(
    '// {{PRE_RUN_ADDITIONS}}',
    open(shared.path_from_root('tests', 'unistd', 'links.js'), 'r').read()
  )
  open(filename, 'w').write(src)
'''
    with open(path_from_root('tests', 'unistd', 'links.c'), 'r') as source_file:
        source = source_file.read()
    with open(path_from_root('tests', 'unistd', 'links.out'), 'r') as reference_file:
        reference = reference_file.read()
    self.do_run(source, reference, post_build=setup_hook)
def test_unistd_sleep(self):
    """Run tests/unistd/sleep.c against its reference output."""
    with open(path_from_root('tests', 'unistd', 'sleep.c'), 'r') as source_file:
        source = source_file.read()
    with open(path_from_root('tests', 'unistd', 'sleep.out'), 'r') as reference_file:
        reference = reference_file.read()
    self.do_run(source, reference)
def test_unistd_io(self):
    """Run tests/unistd/io.c with its JS setup and compare to io.out."""
    # Post-build hook: splice the test's JS pre-run setup into the output.
    setup_hook = '''
def process(filename):
  import tools.shared as shared
  src = open(filename, 'r').read().replace(
    '// {{PRE_RUN_ADDITIONS}}',
    open(shared.path_from_root('tests', 'unistd', 'io.js'), 'r').read()
  )
  open(filename, 'w').write(src)
'''
    with open(path_from_root('tests', 'unistd', 'io.c'), 'r') as source_file:
        source = source_file.read()
    with open(path_from_root('tests', 'unistd', 'io.out'), 'r') as reference_file:
        reference = reference_file.read()
    self.do_run(source, reference, post_build=setup_hook)
def test_unistd_misc(self):
    """Run tests/unistd/misc.c against its reference output."""
    with open(path_from_root('tests', 'unistd', 'misc.c'), 'r') as source_file:
        source = source_file.read()
    with open(path_from_root('tests', 'unistd', 'misc.out'), 'r') as reference_file:
        reference = reference_file.read()
    self.do_run(source, reference)
def test_uname(self):
    """uname() reporting Emscripten's fixed utsname fields, plus NULL arg."""
    src = r'''
      #include <stdio.h>
      #include <sys/utsname.h>

      int main() {
        struct utsname u;
        printf("ret: %d\n", uname(&u));
        printf("sysname: %s\n", u.sysname);
        printf("nodename: %s\n", u.nodename);
        printf("release: %s\n", u.release);
        printf("version: %s\n", u.version);
        printf("machine: %s\n", u.machine);
        printf("invalid: %d\n", uname(0));
        return 0;
      }
      '''
    # NOTE(review): 'invalid:' is printed by the program but has no line in
    # the expected text; do_run's comparison evidently tolerates that —
    # confirm before tightening.
    expected = '''
      ret: 0
      sysname: Emscripten
      nodename: emscripten
      release: 1.0
      version: #1
      machine: x86-JS
      '''
    # Leading whitespace is stripped from each expected line before comparison.
    self.do_run(src, re.sub('(^|\n)\s+', '\\1', expected))
def test_env(self):
    """Run tests/env/src.c against its reference output."""
    with open(path_from_root('tests', 'env', 'src.c'), 'r') as source_file:
        source = source_file.read()
    with open(path_from_root('tests', 'env', 'output.txt'), 'r') as reference_file:
        reference = reference_file.read()
    self.do_run(source, reference)
def test_systypes(self):
    """Run tests/systypes/src.c against its reference output."""
    with open(path_from_root('tests', 'systypes', 'src.c'), 'r') as source_file:
        source = source_file.read()
    with open(path_from_root('tests', 'systypes', 'output.txt'), 'r') as reference_file:
        reference = reference_file.read()
    self.do_run(source, reference)
def test_getloadavg(self):
    """getloadavg() returns three fixed 0.1 samples; extra slots untouched.

    The array is pre-filled with sentinels (42.13) so the test can verify
    that only the first three entries are written.
    """
    src = r'''
      #include <stdio.h>
      #include <stdlib.h>

      int main() {
        double load[5] = {42.13, 42.13, 42.13, 42.13, 42.13};
        printf("ret: %d\n", getloadavg(load, 5));
        printf("load[0]: %.3lf\n", load[0]);
        printf("load[1]: %.3lf\n", load[1]);
        printf("load[2]: %.3lf\n", load[2]);
        printf("load[3]: %.3lf\n", load[3]);
        printf("load[4]: %.3lf\n", load[4]);
        return 0;
      }
      '''
    expected = '''
      ret: 3
      load[0]: 0.100
      load[1]: 0.100
      load[2]: 0.100
      load[3]: 42.130
      load[4]: 42.130
      '''
    # Leading whitespace is stripped from each expected line before comparison.
    self.do_run(src, re.sub('(^|\n)\s+', '\\1', expected))
def test_inet(self):
    """Byte-order conversions: htonl/htons/ntohl/ntohs swap endianness."""
    program = r'''
      #include <stdio.h>
      #include <arpa/inet.h>

      int main() {
        printf("*%x,%x,%x,%x*\n", htonl(0x12345678), htons(0xabcd), ntohl(0x43211234), ntohs(0xbeaf));
        return 0;
      }
      '''
    self.do_run(program, '*78563412,cdab,34122143,afbe*')
def test_ctype(self):
    """Run tests/ctype/src.c against its reference output.

    The ctype macros implemented via __ctype_b_loc do bit fiddling that
    requires CORRECT_SIGNS, so it is forced on for this build and reset
    afterwards.
    """
    # The bit fiddling done by the macros using __ctype_b_loc requires this.
    Settings.CORRECT_SIGNS = 1
    try:
        src = open(path_from_root('tests', 'ctype', 'src.c'), 'r').read()
        expected = open(path_from_root('tests', 'ctype', 'output.txt'), 'r').read()
        self.do_run(src, expected)
    finally:
        # Bug fix: the original ended with `CORRECT_SIGNS = 0`, which only
        # bound a useless local and never reset Settings.CORRECT_SIGNS,
        # leaking the setting into later tests. Reset it properly, and in a
        # finally block (matching the test_fs_base pattern) so a failing
        # run cannot leak it either.
        Settings.CORRECT_SIGNS = 0
def test_atomic(self):
    """GCC __sync_* atomic builtins: add/fetch, test-and-set, compare-and-swap."""
    src = '''
      #include <stdio.h>
      int main() {
        int x = 10;
        int y = __sync_add_and_fetch(&x, 5);
        printf("*%d,%d*\\n", x, y);
        x = 10;
        y = __sync_fetch_and_add(&x, 5);
        printf("*%d,%d*\\n", x, y);
        x = 10;
        y = __sync_lock_test_and_set(&x, 6);
        printf("*%d,%d*\\n", x, y);
        x = 10;
        y = __sync_bool_compare_and_swap(&x, 9, 7);  /* expected 9 != 10: no swap */
        printf("*%d,%d*\\n", x, y);
        y = __sync_bool_compare_and_swap(&x, 10, 7); /* expected 10 == 10: swaps */
        printf("*%d,%d*\\n", x, y);
        return 0;
      }
      '''
    self.do_run(src, '*15,15*\n*15,10*\n*6,10*\n*10,0*\n*7,1*')
# libc++ tests
def test_iostream(self):
if Settings.QUANTUM_SIZE == 1: return self.skip("we don't support libcxx in q1")
if self.emcc_args is None:
if Building.LLVM_OPTS: return self.skip('optimizing bitcode before emcc can confuse libcxx inclusion')
self.emcc_args = [] # libc++ auto-inclusion is only done if we use emcc
Settings.SAFE_HEAP = 0 # Some spurious warnings from libc++ internals
src = '''
#include <iostream>
int main()
{
std::cout << "hello world" << std::endl << 77 << "." << std::endl;
return 0;
}
'''
# FIXME: should not have so many newlines in output here
self.do_run(src, 'hello world\n77.\n')
def test_stdvec(self):
    """std::vector of a POD struct: push_back copies and indexed access."""
    src = '''
      #include <vector>
      #include <stdio.h>

      struct S {
          int a;
          float b;
      };

      void foo(int a, float b)
      {
        printf("%d:%.2f\\n", a, b);
      }

      int main ( int argc, char *argv[] )
      {
        std::vector<S> ar;
        S s;

        s.a = 789;
        s.b = 123.456f;
        ar.push_back(s);

        s.a = 0;
        s.b = 100.1f;
        ar.push_back(s);

        foo(ar[0].a, ar[0].b);
        foo(ar[1].a, ar[1].b);
      }
      '''
    self.do_run(src, '789:123.46\n0:100.1')
### 'Medium' tests
def test_fannkuch(self):
    """Fannkuch benchmark: check Pfannkuchen(n) for n = 1..8.

    Builds once (no_build=i>1 reuses the first build for later sizes) and
    reruns with increasing n, checking the known results.
    """
    results = [ (1,0), (2,1), (3,2), (4,4), (5,7), (6,10), (7, 16), (8,22) ]
    # Fix: the original re-read fannkuch.cpp from disk on every loop
    # iteration even though the source never changes; read it once.
    src = open(path_from_root('tests', 'fannkuch.cpp'), 'r').read()
    for i, j in results:
        self.do_run(src, 'Pfannkuchen(%d) = %d.' % (i,j), [str(i)], no_build=i>1)
def test_raytrace(self):
    """Run the raytracer (with double narrowed to float) against its PPM reference."""
    if Settings.USE_TYPED_ARRAYS == 2:
        return self.skip('Relies on double value rounding, extremely sensitive')

    # Narrow all doubles to floats so results are reproducible in JS.
    with open(path_from_root('tests', 'raytrace.cpp'), 'r') as source_file:
        source = source_file.read().replace('double', 'float')
    with open(path_from_root('tests', 'raytrace.ppm'), 'r') as reference_file:
        reference = reference_file.read()
    self.do_run(source, reference, ['3', '16'])#, build_ll_hook=self.do_autodebug)
def test_fasta(self):
    """Fasta benchmark at sizes 1/20/50; newlines folded to '*' for comparison.

    no_build=i>1 reuses the first build; only the runtime argument changes.
    """
    results = [ (1,'''GG*ctt**tgagc*'''), (20,'''GGCCGGGCGCGGTGGCTCACGCCTGTAATCCCAGCACTTT*cttBtatcatatgctaKggNcataaaSatgtaaaDcDRtBggDtctttataattcBgtcg**tacgtgtagcctagtgtttgtgttgcgttatagtctatttgtggacacagtatggtcaaa**tgacgtcttttgatctgacggcgttaacaaagatactctg*'''),
    (50,'''GGCCGGGCGCGGTGGCTCACGCCTGTAATCCCAGCACTTTGGGAGGCCGAGGCGGGCGGA*TCACCTGAGGTCAGGAGTTCGAGACCAGCCTGGCCAACAT*cttBtatcatatgctaKggNcataaaSatgtaaaDcDRtBggDtctttataattcBgtcg**tactDtDagcctatttSVHtHttKtgtHMaSattgWaHKHttttagacatWatgtRgaaa**NtactMcSMtYtcMgRtacttctWBacgaa**agatactctgggcaacacacatacttctctcatgttgtttcttcggacctttcataacct**ttcctggcacatggttagctgcacatcacaggattgtaagggtctagtggttcagtgagc**ggaatatcattcgtcggtggtgttaatctatctcggtgtagcttataaatgcatccgtaa**gaatattatgtttatttgtcggtacgttcatggtagtggtgtcgccgatttagacgtaaa**ggcatgtatg*''') ]
    for i, j in results:
        src = open(path_from_root('tests', 'fasta.cpp'), 'r').read()
        # The output nicerizer replaces newlines with '*' to match the
        # expected strings above.
        self.do_run(src, j, [str(i)], lambda x: x.replace('\n', '*'), no_build=i>1)
def test_dlmalloc(self):
    """dlmalloc in three configurations: bundled source, linked, and via emcc.

    Also reruns the same build with different runtime args (no_build=True),
    and finally exercises new/delete variants over the same allocator.
    """
    if self.emcc_args is None: self.emcc_args = [] # dlmalloc auto-inclusion is only done if we use emcc

    Settings.CORRECT_SIGNS = 2
    Settings.CORRECT_SIGNS_LINES = ['src.cpp:' + str(i+4) for i in [4816, 4191, 4246, 4199, 4205, 4235, 4227]]
    Settings.TOTAL_MEMORY = 100*1024*1024 # needed with typed arrays

    # Phase 1: compile dlmalloc.c together with the test driver.
    src = open(path_from_root('system', 'lib', 'dlmalloc.c'), 'r').read() + '\n\n\n' + open(path_from_root('tests', 'dlmalloc_test.c'), 'r').read()
    self.do_run(src, '*1,0*', ['200', '1'])
    self.do_run(src, '*400,0*', ['400', '400'], no_build=True)

    # Phase 2: linked version (dlmalloc supplied by the '-m' machinery).
    src = open(path_from_root('tests', 'dlmalloc_test.c'), 'r').read()
    self.do_run(src, '*1,0*', ['200', '1'], extra_emscripten_args=['-m'])
    self.do_run(src, '*400,0*', ['400', '400'], extra_emscripten_args=['-m'], no_build=True)

    if self.emcc_args == []: # TODO: do this in other passes too, passing their opts into emcc
      # Phase 3: emcc should build in dlmalloc automatically, and do all the
      # sign correction etc. for it.
      try_delete(os.path.join(self.get_dir(), 'src.cpp.o.js'))
      output = Popen(['python', EMCC, path_from_root('tests', 'dlmalloc_test.c'),
                      '-o', os.path.join(self.get_dir(), 'src.cpp.o.js')], stdout=PIPE, stderr=self.stderr_redirect).communicate()

      # 'x' is a dummy source: no_build=True reuses the emcc output above.
      self.do_run('x', '*1,0*', ['200', '1'], no_build=True)
      self.do_run('x', '*400,0*', ['400', '400'], no_build=True)

      # The same for new and all its variants
      src = open(path_from_root('tests', 'new.cpp')).read()
      for new, delete in [
        ('malloc(100)', 'free'),
        ('new char[100]', 'delete[]'),
        ('new Structy', 'delete'),
        ('new int', 'delete'),
        ('new Structy[10]', 'delete[]'),
      ]:
        self.do_run(src.replace('{{{ NEW }}}', new).replace('{{{ DELETE }}}', delete), '*1,0*')
def test_libcxx(self):
    """libc++ containers: hash map (tests/hashtest.cpp) and a heap std::set."""
    self.do_run(open(path_from_root('tests', 'hashtest.cpp')).read(),
                 'june -> 30\nPrevious (in alphabetical order) is july\nNext (in alphabetical order) is march')

    # Regression-style check: a heap-allocated std::set must not crash.
    self.do_run('''
      #include <set>
      #include <stdio.h>
      int main() {
        std::set<int> *fetchOriginatorNums = new std::set<int>();
        fetchOriginatorNums->insert(171);
        printf("hello world\\n");
        return 1;
      }
      ''', 'hello world');
def test_static_variable(self):
    """Function-local static object: the same instance across calls."""
    if self.emcc_args is None: Settings.SAFE_HEAP = 0 # LLVM mixes i64 and i8 in the guard check
    src = '''
      #include <stdio.h>

      struct DATA
      {
          int value;

          DATA()
          {
              value = 0;
          }
      };

      DATA & GetData()
      {
          static DATA data;

          return data;
      }

      int main()
      {
          GetData().value = 10;
          printf( "value:%i", GetData().value );
      }
      '''
    self.do_run(src, 'value:10')
def test_mmap(self):
    """Anonymous mmap of 8 MB: write, read back, munmap; run as C++ and C."""
    src = '''
      #include <stdio.h>
      #include <sys/mman.h>
      #include <assert.h>

      int main(int argc, char *argv[]) {
          const int NUM_BYTES = 8 * 1024 * 1024;
          const int NUM_INTS = NUM_BYTES / sizeof(int);

          int* map = (int*)mmap(0, NUM_BYTES, PROT_READ | PROT_WRITE,
                  MAP_SHARED | MAP_ANON, -1, 0);
          assert(map != MAP_FAILED);

          int i;

          for (i = 0; i < NUM_INTS; i++) {
              map[i] = i;
          }

          for (i = 0; i < NUM_INTS; i++) {
              assert(map[i] == i);
          }

          assert(munmap(map, NUM_BYTES) == 0);

          printf("hello,world");
          return 0;
      }
      '''
    self.do_run(src, 'hello,world')
    # Same program compiled as plain C.
    self.do_run(src, 'hello,world', force_c=True)
def test_cubescript(self):
    """Build and run the CubeScript interpreter (tests/cubescript)."""
    if self.emcc_args is not None and '-O2' in self.emcc_args:
        # Use closure here for some additional coverage
        self.emcc_args += ['--closure', '1']

    # remove -g, so we have one test without it by default
    Building.COMPILER_TEST_OPTS = []

    if self.emcc_args is None:
        # Has some actual loads of unwritten-to places, in the C++ code...
        Settings.SAFE_HEAP = 0

    # Overflows happen in hash loop
    Settings.CORRECT_OVERFLOWS = 1
    Settings.CHECK_OVERFLOWS = 0

    if Settings.USE_TYPED_ARRAYS == 2:
        Settings.CORRECT_SIGNS = 1

    self.do_run(path_from_root('tests', 'cubescript'), '*\nTemp is 33\n9\n5\nhello, everyone\n*', main_file='command.cpp')
def test_gcc_unmangler(self):
    """Demangle a known mangled symbol with the GCC demangler from third_party."""
    mangled = '_ZL10d_demanglePKciPj'
    demangled = '*d_demangle(char const*, int, unsigned int*)*'
    self.do_run(path_from_root('third_party'), demangled, args=[mangled], main_file='gcc_demangler.c')
#### Code snippet that is helpful to search for nonportable optimizations ####
#global LLVM_OPT_OPTS
#for opt in ['-aa-eval', '-adce', '-always-inline', '-argpromotion', '-basicaa', '-basiccg', '-block-placement', '-break-crit-edges', '-codegenprepare', '-constmerge', '-constprop', '-correlated-propagation', '-count-aa', '-dce', '-deadargelim', '-deadtypeelim', '-debug-aa', '-die', '-domfrontier', '-domtree', '-dse', '-extract-blocks', '-functionattrs', '-globaldce', '-globalopt', '-globalsmodref-aa', '-gvn', '-indvars', '-inline', '-insert-edge-profiling', '-insert-optimal-edge-profiling', '-instcombine', '-instcount', '-instnamer', '-internalize', '-intervals', '-ipconstprop', '-ipsccp', '-iv-users', '-jump-threading', '-lazy-value-info', '-lcssa', '-lda', '-libcall-aa', '-licm', '-lint', '-live-values', '-loop-deletion', '-loop-extract', '-loop-extract-single', '-loop-index-split', '-loop-reduce', '-loop-rotate', '-loop-unroll', '-loop-unswitch', '-loops', '-loopsimplify', '-loweratomic', '-lowerinvoke', '-lowersetjmp', '-lowerswitch', '-mem2reg', '-memcpyopt', '-memdep', '-mergefunc', '-mergereturn', '-module-debuginfo', '-no-aa', '-no-profile', '-partial-inliner', '-partialspecialization', '-pointertracking', '-postdomfrontier', '-postdomtree', '-preverify', '-prune-eh', '-reassociate', '-reg2mem', '-regions', '-scalar-evolution', '-scalarrepl', '-sccp', '-scev-aa', '-simplify-libcalls', '-simplify-libcalls-halfpowr', '-simplifycfg', '-sink', '-split-geps', '-sretpromotion', '-strip', '-strip-dead-debug-info', '-strip-dead-prototypes', '-strip-debug-declare', '-strip-nondebug', '-tailcallelim', '-tailduplicate', '-targetdata', '-tbaa']:
# LLVM_OPT_OPTS = [opt]
# try:
# self.do_run(path_from_root(['third_party']), '*d_demangle(char const*, int, unsigned int*)*', args=['_ZL10d_demanglePKciPj'], main_file='gcc_demangler.c')
# print opt, "ok"
# except:
# print opt, "FAIL"
def test_lua(self):
    """Run a pre-compiled Lua interpreter (lua.ll) on a small script.

    EMCC_LEAVE_INPUTS_RAW is set for the duration (and removed in the
    finally block) because the input is already LLVM IR.
    """
    try:
        os.environ['EMCC_LEAVE_INPUTS_RAW'] = '1'

        if Settings.QUANTUM_SIZE == 1: return self.skip('TODO: make this work')

        # Overflows in luaS_newlstr hash loop
        if self.emcc_args is None: Settings.SAFE_HEAP = 0 # Has various warnings, with copied HEAP_HISTORY values (fixed if we copy 'null' as the type)
        Settings.CORRECT_OVERFLOWS = 1
        Settings.CHECK_OVERFLOWS = 0
        Settings.CORRECT_SIGNS = 1 # Not sure why, but needed
        Settings.INIT_STACK = 1 # TODO: Investigate why this is necessary

        self.do_ll_run(path_from_root('tests', 'lua', 'lua.ll'),
                        'hello lua world!\n17\n1\n2\n3\n4\n7',
                        args=['-e', '''print("hello lua world!");print(17);for x = 1,4 do print(x) end;print(10-3)'''],
                        output_nicerizer=lambda string: string.replace('\n\n', '\n').replace('\n\n', '\n'),
                        extra_emscripten_args=['-H', 'libc/fcntl.h,libc/sys/unistd.h,poll.h,libc/math.h,libc/langinfo.h,libc/time.h'])
    finally:
        del os.environ['EMCC_LEAVE_INPUTS_RAW']
def get_freetype(self):
    """Build (or reuse a cached) freetype and return its static-library path list."""
    # TODO: Investigate why this is necessary
    Settings.INIT_STACK = 1
    archive = os.path.join('objs', '.libs', 'libfreetype.a')
    return self.get_library('freetype', archive)
def test_freetype(self):
    """Render text with freetype; main case plus github issue 324 variants.

    The post-build hook embeds LiberationSansBold.ttf into the generated
    JS as a byte array so the program can open 'font.ttf' from the
    virtual FS.
    """
    if Settings.QUANTUM_SIZE == 1: return self.skip('TODO: Figure out and try to fix')

    if Settings.CORRECT_SIGNS == 0: Settings.CORRECT_SIGNS = 1 # Not sure why, but needed

    # Post-build hook: embed the font file as FS data.
    post = '''
def process(filename):
  import tools.shared as shared
  # Embed the font into the document
  src = open(filename, 'r').read().replace(
    '// {{PRE_RUN_ADDITIONS}}',
    "FS.createDataFile('/', 'font.ttf', %s, true, false);" % str(
      map(ord, open(shared.path_from_root('tests', 'freetype', 'LiberationSansBold.ttf'), 'rb').read())
    )
  )
  open(filename, 'w').write(src)
'''

    # Not needed for js, but useful for debugging
    shutil.copyfile(path_from_root('tests', 'freetype', 'LiberationSansBold.ttf'), os.path.join(self.get_dir(), 'font.ttf'))

    # Main
    self.do_run(open(path_from_root('tests', 'freetype', 'main.c'), 'r').read(),
                 open(path_from_root('tests', 'freetype', 'ref.txt'), 'r').read(),
                 ['font.ttf', 'test!', '150', '120', '25'],
                 libraries=self.get_freetype(),
                 includes=[path_from_root('tests', 'freetype', 'include')],
                 post_build=post)
                 #build_ll_hook=self.do_autodebug)

    # github issue 324
    print '[issue 324]'
    self.do_run(open(path_from_root('tests', 'freetype', 'main_2.c'), 'r').read(),
                 open(path_from_root('tests', 'freetype', 'ref_2.txt'), 'r').read(),
                 ['font.ttf', 'w', '32', '32', '25'],
                 libraries=self.get_freetype(),
                 includes=[path_from_root('tests', 'freetype', 'include')],
                 post_build=post)

    print '[issue 324 case 2]'
    self.do_run(open(path_from_root('tests', 'freetype', 'main_3.c'), 'r').read(),
                 open(path_from_root('tests', 'freetype', 'ref_3.txt'), 'r').read(),
                 ['font.ttf', 'W', '32', '32', '0'],
                 libraries=self.get_freetype(),
                 includes=[path_from_root('tests', 'freetype', 'include')],
                 post_build=post)

    print '[issue 324 case 3]'
    # Reuses the previous build (no_build=True) with different arguments.
    self.do_run('',
                 open(path_from_root('tests', 'freetype', 'ref_4.txt'), 'r').read(),
                 ['font.ttf', 'ea', '40', '32', '0'],
                 no_build=True)
def test_sqlite(self):
    """Build the sqlite amalgamation plus a benchmark driver and run it.

    Native equivalent for reference:
    gcc -O3 -I/home/alon/Dev/emscripten/tests/sqlite -ldl src.c
    """
    if self.emcc_args is None: return self.skip('Very slow without ta2, and we would also need to include dlmalloc manually without emcc')
    if Settings.QUANTUM_SIZE == 1: return self.skip('TODO FIXME')

    # Per-line sign corrections previously recorded by a PGO run.
    pgo_data = read_pgo_data(path_from_root('tests', 'sqlite', 'sqlite-autooptimize.fails.txt'))

    Settings.CORRECT_SIGNS = 1 # XXX: in default, we fail with 2 here, even though the pgo_data should be correct (and works in s_0_0). Investigate this.
    Settings.CORRECT_SIGNS_LINES = pgo_data['signs_lines']
    Settings.CORRECT_OVERFLOWS = 0
    Settings.CORRECT_ROUNDINGS = 0
    if self.emcc_args is None: Settings.SAFE_HEAP = 0 # uses time.h to set random bytes, other stuff
    Settings.DISABLE_EXCEPTION_CATCHING = 1
    Settings.FAST_MEMORY = 4*1024*1024
    Settings.EXPORTED_FUNCTIONS = ['_main', '_sqlite3_open', '_sqlite3_close', '_sqlite3_exec', '_sqlite3_free', '_callback'];

    # The #defines below shrink/configure sqlite for this environment
    # before its amalgamated source is appended.
    self.do_run(r'''
                      #define SQLITE_DISABLE_LFS
                      #define LONGDOUBLE_TYPE double
                      #define SQLITE_INT64_TYPE long long int
                      #define SQLITE_THREADSAFE 0
                 ''' + open(path_from_root('tests', 'sqlite', 'sqlite3.c'), 'r').read() +
                       open(path_from_root('tests', 'sqlite', 'benchmark.c'), 'r').read(),
                 open(path_from_root('tests', 'sqlite', 'benchmark.txt'), 'r').read(),
                 includes=[path_from_root('tests', 'sqlite')],
                 force_c=True,
                 js_engines=[SPIDERMONKEY_ENGINE]) # V8 is slow
def test_zlib(self):
    """Build zlib as a library and run its example against the reference output."""
    if self.emcc_args is not None and '-O2' in self.emcc_args:
        # Use closure here for some additional coverage
        self.emcc_args += ['--closure', '1']

    Settings.CORRECT_SIGNS = 1

    with open(path_from_root('tests', 'zlib', 'example.c'), 'r') as source_file:
        source = source_file.read()
    with open(path_from_root('tests', 'zlib', 'ref.txt'), 'r') as reference_file:
        reference = reference_file.read()

    zlib_library = self.get_library('zlib', os.path.join('libz.a'), make_args=['libz.a'])
    self.do_run(source, reference,
                libraries=zlib_library,
                includes=[path_from_root('tests', 'zlib')],
                force_c=True)
def test_the_bullet(self): # Called thus so it runs late in the alphabetical cycle... it is long
    """Build the Bullet physics HelloWorld demo against the Bullet libraries.

    Accepts either of two reference outputs (rounding differences).
    """
    if Building.LLVM_OPTS and self.emcc_args is None: Settings.SAFE_HEAP = 0 # Optimizations make it so we do not have debug info on the line we need to ignore

    # Note: this is also a good test of per-file and per-line changes (since we have multiple files, and correct specific lines)
    if Settings.SAFE_HEAP:
      # Ignore bitfield warnings
      Settings.SAFE_HEAP = 3
      Settings.SAFE_HEAP_LINES = ['btVoronoiSimplexSolver.h:40', 'btVoronoiSimplexSolver.h:41',
                                  'btVoronoiSimplexSolver.h:42', 'btVoronoiSimplexSolver.h:43']

    self.do_run(open(path_from_root('tests', 'bullet', 'Demos', 'HelloWorld', 'HelloWorld.cpp'), 'r').read(),
                 [open(path_from_root('tests', 'bullet', 'output.txt'), 'r').read(), # different roundings
                  open(path_from_root('tests', 'bullet', 'output2.txt'), 'r').read()],
                 libraries=self.get_library('bullet', [os.path.join('src', '.libs', 'libBulletDynamics.a'),
                                                       os.path.join('src', '.libs', 'libBulletCollision.a'),
                                                       os.path.join('src', '.libs', 'libLinearMath.a')],
                                            configure_args=['--disable-demos','--disable-dependency-tracking']),
                 includes=[path_from_root('tests', 'bullet', 'src')],
                 js_engines=[SPIDERMONKEY_ENGINE]) # V8 issue 1407
def test_poppler(self):
    """Build poppler (pdftoppm) linked with freetype and render paper.pdf.

    The PDF bytes are written out as a JS array file; a post-build hook
    appends code that loads it into the FS, invokes run() manually
    (INVOKE_RUN=0), and dumps the rendered PPM contents for comparison.
    """
    if self.emcc_args is None: return self.skip('very slow, we only do this in emcc runs')

    Settings.CORRECT_OVERFLOWS = 1
    Settings.CORRECT_SIGNS = 1

    Building.COMPILER_TEST_OPTS += [
      '-I' + path_from_root('tests', 'freetype', 'include'),
      '-I' + path_from_root('tests', 'poppler', 'include'),
    ]

    Settings.INVOKE_RUN = 0 # We append code that does run() ourselves

    # See post(), below
    input_file = open(os.path.join(self.get_dir(), 'paper.pdf.js'), 'w')
    input_file.write(str(map(ord, open(path_from_root('tests', 'poppler', 'paper.pdf'), 'rb').read())))
    input_file.close()

    post = '''
def process(filename):
  # To avoid loading this large file to memory and altering it, we simply append to the end
  src = open(filename, 'a')
  src.write(
    \'\'\'
      FS.createDataFile('/', 'paper.pdf', eval(Module.read('paper.pdf.js')), true, false);
      run();
      Module.print("Data: " + JSON.stringify(FS.root.contents['filename-1.ppm'].contents.map(function(x) { return unSign(x, 8) })));
    \'\'\'
  )
  src.close()
'''

    #fontconfig = self.get_library('fontconfig', [os.path.join('src', '.libs', 'libfontconfig.a')]) # Used in file, but not needed, mostly

    freetype = self.get_freetype()

    poppler = self.get_library('poppler',
                               [os.path.join('utils', 'pdftoppm.o'),
                                os.path.join('utils', 'parseargs.o'),
                                os.path.join('poppler', '.libs', 'libpoppler.a')],
                               configure_args=['--disable-libjpeg', '--disable-libpng', '--disable-poppler-qt', '--disable-poppler-qt4', '--disable-cms', '--disable-cairo-output', '--disable-abiword-output', '--enable-shared=no'])

    # Combine libraries
    combined = os.path.join(self.get_dir(), 'poppler-combined.bc')
    Building.link(poppler + freetype, combined)

    self.do_ll_run(combined,
                   map(ord, open(path_from_root('tests', 'poppler', 'ref.ppm'), 'r').read()).__str__().replace(' ', ''),
                   args='-scale-to 512 paper.pdf filename'.split(' '),
                   post_build=post)
                   #, build_ll_hook=self.do_autodebug)
def test_openjpeg(self):
    """Build openjpeg (via cmake) and decode a j2k image.

    Because JS arithmetic uses doubles, the decoded pixels differ slightly
    from native output, so image_compare checks the mean pixel difference
    instead of an exact byte match.
    """
    if Settings.USE_TYPED_ARRAYS == 2:
      Settings.CORRECT_SIGNS = 1
    else:
      Settings.CORRECT_SIGNS = 2
      Settings.CORRECT_SIGNS_LINES = ["mqc.c:566", "mqc.c:317"]

    # Post-build hook: embed the input j2k bytes, and dump the decoded raw
    # image contents after the run.
    post = '''
def process(filename):
  import tools.shared as shared
  original_j2k = shared.path_from_root('tests', 'openjpeg', 'syntensity_lobby_s.j2k')
  src = open(filename, 'r').read().replace(
    '// {{PRE_RUN_ADDITIONS}}',
    "FS.createDataFile('/', 'image.j2k', %s, true, false);" % shared.line_splitter(str(
      map(ord, open(original_j2k, 'rb').read())
    ))
  ).replace(
    '// {{POST_RUN_ADDITIONS}}',
    "Module.print('Data: ' + JSON.stringify(FS.root.contents['image.raw'].contents));"
  )
  open(filename, 'w').write(src)
'''

    shutil.copy(path_from_root('tests', 'openjpeg', 'opj_config.h'), self.get_dir())

    lib = self.get_library('openjpeg',
                           [os.path.sep.join('codec/CMakeFiles/j2k_to_image.dir/index.c.o'.split('/')),
                            os.path.sep.join('codec/CMakeFiles/j2k_to_image.dir/convert.c.o'.split('/')),
                            os.path.sep.join('codec/CMakeFiles/j2k_to_image.dir/__/common/color.c.o'.split('/')),
                            os.path.sep.join('codec/CMakeFiles/j2k_to_image.dir/__/common/getopt.c.o'.split('/')),
                            os.path.join('bin', self.get_shared_library_name('libopenjpeg.so.1.4.0'))],
                           configure=['cmake', '.'],
                           #configure_args=['--enable-tiff=no', '--enable-jp3d=no', '--enable-png=no'],
                           make_args=[]) # no -j 2, since parallel builds can fail

    # We use doubles in JS, so we get slightly different values than native code. So we
    # check our output by comparing the average pixel difference
    def image_compare(output):
      # Get the image generated by JS, from the JSON.stringify'd array
      m = re.search('\[[\d, -]*\]', output)
      try:
        js_data = eval(m.group(0))
      except AttributeError:
        print 'Failed to find proper image output in: ' + output
        raise

      js_data = map(lambda x: x if x >= 0 else 256+x, js_data) # Our output may be signed, so unsign it

      # Get the correct output
      true_data = open(path_from_root('tests', 'openjpeg', 'syntensity_lobby_s.raw'), 'rb').read()

      # Compare them
      assert(len(js_data) == len(true_data))
      num = len(js_data)
      diff_total = js_total = true_total = 0
      for i in range(num):
        js_total += js_data[i]
        true_total += ord(true_data[i])
        diff_total += abs(js_data[i] - ord(true_data[i]))
      js_mean = js_total/float(num)
      true_mean = true_total/float(num)
      diff_mean = diff_total/float(num)

      # Known mean pixel value of the reference image; both outputs must be
      # close to it and to each other.
      image_mean = 83.265
      #print '[image stats:', js_mean, image_mean, true_mean, diff_mean, num, ']'
      assert abs(js_mean - image_mean) < 0.01
      assert abs(true_mean - image_mean) < 0.01
      assert diff_mean < 0.01

      return output

    self.do_run(open(path_from_root('tests', 'openjpeg', 'codec', 'j2k_to_image.c'), 'r').read(),
                 'Successfully generated', # The real test for valid output is in image_compare
                 '-i image.j2k -o image.raw'.split(' '),
                 libraries=lib,
                 includes=[path_from_root('tests', 'openjpeg', 'libopenjpeg'),
                           path_from_root('tests', 'openjpeg', 'codec'),
                           path_from_root('tests', 'openjpeg', 'common'),
                           os.path.join(self.get_build_dir(), 'openjpeg')],
                 force_c=True,
                 post_build=post,
                 output_nicerizer=image_compare)#, build_ll_hook=self.do_autodebug)
def test_python(self):
  """Run a pre-built CPython bitcode (python.small.bc) and execute a small
  inline script via -c, checking its printed output."""
  if Settings.QUANTUM_SIZE == 1: return self.skip('TODO: make this work')

  # Overflows in string_hash
  Settings.CORRECT_OVERFLOWS = 1
  Settings.CHECK_OVERFLOWS = 0
  if self.emcc_args is None: Settings.SAFE_HEAP = 0 # Has bitfields which are false positives. Also the PyFloat_Init tries to detect endianness.
  Settings.CORRECT_SIGNS = 1 # Not sure why, but needed
  Settings.EXPORTED_FUNCTIONS = ['_main', '_PyRun_SimpleStringFlags'] # for the demo

  # NOTE(review): '-c' and the script below are adjacent string literals, so
  # they concatenate into a single "-c<script>" argv entry. CPython accepts
  # the attached form, but confirm the missing comma is intentional.
  self.do_ll_run(path_from_root('tests', 'python', 'python.small.bc'),
                 'hello python world!\n[0, 2, 4, 6]\n5\n22\n5.470000',
                 args=['-S', '-c' '''print "hello python world!"; print [x*2 for x in range(4)]; t=2; print 10-3-t; print (lambda x: x*2)(11); print '%f' % 5.47'''])
def test_lifetime(self):
  """llvm.lifetime intrinsics: optimized builds must cull the marked vars
  from the generated JS; unoptimized builds must keep them."""
  if self.emcc_args is None:
    return self.skip('test relies on emcc opts')

  os.environ['EMCC_LEAVE_INPUTS_RAW'] = '1'
  try:
    self.do_ll_run(path_from_root('tests', 'lifetime.ll'), 'hello, world!\n')
    generated = open(os.path.join(self.get_dir(), 'src.cpp.o.js')).read()
    optimizing = ('-O1' in self.emcc_args) or ('-O2' in self.emcc_args)
    if optimizing:
      assert 'a18' not in generated, 'lifetime stuff and their vars must be culled'
    else:
      assert 'a18' in generated, "without opts, it's there"
  finally:
    del os.environ['EMCC_LEAVE_INPUTS_RAW']
# Test cases in separate files. Note that these files may contain invalid .ll!
# They are only valid enough for us to read for test purposes, not for llvm-as
# to process.
def test_cases(self):
  """Run every tests/cases/*.ll file, comparing against its .txt expectation
  (preferring a _q1 variant when QUANTUM_SIZE == 1; 'hello, world!' when no
  expectation file exists; 'skip' contents skip the case)."""
  if Building.LLVM_OPTS: return self.skip("Our code is not exactly 'normal' llvm assembly")

  try:
    os.environ['EMCC_LEAVE_INPUTS_RAW'] = '1'
    self.banned_js_engines = [NODE_JS] # node issue 1669, exception causes stdout not to be flushed
    Settings.CHECK_OVERFLOWS = 0
    for name in glob.glob(path_from_root('tests', 'cases', '*.ll')):
      shortname = name.replace('.ll', '')
      # NOTE(review): '' is a substring of every string, so this never skips
      # anything; it looks like a dev-time hook — put a name fragment here to
      # run a single case. Confirm before removing.
      if '' not in shortname: continue
      if '_ta2' in shortname and not Settings.USE_TYPED_ARRAYS == 2:
        print self.skip('case "%s" only relevant for ta2' % shortname)
        continue
      print >> sys.stderr, "Testing case '%s'..." % shortname
      output_file = path_from_root('tests', 'cases', shortname + '.txt')
      if Settings.QUANTUM_SIZE == 1:
        q1_output_file = path_from_root('tests', 'cases', shortname + '_q1.txt')
        if os.path.exists(q1_output_file):
          output_file = q1_output_file
      if os.path.exists(output_file):
        output = open(output_file, 'r').read()
      else:
        output = 'hello, world!'
      if output.rstrip() != 'skip':
        self.do_ll_run(path_from_root('tests', 'cases', name), output)
      # Optional source checking, a python script that gets a global generated with the source
      src_checker = path_from_root('tests', 'cases', shortname + '.py')
      if os.path.exists(src_checker):
        generated = open('src.cpp.o.js').read()
        exec(open(src_checker).read())
  finally:
    del os.environ['EMCC_LEAVE_INPUTS_RAW']
# Autodebug the code
def do_autodebug(self, filename):
  """Instrument `filename`'s .o.ll with the autodebugger tool and rebuild
  the bitcode from the instrumented output."""
  src_ll = filename + '.o.ll'
  instrumented_ll = src_ll + '.ll'
  proc = Popen(['python', AUTODEBUGGER, src_ll, instrumented_ll],
               stdout=PIPE, stderr=self.stderr_redirect)
  output = proc.communicate()[0]
  assert 'Success.' in output, output
  # rebuild .bc # TODO: use code in do_autodebug_post for this
  self.prep_ll_run(filename, instrumented_ll, force_recompile=True)
# Autodebug the code, after LLVM opts. Will only work once!
def do_autodebug_post(self, filename):
if not hasattr(self, 'post'):
print 'Asking for post re-call'
self.post = True
return True
print 'Autodebugging during post time'
delattr(self, 'post')
output = Popen(['python', AUTODEBUGGER, filename+'.o.ll', filename+'.o.ll.ll'], stdout=PIPE, stderr=self.stderr_redirect).communicate()[0]
assert 'Success.' in output, output
shutil.copyfile(filename + '.o.ll.ll', filename + '.o.ll')
Building.llvm_as(filename)
Building.llvm_dis(filename)
def test_autodebug(self):
  """Autodebugger instrumentation must log function entries ('AD:-1,...'),
  both when applied manually via do_autodebug and via build_ll_hook."""
  if Building.LLVM_OPTS: return self.skip('LLVM opts mess us up')

  # Run a test that should work, generating some code
  self.test_structs()

  filename = os.path.join(self.get_dir(), 'src.cpp')
  self.do_autodebug(filename)

  # Compare to each other, and to expected output
  # NOTE: filename is absolute, so os.path.join inside path_from_root simply
  # returns it unchanged — the 'tests' component is a no-op here.
  self.do_ll_run(path_from_root('tests', filename+'.o.ll.ll'), '''AD:-1,1''')
  assert open('stdout').read().startswith('AD:-1'), 'We must note when we enter functions'

  # Test using build_ll_hook
  src = '''
      #include <stdio.h>

      char cache[256], *next = cache;

      int main()
      {
        cache[10] = 25;
        next[20] = 51;
        int x = cache[10];
        double y = 11.52;
        printf("*%d,%d,%.2f*\\n", x, cache[20], y);
        return 0;
      }
    '''
  self.do_run(src, '''AD:-1,1''', build_ll_hook=self.do_autodebug)
def test_profiling(self):
  """EMSCRIPTEN_PROFILE_* macros: init three slots, time three sleeps of
  increasing length, and dump the collected profile after the run."""
  src = '''
      #include <emscripten.h>
      #include <unistd.h>

      int main()
      {
        EMSCRIPTEN_PROFILE_INIT(3);
        EMSCRIPTEN_PROFILE_BEGIN(0);
        usleep(10 * 1000);
        EMSCRIPTEN_PROFILE_END(0);
        EMSCRIPTEN_PROFILE_BEGIN(1);
        usleep(50 * 1000);
        EMSCRIPTEN_PROFILE_END(1);
        EMSCRIPTEN_PROFILE_BEGIN(2);
        usleep(250 * 1000);
        EMSCRIPTEN_PROFILE_END(2);
        return 0;
      }
    '''

  # Append a Profiling.dump() call to the generated JS so the collected
  # data is printed at the end of the run.
  post1 = '''
def process(filename):
  src = open(filename, 'a')
  src.write(\'\'\'
    Profiling.dump();
  \'\'\')
  src.close()
'''

  # Only the header of the dump is checked; timings vary per machine.
  self.do_run(src, '''Profiling data:
Block 0: ''', post_build=post1)
### Integration tests

def test_ccall(self):
  """ccall/cwrap integration: call exported C functions from JS with number,
  string, array and pointer arguments, check return-value marshalling, and
  verify repeated string calls do not exhaust the stack."""
  if self.emcc_args is not None and '-O2' in self.emcc_args:
    self.emcc_args += ['--closure', '1'] # Use closure here, to test we export things right

  src = r'''
    #include <stdio.h>

    // Optimizations might wipe out our functions without this
    #define KEEPALIVE __attribute__((used))

    extern "C" {
      int KEEPALIVE get_int() { return 5; }
      float KEEPALIVE get_float() { return 3.14; }
      char * KEEPALIVE get_string() { return "hello world"; }

      void KEEPALIVE print_int(int x) { printf("%d\n", x); }
      void KEEPALIVE print_float(float x) { printf("%.2f\n", x); }
      void KEEPALIVE print_string(char *x) { printf("%s\n", x); }

      int KEEPALIVE multi(int x, float y, int z, char *str) { if (x) puts(str); return (x+y)*z; }

      int * KEEPALIVE pointer(int *in) { printf("%d\n", *in); static int ret = 21; return &ret; }
    }

    int main(int argc, char **argv) {
      // keep them alive
      if (argc == 10) return get_int();
      if (argc == 11) return get_float();
      if (argc == 12) return get_string()[0];
      if (argc == 13) print_int(argv[0][0]);
      if (argc == 14) print_float(argv[0][0]);
      if (argc == 15) print_string(argv[0]);
      if (argc == 16) pointer((int*)argv[0]);
      if (argc % 17 == 12) return multi(argc, float(argc)/2, argc+1, argv[0]);
      return 0;
    }
  '''

  # Drive the exported functions from JS in a postRun hook injected at the
  # top of the generated file.
  post = '''
def process(filename):
  src = \'\'\'
    var Module = {
      'postRun': function() {
        Module.print('*');
        var ret;
        ret = Module['ccall']('get_int', 'number'); Module.print([typeof ret, ret]);
        ret = ccall('get_float', 'number'); Module.print([typeof ret, ret.toFixed(2)]);
        ret = ccall('get_string', 'string'); Module.print([typeof ret, ret]);
        ret = ccall('print_int', null, ['number'], [12]); Module.print(typeof ret);
        ret = ccall('print_float', null, ['number'], [14.56]); Module.print(typeof ret);
        ret = ccall('print_string', null, ['string'], ["cheez"]); Module.print(typeof ret);
        ret = ccall('print_string', null, ['array'], [[97, 114, 114, 45, 97, 121, 0]]); Module.print(typeof ret);
        ret = ccall('multi', 'number', ['number', 'number', 'number', 'string'], [2, 1.4, 3, 'more']); Module.print([typeof ret, ret]);
        var p = ccall('malloc', 'pointer', ['number'], [4]);
        setValue(p, 650, 'i32');
        ret = ccall('pointer', 'pointer', ['pointer'], [p]); Module.print([typeof ret, getValue(ret, 'i32')]);
        Module.print('*');
        // part 2: cwrap
        var multi = Module['cwrap']('multi', 'number', ['number', 'number', 'number', 'string']);
        Module.print(multi(2, 1.4, 3, 'atr'));
        Module.print(multi(8, 5.4, 4, 'bret'));
        Module.print('*');
        // part 3: avoid stack explosion
        for (var i = 0; i < TOTAL_STACK/60; i++) {
          ccall('multi', 'number', ['number', 'number', 'number', 'string'], [0, 0, 0, '123456789012345678901234567890123456789012345678901234567890']);
        }
        Module.print('stack is ok.');
      }
    };
  \'\'\' + open(filename, 'r').read()
  open(filename, 'w').write(src)
'''

  Settings.EXPORTED_FUNCTIONS = ['_get_int', '_get_float', '_get_string', '_print_int', '_print_float', '_print_string', '_multi', '_pointer', '_malloc']
  self.do_run(src, '*\nnumber,5\nnumber,3.14\nstring,hello world\n12\nundefined\n14.56\nundefined\ncheez\nundefined\narr-ay\nundefined\nmore\nnumber,10\n650\nnumber,21\n*\natr\n10\nbret\n53\n*\nstack is ok.\n', post_build=post)
def test_scriptaclass(self):
  """Script access to C++ classes: generate JS bindings for a class
  hierarchy and exercise constructors, overloads, default arguments,
  statics, access control and virtual-function overriding from JS.
  'Way 1' (demangler + namespacer) is currently disabled; 'Way 2' uses the
  CppHeaderParser-based bindings generator."""
  header_filename = os.path.join(self.get_dir(), 'header.h')
  header = '''
    struct ScriptMe {
      int value;
      ScriptMe(int val);
      int getVal(); // XXX Sadly, inlining these will result in LLVM not
                    // producing any code for them (when just building
                    // as a library)
      void mulVal(int mul);
    };
  '''
  h = open(header_filename, 'w')
  h.write(header)
  h.close()

  src = '''
    #include "header.h"

    ScriptMe::ScriptMe(int val) : value(val) { }
    int ScriptMe::getVal() { return value; }
    void ScriptMe::mulVal(int mul) { value *= mul; }
  '''

  # Way 1: use demangler and namespacer

  script_src = '''
    var sme = Module._.ScriptMe.__new__(83); // malloc(sizeof(ScriptMe)), ScriptMe::ScriptMe(sme, 83) / new ScriptMe(83) (at addr sme)
    Module._.ScriptMe.mulVal(sme, 2); // ScriptMe::mulVal(sme, 2) sme.mulVal(2)
    Module.print('*' + Module._.ScriptMe.getVal(sme) + '*');
    _free(sme);
    Module.print('*ok*');
  '''
  post = '''
def process(filename):
  Popen(['python', DEMANGLER, filename], stdout=open(filename + '.tmp', 'w')).communicate()
  Popen(['python', NAMESPACER, filename, filename + '.tmp'], stdout=open(filename + '.tmp2', 'w')).communicate()
  src = open(filename, 'r').read().replace(
    '// {{MODULE_ADDITIONS}',
    'Module["_"] = ' + open(filename + '.tmp2', 'r').read().replace('var ModuleNames = ', '').rstrip() + ';\n\n' + script_src + '\n\n' +
    '// {{MODULE_ADDITIONS}'
  )
  open(filename, 'w').write(src)
'''
  # XXX disable due to possible v8 bug -- self.do_run(src, '*166*\n*ok*', post_build=post)

  if self.emcc_args is not None and '-O2' in self.emcc_args:
    self.emcc_args += ['--closure', '1'] # Use closure here, to test we export things right

  # Way 2: use CppHeaderParser

  Settings.RUNTIME_TYPE_INFO = 1

  header = '''
    #include <stdio.h>

    class Parent {
    protected:
      int value;
    public:
      Parent(int val);
      int getVal() { return value; }; // inline should work just fine here, unlike Way 1 before
      void mulVal(int mul);
    };

    class Child1 : public Parent {
    public:
      Child1() : Parent(7) { printf("Child1:%d\\n", value); };
      Child1(int val) : Parent(val*2) { value -= 1; printf("Child1:%d\\n", value); };
      int getValSqr() { return value*value; }
      int getValSqr(int more) { return value*value*more; }
      int getValTimes(int times=1) { return value*times; }
    };

    class Child2 : public Parent {
    public:
      Child2() : Parent(9) { printf("Child2:%d\\n", value); };
      int getValCube() { return value*value*value; }
      static void printStatic() { printf("*static*\\n"); }

      virtual void virtualFunc() { printf("*virtualf*\\n"); }
      virtual void virtualFunc2() { printf("*virtualf2*\\n"); }
      static void runVirtualFunc(Child2 *self) { self->virtualFunc(); };
    private:
      void doSomethingSecret() { printf("security breached!\\n"); }; // we should not be able to do this
    };
  '''
  open(header_filename, 'w').write(header)

  basename = os.path.join(self.get_dir(), 'bindingtest')
  output = Popen(['python', BINDINGS_GENERATOR, basename, header_filename], stdout=PIPE, stderr=self.stderr_redirect).communicate()[0]
  #print output
  assert 'Traceback' not in output, 'Failure in binding generation: ' + output

  src = '''
    #include "header.h"

    Parent::Parent(int val) : value(val) { printf("Parent:%d\\n", val); }
    void Parent::mulVal(int mul) { value *= mul; }

    #include "bindingtest.cpp"
  '''

  # Append the generated bindings to the output JS.
  post2 = '''
def process(filename):
  src = open(filename, 'a')
  src.write(open('bindingtest.js').read() + '\\n\\n')
  src.close()
'''

  # Then append the driver script that exercises the bindings.
  post3 = '''
def process(filename):
  script_src_2 = \'\'\'
    var sme = new Module.Parent(42);
    sme.mulVal(2);
    Module.print('*')
    Module.print(sme.getVal());

    Module.print('c1');

    var c1 = new Module.Child1();
    Module.print(c1.getVal());
    c1.mulVal(2);
    Module.print(c1.getVal());
    Module.print(c1.getValSqr());
    Module.print(c1.getValSqr(3));
    Module.print(c1.getValTimes()); // default argument should be 1
    Module.print(c1.getValTimes(2));

    Module.print('c1 v2');

    c1 = new Module.Child1(8); // now with a parameter, we should handle the overloading automatically and properly and use constructor #2
    Module.print(c1.getVal());
    c1.mulVal(2);
    Module.print(c1.getVal());
    Module.print(c1.getValSqr());
    Module.print(c1.getValSqr(3));

    Module.print('c2')

    var c2 = new Module.Child2();
    Module.print(c2.getVal());
    c2.mulVal(2);
    Module.print(c2.getVal());
    Module.print(c2.getValCube());
    var succeeded;
    try {
      succeeded = 0;
      Module.print(c2.doSomethingSecret()); // should fail since private
      succeeded = 1;
    } catch(e) {}
    Module.print(succeeded);
    try {
      succeeded = 0;
      Module.print(c2.getValSqr()); // function from the other class
      succeeded = 1;
    } catch(e) {}
    Module.print(succeeded);
    try {
      succeeded = 0;
      c2.getValCube(); // sanity
      succeeded = 1;
    } catch(e) {}
    Module.print(succeeded);

    Module.Child2.prototype.printStatic(); // static calls go through the prototype

    // virtual function
    c2.virtualFunc();
    Module.Child2.prototype.runVirtualFunc(c2);
    c2.virtualFunc2();

    // extend the class from JS
    var c3 = new Module.Child2;
    Module.customizeVTable(c3, [{
      original: Module.Child2.prototype.virtualFunc,
      replacement: function() {
        Module.print('*js virtualf replacement*');
      }
    }, {
      original: Module.Child2.prototype.virtualFunc2,
      replacement: function() {
        Module.print('*js virtualf2 replacement*');
      }
    }]);
    c3.virtualFunc();
    Module.Child2.prototype.runVirtualFunc(c3);
    c3.virtualFunc2();

    c2.virtualFunc(); // original should remain the same
    Module.Child2.prototype.runVirtualFunc(c2);
    c2.virtualFunc2();
    Module.print('*ok*');
  \'\'\'
  src = open(filename, 'a')
  src.write(script_src_2 + '\\n')
  src.close()
'''

  self.do_run(src, '''*
84
c1
Parent:7
Child1:7
7
14
196
588
14
28
c1 v2
Parent:16
Child1:15
15
30
900
2700
c2
Parent:9
Child2:9
9
18
5832
0
0
1
*static*
*virtualf*
*virtualf*
*virtualf2*
Parent:9
Child2:9
*js virtualf replacement*
*js virtualf replacement*
*js virtualf2 replacement*
*virtualf*
*virtualf*
*virtualf2*
*ok*
''', post_build=[post2, post3])
def test_scriptaclass_2(self):
  """Bindings generator round-trip for a class that takes and prints C
  strings (checks string marshalling through the generated bindings)."""
  header_filename = os.path.join(self.get_dir(), 'header.h')
  header = '''
    #include <stdio.h>
    #include <string.h>

    class StringUser {
      char *s;
      int i;
    public:
      StringUser(char *string, int integer) : s(strdup(string)), i(integer) {}
      void Print(int anotherInteger, char *anotherString) {
        printf("|%s|%d|%s|%d|\\n", s, i, anotherString, anotherInteger);
      }
      void CallOther(StringUser *fr) { fr->Print(i, s); }
    };
  '''
  open(header_filename, 'w').write(header)

  basename = os.path.join(self.get_dir(), 'bindingtest')
  output = Popen(['python', BINDINGS_GENERATOR, basename, header_filename], stdout=PIPE, stderr=self.stderr_redirect).communicate()[0]
  #print output
  assert 'Traceback' not in output, 'Failure in binding generation: ' + output

  src = '''
    #include "header.h"

    #include "bindingtest.cpp"
  '''

  # Append the generated bindings plus a tiny driver script to the JS.
  post = '''
def process(filename):
  src = open(filename, 'a')
  src.write(open('bindingtest.js').read() + '\\n\\n')
  src.write(\'\'\'
    var user = new Module.StringUser("hello", 43);
    user.Print(41, "world");
  \'\'\')
  src.close()
'''
  self.do_run(src, '|hello|43|world|41|', post_build=post)
def test_typeinfo(self):
  """RUNTIME_TYPE_INFO: the generated JS must expose struct layouts via
  Runtime.typeInfo / generateStructInfo when enabled, and print nothing of
  the sort when disabled."""
  if self.emcc_args is not None and self.emcc_args != []: return self.skip('full LLVM opts optimize out all the code that uses the type')

  Settings.RUNTIME_TYPE_INFO = 1
  if Settings.QUANTUM_SIZE != 4: return self.skip('We assume normal sizes in the output here')

  src = '''
    #include<stdio.h>
    struct UserStruct {
      int x;
      char y;
      short z;
    };
    struct Encloser {
      short x;
      UserStruct us;
      int y;
    };
    int main() {
      Encloser e;
      e.us.y = 5;
      printf("*ok:%d*\\n", e.us.y);
      return 0;
    }
  '''

  # After the run, interrogate Runtime.typeInfo from JS and print the field
  # layouts so the Python side can compare exact offsets.
  post = '''
def process(filename):
  src = open(filename, 'r').read().replace(
    '// {{POST_RUN_ADDITIONS}}',
    \'\'\'
      if (Runtime.typeInfo) {
        Module.print('|' + Runtime.typeInfo.UserStruct.fields + '|' + Runtime.typeInfo.UserStruct.flatIndexes + '|');
        var t = Runtime.generateStructInfo(['x', { us: ['x', 'y', 'z'] }, 'y'], 'Encloser')
        Module.print('|' + [t.x, t.us.x, t.us.y, t.us.z, t.y] + '|');
        Module.print('|' + JSON.stringify(Runtime.generateStructInfo(['x', 'y', 'z'], 'UserStruct')) + '|');
      } else {
        Module.print('No type info.');
      }
    \'\'\'
  )
  open(filename, 'w').write(src)
'''

  self.do_run(src,
              '*ok:5*\n|i32,i8,i16|0,4,6|\n|0,4,8,10,12|\n|{"__size__":8,"x":0,"y":4,"z":6}|',
              post_build=post)

  # Make sure that without the setting, we don't spam the .js with the type info
  Settings.RUNTIME_TYPE_INFO = 0
  self.do_run(src, 'No type info.', post_build=post)
### Tests for tools
def test_safe_heap(self):
if not Settings.SAFE_HEAP: return self.skip('We need SAFE_HEAP to test SAFE_HEAP')
if Settings.USE_TYPED_ARRAYS == 2: return self.skip('It is ok to violate the load-store assumption with TA2')
if Building.LLVM_OPTS: return self.skip('LLVM can optimize away the intermediate |x|')
src = '''
#include<stdio.h>
int main() {
int *x = new int;
*x = 20;
float *y = (float*)x;
printf("%f\\n", *y);
printf("*ok*\\n");
return 0;
}
'''
try:
self.do_run(src, '*nothingatall*')
except Exception, e:
# This test *should* fail, by throwing this exception
assert 'Assertion failed: Load-store consistency assumption failure!' in str(e), str(e)
# And we should not fail if we disable checking on that line
Settings.SAFE_HEAP = 3
Settings.SAFE_HEAP_LINES = ["src.cpp:7"]
self.do_run(src, '*ok*')
# But if we disable the wrong lines, we still fail
Settings.SAFE_HEAP_LINES = ["src.cpp:99"]
try:
self.do_run(src, '*nothingatall*')
except Exception, e:
# This test *should* fail, by throwing this exception
assert 'Assertion failed: Load-store consistency assumption failure!' in str(e), str(e)
# And reverse the checks with = 2
Settings.SAFE_HEAP = 2
Settings.SAFE_HEAP_LINES = ["src.cpp:99"]
self.do_run(src, '*ok*')
Settings.SAFE_HEAP = 1
# Linking multiple files should work too
module = '''
#include<stdio.h>
void callFunc() {
int *x = new int;
*x = 20;
float *y = (float*)x;
printf("%f\\n", *y);
}
'''
module_name = os.path.join(self.get_dir(), 'module.cpp')
open(module_name, 'w').write(module)
main = '''
#include<stdio.h>
extern void callFunc();
int main() {
callFunc();
int *x = new int;
*x = 20;
float *y = (float*)x;
printf("%f\\n", *y);
printf("*ok*\\n");
return 0;
}
'''
main_name = os.path.join(self.get_dir(), 'main.cpp')
open(main_name, 'w').write(main)
Building.emcc(module_name, ['-g'])
Building.emcc(main_name, ['-g'])
all_name = os.path.join(self.get_dir(), 'all.bc')
Building.link([module_name + '.o', main_name + '.o'], all_name)
try:
self.do_ll_run(all_name, '*nothingatall*')
except Exception, e:
# This test *should* fail, by throwing this exception
assert 'Assertion failed: Load-store consistency assumption failure!' in str(e), str(e)
# And we should not fail if we disable checking on those lines
Settings.SAFE_HEAP = 3
Settings.SAFE_HEAP_LINES = ["module.cpp:7", "main.cpp:9"]
self.do_ll_run(all_name, '*ok*')
# But we will fail if we do not disable exactly what we need to - any mistake leads to error
for lines in [["module.cpp:22", "main.cpp:9"], ["module.cpp:7", "main.cpp:29"], ["module.cpp:127", "main.cpp:449"], ["module.cpp:7"], ["main.cpp:9"]]:
Settings.SAFE_HEAP_LINES = lines
try:
self.do_ll_run(all_name, '*nothingatall*')
except Exception, e:
# This test *should* fail, by throwing this exception
assert 'Assertion failed: Load-store consistency assumption failure!' in str(e), str(e)
def test_check_overflow(self):
Settings.CHECK_OVERFLOWS = 1
Settings.CORRECT_OVERFLOWS = 0
src = '''
#include<stdio.h>
int main() {
int t = 77;
for (int i = 0; i < 30; i++) {
//t = (t << 2) + t + 1; // This would have worked, since << forces into 32-bit int...
t = t*5 + 1; // Python lookdict_string has ~the above line, which turns into this one with optimizations...
printf("%d,%d\\n", t, t & 127);
}
return 0;
}
'''
try:
self.do_run(src, '*nothingatall*')
except Exception, e:
# This test *should* fail, by throwing this exception
assert 'Too many corrections' in str(e), str(e)
def test_debug(self):
src = '''
#include <stdio.h>
#include <assert.h>
void checker(int x) {
x += 20;
assert(x < 15); // this is line 7!
}
int main() {
checker(10);
return 0;
}
'''
try:
post = r'''
def process(filename):
lines = open(filename, 'r').readlines()
lines = filter(lambda line: '___assert_fail(' in line or '___assert_func(' in line, lines)
found_line_num = any(('//@line 7 "' in line) for line in lines)
found_filename = any(('src.cpp"\n' in line) for line in lines)
assert found_line_num, 'Must have debug info with the line number'
assert found_filename, 'Must have debug info with the filename'
'''
self.do_run(src, '*nothingatall*', post_build=post)
except Exception, e:
# This test *should* fail
assert 'Assertion failed' in str(e), str(e)
def test_linespecific(self):
  """Per-line correction settings (CORRECT_SIGNS_LINES / _OVERFLOWS_LINES /
  _ROUNDINGS_LINES): fixing exactly the right line works, fixing the wrong
  line does not, and mode 3 reverses the meaning of the line list."""
  if self.emcc_args: self.emcc_args += ['--llvm-opts', '0'] # llvm full opts make the expected failures here not happen
  Settings.CHECK_SIGNS = 0
  Settings.CHECK_OVERFLOWS = 0

  # Signs

  src = '''
    #include <stdio.h>
    #include <assert.h>

    int main()
    {
      int varey = 100;
      unsigned int MAXEY = -1;
      printf("*%d*\\n", varey >= MAXEY); // 100 >= -1? not in unsigned!
    }
  '''

  Settings.CORRECT_SIGNS = 0
  self.do_run(src, '*1*') # This is a fail - we expect 0

  Settings.CORRECT_SIGNS = 1
  self.do_run(src, '*0*') # Now it will work properly

  # And now let's fix just that one line
  Settings.CORRECT_SIGNS = 2
  Settings.CORRECT_SIGNS_LINES = ["src.cpp:9"]
  self.do_run(src, '*0*')

  # Fixing the wrong line should not work
  Settings.CORRECT_SIGNS = 2
  Settings.CORRECT_SIGNS_LINES = ["src.cpp:3"]
  self.do_run(src, '*1*')

  # And reverse the checks with = 2
  Settings.CORRECT_SIGNS = 3
  Settings.CORRECT_SIGNS_LINES = ["src.cpp:3"]
  self.do_run(src, '*0*')
  Settings.CORRECT_SIGNS = 3
  Settings.CORRECT_SIGNS_LINES = ["src.cpp:9"]
  self.do_run(src, '*1*')

  Settings.CORRECT_SIGNS = 0

  # Overflows

  src = '''
    #include<stdio.h>
    int main() {
      int t = 77;
      for (int i = 0; i < 30; i++) {
        t = t*5 + 1;
      }
      printf("*%d,%d*\\n", t, t & 127);
      return 0;
    }
  '''

  correct = '*186854335,63*'
  Settings.CORRECT_OVERFLOWS = 0
  try:
    self.do_run(src, correct)
    raise Exception('UNEXPECTED-PASS')
  except Exception, e:
    assert 'UNEXPECTED' not in str(e), str(e)
    assert 'Expected to find' in str(e), str(e)

  Settings.CORRECT_OVERFLOWS = 1
  self.do_run(src, correct) # Now it will work properly

  # And now let's fix just that one line
  Settings.CORRECT_OVERFLOWS = 2
  Settings.CORRECT_OVERFLOWS_LINES = ["src.cpp:6"]
  self.do_run(src, correct)

  # Fixing the wrong line should not work
  Settings.CORRECT_OVERFLOWS = 2
  Settings.CORRECT_OVERFLOWS_LINES = ["src.cpp:3"]
  try:
    self.do_run(src, correct)
    raise Exception('UNEXPECTED-PASS')
  except Exception, e:
    assert 'UNEXPECTED' not in str(e), str(e)
    assert 'Expected to find' in str(e), str(e)

  # And reverse the checks with = 2
  Settings.CORRECT_OVERFLOWS = 3
  Settings.CORRECT_OVERFLOWS_LINES = ["src.cpp:3"]
  self.do_run(src, correct)
  Settings.CORRECT_OVERFLOWS = 3
  Settings.CORRECT_OVERFLOWS_LINES = ["src.cpp:6"]
  try:
    self.do_run(src, correct)
    raise Exception('UNEXPECTED-PASS')
  except Exception, e:
    assert 'UNEXPECTED' not in str(e), str(e)
    assert 'Expected to find' in str(e), str(e)

  Settings.CORRECT_OVERFLOWS = 0

  # Roundings

  src = '''
    #include <stdio.h>
    #include <assert.h>

    int main()
    {
      TYPE x = -5;
      printf("*%d*", x/2);
      x = 5;
      printf("*%d*", x/2);

      float y = -5.33;
      x = y;
      printf("*%d*", x);
      y = 5.33;
      x = y;
      printf("*%d*", x);

      printf("\\n");
    }
  '''

  if Settings.USE_TYPED_ARRAYS != 2: # the errors here are very specific to non-i64 mode 1
    Settings.CORRECT_ROUNDINGS = 0
    self.do_run(src.replace('TYPE', 'long long'), '*-3**2**-6**5*') # JS floor operations, always to the negative. This is an undetected error here!
    self.do_run(src.replace('TYPE', 'int'), '*-2**2**-5**5*') # We get these right, since they are 32-bit and we can shortcut using the |0 trick
    self.do_run(src.replace('TYPE', 'unsigned int'), '*-3**2**-6**5*') # We fail, since no fast shortcut for 32-bit unsigneds

  Settings.CORRECT_ROUNDINGS = 1
  Settings.CORRECT_SIGNS = 1 # To be correct here, we need sign corrections as well
  self.do_run(src.replace('TYPE', 'long long'), '*-2**2**-5**5*') # Correct
  self.do_run(src.replace('TYPE', 'int'), '*-2**2**-5**5*') # Correct
  self.do_run(src.replace('TYPE', 'unsigned int'), '*2147483645**2**-5**5*') # Correct
  Settings.CORRECT_SIGNS = 0

  if Settings.USE_TYPED_ARRAYS != 2: # the errors here are very specific to non-i64 mode 1
    Settings.CORRECT_ROUNDINGS = 2
    Settings.CORRECT_ROUNDINGS_LINES = ["src.cpp:13"] # Fix just the last mistake
    self.do_run(src.replace('TYPE', 'long long'), '*-3**2**-5**5*')
    self.do_run(src.replace('TYPE', 'int'), '*-2**2**-5**5*') # Here we are lucky and also get the first one right
    self.do_run(src.replace('TYPE', 'unsigned int'), '*-3**2**-5**5*') # No such luck here

  # And reverse the check with = 2
  if Settings.USE_TYPED_ARRAYS != 2: # the errors here are very specific to non-i64 mode 1
    Settings.CORRECT_ROUNDINGS = 3
    Settings.CORRECT_ROUNDINGS_LINES = ["src.cpp:999"]
    self.do_run(src.replace('TYPE', 'long long'), '*-2**2**-5**5*')
    self.do_run(src.replace('TYPE', 'int'), '*-2**2**-5**5*')
    Settings.CORRECT_SIGNS = 1 # To be correct here, we need sign corrections as well
    self.do_run(src.replace('TYPE', 'unsigned int'), '*2147483645**2**-5**5*')
    Settings.CORRECT_SIGNS = 0
def test_pgo(self):
  """Profile-guided optimization data: run once with full instrumentation,
  check the reported correction hit counts, then recompile using the
  collected per-line data and verify the program still works.

  Fix over the original: the final sanity-check's bare `except:` is
  narrowed to `except Exception:` so KeyboardInterrupt/SystemExit are no
  longer swallowed."""
  Settings.PGO = Settings.CHECK_OVERFLOWS = Settings.CORRECT_OVERFLOWS = Settings.CHECK_SIGNS = Settings.CORRECT_SIGNS = 1

  src = '''
    #include<stdio.h>
    int main() {
      int t = 77;
      for (int i = 0; i < 30; i++) {
        t = t*5 + 1;
      }
      printf("*%d,%d*\\n", t, t & 127);
      int varey = 100;
      unsigned int MAXEY = -1;
      for (int j = 0; j < 2; j++) {
        printf("*%d*\\n", varey >= MAXEY); // 100 >= -1? not in unsigned!
        MAXEY = 1; // So we succeed the second time around
      }
      return 0;
    }
  '''

  def check(output):
    # TODO: check the line #
    if self.emcc_args is None or self.emcc_args == []: # LLVM full opts optimize out some corrections
      assert 'Overflow|src.cpp:6 : 60 hits, %20 failures' in output, 'no indication of Overflow corrections: ' + output
      assert 'UnSign|src.cpp:13 : 6 hits, %17 failures' in output, 'no indication of Sign corrections: ' + output
    return output

  self.do_run(src, '*186854335,63*\n', output_nicerizer=check)

  Settings.PGO = Settings.CHECK_OVERFLOWS = Settings.CORRECT_OVERFLOWS = Settings.CHECK_SIGNS = Settings.CORRECT_SIGNS = 0

  # Now, recompile with the PGO data, and it should work
  pgo_data = read_pgo_data(self.get_stdout_path())

  Settings.CORRECT_SIGNS = 2
  Settings.CORRECT_SIGNS_LINES = pgo_data['signs_lines']
  Settings.CORRECT_OVERFLOWS = 2
  Settings.CORRECT_OVERFLOWS_LINES = pgo_data['overflows_lines']

  self.do_run(src, '*186854335,63*\n')

  # Sanity check: Without PGO, we will fail
  # NOTE(review): the PGO-derived correction lines set above are still in
  # effect here, so this run likely succeeds and the except never fires —
  # confirm the intended pre-run state before relying on this check.
  try:
    self.do_run(src, '*186854335,63*\n')
  except Exception:
    pass
def test_exit_status(self):
  """With CATCH_EXIT_CODE, exit(118) must still run atexit handlers and
  the unusual exit status must be reported in the output."""
  source = r'''
    #include <stdio.h>
    #include <stdlib.h>
    static void cleanup() {
      printf("cleanup\n");
    }
    int main()
    {
      atexit(cleanup); // this atexit should still be called
      printf("hello, world!\n");
      exit(118); // Unusual exit status to make sure it's working!
    }
  '''
  Settings.CATCH_EXIT_CODE = 1
  self.do_run(source, 'hello, world!\ncleanup\nExit Status: 118')
# Generate tests for everything
def make_run(fullname, name=-1, compiler=-1, llvm_opts=0, embetter=0, quantum_size=0, typed_arrays=0, emcc_args=None):
  """Build and return a concrete test class named `fullname`, subclassing T,
  whose setUp configures Building/Settings for the given combination of
  compiler, LLVM opt level, quantum size and typed-array mode. When
  emcc_args is given, setUp instead defers entirely to Settings.load.
  (`name` is unused here; kept for call-site compatibility.)

  Fix over the original: the chained reset of the *_LINES settings only
  assigned Settings.CORRECT_OVERFLOWS_LINES — CORRECT_SIGNS_LINES,
  CORRECT_ROUNDINGS_LINES and SAFE_HEAP_LINES were bound as bare names in
  the exec scope, so stale per-line settings leaked between runs. All four
  are now reset on Settings."""
  exec('''
class %s(T):
  def tearDown(self):
    super(%s, self).tearDown()

  def setUp(self):
    super(%s, self).setUp()

    Building.COMPILER_TEST_OPTS = ['-g']
    os.chdir(self.get_dir()) # Ensure the directory exists and go there
    Building.COMPILER = %r

    self.emcc_args = %s
    if self.emcc_args is not None:
      Settings.load(self.emcc_args)
      Building.LLVM_OPTS = 0
      return

    llvm_opts = %d # 1 is yes, 2 is yes and unsafe
    embetter = %d
    quantum_size = %d

    # TODO: Move much of these to a init() function in shared.py, and reuse that
    Settings.USE_TYPED_ARRAYS = %d
    Settings.INVOKE_RUN = 1
    Settings.RELOOP = 0 # we only do them in the "o2" pass
    Settings.MICRO_OPTS = embetter
    Settings.QUANTUM_SIZE = quantum_size
    Settings.ASSERTIONS = 1-embetter
    Settings.SAFE_HEAP = 1-(embetter and llvm_opts)
    Building.LLVM_OPTS = llvm_opts
    Settings.PGO = 0
    Settings.CHECK_OVERFLOWS = 1-(embetter or llvm_opts)
    Settings.CORRECT_OVERFLOWS = 1-(embetter and llvm_opts)
    Settings.CORRECT_SIGNS = 0
    Settings.CORRECT_ROUNDINGS = 0
    Settings.CORRECT_OVERFLOWS_LINES = Settings.CORRECT_SIGNS_LINES = Settings.CORRECT_ROUNDINGS_LINES = Settings.SAFE_HEAP_LINES = []
    Settings.CHECK_SIGNS = 0 #1-(embetter or llvm_opts)
    Settings.INIT_STACK = 0
    Settings.RUNTIME_TYPE_INFO = 0
    Settings.DISABLE_EXCEPTION_CATCHING = 0
    Settings.PROFILE = 0
    Settings.INCLUDE_FULL_LIBRARY = 0
    Settings.BUILD_AS_SHARED_LIB = 0
    Settings.RUNTIME_LINKED_LIBS = []
    Settings.CATCH_EXIT_CODE = 0
    Settings.EMULATE_UNALIGNED_ACCESSES = int(Settings.USE_TYPED_ARRAYS == 2 and Building.LLVM_OPTS == 2)
    Settings.DOUBLE_MODE = 1 if Settings.USE_TYPED_ARRAYS and Building.LLVM_OPTS == 0 else 0
    Settings.PRECISE_I64_MATH = 0
    Building.pick_llvm_opts(3)

TT = %s
''' % (fullname, fullname, fullname, compiler, str(emcc_args), llvm_opts, embetter, quantum_size, typed_arrays, fullname))
  return TT
# Make one run with the defaults
exec('default = make_run("default", compiler=CLANG, emcc_args=[])')

# Make one run with -O1, with safe heap
exec('o1 = make_run("o1", compiler=CLANG, emcc_args=["-O1", "-s", "SAFE_HEAP=1"])')

# Make one run with -O2, but without closure (we enable closure in specific tests, otherwise on everything it is too slow)
exec('o2 = make_run("o2", compiler=CLANG, emcc_args=["-O2", "--closure", "0"])')

# Make custom runs with various options
for compiler, quantum, embetter, typed_arrays, llvm_opts in [
  (CLANG, 1, 1, 0, 0),
  (CLANG, 1, 1, 1, 1),
  (CLANG, 4, 0, 0, 0),
  (CLANG, 4, 0, 0, 1),
  (CLANG, 4, 1, 1, 0),
  (CLANG, 4, 1, 1, 1),
]:
  # Name encodes the configuration: s_<llvm_opts>_<embetter>[_qN][_tN],
  # with quantum 4 and typed_arrays 0/1 treated as defaults (omitted).
  fullname = 's_%d_%d%s%s' % (
    llvm_opts, embetter, '' if quantum == 4 else '_q' + str(quantum), '' if typed_arrays in [0, 1] else '_t' + str(typed_arrays)
  )
  # exec binds each generated class as a module-level global so unittest
  # discovery can find it by name.
  exec('%s = make_run(fullname, %r,%r,%d,%d,%d,%d)' % (fullname, fullname, compiler, llvm_opts, embetter, quantum, typed_arrays))

del T # T is just a shape for the specific subclasses, we don't test it itself
class other(RunnerCore):
def test_emcc(self):
  """End-to-end sanity checks of the emcc/em++ driver itself.

  For both EMCC and EMXX this exercises: --version and --help output,
  plain compilation to a.out.js, error reporting, compiling to bitcode
  (-c / -o), singleton archives, .ll input, -o with absolute paths,
  dlmalloc inclusion, the -O0..-O3/--closure matrix, -s settings and
  their aliases, multi-file compilation/linking, and --js-transform.
  Everything runs the real driver as a subprocess.
  """
  for compiler in [EMCC, EMXX]:
    shortcompiler = os.path.basename(compiler)
    # em++ compiles .cpp; emcc compiles .c
    suffix = '.c' if compiler == EMCC else '.cpp'

    # --version
    output = Popen(['python', compiler, '--version'], stdout=PIPE, stderr=PIPE).communicate()
    self.assertContained('''emcc (Emscripten GCC-like replacement) 2.0
Copyright (C) 2011 the Emscripten authors.
This is free and open source software under the MIT license.
There is NO warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
''', output[0].replace('\r', ''), output[1].replace('\r', ''))

    # --help
    output = Popen(['python', compiler, '--help'], stdout=PIPE, stderr=PIPE).communicate()
    self.assertContained('''%s [options] file...
Most normal gcc/g++ options will work, for example:
--help Display this information
--version Display compiler version information
Options that are modified or new in %s include:
-O0 No optimizations (default)
''' % (shortcompiler, shortcompiler), output[0].replace('\r', ''), output[1].replace('\r', ''))

    # emcc src.cpp ==> writes a.out.js
    self.clear()
    output = Popen(['python', compiler, path_from_root('tests', 'hello_world' + suffix)], stdout=PIPE, stderr=PIPE).communicate()
    assert len(output[0]) == 0, output[0]
    assert os.path.exists('a.out.js'), '\n'.join(output)
    self.assertContained('hello, world!', run_js('a.out.js'))

    # properly report source code errors, and stop there
    self.clear()
    assert not os.path.exists('a.out.js')
    output = Popen(['python', compiler, path_from_root('tests', 'hello_world_error' + suffix)], stdout=PIPE, stderr=PIPE).communicate()
    assert not os.path.exists('a.out.js'), 'compilation failed, so no output file is expected'
    assert len(output[0]) == 0, output[0]
    self.assertNotContained('IOError', output[1]) # no python stack
    self.assertNotContained('Traceback', output[1]) # no python stack
    self.assertContained('error: invalid preprocessing directive', output[1])
    self.assertContained("error: use of undeclared identifier 'cheez", output[1])
    self.assertContained('2 errors generated', output[1])
    # The driver must add exactly one line of its own after clang's errors.
    assert output[1].split('2 errors generated.')[1].replace('\r', '').replace('\n', '') == 'emcc: compiler frontend failed to generate LLVM bitcode, halting'

    # emcc src.cpp -c and emcc src.cpp -o src.[o|bc] ==> should give a .bc file
    # regression check: -o js should create "js", with bitcode content
    for args in [['-c'], ['-o', 'src.o'], ['-o', 'src.bc'], ['-o', 'src.so'], ['-o', 'js']]:
      target = args[1] if len(args) == 2 else 'hello_world.o'
      self.clear()
      Popen(['python', compiler, path_from_root('tests', 'hello_world' + suffix)] + args, stdout=PIPE, stderr=PIPE).communicate()
      syms = Building.llvm_nm(target)
      assert len(syms.defs) == 1 and 'main' in syms.defs, 'Failed to generate valid bitcode'
      if target == 'js': # make sure emcc can recognize the target as a bitcode file
        shutil.move(target, target + '.bc')
        target += '.bc'
      output = Popen(['python', compiler, target, '-o', target + '.js'], stdout = PIPE, stderr = PIPE).communicate()
      assert len(output[0]) == 0, output[0]
      assert os.path.exists(target + '.js'), 'Expected %s to exist since args are %s : %s' % (target + '.js', str(args), '\n'.join(output))
      self.assertContained('hello, world!', run_js(target + '.js'))

    # handle singleton archives
    self.clear()
    Popen(['python', compiler, path_from_root('tests', 'hello_world' + suffix), '-o', 'a.bc'], stdout=PIPE, stderr=PIPE).communicate()
    Popen([LLVM_AR, 'r', 'a.a', 'a.bc'], stdout=PIPE, stderr=PIPE).communicate()
    assert os.path.exists('a.a')
    output = Popen(['python', compiler, 'a.a']).communicate()
    assert os.path.exists('a.out.js'), output
    self.assertContained('hello, world!', run_js('a.out.js'))

    # emcc src.ll ==> generates .js
    self.clear()
    output = Popen(['python', compiler, path_from_root('tests', 'hello_world.ll')], stdout=PIPE, stderr=PIPE).communicate()
    assert len(output[0]) == 0, output[0]
    assert os.path.exists('a.out.js'), '\n'.join(output)
    self.assertContained('hello, world!', run_js('a.out.js'))

    # emcc [..] -o [path] ==> should work with absolute paths
    try:
      os.mkdir('a_dir')
      os.chdir('a_dir')
      os.mkdir('b_dir')
      for path in [os.path.abspath(os.path.join('..', 'file1.js')), os.path.join('b_dir', 'file2.js')]:
        self.clear()
        output = Popen(['python', compiler, path_from_root('tests', 'hello_world.ll'), '-o', path], stdout=PIPE, stderr=PIPE).communicate()
        assert os.path.exists(path), path + ' does not exist; ' + '\n'.join(output)
        self.assertContained('hello, world!', run_js(path))
    finally:
      # Always return to the test dir so later sections (and teardown) work.
      os.chdir(self.get_dir())
      try:
        shutil.rmtree('a_dir')
      except:
        pass

    # dlmalloc. dlmalloc is special in that it is the only part of libc that is (1) hard to write well, and
    # very speed-sensitive. So we do not implement it in JS in library.js, instead we compile it from source
    for source, has_malloc in [('hello_world' + suffix, False), ('hello_malloc.cpp', True)]:
      self.clear()
      output = Popen(['python', compiler, path_from_root('tests', source)], stdout=PIPE, stderr=PIPE).communicate()
      assert os.path.exists('a.out.js'), '\n'.join(output)
      self.assertContained('hello, world!', run_js('a.out.js'))
      generated = open('a.out.js').read()
      assert ('function _malloc(bytes) {' in generated) == (not has_malloc), 'If malloc is needed, it should be there, if not not'

    # Optimization: emcc src.cpp -o something.js [-Ox]. -O0 is the same as not specifying any optimization setting
    for params, opt_level, bc_params, closure, has_malloc in [ # bc params are used after compiling to bitcode
      (['-o', 'something.js'], 0, None, 0, 1),
      (['-o', 'something.js', '-O0'], 0, None, 0, 0),
      (['-o', 'something.js', '-O1'], 1, None, 0, 0),
      (['-o', 'something.js', '-O1', '--closure', '1'], 1, None, 1, 0),
      (['-o', 'something.js', '-O2'], 2, None, 1, 1),
      (['-o', 'something.js', '-O2', '--closure', '0'], 2, None, 0, 0),
      (['-o', 'something.js', '-O3'], 3, None, 1, 1),
      (['-o', 'something.js', '-O3', '--closure', '0'], 3, None, 0, 0),
      # and, test compiling to bitcode first
      (['-o', 'something.bc'], 0, [], 0, 0),
      (['-o', 'something.bc'], 0, ['-O0'], 0, 0),
      (['-o', 'something.bc'], 1, ['-O1'], 0, 0),
      (['-o', 'something.bc'], 2, ['-O2'], 1, 0),
      (['-o', 'something.bc'], 3, ['-O3'], 1, 0),
      (['-O1', '-o', 'something.bc'], 0, [], 0, 0), # -Ox is ignored and warned about
    ]:
      #print params, opt_level, bc_params, closure
      self.clear()
      output = Popen(['python', compiler, path_from_root('tests', 'hello_world_loop' + ('_malloc' if has_malloc else '') + '.cpp')] + params,
                     stdout=PIPE, stderr=PIPE).communicate()
      assert len(output[0]) == 0, output[0]
      if bc_params is not None:
        # Two-step build: first to bitcode (above), then bitcode -> JS here.
        if '-O1' in params and 'something.bc' in params:
          assert 'warning: -Ox flags ignored, since not generating JavaScript' in output[1]
        assert os.path.exists('something.bc'), output[1]
        output = Popen(['python', compiler, 'something.bc', '-o', 'something.js'] + bc_params, stdout=PIPE, stderr=PIPE).communicate()
      assert os.path.exists('something.js'), output[1]
      assert ('Warning: Applying some potentially unsafe optimizations!' in output[1]) == (opt_level >= 3), 'unsafe warning should appear in opt >= 3'
      self.assertContained('hello, world!', run_js('something.js'))

      # Verify optimization level etc. in the generated code
      # XXX these are quite sensitive, and will need updating when code generation changes
      generated = open('something.js').read() # TODO: parse out the _main function itself, not support code, if the tests below need that some day
      assert 'new Uint16Array' in generated and 'new Uint32Array' in generated, 'typed arrays 2 should be used by default'
      assert 'SAFE_HEAP' not in generated, 'safe heap should not be used by default'
      assert ': while(' not in generated, 'when relooping we also js-optimize, so there should be no labelled whiles'
      if closure:
        assert 'Module._main=' in generated, 'closure compiler should have been run (and output should be minified)'
      else:
        # closure has not been run, we can do some additional checks. TODO: figure out how to do these even with closure
        assert 'Module._main = ' not in generated, 'closure compiler should not have been run'
        # XXX find a way to test this: assert ('& 255' in generated or '&255' in generated) == (opt_level <= 2), 'corrections should be in opt <= 2'
        assert ('(__label__)' in generated) == (opt_level <= 1), 'relooping should be in opt >= 2'
        assert ('assert(STACKTOP < STACK_MAX' in generated) == (opt_level == 0), 'assertions should be in opt == 0'
        assert 'var $i;' in generated or 'var $i_01;' in generated or 'var $storemerge3;' in generated or 'var $storemerge4;' in generated or 'var $i_04;' in generated, 'micro opts should always be on'
        if opt_level >= 1:
          assert 'HEAP8[HEAP32[' in generated or 'HEAP8[$vla1 + (($storemerge4 | 0) / 2 & -1) | 0]' in generated or 'HEAP8[$vla1 + (($storemerge4 | 0) / 2 & -1) | 0]' in generated or 'HEAP8[$vla1 + (($i_04 | 0) / 2 & -1) | 0]' in generated or 'HEAP8[$vla1 + ($i_04 / 2 & -1)]' in generated or 'HEAP8[$1 + (($i_01 | 0) / 2 & -1) | 0]' in generated or 'HEAP8[$1 + (($i_01 | 0) / 2 & -1) | 0]' in generated or 'HEAP8[$1 + ($i_01 / 2 & -1)]' in generated, 'eliminator should create compound expressions, and fewer one-time vars'
        assert ('_puts(' in generated) == (opt_level >= 1), 'with opt >= 1, llvm opts are run and they should optimize printf to puts'
        assert ('function _malloc(bytes) {' in generated) == (not has_malloc), 'If malloc is needed, it should be there, if not not'
        assert 'function _main() {' in generated, 'Should be unminified, including whitespace'
        assert 'function _dump' in generated, 'No inlining by default'

    # emcc -s RELOOP=1 src.cpp ==> should pass -s to emscripten.py. --typed-arrays is a convenient alias for -s USE_TYPED_ARRAYS
    for params, test, text in [
      (['-s', 'INLINING_LIMIT=0'], lambda generated: 'function _dump' in generated, 'no inlining without opts'),
      (['-O1', '-s', 'INLINING_LIMIT=0'], lambda generated: 'function _dump' not in generated, 'inlining'),
      (['-s', 'USE_TYPED_ARRAYS=0'], lambda generated: 'new Int32Array' not in generated, 'disable typed arrays'),
      (['-s', 'USE_TYPED_ARRAYS=1'], lambda generated: 'IHEAPU = ' in generated, 'typed arrays 1 selected'),
      ([], lambda generated: 'Module["_dump"]' not in generated, 'dump is not exported by default'),
      (['-s', 'EXPORTED_FUNCTIONS=["_main", "_dump"]'], lambda generated: 'Module["_dump"]' in generated, 'dump is now exported'),
      (['--typed-arrays', '0'], lambda generated: 'new Int32Array' not in generated, 'disable typed arrays'),
      (['--typed-arrays', '1'], lambda generated: 'IHEAPU = ' in generated, 'typed arrays 1 selected'),
      (['--typed-arrays', '2'], lambda generated: 'new Uint16Array' in generated and 'new Uint32Array' in generated, 'typed arrays 2 selected'),
      (['--llvm-opts', '1'], lambda generated: '_puts(' in generated, 'llvm opts requested'),
    ]:
      self.clear()
      output = Popen(['python', compiler, path_from_root('tests', 'hello_world_loop.cpp'), '-o', 'a.out.js'] + params, stdout=PIPE, stderr=PIPE).communicate()
      assert len(output[0]) == 0, output[0]
      assert os.path.exists('a.out.js'), '\n'.join(output)
      self.assertContained('hello, world!', run_js('a.out.js'))
      assert test(open('a.out.js').read()), text

    # Compiling two source files into a final JS.
    for args, target in [([], 'a.out.js'), (['-o', 'combined.js'], 'combined.js')]:
      self.clear()
      output = Popen(['python', compiler, path_from_root('tests', 'twopart_main.cpp'), path_from_root('tests', 'twopart_side.cpp')] + args,
                     stdout=PIPE, stderr=PIPE).communicate()
      assert len(output[0]) == 0, output[0]
      assert os.path.exists(target), '\n'.join(output)
      self.assertContained('side got: hello from main, over', run_js(target))

      # Compiling two files with -c will generate separate .bc files
      self.clear()
      output = Popen(['python', compiler, path_from_root('tests', 'twopart_main.cpp'), path_from_root('tests', 'twopart_side.cpp'), '-c'] + args,
                     stdout=PIPE, stderr=PIPE).communicate()
      if '-o' in args:
        # specifying -o and -c is an error
        assert 'fatal error' in output[1], output[1]
        continue
      assert os.path.exists('twopart_main.o'), '\n'.join(output)
      assert os.path.exists('twopart_side.o'), '\n'.join(output)
      assert not os.path.exists(target), 'We should only have created bitcode here: ' + '\n'.join(output)

      # Compiling one of them alone is expected to fail
      output = Popen(['python', compiler, 'twopart_main.o'] + args, stdout=PIPE, stderr=PIPE).communicate()
      assert os.path.exists(target), '\n'.join(output)
      #print '\n'.join(output)
      self.assertContained('is not a function', run_js(target, stderr=STDOUT))
      try_delete(target)

      # Combining those bc files into js should work
      output = Popen(['python', compiler, 'twopart_main.o', 'twopart_side.o'] + args, stdout=PIPE, stderr=PIPE).communicate()
      assert os.path.exists(target), '\n'.join(output)
      self.assertContained('side got: hello from main, over', run_js(target))

      # Combining bc files into another bc should also work
      try_delete(target)
      assert not os.path.exists(target)
      output = Popen(['python', compiler, 'twopart_main.o', 'twopart_side.o', '-o', 'combined.bc'] + args, stdout=PIPE, stderr=PIPE).communicate()
      syms = Building.llvm_nm('combined.bc')
      assert len(syms.defs) == 2 and 'main' in syms.defs, 'Failed to generate valid bitcode'
      output = Popen(['python', compiler, 'combined.bc', '-o', 'combined.bc.js'], stdout = PIPE, stderr = PIPE).communicate()
      assert len(output[0]) == 0, output[0]
      assert os.path.exists('combined.bc.js'), 'Expected %s to exist' % ('combined.bc.js')
      self.assertContained('side got: hello from main, over', run_js('combined.bc.js'))

    # --js-transform <transform>
    self.clear()
    trans = os.path.join(self.get_dir(), 't.py')
    trans_file = open(trans, 'w')
    # The transform script must overwrite the JS file it is given (argv[1]).
    trans_file.write('''
import sys
f = open(sys.argv[1], 'w')
f.write('transformed!')
f.close()
''')
    trans_file.close()
    output = Popen(['python', compiler, path_from_root('tests', 'hello_world' + suffix), '--js-transform', 'python t.py'], stdout=PIPE, stderr=PIPE).communicate()
    assert open('a.out.js').read() == 'transformed!', 'Transformed output must be as expected'

  # TODO: Add in files test a clear example of using disablePermissions, and link to it from the wiki
  # TODO: test normal project linking, static and dynamic: get_library should not need to be told what to link!
  # TODO: deprecate llvm optimizations, dlmalloc, etc. in emscripten.py.
def test_l_link(self):
  """Linking with -lLIBNAME and -L/DIRNAME should work.

  Builds a library into libdir/libfile.so, then links main.cpp against it
  via -L/-lfile and checks the program output. Also verifies that no
  native-style linker stubs (a.out / a.exe) are left behind.
  """
  # Linking with -lLIBNAME and -L/DIRNAME should work
  open(os.path.join(self.get_dir(), 'main.cpp'), 'w').write('''
extern void printey();
int main() {
printey();
return 0;
}
''')
  try:
    os.makedirs(os.path.join(self.get_dir(), 'libdir'));
  except:
    pass # already exists from a previous run
  open(os.path.join(self.get_dir(), 'libdir', 'libfile.cpp'), 'w').write('''
#include <stdio.h>
void printey() {
printf("hello from lib\\n");
}
''')
  # Compile the library to bitcode, then give it the conventional lib<name>.so name.
  Popen(['python', EMCC, os.path.join(self.get_dir(), 'libdir', 'libfile.cpp'), '-c']).communicate()
  shutil.move(os.path.join(self.get_dir(), 'libfile.o'), os.path.join(self.get_dir(), 'libdir', 'libfile.so'))
  Popen(['python', EMCC, os.path.join(self.get_dir(), 'main.cpp'), '-L' + os.path.join(self.get_dir(), 'libdir'), '-lfile']).communicate()
  self.assertContained('hello from lib', run_js(os.path.join(self.get_dir(), 'a.out.js')))
  assert not os.path.exists('a.out') and not os.path.exists('a.exe'), 'Must not leave unneeded linker stubs'
def test_local_link(self):
  """Linking a local library directly, like /usr/lib/libsomething.so, cannot work of course since it
  doesn't contain bitcode. However, when we see that we should look for a bitcode file for that
  library in the -L paths and system/lib.

  Here subdir/libfile.so is deliberately NOT bitcode; the real bitcode
  libfile.so lives in the current dir, found via -L.
  """
  open(os.path.join(self.get_dir(), 'main.cpp'), 'w').write('''
extern void printey();
int main() {
printey();
return 0;
}
''')
  try:
    os.makedirs(os.path.join(self.get_dir(), 'subdir'));
  except:
    pass # already exists from a previous run
  # A decoy: same name, but not bitcode, so emcc must fall back to the -L search.
  open(os.path.join(self.get_dir(), 'subdir', 'libfile.so'), 'w').write('this is not llvm bitcode!')
  open(os.path.join(self.get_dir(), 'libfile.cpp'), 'w').write('''
#include <stdio.h>
void printey() {
printf("hello from lib\\n");
}
''')
  Popen(['python', EMCC, os.path.join(self.get_dir(), 'libfile.cpp'), '-o', 'libfile.so']).communicate()
  Popen(['python', EMCC, os.path.join(self.get_dir(), 'main.cpp'), os.path.join(self.get_dir(), 'subdir', 'libfile.so'), '-L.']).communicate()
  self.assertContained('hello from lib', run_js(os.path.join(self.get_dir(), 'a.out.js')))
def test_js_libraries(self):
  """Multiple --js-library files should all be merged into the runtime.

  mylib1.js supplies printey() and mylib2.js supplies calcey(); main.cpp
  declares both as extern "C" and the output must show both worked.
  """
  open(os.path.join(self.get_dir(), 'main.cpp'), 'w').write('''
#include <stdio.h>
extern "C" {
extern void printey();
extern int calcey(int x, int y);
}
int main() {
printey();
printf("*%d*\\n", calcey(10, 22));
return 0;
}
''')
  open(os.path.join(self.get_dir(), 'mylib1.js'), 'w').write('''
mergeInto(LibraryManager.library, {
printey: function() {
Module.print('hello from lib!');
}
});
''')
  open(os.path.join(self.get_dir(), 'mylib2.js'), 'w').write('''
mergeInto(LibraryManager.library, {
calcey: function(x, y) {
return x + y;
}
});
''')
  Popen(['python', EMCC, os.path.join(self.get_dir(), 'main.cpp'), '--js-library', os.path.join(self.get_dir(), 'mylib1.js'),
         '--js-library', os.path.join(self.get_dir(), 'mylib2.js')]).communicate()
  self.assertContained('hello from lib!\n*32*\n', run_js(os.path.join(self.get_dir(), 'a.out.js')))
def test_identical_basenames(self):
  """Issue 287: files in different dirs but with the same basename get confused as the same,
  causing multiply defined symbol errors.

  Compiles foo/main.cpp and bar/main.cpp together, both directly and via
  intermediate .o files, and checks the program links and runs.
  """
  try:
    os.makedirs(os.path.join(self.get_dir(), 'foo'));
  except:
    pass # already exists from a previous run
  try:
    os.makedirs(os.path.join(self.get_dir(), 'bar'));
  except:
    pass # already exists from a previous run
  open(os.path.join(self.get_dir(), 'foo', 'main.cpp'), 'w').write('''
extern void printey();
int main() {
printey();
return 0;
}
''')
  open(os.path.join(self.get_dir(), 'bar', 'main.cpp'), 'w').write('''
#include<stdio.h>
void printey() { printf("hello there\\n"); }
''')
  Popen(['python', EMCC, os.path.join(self.get_dir(), 'foo', 'main.cpp'), os.path.join(self.get_dir(), 'bar', 'main.cpp')]).communicate()
  self.assertContained('hello there', run_js(os.path.join(self.get_dir(), 'a.out.js')))

  # ditto with first creating .o files
  try_delete(os.path.join(self.get_dir(), 'a.out.js'))
  Popen(['python', EMCC, os.path.join(self.get_dir(), 'foo', 'main.cpp'), '-o', os.path.join(self.get_dir(), 'foo', 'main.o')]).communicate()
  Popen(['python', EMCC, os.path.join(self.get_dir(), 'bar', 'main.cpp'), '-o', os.path.join(self.get_dir(), 'bar', 'main.o')]).communicate()
  Popen(['python', EMCC, os.path.join(self.get_dir(), 'foo', 'main.o'), os.path.join(self.get_dir(), 'bar', 'main.o')]).communicate()
  self.assertContained('hello there', run_js(os.path.join(self.get_dir(), 'a.out.js')))
def test_embed_file(self):
  """--embed-file should bake a data file into the JS so fopen() finds it.

  The program reads the first 20 bytes of somefile.txt and prints them;
  the output must match the embedded file's contents.
  """
  open(os.path.join(self.get_dir(), 'somefile.txt'), 'w').write('''hello from a file with lots of data and stuff in it thank you very much''')
  open(os.path.join(self.get_dir(), 'main.cpp'), 'w').write(r'''
#include <stdio.h>
int main() {
FILE *f = fopen("somefile.txt", "r");
char buf[100];
fread(buf, 1, 20, f);
buf[20] = 0;
fclose(f);
printf("|%s|\n", buf);
return 0;
}
''')
  Popen(['python', EMCC, os.path.join(self.get_dir(), 'main.cpp'), '--embed-file', 'somefile.txt']).communicate()
  self.assertContained('|hello from a file wi|', run_js(os.path.join(self.get_dir(), 'a.out.js')))
def test_multidynamic_link(self):
  """Linking the same dynamic library in will error, normally, since we statically link it, causing dupe symbols.
  A workaround is to use --ignore-dynamic-linking, see emcc --help for details.

  libother depends on libfile; main depends on both. All intermediate
  builds use --ignore-dynamic-linking, then a final plain emcc link pulls
  the dynamic libraries in exactly once.
  """
  open(os.path.join(self.get_dir(), 'main.cpp'), 'w').write(r'''
#include <stdio.h>
extern void printey();
extern void printother();
int main() {
printf("*");
printey();
printf("\n");
printother();
printf("\n");
printf("*");
return 0;
}
''')
  try:
    os.makedirs(os.path.join(self.get_dir(), 'libdir'));
  except:
    pass # already exists from a previous run
  open(os.path.join(self.get_dir(), 'libdir', 'libfile.cpp'), 'w').write('''
#include <stdio.h>
void printey() {
printf("hello from lib");
}
''')
  open(os.path.join(self.get_dir(), 'libdir', 'libother.cpp'), 'w').write('''
#include <stdio.h>
extern void printey();
void printother() {
printf("|");
printey();
printf("|");
}
''')

  # This lets us link the same dynamic lib twice. We will need to link it in manually at the end.
  compiler = ['python', EMCC, '--ignore-dynamic-linking']

  # Build libfile normally into an .so
  Popen(compiler + [os.path.join(self.get_dir(), 'libdir', 'libfile.cpp'), '-o', os.path.join(self.get_dir(), 'libdir', 'libfile.so')]).communicate()
  # Build libother and dynamically link it to libfile - but add --ignore-dynamic-linking
  Popen(compiler + [os.path.join(self.get_dir(), 'libdir', 'libother.cpp'), '-L' + os.path.join(self.get_dir(), 'libdir'), '-lfile', '-o', os.path.join(self.get_dir(), 'libdir', 'libother.so')]).communicate()
  # Build the main file, linking in both the libs
  Popen(compiler + [os.path.join(self.get_dir(), 'main.cpp'), '-L' + os.path.join(self.get_dir(), 'libdir'), '-lfile', '-lother', '-c']).communicate()

  # The normal build system is over. We need to do an additional step to link in the dynamic libraries, since we ignored them before
  Popen(['python', EMCC, os.path.join(self.get_dir(), 'main.o'), '-L' + os.path.join(self.get_dir(), 'libdir'), '-lfile', '-lother']).communicate()

  self.assertContained('*hello from lib\n|hello from lib|\n*', run_js(os.path.join(self.get_dir(), 'a.out.js')))
def test_js_link(self):
  """--pre-js / --post-js should be prepended / appended to the output.

  before.js must run before the Module object exists (it throws if not);
  after.js uses Module.print, proving it ran after the runtime was set up.
  """
  open(os.path.join(self.get_dir(), 'main.cpp'), 'w').write('''
#include <stdio.h>
int main() {
printf("hello from main\\n");
return 0;
}
''')
  open(os.path.join(self.get_dir(), 'before.js'), 'w').write('''
var MESSAGE = 'hello from js';
if (typeof Module != 'undefined') throw 'This code should run before anything else!';
''')
  open(os.path.join(self.get_dir(), 'after.js'), 'w').write('''
Module.print(MESSAGE);
''')
  Popen(['python', EMCC, os.path.join(self.get_dir(), 'main.cpp'), '--pre-js', 'before.js', '--post-js', 'after.js']).communicate()
  self.assertContained('hello from main\nhello from js\n', run_js(os.path.join(self.get_dir(), 'a.out.js')))
def test_prepost(self):
  """Module.preRun / Module.postRun hooks and the noInitialRun flag.

  Checks that pre/post hooks fire around main, that they do not fire if
  the program never runs (simulated via a never-satisfied run dependency),
  and that with noInitialRun main can still be invoked manually later.
  """
  open(os.path.join(self.get_dir(), 'main.cpp'), 'w').write('''
#include <stdio.h>
int main() {
printf("hello from main\\n");
return 0;
}
''')
  open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
var Module = {
preRun: function() { Module.print('pre-run') },
postRun: function() { Module.print('post-run') }
};
''')
  Popen(['python', EMCC, os.path.join(self.get_dir(), 'main.cpp'), '--pre-js', 'pre.js']).communicate()
  self.assertContained('pre-run\nhello from main\npost-run\n', run_js(os.path.join(self.get_dir(), 'a.out.js')))

  # never run, so no preRun or postRun
  # (an unmatched addRunDependency() blocks run() forever)
  src = open(os.path.join(self.get_dir(), 'a.out.js')).read().replace('// {{PRE_RUN_ADDITIONS}}', 'addRunDependency()')
  open(os.path.join(self.get_dir(), 'a.out.js'), 'w').write(src)
  self.assertNotContained('pre-run\nhello from main\npost-run\n', run_js(os.path.join(self.get_dir(), 'a.out.js')))

  # noInitialRun prevents run
  for no_initial_run in [0, 1]:
    Popen(['python', EMCC, os.path.join(self.get_dir(), 'main.cpp')]).communicate()
    src = 'var Module = { noInitialRun: %d };\n' % no_initial_run + open(os.path.join(self.get_dir(), 'a.out.js')).read()
    open(os.path.join(self.get_dir(), 'a.out.js'), 'w').write(src)
    assert ('hello from main' in run_js(os.path.join(self.get_dir(), 'a.out.js'))) != no_initial_run, 'only run if no noInitialRun'
    if no_initial_run:
      # Calling main later should still work, filesystem etc. must be set up.
      src = open(os.path.join(self.get_dir(), 'a.out.js')).read() + '\n_main();\n';
      open(os.path.join(self.get_dir(), 'a.out.js'), 'w').write(src)
      assert 'hello from main' in run_js(os.path.join(self.get_dir(), 'a.out.js')), 'main should print when called manually'
def test_eliminator(self):
  """Golden test for the variable eliminator: feed the test input through
  the coffee-script eliminator via stdin and require byte-identical output.

  NOTE(review): the local name 'input' shadows the Python builtin.
  """
  input = open(path_from_root('tools', 'eliminator', 'eliminator-test.js')).read()
  expected = open(path_from_root('tools', 'eliminator', 'eliminator-test-output.js')).read()
  output = Popen([NODE_JS, COFFEESCRIPT, VARIABLE_ELIMINATOR], stdin=PIPE, stdout=PIPE).communicate(input)[0]
  self.assertIdentical(expected, output)
def test_fix_closure(self):
  """Run tools/fix_closure.py on a closure-mangled test file and check the
  rewritten output: the expected identifier survives, the anonymous
  function was un-inlined (so it gets a name), and behavior is unchanged.
  """
  # Renamed from 'input' to avoid shadowing the builtin.
  input_file = path_from_root('tests', 'test-fix-closure.js')
  # fix_closure.py takes its input and output paths on the command line; the
  # old code passed the path to communicate() too, but with no stdin=PIPE
  # that argument was silently ignored, so it is dropped here.
  Popen(['python', path_from_root('tools', 'fix_closure.py'), input_file, 'out.js']).communicate()
  output = open('out.js').read()
  assert '0,zzz_Q_39fa,0' in output
  assert 'function(a,c)' not in output # should be uninlined, so it gets a name
  # The rewrite must not change runtime behavior.
  assert run_js(input_file) == run_js('out.js')
def test_js_optimizer(self):
  """Golden tests for tools/js-optimizer.js.

  Each input file is run through a specific list of optimizer passes
  (given as extra command-line arguments) and the result must match the
  stored expected output, modulo collapsed double blank lines.
  """
  for input, expected, passes in [
    (path_from_root('tools', 'test-js-optimizer.js'), open(path_from_root('tools', 'test-js-optimizer-output.js')).read(),
     ['hoistMultiples', 'loopOptimizer', 'unGlobalize', 'removeAssignsToUndefined', 'simplifyExpressionsPre', 'simplifyExpressionsPost']),
    (path_from_root('tools', 'test-js-optimizer-t2c.js'), open(path_from_root('tools', 'test-js-optimizer-t2c-output.js')).read(),
     ['simplifyExpressionsPre', 'optimizeShiftsConservative']),
    (path_from_root('tools', 'test-js-optimizer-t2.js'), open(path_from_root('tools', 'test-js-optimizer-t2-output.js')).read(),
     ['simplifyExpressionsPre', 'optimizeShiftsAggressive']),
  ]:
    output = Popen([NODE_JS, JS_OPTIMIZER, input] + passes, stdin=PIPE, stdout=PIPE).communicate()[0]
    # Normalize double blank lines before comparing against the golden file.
    self.assertIdentical(expected, output.replace('\n\n', '\n'))
def test_m_mm(self):
  """emcc -M and -MM should emit a makefile dependency rule ('foo.o: ...')
  on stdout and produce no errors on stderr."""
  open(os.path.join(self.get_dir(), 'foo.c'), 'w').write('''#include <emscripten.h>''')
  for opt in ['M', 'MM']:
    output, err = Popen(['python', EMCC, os.path.join(self.get_dir(), 'foo.c'), '-' + opt], stdout=PIPE, stderr=PIPE).communicate()
    assert 'foo.o: ' in output, '-%s failed to produce the right output: %s' % (opt, output)
    assert 'error' not in err, 'Unexpected stderr: ' + err
def test_llvm_nativizer(self):
  """Compile tests/files.cpp to bitcode, turn it into a native executable
  with tools/nativize_llvm.py, run it with prepared fixture files and a
  stdin stream, and compare stdout/stderr against the expected transcript.
  """
  # avoid impure_ptr problems etc.
  shutil.copyfile(path_from_root('tests', 'files.cpp'), os.path.join(self.get_dir(), 'files.cpp'))
  # Fixture files the program reads, plus a canned stdin stream.
  open(os.path.join(self.get_dir(), 'somefile.binary'), 'w').write('''waka waka############################''')
  open(os.path.join(self.get_dir(), 'test.file'), 'w').write('''ay file..............,,,,,,,,,,,,,,''')
  open(os.path.join(self.get_dir(), 'stdin'), 'w').write('''inter-active''')
  Popen(['python', EMCC, os.path.join(self.get_dir(), 'files.cpp'), '-c']).communicate()
  # Fix: the old code called .communicate(input)[0] — passing the *builtin*
  # 'input' function as stdin data (silently ignored, since no stdin=PIPE)
  # and indexing a discarded result. communicate() takes no argument here.
  Popen(['python', path_from_root('tools', 'nativize_llvm.py'), os.path.join(self.get_dir(), 'files.o')]).communicate()
  output = Popen([os.path.join(self.get_dir(), 'files.o.run')], stdin=open(os.path.join(self.get_dir(), 'stdin')), stdout=PIPE, stderr=PIPE).communicate()
  self.assertIdentical('''size: 37
data: 119,97,107,97,32,119,97,107,97,35,35,35,35,35,35,35,35,35,35,35,35,35,35,35,35,35,35,35,35,35,35,35,35,35,35,35,35
loop: 119 97 107 97 32 119 97 107 97 35 35 35 35 35 35 35 35 35 35 35 35 35 35 35 35 35 35 35 35 35 35 35 35 35 35 35 35
input:inter-active
texto
$
5 : 10,30,20,11,88
other=ay file...
seeked= file.
seeked=e...
seeked=,,.
fscanfed: 10 - hello
''', output[0])
  self.assertIdentical('texte\n', output[1])
elif 'browser' in str(sys.argv):
# Browser tests.
# Run a server and a web page. When a test runs, we tell the server about it,
# which tells the web page, which then opens a window with the test. Doing
# it this way then allows the page to close() itself when done.
def harness_server_func(q):
  """Serve the browser-test harness page on localhost:9999.

  GET /run_harness returns the static harness HTML; any other path is a
  poll from that page and returns the next queued test URL from *q*, or
  the string 'False' when nothing is pending. Runs forever — the test
  runner terminates this process.
  """
  class TestServerHandler(BaseHTTPServer.BaseHTTPRequestHandler):
    def do_GET(s):
      s.send_response(200)
      s.send_header("Content-type", "text/html")
      s.end_headers()
      if s.path == '/run_harness':
        # Serve the harness page that opens test windows.
        s.wfile.write(open(path_from_root('tests', 'browser_harness.html')).read())
      else:
        # Poll request: hand out the next queued test URL, if any.
        result = 'False'
        if not q.empty():
          result = q.get()
        s.wfile.write(result)
      s.wfile.close()
  httpd = BaseHTTPServer.HTTPServer(('localhost', 9999), TestServerHandler)
  httpd.serve_forever() # test runner will kill us
def server_func(dir, q):
  """Serve compiled test output from *dir* on localhost:8888.

  Paths containing 'report_' are result callbacks from the page under
  test; they are pushed onto *q* for run_browser() to pick up. Any other
  path is served as a file relative to *dir* (500 if missing). Runs
  forever — the caller terminates this process.

  NOTE(review): the parameter name 'dir' shadows the builtin.
  """
  class TestServerHandler(BaseHTTPServer.BaseHTTPRequestHandler):
    def do_GET(s):
      if 'report_' in s.path:
        # Result callback: the path itself encodes the test result.
        q.put(s.path)
      else:
        filename = s.path[1:]
        if os.path.exists(filename):
          s.send_response(200)
          s.send_header("Content-type", "text/html")
          s.end_headers()
          s.wfile.write(open(filename).read())
          s.wfile.close()
        else:
          s.send_response(500)
          s.send_header("Content-type", "text/html")
          s.end_headers()
  os.chdir(dir)
  httpd = BaseHTTPServer.HTTPServer(('localhost', 8888), TestServerHandler)
  httpd.serve_forever() # test runner will kill us
class browser(RunnerCore):
def __init__(self, *args, **kwargs):
  """Start the shared harness server exactly once (class-level singleton)
  and open the harness page in the default web browser."""
  super(browser, self).__init__(*args, **kwargs)
  if hasattr(browser, 'harness_server'): return # already started by an earlier instance
  browser.harness_queue = multiprocessing.Queue()
  browser.harness_server = multiprocessing.Process(target=harness_server_func, args=(browser.harness_queue,))
  browser.harness_server.start()
  print '[Browser harness server on process %d]' % browser.harness_server.pid
  webbrowser.open_new('http://localhost:9999/run_harness')
def __del__(self):
  """Tear down the shared harness server when the last instance dies."""
  if not hasattr(browser, 'harness_server'): return # already torn down
  browser.harness_server.terminate()
  delattr(browser, 'harness_server')
  print '[Browser harness server terminated]'
  # On Windows, shutil.rmtree() in tearDown() raises this exception if we do not wait a bit:
  # WindowsError: [Error 32] The process cannot access the file because it is being used by another process.
  time.sleep(0.1)
def run_browser(self, html_file, message, expectedResult=None):
  """Open *html_file* in the browser harness.

  With *expectedResult*: spawn a result server for the current test dir,
  queue the page URL for the harness, poll the result queue for up to 5
  seconds and assert the reported result matches. Without it: open the
  page directly and ask a human to verify *message* by eye.
  """
  if expectedResult is not None:
    try:
      queue = multiprocessing.Queue()
      # partial() binds the serving directory; the queue is passed via args.
      server = multiprocessing.Process(target=functools.partial(server_func, self.get_dir()), args=(queue,))
      server.start()
      browser.harness_queue.put('http://localhost:8888/' + html_file)
      output = '[no http server activity]'
      start = time.time()
      while time.time() - start < 5:
        if not queue.empty():
          output = queue.get()
          break
        time.sleep(0.1)
      self.assertIdentical(expectedResult, output)
    finally:
      server.terminate()
      time.sleep(0.1) # see comment about Windows above
  else:
    # Manual mode: no result server, rely on the human reading the page.
    webbrowser.open_new(os.path.abspath(html_file))
    print 'A web browser window should have opened a page containing the results of a part of this test.'
    print 'You need to manually look at the page to see that it works ok: ' + message
    print '(sleeping for a bit to keep the directory alive for the web browser..)'
    time.sleep(5)
    print '(moving on..)'
def with_report_result(self, code):
  """Expand every REPORT_RESULT(); marker in C/C++ test source *code*.

  Each marker becomes a snippet that sends the integer variable `result`
  back to the local result server via XHR and then schedules the test
  window to close itself. Returns the rewritten source string.
  """
  reporting_snippet = '''
char output[1000];
sprintf(output,
"xhr = new XMLHttpRequest();"
"xhr.open('GET', 'http://localhost:8888/report_result?%d');"
"xhr.send();", result);
emscripten_run_script(output);
emscripten_run_script("setTimeout(function() { window.close() }, 1000)");
'''
  return code.replace('REPORT_RESULT();', reporting_snippet)
def reftest(self, expected):
  """Write reftest.js into the test dir, set up to compare the canvas
  against the *expected* reference image.

  The generated doReftest() loads the reference image, renders the
  canvas to an image of its own, computes the mean absolute per-channel
  pixel difference, and reports that number to the result server (0
  means a perfect match; the floor allows antialiasing slack). It is
  hooked in as Module.postRun, with a preRun-scheduled fallback in case
  run() throws before postRun fires.
  """
  basename = os.path.basename(expected)
  # The page loads the reference image by relative URL, so copy it in.
  shutil.copyfile(expected, os.path.join(self.get_dir(), basename))
  open(os.path.join(self.get_dir(), 'reftest.js'), 'w').write('''
function doReftest() {
if (doReftest.done) return;
doReftest.done = true;
var img = new Image();
img.onload = function() {
assert(img.width == Module.canvas.width, 'Invalid width: ' + Module.canvas.width + ', should be ' + img.width);
assert(img.height == Module.canvas.height, 'Invalid height: ' + Module.canvas.height + ', should be ' + img.height);
var canvas = document.createElement('canvas');
canvas.width = img.width;
canvas.height = img.height;
var ctx = canvas.getContext('2d');
ctx.drawImage(img, 0, 0);
var expected = ctx.getImageData(0, 0, img.width, img.height).data;
var actualUrl = Module.canvas.toDataURL();
var actualImage = new Image();
actualImage.onload = function() {
var actualCanvas = document.createElement('canvas');
actualCanvas.width = actualImage.width;
actualCanvas.height = actualImage.height;
var actualCtx = actualCanvas.getContext('2d');
actualCtx.drawImage(actualImage, 0, 0);
var actual = actualCtx.getImageData(0, 0, actualImage.width, actualImage.height).data;
var total = 0;
for (var x = 0; x < img.width; x++) {
for (var y = 0; y < img.height; y++) {
total += Math.abs(expected[y*img.width*4 + x*4 + 0] - actual[y*img.width*4 + x*4 + 0]);
total += Math.abs(expected[y*img.width*4 + x*4 + 1] - actual[y*img.width*4 + x*4 + 1]);
total += Math.abs(expected[y*img.width*4 + x*4 + 2] - actual[y*img.width*4 + x*4 + 2]);
}
}
var wrong = Math.floor(total / (img.width*img.height*3)); // floor, to allow some margin of error for antialiasing
xhr = new XMLHttpRequest();
xhr.open('GET', 'http://localhost:8888/report_result?' + wrong);
xhr.send();
setTimeout(function() { window.close() }, 1000);
};
actualImage.src = actualUrl;
}
img.src = '%s';
};
Module['postRun'] = doReftest;
Module['preRun'] = function() {
setTimeout(doReftest, 0); // if run() throws an exception and postRun is not called, this will kick in
};
''' % basename)
def test_html(self):
# test HTML generation.
self.reftest(path_from_root('tests', 'htmltest.png'))
output = Popen(['python', EMCC, path_from_root('tests', 'hello_world_sdl.cpp'), '-o', 'something.html', '--pre-js', 'reftest.js']).communicate()
self.run_browser('something.html', 'You should see "hello, world!" and a colored cube.', '/report_result?0')
def build_native_lzma(self):
lzma_native = path_from_root('third_party', 'lzma.js', 'lzma-native')
if os.path.isfile(lzma_native) and os.access(lzma_native, os.X_OK): return
cwd = os.getcwd()
try:
os.chdir(path_from_root('third_party', 'lzma.js'))
Popen(['sh', './doit.sh']).communicate()
finally:
os.chdir(cwd)
  def test_compression(self):
    # Compile with --compression (native lzma compressor, JS decoder):
    # the side JS must exist in both plain and compressed form, the
    # compressed one must be smaller, and the page must still run when only
    # the compressed JS is available.
    open(os.path.join(self.get_dir(), 'main.cpp'), 'w').write(self.with_report_result(r'''
      #include <stdio.h>
      #include <emscripten.h>
      int main() {
        printf("hello compressed world\n");
        int result = 1;
        REPORT_RESULT();
        return 0;
      }
    '''))
    self.build_native_lzma()
    Popen(['python', EMCC, os.path.join(self.get_dir(), 'main.cpp'), '-o', 'page.html',
           '--compression', '%s,%s,%s' % (path_from_root('third_party', 'lzma.js', 'lzma-native'),
                                          path_from_root('third_party', 'lzma.js', 'lzma-decoder.js'),
                                          'LZMA.decompress')]).communicate()
    assert os.path.exists(os.path.join(self.get_dir(), 'page.js')), 'must be side js'
    assert os.path.exists(os.path.join(self.get_dir(), 'page.js.compress')), 'must be side compressed js'
    assert os.stat(os.path.join(self.get_dir(), 'page.js')).st_size > os.stat(os.path.join(self.get_dir(), 'page.js.compress')).st_size, 'compressed file must be smaller'
    # Hide the uncompressed JS so the page is forced to load the compressed one.
    shutil.move(os.path.join(self.get_dir(), 'page.js'), 'page.js.renamedsoitcannotbefound');
    self.run_browser('page.html', '', '/report_result?1')
  def test_preload_file(self):
    # --preload-file: a data file packaged at build time must be readable via
    # stdio before main() runs.
    open(os.path.join(self.get_dir(), 'somefile.txt'), 'w').write('''load me right before running the code please''')
    open(os.path.join(self.get_dir(), 'main.cpp'), 'w').write(self.with_report_result(r'''
      #include <stdio.h>
      #include <string.h>
      #include <emscripten.h>
      int main() {
        FILE *f = fopen("somefile.txt", "r");
        char buf[100];
        fread(buf, 1, 20, f);
        buf[20] = 0;
        fclose(f);
        printf("|%s|\n", buf);
        int result = !strcmp("load me right before", buf);
        REPORT_RESULT();
        return 0;
      }
    '''))
    Popen(['python', EMCC, os.path.join(self.get_dir(), 'main.cpp'), '--preload-file', 'somefile.txt', '-o', 'page.html']).communicate()
    self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?1')
  def test_multifile(self):
    # a few files inside a directory
    # Preloading is checked two ways: naming each file individually, and
    # naming the whole directory (with the on-disk copies removed afterwards,
    # to prove the data really comes from the package).
    self.clear()
    os.makedirs(os.path.join(self.get_dir(), 'subdirr'));
    os.makedirs(os.path.join(self.get_dir(), 'subdirr', 'moar'));
    open(os.path.join(self.get_dir(), 'subdirr', 'data1.txt'), 'w').write('''1214141516171819''')
    open(os.path.join(self.get_dir(), 'subdirr', 'moar', 'data2.txt'), 'w').write('''3.14159265358979''')
    open(os.path.join(self.get_dir(), 'main.cpp'), 'w').write(self.with_report_result(r'''
      #include <stdio.h>
      #include <string.h>
      #include <emscripten.h>
      int main() {
        char buf[17];
        FILE *f = fopen("subdirr/data1.txt", "r");
        fread(buf, 1, 16, f);
        buf[16] = 0;
        fclose(f);
        printf("|%s|\n", buf);
        int result = !strcmp("1214141516171819", buf);
        FILE *f2 = fopen("subdirr/moar/data2.txt", "r");
        fread(buf, 1, 16, f2);
        buf[16] = 0;
        fclose(f2);
        printf("|%s|\n", buf);
        result = result && !strcmp("3.14159265358979", buf);
        REPORT_RESULT();
        return 0;
      }
    '''))
    # by individual files
    Popen(['python', EMCC, os.path.join(self.get_dir(), 'main.cpp'), '--preload-file', 'subdirr/data1.txt', '--preload-file', 'subdirr/moar/data2.txt', '-o', 'page.html']).communicate()
    self.run_browser('page.html', 'You should see two cool numbers', '/report_result?1')
    os.remove('page.html')
    # by directory, and remove files to make sure
    Popen(['python', EMCC, os.path.join(self.get_dir(), 'main.cpp'), '--preload-file', 'subdirr', '-o', 'page.html']).communicate()
    shutil.rmtree(os.path.join(self.get_dir(), 'subdirr'))
    self.run_browser('page.html', 'You should see two cool numbers', '/report_result?1')
  def test_compressed_file(self):
    # --preload-file combined with --compression: preloaded data files must
    # still be readable when served only in compressed form.
    open(os.path.join(self.get_dir(), 'datafile.txt'), 'w').write('compress this please' + (2000*'.'))
    open(os.path.join(self.get_dir(), 'datafile2.txt'), 'w').write('moar' + (100*'!'))
    open(os.path.join(self.get_dir(), 'main.cpp'), 'w').write(self.with_report_result(r'''
      #include <stdio.h>
      #include <string.h>
      #include <emscripten.h>
      int main() {
        char buf[21];
        FILE *f = fopen("datafile.txt", "r");
        fread(buf, 1, 20, f);
        buf[20] = 0;
        fclose(f);
        printf("file says: |%s|\n", buf);
        int result = !strcmp("compress this please", buf);
        FILE *f2 = fopen("datafile2.txt", "r");
        fread(buf, 1, 5, f2);
        buf[5] = 0;
        fclose(f2);
        result = result && !strcmp("moar!", buf);
        printf("file 2 says: |%s|\n", buf);
        REPORT_RESULT();
        return 0;
      }
    '''))
    self.build_native_lzma()
    Popen(['python', EMCC, os.path.join(self.get_dir(), 'main.cpp'), '-o', 'page.html', '--preload-file', 'datafile.txt', '--preload-file', 'datafile2.txt',
           '--compression', '%s,%s,%s' % (path_from_root('third_party', 'lzma.js', 'lzma-native'),
                                          path_from_root('third_party', 'lzma.js', 'lzma-decoder.js'),
                                          'LZMA.decompress')]).communicate()
    assert os.path.exists(os.path.join(self.get_dir(), 'datafile.txt')), 'must be data file'
    assert os.path.exists(os.path.join(self.get_dir(), 'page.data.compress')), 'must be data file in compressed form'
    # NOTE(review): unlike test_compression (which asserts >), this only checks
    # that the sizes differ -- confirm whether a strict "smaller" was intended.
    assert os.stat(os.path.join(self.get_dir(), 'page.js')).st_size != os.stat(os.path.join(self.get_dir(), 'page.js.compress')).st_size, 'compressed file must be different'
    # Hide the raw data file so the page must use the compressed package.
    shutil.move(os.path.join(self.get_dir(), 'datafile.txt'), 'datafile.txt.renamedsoitcannotbefound');
    self.run_browser('page.html', '', '/report_result?1')
def test_sdl_image(self):
# load an image file, get pixel data
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), os.path.join(self.get_dir(), 'screenshot.jpg'))
open(os.path.join(self.get_dir(), 'sdl_image.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'sdl_image.c')).read()))
Popen(['python', EMCC, os.path.join(self.get_dir(), 'sdl_image.c'), '--preload-file', 'screenshot.jpg', '-o', 'page.html']).communicate()
self.run_browser('page.html', '', '/report_result?600')
  def test_sdl_image_compressed(self):
    # Same as test_sdl_image but with the preloaded image shipped through
    # --compression; each (image, expected width) pair runs in a clean dir.
    for image, width in [(path_from_root('tests', 'screenshot2.png'), 300),
                         (path_from_root('tests', 'screenshot.jpg'), 600)]:
      self.clear()
      print image
      basename = os.path.basename(image)
      shutil.copyfile(image, os.path.join(self.get_dir(), basename))
      open(os.path.join(self.get_dir(), 'sdl_image.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'sdl_image.c')).read()).replace('screenshot.jpg', basename))
      self.build_native_lzma()
      Popen(['python', EMCC, os.path.join(self.get_dir(), 'sdl_image.c'), '--preload-file', basename, '-o', 'page.html',
             '--compression', '%s,%s,%s' % (path_from_root('third_party', 'lzma.js', 'lzma-native'),
                                            path_from_root('third_party', 'lzma.js', 'lzma-decoder.js'),
                                            'LZMA.decompress')]).communicate()
      # Hide the raw image so it can only come from the compressed package.
      shutil.move(os.path.join(self.get_dir(), basename), basename + '.renamedsoitcannotbefound');
      self.run_browser('page.html', '', '/report_result?' + str(width))
def test_sdl_canvas(self):
open(os.path.join(self.get_dir(), 'sdl_canvas.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'sdl_canvas.c')).read()))
Popen(['python', EMCC, os.path.join(self.get_dir(), 'sdl_canvas.c'), '-o', 'page.html']).communicate()
self.run_browser('page.html', '', '/report_result?1')
  def test_sdl_key(self):
    # SDL keyboard events: the pre-js pumps _one() on a timer and provides a
    # helper that synthesizes keydown/keyup pairs for the C side to receive.
    open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
      Module.postRun = function() {
        function doOne() {
          _one();
          setTimeout(doOne, 1000/60);
        }
        setTimeout(doOne, 1000/60);
      }
      function simulateKeyEvent(c) {
        var event = document.createEvent("KeyboardEvent");
        event.initKeyEvent("keydown", true, true, window,
                           0, 0, 0, 0,
                           c, c);
        dispatchEvent(event);
        var event2 = document.createEvent("KeyboardEvent");
        event2.initKeyEvent("keyup", true, true, window,
                           0, 0, 0, 0,
                           c, c);
        dispatchEvent(event2);
      }
    ''')
    open(os.path.join(self.get_dir(), 'sdl_key.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'sdl_key.c')).read()))
    Popen(['python', EMCC, os.path.join(self.get_dir(), 'sdl_key.c'), '-o', 'page.html', '--pre-js', 'pre.js']).communicate()
    self.run_browser('page.html', '', '/report_result?510510')
  def test_sdl_mouse(self):
    # SDL mouse events: the pre-js helper synthesizes mousedown/mouseup pairs
    # (button >= 0) or mousemove (button < 0) at canvas-relative coordinates.
    open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
      function simulateMouseEvent(x, y, button) {
        var event = document.createEvent("MouseEvents");
        if (button >= 0) {
          var event1 = document.createEvent("MouseEvents");
          event1.initMouseEvent('mousedown', true, true, window,
                     1, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
                     0, 0, 0, 0,
                     button, null);
          Module['canvas'].dispatchEvent(event1);
          var event2 = document.createEvent("MouseEvents");
          event2.initMouseEvent('mouseup', true, true, window,
                     1, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
                     0, 0, 0, 0,
                     button, null);
          Module['canvas'].dispatchEvent(event2);
        } else {
          var event1 = document.createEvent("MouseEvents");
          event1.initMouseEvent('mousemove', true, true, window,
                     0, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
                     0, 0, 0, 0,
                     0, null);
          Module['canvas'].dispatchEvent(event1);
        }
      }
    ''')
    open(os.path.join(self.get_dir(), 'sdl_mouse.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'sdl_mouse.c')).read()))
    Popen(['python', EMCC, os.path.join(self.get_dir(), 'sdl_mouse.c'), '-o', 'page.html', '--pre-js', 'pre.js']).communicate()
    self.run_browser('page.html', '', '/report_result?740')
def test_sdl_audio(self):
shutil.copyfile(path_from_root('tests', 'sounds', 'alarmvictory_1.ogg'), os.path.join(self.get_dir(), 'sound.ogg'))
shutil.copyfile(path_from_root('tests', 'sounds', 'alarmcreatemiltaryfoot_1.wav'), os.path.join(self.get_dir(), 'sound2.wav'))
open(os.path.join(self.get_dir(), 'sdl_audio.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'sdl_audio.c')).read()))
# use closure to check for a possible bug with closure minifying away newer Audio() attributes
Popen(['python', EMCC, '-O2', '--minify', '0', os.path.join(self.get_dir(), 'sdl_audio.c'), '--preload-file', 'sound.ogg', '--preload-file', 'sound2.wav', '-o', 'page.html', '-s', 'EXPORTED_FUNCTIONS=["_main", "_play", "_play2"]']).communicate()
self.run_browser('page.html', '', '/report_result?1')
def test_sdl_gl_read(self):
# SDL, OpenGL, readPixels
open(os.path.join(self.get_dir(), 'sdl_gl_read.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'sdl_gl_read.c')).read()))
Popen(['python', EMCC, os.path.join(self.get_dir(), 'sdl_gl_read.c'), '-o', 'something.html']).communicate()
self.run_browser('something.html', '.', '/report_result?1')
def test_sdl_ogl(self):
# SDL, OpenGL, textures, immediate mode
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
self.reftest(path_from_root('tests', 'screenshot-gray-purple.png'))
Popen(['python', EMCC, path_from_root('tests', 'sdl_ogl.c'), '-o', 'something.html', '--pre-js', 'reftest.js', '--preload-file', 'screenshot.png']).communicate()
self.run_browser('something.html', 'You should see an image with gray at the top.', '/report_result?0')
def test_sdl_ogl_p(self):
# Immediate mode with pointers
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
self.reftest(path_from_root('tests', 'screenshot-gray.png'))
Popen(['python', EMCC, path_from_root('tests', 'sdl_ogl_p.c'), '-o', 'something.html', '--pre-js', 'reftest.js', '--preload-file', 'screenshot.png']).communicate()
self.run_browser('something.html', 'You should see an image with gray at the top.', '/report_result?0')
  def test_worker(self):
    # Test running in a web worker
    # The same worker.js must both run standalone (printing its "not in a
    # worker" text) and, when loaded as a Worker, post its result to the page.
    output = Popen(['python', EMCC, path_from_root('tests', 'hello_world_worker.cpp'), '-o', 'worker.js'], stdout=PIPE, stderr=PIPE).communicate()
    assert len(output[0]) == 0, output[0]
    assert os.path.exists('worker.js'), output
    self.assertContained('you should not see this text when in a worker!', run_js('worker.js')) # code should run standalone
    html_file = open('main.html', 'w')
    html_file.write('''
      <html>
      <body>
        Worker Test
        <script>
          var worker = new Worker('worker.js');
          worker.onmessage = function(event) {
            var xhr = new XMLHttpRequest();
            xhr.open('GET', 'http://localhost:8888/report_result?' + event.data);
            xhr.send();
            setTimeout(function() { window.close() }, 1000);
          };
        </script>
      </body>
      </html>
    ''')
    html_file.close()
    self.run_browser('main.html', 'You should see that the worker was called, and said "hello from worker!"', '/report_result?hello%20from%20worker!')
def test_glgears(self):
self.reftest(path_from_root('tests', 'gears.png'))
Popen(['python', EMCC, path_from_root('tests', 'hello_world_gles.c'), '-o', 'something.html',
'-DHAVE_BUILTIN_SINCOS', '--pre-js', 'reftest.js']).communicate()
self.run_browser('something.html', 'You should see animating gears.', '/report_result?0')
def test_glgears_animation(self):
Popen(['python', EMCC, path_from_root('tests', 'hello_world_gles.c'), '-o', 'something.html',
'-DHAVE_BUILTIN_SINCOS',
'--shell-file', path_from_root('tests', 'hello_world_gles_shell.html')]).communicate()
self.run_browser('something.html', 'You should see animating gears.', '/report_gl_result?true')
def test_glgears_bad(self):
# Make sure that OpenGL ES is not available if typed arrays are not used
Popen(['python', EMCC, path_from_root('tests', 'hello_world_gles.c'), '-o', 'something.html',
'-DHAVE_BUILTIN_SINCOS',
'-s', 'USE_TYPED_ARRAYS=0',
'--shell-file', path_from_root('tests', 'hello_world_gles_shell.html')]).communicate()
self.run_browser('something.html', 'You should not see animating gears.', '/report_gl_result?false')
def test_glgears_deriv(self):
self.reftest(path_from_root('tests', 'gears.png'))
Popen(['python', EMCC, path_from_root('tests', 'hello_world_gles_deriv.c'), '-o', 'something.html',
'-DHAVE_BUILTIN_SINCOS', '--pre-js', 'reftest.js']).communicate()
self.run_browser('something.html', 'You should see animating gears.', '/report_result?0')
src = open('something.html').read()
assert 'gl-matrix' not in src, 'Should not include glMatrix when not needed'
  def test_glbook(self):
    # Build the "OpenGL ES 2.0 Programming Guide" sample programs and compare
    # each one's render against its same-named .png reference image. Samples
    # that load textures get their .tga assets preloaded.
    programs = self.get_library('glbook', [
      os.path.join('Chapter_2', 'Hello_Triangle', 'CH02_HelloTriangle.bc'),
      os.path.join('Chapter_8', 'Simple_VertexShader', 'CH08_SimpleVertexShader.bc'),
      os.path.join('Chapter_9', 'Simple_Texture2D', 'CH09_SimpleTexture2D.bc'),
      os.path.join('Chapter_9', 'Simple_TextureCubemap', 'CH09_TextureCubemap.bc'),
      os.path.join('Chapter_9', 'TextureWrap', 'CH09_TextureWrap.bc'),
      os.path.join('Chapter_10', 'MultiTexture', 'CH10_MultiTexture.bc'),
      os.path.join('Chapter_13', 'ParticleSystem', 'CH13_ParticleSystem.bc'),
    ], configure=None)
    for program in programs:
      print program
      basename = os.path.basename(program)
      args = []
      if basename == 'CH10_MultiTexture.bc':
        shutil.copyfile(path_from_root('tests', 'glbook', 'Chapter_10', 'MultiTexture', 'basemap.tga'), os.path.join(self.get_dir(), 'basemap.tga'))
        shutil.copyfile(path_from_root('tests', 'glbook', 'Chapter_10', 'MultiTexture', 'lightmap.tga'), os.path.join(self.get_dir(), 'lightmap.tga'))
        args = ['--preload-file', 'basemap.tga', '--preload-file', 'lightmap.tga']
      elif basename == 'CH13_ParticleSystem.bc':
        shutil.copyfile(path_from_root('tests', 'glbook', 'Chapter_13', 'ParticleSystem', 'smoke.tga'), os.path.join(self.get_dir(), 'smoke.tga'))
        args = ['--preload-file', 'smoke.tga', '-O2'] # test optimizations and closure here as well for more coverage
      self.reftest(path_from_root('tests', 'glbook', basename.replace('.bc', '.png')))
      Popen(['python', EMCC, program, '-o', 'program.html', '--pre-js', 'reftest.js'] + args).communicate()
      self.run_browser('program.html', '', '/report_result?0')
def btest(self, filename, expected=None, reference=None, args=[]): # TODO: use in all other tests
if not reference:
open(os.path.join(self.get_dir(), filename), 'w').write(self.with_report_result(open(path_from_root('tests', filename)).read()))
else:
expected = '0' # 0 pixels difference than reference
shutil.copyfile(path_from_root('tests', filename), os.path.join(self.get_dir(), filename))
self.reftest(path_from_root('tests', reference))
args += ['--pre-js', 'reftest.js']
Popen(['python', EMCC, os.path.join(self.get_dir(), filename), '-o', 'test.html'] + args).communicate()
self.run_browser('test.html', '.', '/report_result?' + expected)
  def test_emscripten_api(self):
    # Exercise the emscripten.h browser API test program; expects result '1'.
    self.btest('emscripten_api_browser.cpp', '1')
  def test_sdlglshader(self):
    # GLSL shader demo, checked against its reference image via btest/reftest.
    self.btest('sdlglshader.c', reference='sdlglshader.png')
  def zzztest_cubegeom(self):
    # Disabled (zzz prefix). NOTE(review): the reference image is
    # sdlglshader.png, which looks unrelated to cubegeom -- confirm before
    # enabling this test.
    self.btest('cubegeom.c', reference='sdlglshader.png', args=['-s', 'GL_DEBUG=1'])
elif 'benchmark' in str(sys.argv):
  # Benchmarks. Run them with argument |benchmark|. To run a specific test, do
  # |benchmark.test_X|.
  # Build a fingerprint line (timestamp, emscripten git rev, spidermonkey hg
  # rev, llvm path) so benchmark runs can be correlated later. The rev lookups
  # are best-effort: failures are deliberately swallowed.
  fingerprint = [time.asctime()]
  try:
    fingerprint.append('em: ' + Popen(['git', 'show'], stdout=PIPE).communicate()[0].split('\n')[0])
  except:
    pass
  try:
    # NOTE(review): if os.getcwd() itself raised, |d| would be unbound in the
    # finally block below -- assumed not to happen in practice.
    d = os.getcwd()
    os.chdir(os.path.expanduser('~/Dev/mozilla-central'))
    fingerprint.append('sm: ' + filter(lambda line: 'changeset' in line,
                                       Popen(['hg', 'tip'], stdout=PIPE).communicate()[0].split('\n'))[0])
  except:
    pass
  finally:
    os.chdir(d)
  fingerprint.append('llvm: ' + LLVM_ROOT)
  print 'Running Emscripten benchmarks... [ %s ]' % ' | '.join(fingerprint)
  # Strip the mode argument so unittest doesn't see it.
  sys.argv = filter(lambda x: x != 'benchmark', sys.argv)
  assert(os.path.exists(CLOSURE_COMPILER))
  try:
    # Closure generates non-strict code, so drop the engine flags that enable
    # strict mode ("options('strict')" and the option flag just before it).
    index = SPIDERMONKEY_ENGINE.index("options('strict')")
    SPIDERMONKEY_ENGINE = SPIDERMONKEY_ENGINE[:index-1] + SPIDERMONKEY_ENGINE[index+1:] # closure generates non-strict
  except:
    pass
  Building.COMPILER = CLANG
  # Pick the JS engine to benchmark. If you specify one, it will be picked. For example, python tests/runner.py benchmark SPIDERMONKEY_ENGINE
  JS_ENGINE = JS_ENGINES[0]
  for i in range(1, len(sys.argv)):
    arg = sys.argv[i]
    if not arg.startswith('benchmark.test_'):
      # Any non-test argument names an engine variable; eval() resolves it.
      # NOTE(review): eval of a command-line argument -- trusted input only.
      JS_ENGINE = eval(arg)
      sys.argv[i] = None
  sys.argv = filter(lambda arg: arg is not None, sys.argv)
  print 'Benchmarking JS engine:', JS_ENGINE
  Building.COMPILER_TEST_OPTS = []
  # TEST_REPS timed runs per benchmark; TOTAL_TESTS must match the number of
  # enabled test_* methods below (used to know when to print the summary).
  TEST_REPS = 10
  TOTAL_TESTS = 9
  # Accumulators shared (via global) with benchmark.do_benchmark below.
  tests_done = 0
  total_times = map(lambda x: 0., range(TOTAL_TESTS))
  total_native_times = map(lambda x: 0., range(TOTAL_TESTS))
  class benchmark(RunnerCore):
    # Each test_* compiles a C/C++ source with emcc -O3, times TEST_REPS runs
    # in JS_ENGINE against TEST_REPS native runs of the same source, and
    # accumulates totals in the module-level total_times/total_native_times.
    def print_stats(self, times, native_times, last=False):
      # Print mean/std/median stats for JS vs native timings; with last=True,
      # print only the overall summary (including a per-test-normalized ratio).
      mean = sum(times)/len(times)
      squared_times = map(lambda x: x*x, times)
      mean_of_squared = sum(squared_times)/len(times)
      std = math.sqrt(mean_of_squared - mean*mean)
      sorted_times = times[:]
      sorted_times.sort()
      # Average of the two middle samples. NOTE(review): exact median only for
      # even-length input; TEST_REPS is 10, so that holds here.
      median = sum(sorted_times[len(sorted_times)/2 - 1:len(sorted_times)/2 + 1])/2
      mean_native = sum(native_times)/len(native_times)
      squared_native_times = map(lambda x: x*x, native_times)
      mean_of_squared_native = sum(squared_native_times)/len(native_times)
      std_native = math.sqrt(mean_of_squared_native - mean_native*mean_native)
      sorted_native_times = native_times[:]
      sorted_native_times.sort()
      median_native = sum(sorted_native_times[len(sorted_native_times)/2 - 1:len(sorted_native_times)/2 + 1])/2
      final = mean / mean_native
      if last:
        # Normalized ratio: average of per-run JS/native ratios.
        norm = 0
        for i in range(len(times)):
          norm += times[i]/native_times[i]
        norm /= len(times)
        print
        print ' JavaScript: %.3f Native: %.3f Ratio: %.3f Normalized ratio: %.3f' % (mean, mean_native, final, norm)
        return
      print
      print ' JavaScript: mean: %.3f (+-%.3f) secs median: %.3f range: %.3f-%.3f (noise: %3.3f%%) (%d runs)' % (mean, std, median, min(times), max(times), 100*std/mean, TEST_REPS)
      print ' Native : mean: %.3f (+-%.3f) secs median: %.3f range: %.3f-%.3f (noise: %3.3f%%) JS is %.2f X slower' % (mean_native, std_native, median_native, min(native_times), max(native_times), 100*std_native/mean_native, final)
    def do_benchmark(self, src, args=[], expected_output='FAIL', emcc_args=[]):
      # Compile |src| with emcc -O3 (+|emcc_args|), time TEST_REPS JS runs and
      # TEST_REPS native runs with |args|, sanity-check the first JS run's
      # output against |expected_output|, then print and accumulate stats.
      dirname = self.get_dir()
      filename = os.path.join(dirname, 'src.cpp')
      f = open(filename, 'w')
      f.write(src)
      f.close()
      final_filename = os.path.join(dirname, 'src.js')
      try_delete(final_filename)
      output = Popen(['python', EMCC, filename, '-O3',
                      '-s', 'INLINING_LIMIT=0',
                      '-s', 'TOTAL_MEMORY=100*1024*1024', '-s', 'FAST_MEMORY=10*1024*1024',
                      '-o', final_filename] + emcc_args, stdout=PIPE, stderr=self.stderr_redirect).communicate()
      assert os.path.exists(final_filename), 'Failed to compile file: ' + '\n'.join(output)
      # Run JS
      global total_times, tests_done
      times = []
      for i in range(TEST_REPS):
        start = time.time()
        js_output = self.run_generated_code(JS_ENGINE, final_filename, args, check_timeout=False)
        curr = time.time()-start
        times.append(curr)
        total_times[tests_done] += curr
        if i == 0:
          # Sanity check on output
          self.assertContained(expected_output, js_output)
      # Run natively
      self.build_native(filename)
      global total_native_times
      native_times = []
      for i in range(TEST_REPS):
        start = time.time()
        self.run_native(filename, args)
        curr = time.time()-start
        native_times.append(curr)
        total_native_times[tests_done] += curr
      self.print_stats(times, native_times)
      tests_done += 1
      if tests_done == TOTAL_TESTS:
        print 'Total stats:',
        self.print_stats(total_times, total_native_times, last=True)
    def test_primes(self):
      # Integer + sqrtf loop: prime counting up to the 100000th prime.
      src = '''
        #include<stdio.h>
        #include<math.h>
        int main() {
          int primes = 0, curri = 2;
          while (primes < 100000) {
            int ok = true;
            for (int j = 2; j < sqrtf(curri); j++) {
              if (curri % j == 0) {
                ok = false;
                break;
              }
            }
            if (ok) {
              primes++;
            }
            curri++;
          }
          printf("lastprime: %d.\\n", curri-1);
          return 1;
        }
      '''
      self.do_benchmark(src, [], 'lastprime: 1297001.')
    def test_memops(self):
      # Byte-wise memory reads/writes over a 1MB buffer.
      src = '''
        #include<stdio.h>
        #include<string.h>
        #include<stdlib.h>
        int main() {
          int N = 1024*1024;
          int M = 190;
          int final = 0;
          char *buf = (char*)malloc(N);
          for (int t = 0; t < M; t++) {
            for (int i = 0; i < N; i++)
              buf[i] = (i + final)%256;
            for (int i = 0; i < N; i++)
              final += buf[i] & 1;
            final = final % 1000;
          }
          printf("final: %d.\\n", final);
          return 1;
        }
      '''
      self.do_benchmark(src, [], 'final: 720.')
    def zzztest_files(self):
      # Disabled (zzz prefix): file write/read/verify churn through /dev/shm.
      src = r'''
        #include<stdio.h>
        #include<stdlib.h>
        #include<assert.h>
        #include <unistd.h>
        int main() {
          int N = 100;
          int M = 1000;
          int K = 1000;
          unsigned char *k = (unsigned char*)malloc(K+1), *k2 = (unsigned char*)malloc(K+1);
          for (int i = 0; i < K; i++) {
            k[i] = (i % 250) + 1;
          }
          k[K] = 0;
          char buf[100];
          for (int i = 0; i < N; i++) {
            sprintf(buf, "/dev/shm/file-%d.dat", i);
            FILE *f = fopen(buf, "w");
            for (int j = 0; j < M; j++) {
              fwrite(k, 1, (j % K) + 1, f);
            }
            fclose(f);
          }
          for (int i = 0; i < N; i++) {
            sprintf(buf, "/dev/shm/file-%d.dat", i);
            FILE *f = fopen(buf, "r");
            for (int j = 0; j < M; j++) {
              fread(k2, 1, (j % K) + 1, f);
            }
            fclose(f);
            for (int j = 0; j < K; j++) {
              assert(k[j] == k2[j]);
            }
            unlink(buf);
          }
          printf("ok");
          return 1;
        }
      '''
      self.do_benchmark(src, [], 'ok')
    def test_copy(self):
      # Struct copy / pass-by-value heavy workload.
      src = r'''
        #include<stdio.h>
        struct vec {
          int x, y, z;
          int r, g, b;
          vec(int x_, int y_, int z_, int r_, int g_, int b_) : x(x_), y(y_), z(z_), r(r_), g(g_), b(b_) {}
          static vec add(vec a, vec b) {
            return vec(a.x+b.x, a.y+b.y, a.z+b.z, a.r+b.r, a.g+b.g, a.b+b.b);
          }
          void norm() {
            x %= 1024;
            y %= 1024;
            z %= 1024;
            r %= 1024;
            b %= 1024;
            g %= 1024;
          }
          int sum() { return x + y + z + r + g + b; }
        };
        int main() {
          int total = 0;
          for (int i = 0; i < 1250; i++) {
            for (int j = 0; j < 1000; j++) {
              vec c(i, i+i%10, j*2, i%255, j%120, i%15);
              vec d(j+i%10, j*2, j%255, i%120, j%15, j);
              vec e = c;
              c.norm();
              d.norm();
              vec f = vec::add(c, d);
              f = vec::add(e, f);
              f.norm();
              f = vec::add(d, f);
              total += f.sum() % 100;
              total %= 10240;
            }
          }
          printf("sum:%d\n", total);
          return 1;
        }
      '''
      self.do_benchmark(src, [], 'sum:9928\n', emcc_args=['-s', 'QUANTUM_SIZE=4', '-s', 'USE_TYPED_ARRAYS=2'])
    def test_fannkuch(self):
      # Classic shootout benchmark: permutation flipping.
      src = open(path_from_root('tests', 'fannkuch.cpp'), 'r').read()
      self.do_benchmark(src, ['10'], 'Pfannkuchen(10) = 38.')
    def test_corrections(self):
      # Workload sensitive to sign/overflow/rounding corrections; run with all
      # three correction modes enabled.
      src = r'''
        #include<stdio.h>
        #include<math.h>
        int main() {
          int N = 4100;
          int M = 4100;
          unsigned int f = 0;
          unsigned short s = 0;
          for (int t = 0; t < M; t++) {
            for (int i = 0; i < N; i++) {
              f += i / ((t % 5)+1);
              if (f > 1000) f /= (t % 3)+1;
              if (i % 4 == 0) f += sqrtf(i) * (i % 8 == 0 ? 1 : -1);
              s += (short(f)*short(f)) % 256;
            }
          }
          printf("final: %d:%d.\n", f, s);
          return 1;
        }
      '''
      self.do_benchmark(src, [], 'final: 826:14324.', emcc_args=['-s', 'CORRECT_SIGNS=1', '-s', 'CORRECT_OVERFLOWS=1', '-s', 'CORRECT_ROUNDINGS=1'])
    def fasta(self, double_rep):
      # Helper: run the fasta benchmark with 'double' replaced by |double_rep|
      # ('float' or 'double') throughout the source.
      src = open(path_from_root('tests', 'fasta.cpp'), 'r').read().replace('double', double_rep)
      self.do_benchmark(src, ['2100000'], '''GGCCGGGCGCGGTGGCTCACGCCTGTAATCCCAGCACTTTGGGAGGCCGAGGCGGGCGGA\nTCACCTGAGGTCAGGAGTTCGAGACCAGCCTGGCCAACATGGTGAAACCCCGTCTCTACT\nAAAAATACAAAAATTAGCCGGGCGTGGTGGCGCGCGCCTGTAATCCCAGCTACTCGGGAG\nGCTGAGGCAGGAGAATCGCTTGAACCCGGGAGGCGGAGGTTGCAGTGAGCCGAGATCGCG\nCCACTGCACTCCAGCCTGGGCGACAGAGCGAGACTCCGTCTCAAAAAGGCCGGGCGCGGT\nGGCTCACGCCTGTAATCCCAGCACTTTGGGAGGCCGAGGCGGGCGGATCACCTGAGGTCA\nGGAGTTCGAGACCAGCCTGGCCAACATGGTGAAACCCCGTCTCTACTAAAAATACAAAAA\nTTAGCCGGGCGTGGTGGCGCGCGCCTGTAATCCCAGCTACTCGGGAGGCTGAGGCAGGAG\nAATCGCTTGAACCCGGGAGGCGGAGGTTGCAGTGAGCCGAGATCGCGCCACTGCACTCCA\nGCCTGGGCGA''')
    def test_fasta_float(self):
      self.fasta('float')
    def zzztest_fasta_double(self):
      # Disabled (zzz prefix): double-precision variant of fasta.
      self.fasta('double')
    def test_skinning(self):
      # Vertex skinning benchmark (no SIMD variant).
      src = open(path_from_root('tests', 'skinning_test_no_simd.cpp'), 'r').read()
      self.do_benchmark(src, ['10000', '1000'], 'blah=0.000000')
    def test_dlmalloc(self):
      # XXX This seems to have regressed slightly with emcc. Are -g and the signs lines passed properly?
      src = open(path_from_root('system', 'lib', 'dlmalloc.c'), 'r').read() + '\n\n\n' + open(path_from_root('tests', 'dlmalloc_test.c'), 'r').read()
      self.do_benchmark(src, ['400', '400'], '*400,0*', emcc_args=['-g', '-s', 'CORRECT_SIGNS=2', '-s', 'CORRECT_SIGNS_LINES=[4820, 4195, 4250, 4203, 4209, 4239, 4231]'])
elif 'sanity' in str(sys.argv):
  # Run some sanity checks on the test runner and emcc.
  # These tests deliberately modify the user's EM_CONFIG file; a backup is
  # taken up front and restored after every test (see restore()/tearDown).
  sys.argv = filter(lambda x: x != 'sanity', sys.argv)
  print
  print 'Running sanity checks.'
  print 'WARNING: This will modify %s, and in theory can break it although it should be restored properly. A backup will be saved in %s_backup' % (EM_CONFIG, EM_CONFIG)
  print
  assert os.path.exists(CONFIG_FILE), 'To run these tests, we need a (working!) %s file to already exist' % EM_CONFIG
  shutil.copyfile(CONFIG_FILE, CONFIG_FILE + '_backup')
  # Put the backed-up config file back in place.
  def restore():
    shutil.copyfile(CONFIG_FILE + '_backup', CONFIG_FILE)
  SANITY_FILE = CONFIG_FILE + '_sanity'
  # Remove both the config file and the sanity stamp file.
  def wipe():
    try_delete(CONFIG_FILE)
    try_delete(SANITY_FILE)
  # The two entry points under test: emcc itself, and the test runner.
  commands = [[EMCC], ['python', path_from_root('tests', 'runner.py'), 'blahblah']]
  def mtime(filename):
    return os.stat(filename).st_mtime
  class sanity(RunnerCore):
    def setUp(self):
      # Every test starts from a clean slate: no config file, no sanity stamp.
      wipe()
    def tearDown(self):
      # Put the user's original config file back after each test.
      restore()
def do(self, command):
if type(command) is not list:
command = [command]
if command[0] == EMCC:
command = ['python'] + command
return Popen(command, stdout=PIPE, stderr=STDOUT).communicate()[0]
def check_working(self, command, expected=None):
if type(command) is not list:
command = [command]
if expected is None:
if command[0] == EMCC:
expected = 'no input files'
else:
expected = "has no attribute 'blahblah'"
output = self.do(command)
self.assertContained(expected, output)
return output
def test_aaa_normal(self): # this should be the very first thing that runs. if this fails, everything else is irrelevant!
for command in commands:
# Your existing EM_CONFIG should work!
restore()
self.check_working(command)
    def test_firstrun(self):
      # With no config file present, the first run must emit the welcome text,
      # copy settings.py into place, and stop. A second run with a broken
      # config must fail in the appropriate way.
      for command in commands:
        wipe()
        output = self.do(command)
        self.assertContained('Welcome to Emscripten!', output)
        self.assertContained('This is the first time any of the Emscripten tools has been run.', output)
        self.assertContained('A settings file has been copied to %s, at absolute path: %s' % (EM_CONFIG, CONFIG_FILE), output)
        self.assertContained('Please edit that file and change the paths to fit your system', output)
        self.assertContained('make sure LLVM_ROOT and NODE_JS are correct', output)
        self.assertContained('This command will now exit. When you are done editing those paths, re-run it.', output)
        assert output.split()[-1].endswith('===='), 'We should have stopped: ' + output
        assert (open(CONFIG_FILE).read() == open(path_from_root('settings.py')).read()), 'Settings should be copied from settings.py'
        # Second run, with bad EM_CONFIG
        for settings in ['blah', 'LLVM_ROOT="blah"; JS_ENGINES=[]; COMPILER_ENGINE=NODE_JS=SPIDERMONKEY_ENGINE=[]']:
          f = open(CONFIG_FILE, 'w')
          f.write(settings)
          f.close()
          output = self.do(command)
          if 'LLVM_ROOT' not in settings:
            self.assertContained('Error in evaluating %s' % EM_CONFIG, output)
          else:
            self.assertContained('FATAL', output) # sanity check should fail
    def test_closure_compiler(self):
      # A bad CLOSURE_COMPILER path should only warn until closure is actually
      # used (-O2), at which point it must be fatal; a good path must build.
      CLOSURE_FATAL = 'fatal: Closure compiler'
      CLOSURE_WARNING = 'WARNING: Closure compiler'
      # Sanity check should find closure
      restore()
      output = self.check_working(EMCC)
      self.assertNotContained(CLOSURE_FATAL, output)
      self.assertNotContained(CLOSURE_WARNING, output)
      # Append a bad path for closure, will warn
      f = open(CONFIG_FILE, 'a')
      f.write('CLOSURE_COMPILER = "/tmp/nowhere/nothingtoseehere/kjadsfkjwelkjsdfkqgas/nonexistent.txt"\n')
      f.close()
      output = self.check_working(EMCC, CLOSURE_WARNING)
      # And if you actually try to use the bad path, will be fatal
      f = open(CONFIG_FILE, 'a')
      f.write('CLOSURE_COMPILER = "/tmp/nowhere/nothingtoseehere/kjadsfkjwelkjsdfkqgas/nonexistent.txt"\n')
      f.close()
      output = self.check_working([EMCC, '-O2', 'tests/hello_world.cpp'], CLOSURE_FATAL)
      # With a working path, all is well
      restore()
      try_delete('a.out.js')
      output = self.check_working([EMCC, '-O2', 'tests/hello_world.cpp'], '')
      assert os.path.exists('a.out.js')
    def test_emcc(self):
      # Sanity-check caching behaviour: emcc re-runs its checks only when the
      # sanity stamp is missing or older than the config file; EM_CONFIG may
      # also hold the config contents directly (no file on disk).
      SANITY_MESSAGE = 'Emscripten: Running sanity checks'
      SANITY_FAIL_MESSAGE = 'sanity check failed to run'
      # emcc should check sanity if no ${EM_CONFIG}_sanity
      restore()
      time.sleep(0.1)
      assert not os.path.exists(SANITY_FILE) # restore is just the settings, not the sanity
      output = self.check_working(EMCC)
      self.assertContained(SANITY_MESSAGE, output)
      assert os.path.exists(SANITY_FILE) # EMCC should have checked sanity successfully
      assert mtime(SANITY_FILE) >= mtime(CONFIG_FILE)
      self.assertNotContained(SANITY_FAIL_MESSAGE, output)
      # emcc run again should not sanity check, because the sanity file is newer
      output = self.check_working(EMCC)
      self.assertNotContained(SANITY_MESSAGE, output)
      self.assertNotContained(SANITY_FAIL_MESSAGE, output)
      # But the test runner should
      output = self.check_working(commands[1])
      self.assertContained(SANITY_MESSAGE, output)
      self.assertNotContained(SANITY_FAIL_MESSAGE, output)
      # Make sure the test runner didn't do anything to the setup
      output = self.check_working(EMCC)
      self.assertNotContained(SANITY_MESSAGE, output)
      self.assertNotContained(SANITY_FAIL_MESSAGE, output)
      # emcc should also check sanity if the file is outdated
      time.sleep(0.1)
      restore()
      assert mtime(SANITY_FILE) < mtime(CONFIG_FILE)
      output = self.check_working(EMCC)
      self.assertContained(SANITY_MESSAGE, output)
      assert mtime(SANITY_FILE) >= mtime(CONFIG_FILE)
      self.assertNotContained(SANITY_FAIL_MESSAGE, output)
      # emcc should be configurable directly from EM_CONFIG without any config file
      restore()
      config = open(CONFIG_FILE, 'r').read()
      os.environ['EM_CONFIG'] = config
      wipe()
      dirname = tempfile.mkdtemp(prefix='emscripten_test_' + self.__class__.__name__ + '_', dir=TEMP_DIR)
      open(os.path.join(dirname, 'main.cpp'), 'w').write('''
        #include <stdio.h>
        int main() {
          printf("hello from emcc with no config file\\n");
          return 0;
        }
      ''')
      Popen(['python', EMCC, os.path.join(dirname, 'main.cpp'), '-o', os.path.join(dirname, 'a.out.js')]).communicate()
      self.assertContained('hello from emcc with no config file', run_js(os.path.join(dirname, 'a.out.js')))
      del os.environ['EM_CONFIG']
      shutil.rmtree(dirname)
def test_emcc_caching(self):
    # Verify that emcc builds dlmalloc/libcxx into its cache exactly once,
    # and that builds which don't need them don't populate the cache.
    INCLUDING_MESSAGE = 'emcc: including X'
    BUILDING_MESSAGE = 'emcc: building X for cache'

    EMCC_CACHE = Cache.dirname

    restore()
    Cache.erase()
    assert not os.path.exists(EMCC_CACHE)

    try:
        emcc_debug = os.environ.get('EMCC_DEBUG')
        os.environ['EMCC_DEBUG'] = '1'

        # Building a file that doesn't need cached stuff should not trigger cache generation
        output = self.do([EMCC, path_from_root('tests', 'hello_world.cpp')])
        assert INCLUDING_MESSAGE.replace('X', 'dlmalloc') not in output
        assert BUILDING_MESSAGE.replace('X', 'dlmalloc') not in output
        self.assertContained('hello, world!', run_js('a.out.js'))
        assert not os.path.exists(EMCC_CACHE)
        try_delete('a.out.js')

        basebc_name = os.path.join(TEMP_DIR, 'emscripten_temp', 'emcc-0-basebc.bc')
        dcebc_name = os.path.join(TEMP_DIR, 'emscripten_temp', 'emcc-1-dce.bc')

        # Building a file that *does* need dlmalloc *should* trigger cache
        # generation, but only the first time
        for filename, libname in [('hello_malloc.cpp', 'dlmalloc'), ('hello_libcxx.cpp', 'libcxx')]:
            for i in range(3):
                try_delete(basebc_name) # we might need to check this file later
                try_delete(dcebc_name) # we might need to check this file later
                output = self.do([EMCC, path_from_root('tests', filename)])
                assert INCLUDING_MESSAGE.replace('X', libname) in output
                if libname == 'dlmalloc':
                    assert INCLUDING_MESSAGE.replace('X', 'libcxx') not in output # we don't need libcxx in this code
                else:
                    assert INCLUDING_MESSAGE.replace('X', 'dlmalloc') in output # libcxx always forces inclusion of dlmalloc
                assert (BUILDING_MESSAGE.replace('X', libname) in output) == (i == 0), 'Must only build the first time'
                self.assertContained('hello, world!', run_js('a.out.js'))
                assert os.path.exists(EMCC_CACHE)
                assert os.path.exists(os.path.join(EMCC_CACHE, libname + '.bc'))
                if libname == 'libcxx':
                    assert os.stat(os.path.join(EMCC_CACHE, libname + '.bc')).st_size > 4000000, 'libc++ is big'
                    assert os.stat(basebc_name).st_size > 4000000, 'libc++ is indeed big'
                    assert os.stat(dcebc_name).st_size < 2000000, 'Dead code elimination must remove most of libc++'
    finally:
        # Bug fix: previously EMCC_DEBUG was only restored when it had been
        # set before the test; when it was unset, the '1' written above
        # leaked into the environment of every subsequent test.
        if emcc_debug:
            os.environ['EMCC_DEBUG'] = emcc_debug
        else:
            os.environ.pop('EMCC_DEBUG', None)
else:
raise Exception('Test runner is confused: ' + str(sys.argv))
if __name__ == '__main__':
    sys.argv = [sys.argv[0]] + ['-v'] + sys.argv[1:] # Verbose output by default

    # Sanity checks
    check_sanity(force=True)

    # Keep only the JS engines that actually run on this machine, warning
    # about any that were configured but do not work.
    total_engines = len(JS_ENGINES)
    JS_ENGINES = filter(check_engine, JS_ENGINES)
    if len(JS_ENGINES) == 0:
        print 'WARNING: None of the JS engines in JS_ENGINES appears to work.'
    elif len(JS_ENGINES) < total_engines:
        print 'WARNING: Not all the JS engines in JS_ENGINES appears to work, ignoring those.'

    # Go
    unittest.main()
|
example_2_sensel_contacts.py | #!/usr/bin/env python
##########################################################################
# MIT License
#
# Copyright (c) 2013-2017 Sensel, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this
# software and associated documentation files (the "Software"), to deal in the Software
# without restriction, including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons
# to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or
# substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
# INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE
# FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
##########################################################################
# Python 3 compatibility
from __future__ import print_function
try:
input = raw_input
except NameError:
pass
import sys
import threading
sys.path.append('../../sensel-lib-wrappers/sensel-lib-python')
import sensel
# Set to True by the watcher thread once the user presses Enter; the main
# loop polls it to know when to stop scanning.
enter_pressed = False
def wait_for_enter():
    """Block until the user presses Enter, then flag the main loop to exit."""
    global enter_pressed
    input('Press Enter to exit...')
    enter_pressed = True
def open_sensel():
    """Open the first detected Sensel device.

    :return: the device handle, or ``None`` when no device is attached.
    """
    (error, device_list) = sensel.getDeviceList()
    if device_list.num_devices == 0:
        return None
    (error, handle) = sensel.openDeviceByID(device_list.devices[0].idx)
    return handle
def init_frame():
    # Configure the device (via the module-level ``handle``) to report contact
    # data, allocate a frame buffer for it, and start scanning.
    error = sensel.setFrameContent(handle, sensel.FRAME_CONTENT_CONTACTS_MASK)
    error, frame = sensel.allocateFrameData(handle)
    error = sensel.startScanning(handle)
    return frame
def scan_frames(frame, info):
    """Read every frame currently buffered on the device and print each one."""
    error = sensel.readSensor(handle)
    (error, num_frames) = sensel.getNumAvailableFrames(handle)
    for _ in range(num_frames):
        error = sensel.getFrame(handle, frame)
        print_frame(frame, info)
def print_frame(frame, info):
    """Print the contacts in ``frame`` and mirror contact start/end on the LEDs."""
    if not frame.n_contacts:
        return
    print()
    print('Num Contacts:', frame.n_contacts)
    for idx in range(frame.n_contacts):
        contact = frame.contacts[idx]
        print('Contact ID:', contact.id)
        # Light the LED for the duration of each contact.
        if contact.state == sensel.CONTACT_START:
            sensel.setLEDBrightness(handle, contact.id, 100)
        elif contact.state == sensel.CONTACT_END:
            sensel.setLEDBrightness(handle, contact.id, 0)
def close_sensel(frame):
    # Release the frame buffer, stop scanning and close the device handle,
    # in that order.
    error = sensel.freeFrameData(handle, frame)
    error = sensel.stopScanning(handle)
    error = sensel.close(handle)
if __name__ == '__main__':
    # Open the first Sensel device and stream contact frames until the user
    # presses Enter (watched by a background thread that sets enter_pressed).
    handle = open_sensel()
    if handle:
        error, info = sensel.getSensorInfo(handle)
        frame = init_frame()

        t = threading.Thread(target=wait_for_enter)
        t.start()
        while not enter_pressed:
            scan_frames(frame, info)
        close_sensel(frame)
|
scanner.py | #coding:utf-8
#!/usr/bin/env python
'''
______ __ __
/\__ _\ __ /\ \ /\ \__
\/_/\ \/ ___ ____/\_\ __\ \ \___\ \ ,_\
\ \ \ /' _ `\ /',__\/\ \ /'_ `\ \ _ `\ \ \/
\_\ \__/\ \/\ \/\__, `\ \ \/\ \L\ \ \ \ \ \ \ \_
/\_____\ \_\ \_\/\____/\ \_\ \____ \ \_\ \_\ \__\
\/_____/\/_/\/_/\/___/ \/_/\/___L\ \/_/\/_/\/__/
/\____/
\_/__/
__ __
/\ \ /\ \
\ \ \ __ \ \ \____ ____
\ \ \ __ /'__`\ \ \ '__`\ /',__\
\ \ \L\ \/\ \L\.\_\ \ \L\ \/\__, `\
\ \____/\ \__/.\_\\ \_,__/\/\____/
\/___/ \/__/\/_/ \/___/ \/___/
'''
import platform
import sys
import socket as sk
import httplib
from subprocess import Popen, PIPE
import re
from optparse import OptionParser
import threading
from threading import Thread
from Queue import Queue
# Default number of worker threads (overridable with -t).
NUM = 50
# Default ports probed when no explicit range or -p list is given.
PORTS=[21,22,23,25,80,81,110,135,139,389,443,445,873,1433,1434,1521,2433,3306,3307,3389,5800,5900,8080,22222,22022,27017,28017]
# Paths probed by dlpage() on HTTP ports when -d is enabled.
URLS=['','phpinfo.php','phpmyadmin/','xmapp/','zabbix/','jmx-console/','.svn/entries','nagios/','index.action','login.action']
# convert an IP address from its dotted-quad format to its
# 32 binary digit representation
def ip2bin(ip):
    """Convert a dotted-quad IP address into its 32-bit binary string.

    Missing quads (e.g. from "10.0") are zero-filled on the right.
    """
    quads = [q for q in ip.split(".") if q != ""]
    bits = "".join(dec2bin(int(q), 8) for q in quads)
    # Pad any quads that were absent from the input with all-zero octets.
    return bits + "00000000" * (4 - len(quads))
# convert a decimal number to binary representation
# if d is specified, left-pad the binary number with 0s to that length
def dec2bin(n, d=None):
    """Return the binary representation of ``n``.

    When ``d`` is given, the result is left-padded with zeros to ``d`` digits.
    """
    # format(n, "b") matches the hand-rolled shift loop for positive n;
    # non-positive values produce "" exactly as the original loop did.
    s = format(n, "b") if n > 0 else ""
    if d is not None:
        s = s.rjust(d, "0")
    if s == "":
        s = "0"
    return s
# convert a binary string into an IP address
def bin2ip(b):
    """Convert a binary string (a multiple of 8 bits) into dotted-quad form."""
    octets = [str(int(b[i:i + 8], 2)) for i in range(0, len(b), 8)]
    return ".".join(octets)
# print a list of IP addresses based on the CIDR block specified
def listCIDR(c):
    """Return the list of IP addresses in the CIDR block ``c`` (e.g. "10.0.0.0/24").

    Bug fix: a /32 block previously only *printed* the single address and fell
    through to return an empty list; it is now appended and returned like any
    other subnet size.
    """
    cidrlist = []
    parts = c.split("/")
    baseIP = ip2bin(parts[0])
    subnet = int(parts[1])
    # Python string-slicing weirdness:
    # "myString"[:-1] -> "myStrin" but "myString"[:0] -> ""
    # so a /32 subnet (slice [:-0]) must be special-cased.
    if subnet == 32:
        cidrlist.append(bin2ip(baseIP))
    else:
        # Concatenate the fixed prefix with every possible suffix.
        ipPrefix = baseIP[:-(32 - subnet)]
        for i in range(2 ** (32 - subnet)):
            cidrlist.append(bin2ip(ipPrefix + dec2bin(i, 32 - subnet)))
    return cidrlist
# input validation routine for the CIDR block specified
def validateCIDRBlock(b):
    # Input validation for a CIDR block string ("prefix/subnet").
    # Returns True only when the format, the quad values and the subnet size
    # all check out; prints the specific error otherwise.

    # appropriate format for CIDR block ($prefix/$subnet)
    p = re.compile("^([0-9]{1,3}\.){0,3}[0-9]{1,3}(/[0-9]{1,2}){1}$")
    if not p.match(b):
        print "Error: Invalid CIDR format!"
        return False

    # extract prefix and subnet size
    prefix, subnet = b.split("/")

    # each quad has an appropriate value (1-255)
    quads = prefix.split(".")
    for q in quads:
        if (int(q) < 0) or (int(q) > 255):
            print "Error: quad "+str(q)+" wrong size."
            return False

    # subnet is an appropriate value (1-32)
    if (int(subnet) < 1) or (int(subnet) > 32):
        print "Error: subnet "+str(subnet)+" wrong size."
        return False

    # passed all checks -> return True
    return True
def pinger():
    """Worker thread: pop IPs from queue ``q``, ping each, and append the
    responsive ones to the global ``pinglist``."""
    global pinglist
    while True:
        ip = q.get()
        if platform.system() == 'Linux':
            p = Popen(['ping', '-c 2', ip], stdout=PIPE)
            # Bug fix: the pattern misspelled 'received' and the result was
            # compared with ``!= 0`` — always true, since re.search returns a
            # match object or None — so every Linux host was reported alive.
            m = re.search('(.*)\sreceived', p.stdout.read())
            if m:
                pinglist.append(ip)
        if platform.system() == 'Windows':
            p = Popen('ping -n 2 ' + ip, stdout=PIPE)
            m = re.search('TTL', p.stdout.read())
            if m:
                pinglist.append(ip)
        q.task_done()
def scanipport():
global lock
while True:
host,port=sq.get()
sd=sk.socket(sk.AF_INET, sk.SOCK_STREAM)
try:
sd.connect((host,port))
if options.genlist==True:
if port not in ipdict:
ipdict[port]=[]
ipdict[port].append(host)
else:
ipdict[port].append(host)
else:
lock.acquire()
print "%s:%d OPEN" % (host, port)
lock.release()
sd.close()
if options.downpage==True and port in [80,81,1080,8080]:
dlpage(ip,port)
except:
pass
sq.task_done()
def dlpage(ip,port):
    # Probe a web server for a list of common URLs (global URLS) and append
    # any page answering 200/301/302 to the global HTML report buffer ``page``.
    global page,lock
    page+='<h1>'+ip+':'+str(port)+'</h1><br>'
    for url in URLS:
        c=httplib.HTTPConnection(ip+':'+str(port))
        c.request('GET','/'+url)
        r=c.getresponse()
        #print url,r.status
        if r.status in [200,301,302]:
            if url=='':
                url='Homepage'
            # The lock serialises both stdout and the shared ``page`` buffer.
            lock.acquire()
            print ip+':'+str(port),url,'exists'
            page+='<h2>'+url+'</h2><br>'+r.read()
            lock.release()
        c.close()
if __name__ == "__main__":
    usage="usage: InsightScan.py <hosts[/24|/CIDR]> [start port] [end port] -t threads\n\nExample: InsightScan.py 192.168.0.0/24 1 1024 -t 20"
    parser = OptionParser(usage=usage)
    parser.add_option("-t", "--threads", dest="NUM",help="Maximum threads, default 50")
    parser.add_option("-p", "--portlist", dest="PORTS",help="Customize port list, separate with ',' example: 21,22,23,25 ...")
    parser.add_option("-N", '--noping', action="store_true", dest="noping",help="Skip ping sweep, port scan whether targets are alive or not")
    parser.add_option("-P", '--pingonly', action="store_true", dest="noscan",help="Ping scan only,disable port scan")
    parser.add_option("-d", '--downpage', action="store_true", dest="downpage",help="Download and save HTML pages from HTTP ports(80,81,8080), also detects some web apps")
    parser.add_option("-l", '--genlist', action="store_true", dest="genlist",help="Output a list, ordered by port number,for THC-Hydra IP list")
    parser.add_option("-L", '--genfile', action="store_true", dest="genfile",help="Put the IP list in separate files named by port number. Implies -l option.\nExample: IPs with port 445 opened will be put into 445.txt")
    (options, args) = parser.parse_args()

    if options.NUM !=None and options.NUM!=0:
        NUM=int(options.NUM)
    print 'Scanning with',NUM,'threads...'
    if len(args)<1:
        parser.print_help()
        sys.exit()
    if options.noping== True and options.noscan == True:
        print 'ERROR: Cannot use -N and -P together'
        sys.exit()

    # Resolve the target argument: a single IP, or a CIDR block expanded to
    # a full address list.
    iplist=[]
    ipaddr=args[0]
    if len(args)==2:
        print 'Must specify end port'
        sys.exit()
    try:
        sk.inet_aton(ipaddr)
        iplist.append(ipaddr)
    except:
        if not validateCIDRBlock(ipaddr):
            print 'IP address not valid!'
            sys.exit()
        else:
            iplist=listCIDR(ipaddr)

    # An explicit [start port] [end port] pair overrides the default PORTS,
    # and -p overrides both.
    if len(args)==3:
        startport=int(args[1])
        endport=int(args[2])
        if startport>endport:
            print 'start port must be smaller or equal to end port'
            sys.exit()
        PORTS=[]
        for i in xrange(startport,endport+1):
            PORTS.append(i)
    if options.PORTS!= None:
        PORTS=[int(pn) for pn in options.PORTS.split(',') ]

    global page
    page=''

    #start ping threads
    if options.noping != True:
        print "Scanning for live machines...\n"
        global pinglist
        q=Queue()
        pinglist=[]
        for i in range(NUM):
            t = Thread(target=pinger)
            t.setDaemon(True)
            t.start()
        for ip in iplist:
            q.put(ip)
        q.join()
    else:
        pinglist=iplist
    #print pinglist

    if options.noscan == True:
        for host in pinglist:
            print host,
        sys.exit()
    if len(pinglist)==0:
        print 'No live machines detected. Try again with -N switch'
        sys.exit()

    # Fan the (host, port) pairs out to the scanner worker threads.
    print "Scanning ports...\n"
    sq=Queue()
    lock = threading.Lock()
    if options.genfile==True:
        options.genlist=True
    if options.genlist==True:
        global ipdict
        ipdict={}
    for i in range(NUM):
        st = Thread(target=scanipport)
        st.setDaemon(True)
        st.start()
    for scanip in pinglist:
        for port in PORTS:
            sq.put((scanip,port))
    sq.join()

    # Report: grouped per port on stdout (-l) or into <port>.txt files (-L).
    if options.genlist==True:
        for port,iplist in ipdict.items():
            if options.genfile==True:
                file=open(str(port)+'.txt', "w")
            else:
                print "\n========Port",port,'========'
            for ip in iplist:
                if options.genfile==True:
                    file.write(ip+"\n")
                else:
                    print ip
    if options.downpage==True and page!='':
        f = open('page.html', 'w')
        f.write(page)
        f.close()
        print 'page dumped to page.html'
|
test_immediatescheduler.py | import pytest
import unittest
import threading
from datetime import timedelta
from time import sleep
from rx.disposable import Disposable
from rx.scheduler import ImmediateScheduler
from rx.internal.basic import default_now
from rx.internal.constants import DELTA_ZERO
from rx.internal.exceptions import WouldBlockException
class TestImmediateScheduler(unittest.TestCase):
    """Unit tests for :class:`rx.scheduler.ImmediateScheduler`."""

    def test_immediate_singleton(self):
        """The scheduler is a process-wide singleton, even across threads."""
        scheduler = [
            ImmediateScheduler(),
            ImmediateScheduler.singleton()
        ]
        assert scheduler[0] is scheduler[1]

        # Construct the scheduler from two different threads; both must get
        # the same instance.
        gate = [threading.Semaphore(0), threading.Semaphore(0)]
        scheduler = [None, None]

        def run(idx):
            scheduler[idx] = ImmediateScheduler()
            gate[idx].release()

        for idx in (0, 1):
            threading.Thread(target=run, args=(idx,)).start()
            gate[idx].acquire()

        assert scheduler[0] is not None
        assert scheduler[1] is not None
        assert scheduler[0] is scheduler[1]

    def test_immediate_extend(self):
        """A subclass gets its own singleton, distinct from the base class's."""
        class MyScheduler(ImmediateScheduler):
            pass

        scheduler = [
            MyScheduler(),
            MyScheduler.singleton(),
            ImmediateScheduler.singleton(),
        ]
        assert scheduler[0] is scheduler[1]
        assert scheduler[0] is not scheduler[2]

    def test_immediate_now(self):
        """``now`` tracks the wall clock."""
        scheduler = ImmediateScheduler()
        diff = scheduler.now - default_now()
        assert abs(diff) < timedelta(milliseconds=1)

    def test_immediate_now_units(self):
        """``now`` advances in real time."""
        scheduler = ImmediateScheduler()
        diff = scheduler.now
        sleep(0.1)
        diff = scheduler.now - diff
        # Loose bounds around the 100ms sleep to tolerate scheduling jitter.
        assert timedelta(milliseconds=80) < diff < timedelta(milliseconds=180)

    def test_immediate_scheduleaction(self):
        """``schedule`` runs the action synchronously."""
        scheduler = ImmediateScheduler()
        ran = False

        def action(scheduler, state=None):
            nonlocal ran
            ran = True

        scheduler.schedule(action)
        assert ran

    def test_immediate_schedule_action_error(self):
        """An exception raised by the action propagates to the caller."""
        scheduler = ImmediateScheduler()

        class MyException(Exception):
            pass

        def action(scheduler, state=None):
            raise MyException()

        with pytest.raises(MyException):
            return scheduler.schedule(action)

    def test_immediate_schedule_action_due_error(self):
        """Scheduling with a positive delay would block, so it must raise
        without ever running the action."""
        scheduler = ImmediateScheduler()
        ran = False

        def action(scheduler, state=None):
            nonlocal ran
            ran = True

        with pytest.raises(WouldBlockException):
            scheduler.schedule_relative(0.1, action)

        assert ran is False

    def test_immediate_simple1(self):
        """``schedule`` passes the state through to the action."""
        scheduler = ImmediateScheduler()
        xx = 0

        def action(scheduler, state=None):
            nonlocal xx
            xx = state
            return Disposable()

        scheduler.schedule(action, 42)
        assert xx == 42

    def test_immediate_simple2(self):
        """``schedule_absolute`` at 'now' runs immediately with the state."""
        scheduler = ImmediateScheduler()
        xx = 0

        def action(scheduler, state=None):
            nonlocal xx
            xx = state
            return Disposable()

        scheduler.schedule_absolute(default_now(), action, 42)
        assert xx == 42

    def test_immediate_simple3(self):
        """``schedule_relative`` with a zero delay runs immediately."""
        scheduler = ImmediateScheduler()
        xx = 0

        def action(scheduler, state=None):
            nonlocal xx
            xx = state
            return Disposable()

        scheduler.schedule_relative(DELTA_ZERO, action, 42)
        assert xx == 42

    def test_immediate_recursive1(self):
        """An action may schedule a nested action via ``schedule``."""
        scheduler = ImmediateScheduler()
        xx = 0
        yy = 0

        def action(scheduler, state=None):
            nonlocal xx
            xx = state

            def inner_action(scheduler, state=None):
                nonlocal yy
                yy = state
                return Disposable()

            return scheduler.schedule(inner_action, 43)

        scheduler.schedule(action, 42)
        assert xx == 42
        assert yy == 43

    def test_immediate_recursive2(self):
        """An action may schedule a nested action via ``schedule_absolute``."""
        scheduler = ImmediateScheduler()
        xx = 0
        yy = 0

        def action(scheduler, state=None):
            nonlocal xx
            xx = state

            def inner_action(scheduler, state=None):
                nonlocal yy
                yy = state
                return Disposable()

            return scheduler.schedule_absolute(default_now(), inner_action, 43)

        scheduler.schedule_absolute(default_now(), action, 42)
        assert xx == 42
        assert yy == 43

    def test_immediate_recursive3(self):
        """An action may schedule a nested action via ``schedule_relative``."""
        scheduler = ImmediateScheduler()
        xx = 0
        yy = 0

        def action(scheduler, state=None):
            nonlocal xx
            xx = state

            def inner_action(scheduler, state):
                nonlocal yy
                yy = state
                return Disposable()

            return scheduler.schedule_relative(DELTA_ZERO, inner_action, 43)

        scheduler.schedule_relative(DELTA_ZERO, action, 42)
        assert xx == 42
        assert yy == 43
|
signal_test2.py | # -*- coding: utf-8 -*-
# @author: edgardeng
# @date: 2021-03-18
# @file: signals demo (resetting a counter via a signal handler)
import signal
import time
from functools import partial
from multiprocessing import Process, Queue
class Counter(object):
    """A mutable counter holding an integer value in ``current``."""

    def __init__(self, val):
        self.current = val

    def next(self):
        """Advance by one and return the new value."""
        nxt = self.current + 1
        self.current = nxt
        return nxt

    def restart(self, val=0):
        """Reset the counter to ``val`` (defaults to zero)."""
        self.current = val
def handler(counter, queue, signalnum, frame=None):
    """Signal handler: reset ``counter`` to the next value read from ``queue``.

    Bug fix: the parameters were previously ``(counter, signalnum, queue)``,
    but ``start_count`` registers this via ``partial(handler, counter, q)``
    and the signal machinery then appends ``(signalnum, frame)`` — so the
    queue landed in the ``signalnum`` slot and the extra frame argument made
    the call raise TypeError. The order now matches the partial binding, and
    the (unused) frame is accepted and ignored.
    """
    print("received signal:", signalnum)
    counter.restart(queue.get())
def start_count(q):
    # Count upward forever, printing each value; on SIGABRT the registered
    # handler resets the counter to the next value read from the queue.
    # NOTE(review): signal delivers (signum, frame) to the registered
    # callable — confirm handler's parameter order matches the
    # partial(handler, counter, q) binding used here.
    counter = Counter(0)
    signal.signal(signal.SIGABRT, partial(handler, counter, q))
    while True:
        print('--', counter.next())
        time.sleep(0.2)
def interval_restart(q):
    """Periodically queue a reset value (runs forever)."""
    # Original comment (translated): timed restart.
    while True:
        time.sleep(2)
        q.put(0)
        # NOTE(review): signal.alarm schedules SIGALRM in *this* process,
        # while the counting process listens for SIGABRT — confirm which
        # signal/process is actually meant to trigger the reset.
        signal.alarm(2)
if __name__ == '__main__':
    # Two processes sharing a queue: one counts forever, the other
    # periodically enqueues a reset value.
    # NOTE(review): neither process is joined and nothing here sends SIGABRT
    # to the counting process — confirm the intended reset trigger.
    queue = Queue()
    process1 = Process(target=start_count, args=(queue,))
    process2 = Process(target=interval_restart, args=(queue,))
    process1.start()
    process2.start()
|
models.py | """Database models for the reader app."""
from __future__ import annotations
from hashlib import blake2b
from importlib.util import find_spec
from io import BytesIO
from logging import getLogger
from os import path, remove
from pathlib import PurePath
from shutil import rmtree
from threading import Lock, Thread
from typing import Any, List, Tuple
from zipfile import ZipFile
from django.conf import settings
from django.contrib.auth.models import User
from django.contrib.contenttypes.fields import (
GenericForeignKey, GenericRelation
)
from django.contrib.contenttypes.models import ContentType
from django.core.validators import MinValueValidator
from django.db import models
from django.db.models.query import Q
from django.shortcuts import reverse
from django.utils import timezone as tz
from django.utils.functional import cached_property
from django.utils.text import slugify
from MangAdventure import storage, utils, validators
from groups.models import Group
# Use Sentry's capture_exception when the SDK is installed; otherwise fall
# back to a no-op so call sites never need to guard the call.
if find_spec('sentry_sdk'):  # pragma: no cover
    from sentry_sdk import capture_exception
else:
    def capture_exception(_): pass  # noqa: E704

# Serializes the spawning of view-count update threads (see Chapter.track_view).
_update_lock = Lock()
_logger = getLogger('django.db')
def _cover_uploader(obj: Series, name: str) -> str:
name = f'cover.{name.split(".")[-1]}'
name = str(obj.get_directory() / name)
if path.exists(name): # pragma: no cover
remove(name)
return name
class AliasManager(models.Manager):
    """A :class:`~django.db.models.Manager` for aliases."""

    def names(self) -> List[str]:
        """
        Get the names of the aliases.

        :return: The values of the ``alias`` field.
        """
        values = self.get_queryset().values_list('name', flat=True)
        return [name for name in values]
class Alias(models.Model):
    """A generic alias :class:`~django.db.models.Model`."""
    #: The alias name.
    name = models.CharField(
        blank=True, max_length=255, db_index=True, verbose_name='alias'
    )
    #: The primary key of the aliased object.
    object_id = models.PositiveIntegerField()
    #: The content type of the aliased object.
    content_type = models.ForeignKey(ContentType, on_delete=models.CASCADE)
    #: The aliased object itself.
    content_object = GenericForeignKey('content_type', 'object_id')
    #: The manager of the model.
    objects = AliasManager()

    class Meta:
        verbose_name_plural = 'aliases'
        constraints = (
            # An object cannot carry the same alias twice.
            models.UniqueConstraint(
                fields=('name', 'content_type', 'object_id'),
                name='unique_alias_content_object'
            ),
        )

    def __str__(self) -> str:
        """Return the alias of the instance."""
        return self.name or ''
class Author(models.Model):
    """A model representing an author."""
    #: The name of the author.
    name = models.CharField(
        max_length=100, db_index=True,
        help_text="The author's full name."
    )
    #: The aliases of the author.
    aliases = GenericRelation(
        to=Alias, blank=True, related_query_name='main'
    )

    def __str__(self) -> str:
        """
        Return a string representing the object.

        :return: The name of the author.
        """
        return self.name
class Artist(models.Model):
    """A model representing an artist."""
    #: The name of the artist.
    name = models.CharField(
        max_length=100, db_index=True,
        help_text="The artist's full name."
    )
    #: The aliases of the artist.
    aliases = GenericRelation(
        to=Alias, blank=True, related_query_name='main'
    )

    def __str__(self) -> str:
        """
        Return a string representing the object.

        :return: The name of the artist.
        """
        return self.name
class Category(models.Model):
    """A model representing a category."""
    #: The category's ID.
    id = models.CharField(
        primary_key=True, default='', max_length=25, auto_created=True
    )
    #: The unique name of the category.
    name = models.CharField(
        unique=True, help_text=(
            'The name of the category. Must be '
            'unique and cannot be changed once set.'
        ), max_length=25
    )
    #: The description of the category.
    description = models.TextField(help_text='A description for the category.')

    class Meta:
        verbose_name_plural = 'categories'
        ordering = ('id',)

    def save(self, *args, **kwargs):
        """Save the current instance."""
        # Derive the immutable primary key from the name on first save.
        if not self.id:
            self.id = self.name.lower()
        super().save(*args, **kwargs)

    def __str__(self) -> str:
        """
        Return a string representing the object.

        :return: The name of the category.
        """
        return self.name
class Series(models.Model):
    """A model representing a series."""
    #: The title of the series.
    title = models.CharField(
        max_length=250, db_index=True, help_text='The title of the series.'
    )
    #: The unique slug of the series.
    slug = models.SlugField(
        blank=True, unique=True, verbose_name='Custom slug',
        help_text='The unique slug of the series. Will be used in the URL.'
    )
    #: The description of the series.
    description = models.TextField(
        blank=True, help_text='The description of the series.'
    )
    #: The cover image of the series.
    cover = models.ImageField(
        help_text=(
            'Upload a cover image for the series.'
            ' Its size must not exceed 2 MBs.'
        ), upload_to=_cover_uploader,
        validators=(validators.FileSizeValidator(2),),
        storage=storage.CDNStorage((300, 300))
    )
    #: The authors of the series.
    authors = models.ManyToManyField(Author, blank=True)
    #: The artists of the series.
    artists = models.ManyToManyField(Artist, blank=True)
    #: The categories of the series.
    categories = models.ManyToManyField(Category, blank=True)
    #: The publication status of the series.
    completed = models.BooleanField(
        default=False, help_text='Is the series completed?'
    )
    #: The licensing status of the series.
    licensed = models.BooleanField(
        default=False, help_text='Is the series licensed?'
    )
    #: The date the series was created.
    created = models.DateTimeField(auto_now_add=True)
    #: The modification date of the series.
    modified = models.DateTimeField(auto_now=True, db_index=True)
    #: The chapter name format of the series.
    format = models.CharField(
        default='Vol. {volume}, Ch. {number}: {title}',
        max_length=100, verbose_name='chapter name format',
        help_text='The format used to render the chapter names.'
    )
    #: The aliases of the series.
    aliases = GenericRelation(
        to=Alias, blank=True, related_query_name='alias'
    )
    #: The person who manages this series.
    manager = models.ForeignKey(
        User, editable=True, blank=False, null=True,
        help_text='The person who manages this series.',
        on_delete=models.SET_NULL, limit_choices_to=(
            Q(is_superuser=True) | Q(groups__name='Scanlator')
        )
    )

    def get_absolute_url(self) -> str:
        """
        Get the absolute URL of the object.

        :return: The URL of :func:`reader.views.series`.
        """
        return reverse('reader:series', args=(self.slug,))

    def get_directory(self) -> PurePath:
        """
        Get the storage directory of the object.

        :return: A path relative to
                 :const:`~MangAdventure.settings.MEDIA_ROOT`.
        """
        return PurePath('series', self.slug)

    class Meta:
        verbose_name_plural = 'series'
        get_latest_by = 'modified'

    def save(self, *args, **kwargs):
        """Save the current instance."""
        # Fall back to a slug derived from the title when none was given.
        self.slug = slugify(self.slug or self.title)
        super().save(*args, **kwargs)

    def __str__(self) -> str:
        """
        Return a string representing the object.

        :return: The title of the series.
        """
        return self.title
class Chapter(models.Model):
    """A model representing a chapter."""
    #: The title of the chapter.
    title = models.CharField(
        max_length=250, help_text='The title of the chapter.'
    )
    #: The number of the chapter.
    number = models.FloatField(
        default=0, help_text='The number of the chapter.'
    )
    #: The volume of the chapter.
    volume = models.PositiveSmallIntegerField(default=0, help_text=(
        'The volume of the chapter. Leave as 0 if the series has no volumes.'
    ))
    #: The series this chapter belongs to.
    series = models.ForeignKey(
        Series, on_delete=models.CASCADE, related_name='chapters',
        help_text='The series this chapter belongs to.'
    )
    #: The file which contains the chapter's pages.
    file = models.FileField(
        help_text=(
            'Upload a zip or cbz file containing the chapter pages.'
            ' Its size cannot exceed 100 MBs and it'
            ' must not contain more than 1 subfolder.'
        ), validators=(
            validators.FileSizeValidator(100),
            validators.zipfile_validator
        ), blank=True, max_length=255
    )
    #: The status of the chapter.
    final = models.BooleanField(
        default=False, help_text='Is this the final chapter?'
    )
    #: The publication date of the chapter.
    published = models.DateTimeField(
        db_index=True, help_text=(
            'You can select a future date to schedule'
            ' the publication of the chapter.'
        ), default=tz.now
    )
    #: The modification date of the chapter.
    modified = models.DateTimeField(auto_now=True, db_index=True)
    #: The groups that worked on this chapter.
    groups = models.ManyToManyField(
        Group, blank=True, related_name='releases'
    )
    #: The total views of the chapter.
    views = models.PositiveIntegerField(
        default=0, db_index=True, editable=False,
        help_text='The total views of the chapter.'
    )

    class Meta:
        ordering = ('series', 'volume', 'number')
        get_latest_by = ('published', 'modified')
        constraints = (
            models.UniqueConstraint(
                fields=('series', 'volume', 'number'),
                name='unique_chapter_number'
            ),
            models.CheckConstraint(
                check=Q(number__gte=0),
                name='chapter_number_positive'
            )
        )

    @classmethod
    def track_view(cls, **kwargs):  # pragma: no cover
        """
        Increment the chapter views in a new thread.

        :param kwargs: The arguments given to the queryset filter.
        """
        def run():
            cls.objects.filter(**kwargs).update(
                views=models.F('views') + 1
            )
        # The lock serializes thread creation; failures are logged and
        # reported to Sentry (if available) rather than raised.
        _update_lock.acquire()
        try:
            Thread(target=run, daemon=True, name='track_view').start()
        except Exception as exc:
            _logger.exception(exc)
            capture_exception(exc)
        finally:
            _update_lock.release()

    def save(self, *args, **kwargs):
        """Save the current instance."""
        super().save(*args, **kwargs)
        if self.file:
            # Re-validate after save so the validator sees the stored file,
            # then expand it into Page objects (unzip() deletes the archive).
            validators.zipfile_validator(self.file)
            self.unzip()
        # Keep the parent series' completion flag in sync with this chapter.
        self.series.completed = self.final
        self.series.save(update_fields=('completed',))

    def get_absolute_url(self) -> str:
        """
        Get the absolute URL of the object.

        :return: The URL of :func:`reader.views.chapter_redirect`.
        """
        return reverse('reader:chapter', args=(
            self.series.slug, self.volume, self.number
        ))

    def get_directory(self) -> PurePath:
        """
        Get the storage directory of the object.

        :return: A path relative to
                 :const:`~MangAdventure.settings.MEDIA_ROOT`.
        """
        return self.series.get_directory() / \
            str(self.volume) / f'{self.number:g}'

    def unzip(self):
        """Unzip the chapter and save its images."""
        counter = 0
        pages = []
        dir_path = path.join(
            'series', self.series.slug,
            str(self.volume), f'{self.number:g}'
        )
        full_path = settings.MEDIA_ROOT / dir_path
        # Start from a clean directory so stale pages never survive.
        if full_path.exists():
            rmtree(full_path)
        full_path.mkdir(parents=True)
        with ZipFile(self.file) as zf:
            for name in utils.natsort(zf.namelist()):
                if zf.getinfo(name).is_dir():
                    continue
                counter += 1
                data = zf.read(name)
                # Content-addressed filename: a digest of the image bytes
                # plus the original extension.
                dgst = blake2b(data, digest_size=16).hexdigest()
                filename = dgst + path.splitext(name)[-1]
                file_path = path.join(dir_path, filename)
                (full_path / filename).write_bytes(data)
                pages.append(Page(
                    chapter_id=self.id, number=counter, image=file_path
                ))
        # Replace all existing pages, then drop the uploaded archive.
        self.pages.all().delete()
        self.pages.bulk_create(pages)
        self.file.delete(save=True)

    def zip(self) -> BytesIO:
        """
        Generate a zip file containing the pages of this chapter.

        :return: The file-like object of the generated file.
        """
        buf = BytesIO()
        # compression=8 is zipfile.ZIP_DEFLATED.
        with ZipFile(buf, 'a', compression=8) as zf:
            for page in self.pages.all():
                img = page.image.path
                name = f'{page.number:03d}'
                ext = path.splitext(img)[-1]
                zf.write(img, name + ext)
        buf.seek(0)
        return buf

    @cached_property
    def _tuple(self) -> Tuple[int, float]:
        # (volume, number) pair used by the comparison operators below.
        return self.volume, self.number

    def __str__(self) -> str:
        """
        Return a string representing the object.

        :return: The chapter formatted according to the
                 :attr:`~reader.models.Series.format`.
        """
        if not self.series:  # pragma: no cover
            return Series.format.default.format(
                title=self.title or 'N/A',
                volume=self.volume,
                number=f'{self.number:g}',
                date='', series=''
            )
        return self.series.format.format(
            title=self.title,
            volume=self.volume,
            number=f'{self.number:g}',
            date=self.published.strftime('%F'),
            series=self.series.title
        )

    def __eq__(self, other: Any) -> bool:
        """
        Check whether this object is equal to another.

        If the other object is a tuple, the objects are equal if
        the tuple consists of the volume and number of the chapter.
        Otherwise, the objects are equal if they have the
        same base model and their primary keys are equal.

        :param other: Any other object.

        :return: ``True`` if the objects are equal.
        """
        if isinstance(other, tuple):
            return self._tuple == other
        return super().__eq__(other)

    def __gt__(self, other: Any) -> bool:
        """
        Check whether this object is greater than another.

        If the other object is a tuple, this object is greater
        if its volume and number is greater than the tuple.
        Otherwise, it's greater if the objects have the same base model and
        the tuple of its ``volume`` and ``number`` is greater than the other's.

        :param other: Any other object.

        :return: ``True`` if this object is greater.

        :raises TypeError: If the other object is neither a tuple,
                           nor a ``Chapter`` model.
        """
        if isinstance(other, tuple):
            return self._tuple > other
        if isinstance(other, self.__class__):
            return self._tuple > other._tuple
        raise TypeError(
            "'>' not supported between instances of " +
            f"'{self.__class__}' and '{other.__class__}'"
        )

    def __lt__(self, other: Any) -> bool:
        """
        Check whether this object is less than another.

        If the other object is a tuple, this object is lesser
        if its volume and number is less than the tuple.
        Otherwise, it's lesser if the objects have the same base model and
        the tuple of its ``volume`` and ``number`` is less than the other's.

        :param other: Any other object.

        :return: ``True`` if this object is lesser.

        :raises TypeError: If the other object is neither a tuple,
                           nor a ``Chapter`` model.
        """
        if isinstance(other, tuple):
            return self._tuple < other
        if isinstance(other, self.__class__):
            return self._tuple < other._tuple
        raise TypeError(
            "'<' not supported between instances of " +
            f"'{self.__class__}' and '{other.__class__}'"
        )

    def __hash__(self) -> int:
        """
        Return the hash of the object.

        :return: An integer hash value.
        """
        return hash(str(self)) & 0x7FFFFFFF
class _PageNumberField(models.PositiveSmallIntegerField):
    """A positive small integer field whose minimum value is 1 instead of 0."""
    default_validators = (MinValueValidator(1),)

    def formfield(self, **kwargs):  # pragma: no cover
        # HACK: bypass parent to set min_value to 1
        return super(
            models.PositiveSmallIntegerField, self
        ).formfield(min_value=1, **kwargs)
class Page(models.Model):
    """A model representing a page."""

    #: The chapter this page belongs to.
    chapter = models.ForeignKey(
        Chapter, related_name='pages', on_delete=models.CASCADE
    )
    #: The image of the page.
    image = models.ImageField(storage=storage.CDNStorage(), max_length=255)
    #: The number of the page.
    number = _PageNumberField()

    class Meta:
        ordering = ('chapter', 'number')
        constraints = (
            models.CheckConstraint(
                check=Q(number__gte=1),
                name='page_number_nonzero'
            ),
        )

    @cached_property
    def _thumb(self) -> models.ImageField:
        # Same underlying file, served through a 150x150 thumbnail storage.
        img = self.image
        img.storage = storage.CDNStorage((150, 150))
        return img

    @cached_property
    def _file_name(self) -> str:
        # Basename of the image file (the part after the last slash).
        return self.image.name.rsplit('/')[-1]

    def get_absolute_url(self) -> str:
        """
        Get the absolute URL of the object.

        :return: The URL of :func:`reader.views.chapter_page`.
        """
        return reverse('reader:page', args=(
            self.chapter.series.slug, self.chapter.volume,
            self.chapter.number, self.number
        ))

    def __str__(self) -> str:
        """
        Return a string representing the object.

        :return: The title of the series, the volume and number of the
            chapter, and the zero-padded number of the page.
        """
        return '{0.series.title} - {0.volume}/{0.number} #{1:03d}' \
            .format(self.chapter, self.number)

    def __eq__(self, other: Any) -> bool:
        """
        Check whether this object is equal to another.

        If the other object is a number, the objects are equal if
        the ``number`` of this object is equal to the other object.
        Otherwise, the objects are equal if they have the same base model
        and their ``chapter`` and ``number`` are respectively equal.

        :param other: Any other object.

        :return: ``True`` if the objects are equal.
        """
        if isinstance(other, (float, int)):
            return self.number == other
        if not isinstance(other, self.__class__):
            return False
        return self.chapter == other.chapter and self.number == other.number

    def __gt__(self, other: Any) -> bool:
        """
        Check whether this object is greater than another.

        If the other object is a number, this object is greater
        if its ``number`` is greater than the other object.
        Otherwise, it's greater if the objects have the same base model
        and the ``number`` of this object is greater than the other's.

        :param other: Any other object.

        :return: ``True`` if this object is greater.

        :raises TypeError: If the other object is neither a number,
            nor a ``Page`` model.
        """
        if isinstance(other, (float, int)):
            return self.number > other
        if isinstance(other, self.__class__):
            return self.number > other.number
        # BUG FIX: the message previously reported '<' for this operator.
        raise TypeError(
            "'>' not supported between instances of " +
            f"'{self.__class__}' and '{other.__class__}'"
        )

    def __lt__(self, other: Any) -> bool:
        """
        Check whether this object is less than another.

        If the other object is a number, this object is lesser
        if its ``number`` is less than the other object.
        Otherwise, it's lesser if the objects have the same base model
        and the ``number`` of this object is less than the other's.

        :param other: Any other object.

        :return: ``True`` if this object is lesser.

        :raises TypeError: If the other object is neither a number,
            nor a ``Page`` model.
        """
        if isinstance(other, (float, int)):
            return self.number < other
        if isinstance(other, self.__class__):
            return self.number < other.number
        raise TypeError(
            "'<' not supported between instances of " +
            f"'{self.__class__}' and '{other.__class__}'"
        )

    def __hash__(self) -> int:
        """
        Return the hash of the object.

        :return: An integer hash value.
        """
        # File names appear to be 32 hex digits (e.g. an MD5 digest);
        # in that case the name itself is used as the hash. TODO confirm
        # the naming scheme with the upload code.
        name = path.splitext(self._file_name)[0]
        if len(name) != 32:  # pragma: no cover
            return abs(hash(str(self)))
        return int(name, 16)
# Explicit public API of this models module.
__all__ = [
    'Author', 'Artist', 'Series',
    'Chapter', 'Page', 'Category', 'Alias'
]
|
penv.py | import gym
from multiprocessing import Process, Pipe
def get_local(obs):
    """Return the sender's local view: the image observation zeroed out."""
    image = obs["image"]
    return image * 0.0
def reset(env, conventional, archimedean):
    """Reset *env* and build the initial frame description.

    Returns ``(active, acting, sending, obss, extra)`` where the first
    three items are ``(sender, receiver)`` flag pairs, ``obss`` holds the
    two observations (order depends on *archimedean*), and ``extra`` is 0.
    """
    obs = env.reset()
    sender_is_active = True
    receiver_is_active = not sender_is_active
    # Keep the full (global) observation, then zero the image to obtain
    # the sender's local view.
    globs = obs.copy()
    obs["image"] = obs["image"] * 0.0
    if archimedean:
        obss = (obs, globs)
    else:
        obss = (globs, obs)
    active = (sender_is_active, receiver_is_active)
    acting = (False, receiver_is_active)
    sending = (sender_is_active, False)
    return active, acting, sending, obss, 0
def step(env, conventional, archimedean, action, prev_result):
    """Advance one frame.

    ``prev_result`` is the tuple previously returned by ``reset``/``step``;
    ``prev_result[0][1]`` says whether the receiver was active, i.e.
    whether this frame belongs to the receiver.
    """
    receiver_was_active = prev_result[0][1]
    if receiver_was_active:
        # Receiver's frame: actually step the environment.
        obs, reward, done, _info = env.step(action)
        if done:
            obs = env.reset()
        sender_is_active = True
        # Global copy first, then zero the image for the sender's view.
        globs = obs.copy()
        obs["image"] = obs["image"] * 0.0
        obss = (obs, globs) if archimedean else (globs, obs)
    else:
        # Sender's frame: the environment does not advance.
        reward = 0.0
        done = False
        sender_is_active = False
        obss = prev_result[3]
    receiver_is_active = not sender_is_active
    active = (sender_is_active, receiver_is_active)
    acting = (False, receiver_is_active)
    sending = (sender_is_active, False)
    return active, acting, sending, obss, 0, reward, done
def worker(conn, env, conventional, archimedean):
    """Child-process loop: serve "reset"/"step" commands arriving on *conn*."""
    while True:
        cmd, action, prev_result = conn.recv()
        if cmd == "reset":
            conn.send(reset(env, conventional, archimedean))
        elif cmd == "step":
            conn.send(step(env, conventional, archimedean, action, prev_result))
        else:
            raise NotImplementedError
class ParallelEnv(gym.Env):
    """A concurrent execution of environments in multiple processes.

    The first environment runs in the parent process; every remaining
    environment is served by a dedicated daemon process over a Pipe.
    """

    def __init__(self, env, conventional, archimedean):
        # `env` is a list of environments; env[0] stays in this process.
        assert len(env) >= 1, "No environment given."
        self.env = env
        self.num_procs = len(env)
        self.conventional = conventional
        self.archimedean = archimedean
        # Spaces are assumed identical across all given environments.
        self.observation_space = self.env[0].observation_space
        self.action_space = self.env[0].action_space
        self.locals = []
        self.processes = []
        # NOTE(review): the loop variable shadows the `env` parameter.
        for i, env in enumerate(self.env[1:]):
            local, remote = Pipe()
            self.locals.append(local)
            p = Process(target=worker, args=(remote, env, conventional, archimedean))
            p.daemon = True
            p.start()
            # Close the parent's copy of the remote end.
            remote.close()
            self.processes.append(p)

    def reset(self):
        """Reset every environment; returns per-field tuples across envs."""
        for local in self.locals:
            local.send(("reset", None, None))
        # Run env[0] locally, then collect the workers' results.
        self.prev_results = [reset(self.env[0], self.conventional, self.archimedean)] + [local.recv() for local in self.locals]
        # Transpose the per-env result tuples into per-field tuples.
        return zip(*self.prev_results)

    def step(self, actions):
        """Step every environment with its action; returns per-field tuples."""
        # assumes `actions` is 2-D and indexable as actions[i, 1]
        # (e.g. a numpy array) — TODO confirm with the caller.
        for local, action, prev_result in zip(self.locals, actions[1:, 1], self.prev_results[1:]):
            local.send(("step", action, prev_result))
        self.prev_results = [step(self.env[0], self.conventional, self.archimedean, actions[0, 1], self.prev_results[0])] + [local.recv() for local in self.locals]
        return zip(*self.prev_results)

    def render(self):
        raise NotImplementedError

    def __del__(self):
        # Best-effort cleanup of the worker processes.
        for p in self.processes:
            p.terminate()
|
email.py | # from threading import Thread
# from flask import current_app, render_template
# from flask_mail import Message
# from . import mail
#
#
# def send_async_email(app, msg):
# with app.app_context():
# mail.send(msg)
#
#
# def send_email(to, subject, template, **kwargs):
# app = current_app._get_current_object()
# msg = Message(app.config['FLASKY_MAIL_SUBJECT_PREFIX'] + ' ' + subject,
# sender=app.config['FLASKY_MAIL_SENDER'], recipients=[to])
# msg.body = render_template(template + '.txt', **kwargs)
# msg.html = render_template(template + '.html', **kwargs)
# thr = Thread(target=send_async_email, args=[app, msg])
# thr.start()
# return thr
|
http_event_collector.py | """splunk_http_event_collector.py
Splunk HTTP event submission class
Remember: Friends don't let friends send in non Common Information Model data: http://docs.splunk.com/Documentation/CIM/latest/User/Overview
Please use CIM friendly field names when sending in data.
"""
from __future__ import print_function
from future import standard_library
standard_library.install_aliases()
from builtins import str
from builtins import range
from builtins import object
import requests
import json
import time
import socket
import threading
import queue
__author__ = "george@georgestarcher.com (George Starcher)"
http_event_collector_debug = False
http_event_collector_SSL_verify = False

# Default batch max size to match splunk's default limits for max byte
# See http_input stanza in limits.conf; note in testing I had to limit to 100,000 to avoid http event collector breaking connection
# Auto flush will occur if next event payload will exceed limit
_max_content_bytes = 100000
_number_of_threads = 10


class http_event_collector(object):
    """Batches and submits events to a Splunk HTTP Event Collector endpoint."""

    def __init__(self, token, http_event_server, host="", http_event_port='8088', http_event_server_ssl=True, max_bytes=_max_content_bytes):
        """
        :param token: HEC authorization token.
        :param http_event_server: Hostname or IP of the collector.
        :param host: Value for the event "host" field; defaults to the
            local hostname when empty.
        :param http_event_port: Collector port (default 8088).
        :param http_event_server_ssl: Use https when True.
        :param max_bytes: Max batch size in bytes before an auto flush.
        """
        self.token = token
        self.batchEvents = []
        self.maxByteLength = max_bytes
        self.currentByteLength = 0
        self.flushQueue = queue.Queue(0)
        # Daemon worker threads drain flushQueue and POST the batches.
        for x in range(_number_of_threads):
            t = threading.Thread(target=self.batchThread)
            t.daemon = True
            t.start()
        # Set host to specified value or default to localhostname if no value provided
        if host:
            self.host = host
        else:
            self.host = socket.gethostname()
        # Build and set server_uri for http event collector
        # Defaults to SSL if flag not passed
        # Defaults to port 8088 if port not passed
        if http_event_server_ssl:
            protocol = 'https'
        else:
            protocol = 'http'
        self.server_uri = '%s://%s:%s/services/collector/event' % (protocol, http_event_server, http_event_port)
        if http_event_collector_debug:
            print(self.token)
            print(self.server_uri)

    def sendEvent(self, payload, eventtime=""):
        """Immediately queue a single event for submission and wait for it."""
        # If eventtime in epoch not passed as optional argument and not in
        # payload, use current system time in epoch.
        if not eventtime and 'time' not in payload:
            eventtime = str(int(time.time()))
        # BUG FIX: an explicitly passed eventtime is now applied too, and an
        # empty string is never written into the payload.
        if eventtime:
            payload.update({'time': eventtime})
        # Fill in local hostname if not manually populated
        if 'host' not in payload:
            payload.update({"host": self.host})
        # send event to http event collector
        event = [json.dumps(payload)]
        self.flushQueue.put(event)
        if http_event_collector_debug:
            print("Single Submit: Sticking the event on the queue.")
        self.waitUntilDone()

    def batchEvent(self, payload, eventtime=""):
        """Store the event in a batch, auto-flushing when the batch is full."""
        # Fill in local hostname if not manually populated
        if 'host' not in payload:
            payload.update({"host": self.host})
        # If eventtime in epoch not passed as optional argument and not in
        # payload, use current system time in epoch.
        if not eventtime and 'time' not in payload:
            eventtime = str(int(time.time()))
        if eventtime:
            payload.update({"time": eventtime})
        payloadString = json.dumps(payload)
        payloadLength = len(payloadString)
        if (self.currentByteLength + payloadLength) > self.maxByteLength:
            if http_event_collector_debug:
                print("Auto Flush: Sticking the batch on the queue.")
            self.flushQueue.put(self.batchEvents)
            self.batchEvents = []
            self.currentByteLength = 0
        # BUG FIX: the event that triggered an auto flush was previously
        # dropped; always append it to the (possibly fresh) batch.
        self.currentByteLength += payloadLength
        self.batchEvents.append(payloadString)

    def batchThread(self):
        """Worker loop: POST each queued batch to the collector."""
        while True:
            if http_event_collector_debug:
                print("Events received on thread. Sending to Splunk.")
            payload = " ".join(self.flushQueue.get())
            headers = {'Authorization': 'Splunk ' + self.token}
            requests.post(self.server_uri, data=payload, headers=headers, verify=http_event_collector_SSL_verify)
            self.flushQueue.task_done()

    def waitUntilDone(self):
        """Block until every queued batch has been submitted."""
        self.flushQueue.join()
        return

    def flushBatch(self):
        """Manually flush the pending batch and wait for submission."""
        if http_event_collector_debug:
            print("Manual Flush: Sticking the batch on the queue.")
        self.flushQueue.put(self.batchEvents)
        self.batchEvents = []
        self.currentByteLength = 0
        self.waitUntilDone()
local_job_service.py | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import absolute_import
import logging
import os
import queue
import shutil
import subprocess
import tempfile
import threading
import time
import traceback
from builtins import object
import grpc
from google.protobuf import text_format
from apache_beam.metrics import monitoring_infos
from apache_beam.portability.api import beam_artifact_api_pb2
from apache_beam.portability.api import beam_artifact_api_pb2_grpc
from apache_beam.portability.api import beam_fn_api_pb2_grpc
from apache_beam.portability.api import beam_job_api_pb2
from apache_beam.portability.api import beam_job_api_pb2_grpc
from apache_beam.portability.api import beam_provision_api_pb2
from apache_beam.portability.api import endpoints_pb2
from apache_beam.runners.portability import abstract_job_service
from apache_beam.runners.portability import artifact_service
from apache_beam.runners.portability import fn_api_runner
from apache_beam.utils.thread_pool_executor import UnboundedThreadPoolExecutor
_LOGGER = logging.getLogger(__name__)
class LocalJobServicer(abstract_job_service.AbstractJobServiceServicer):
    """Manages one or more pipelines, possibly concurrently.

    Experimental: No backward compatibility guaranteed.

    Servicer for the Beam Job API.

    This JobService uses a basic local implementation of runner to run the job.
    This JobService is not capable of managing job on remote clusters.

    By default, this JobService executes the job in process but still uses GRPC
    to communicate pipeline and worker state. It can also be configured to use
    inline calls rather than GRPC (for speed) or launch completely separate
    subprocesses for the runner and worker(s).
    """

    def __init__(self, staging_dir=None):
        super(LocalJobServicer, self).__init__()
        # Only delete the staging dir in stop() if we created it ourselves.
        self._cleanup_staging_dir = staging_dir is None
        self._staging_dir = staging_dir or tempfile.mkdtemp()
        self._artifact_service = artifact_service.BeamFilesystemArtifactService(
            self._staging_dir)
        # Populated by start_grpc_server().
        self._artifact_staging_endpoint = None

    def create_beam_job(self, preparation_id, job_name, pipeline, options):
        """Build (but do not start) a BeamJob for the given pipeline."""
        # TODO(angoenka): Pass an appropriate staging_session_token. The token can
        # be obtained in PutArtifactResponse from JobService
        if not self._artifact_staging_endpoint:
            # The front-end didn't try to stage anything, but the worker may
            # request what's here so we should at least store an empty manifest.
            self._artifact_service.CommitManifest(
                beam_artifact_api_pb2.CommitManifestRequest(
                    staging_session_token=preparation_id,
                    manifest=beam_artifact_api_pb2.Manifest()))
        provision_info = fn_api_runner.ExtendedProvisionInfo(
            beam_provision_api_pb2.ProvisionInfo(
                job_id=preparation_id,
                job_name=job_name,
                pipeline_options=options,
                retrieval_token=self._artifact_service.retrieval_token(
                    preparation_id)),
            self._staging_dir)
        return BeamJob(
            preparation_id,
            pipeline,
            options,
            provision_info,
            self._artifact_staging_endpoint)

    def start_grpc_server(self, port=0):
        """Start serving the Job and ArtifactStaging APIs.

        :param port: Port to bind; 0 lets the OS pick a free one.
        :return: The actual bound port.
        """
        self._server = grpc.server(UnboundedThreadPoolExecutor())
        port = self._server.add_insecure_port('localhost:%d' % port)
        # Servicers must be registered before the server is started.
        beam_job_api_pb2_grpc.add_JobServiceServicer_to_server(self, self._server)
        beam_artifact_api_pb2_grpc.add_ArtifactStagingServiceServicer_to_server(
            self._artifact_service, self._server)
        self._artifact_staging_endpoint = endpoints_pb2.ApiServiceDescriptor(
            url='localhost:%d' % port)
        self._server.start()
        _LOGGER.info('Grpc server started on port %s', port)
        return port

    def stop(self, timeout=1):
        """Stop the gRPC server; remove the staging dir if we own it."""
        self._server.stop(timeout)
        if os.path.exists(self._staging_dir) and self._cleanup_staging_dir:
            shutil.rmtree(self._staging_dir, ignore_errors=True)

    def GetJobMetrics(self, request, context=None):
        """Return the user metrics of a job.

        :raises LookupError: If the job id is unknown.
        """
        # self._jobs is maintained by the AbstractJobServiceServicer base.
        if request.job_id not in self._jobs:
            raise LookupError("Job {} does not exist".format(request.job_id))
        result = self._jobs[request.job_id].result
        monitoring_info_list = []
        for mi in result._monitoring_infos_by_stage.values():
            monitoring_info_list.extend(mi)
        # Filter out system metrics
        user_monitoring_info_list = [
            x for x in monitoring_info_list
            if monitoring_infos._is_user_monitoring_info(x) or
            monitoring_infos._is_user_distribution_monitoring_info(x)
        ]
        return beam_job_api_pb2.GetJobMetricsResponse(
            metrics=beam_job_api_pb2.MetricResults(
                committed=user_monitoring_info_list))
class SubprocessSdkWorker(object):
    """Manages a SDK worker implemented as a subprocess communicating over grpc.
    """

    def __init__(self, worker_command_line, control_address, worker_id=None):
        # `worker_command_line` is executed through the shell in run().
        self._worker_command_line = worker_command_line
        self._control_address = control_address
        self._worker_id = worker_id

    def run(self):
        """Launch the worker subprocess and block until it exits.

        :raises RuntimeError: If the subprocess exits with a non-zero code.
        """
        # Stand up a logging service the worker streams its logs to.
        logging_server = grpc.server(UnboundedThreadPoolExecutor())
        logging_port = logging_server.add_insecure_port('[::]:0')
        logging_server.start()
        logging_servicer = BeamFnLoggingServicer()
        beam_fn_api_pb2_grpc.add_BeamFnLoggingServicer_to_server(
            logging_servicer, logging_server)
        # The worker discovers both services through environment variables
        # holding text-format ApiServiceDescriptor protos.
        logging_descriptor = text_format.MessageToString(
            endpoints_pb2.ApiServiceDescriptor(url='localhost:%s' % logging_port))
        control_descriptor = text_format.MessageToString(
            endpoints_pb2.ApiServiceDescriptor(url=self._control_address))
        env_dict = dict(
            os.environ,
            CONTROL_API_SERVICE_DESCRIPTOR=control_descriptor,
            LOGGING_API_SERVICE_DESCRIPTOR=logging_descriptor
        )
        # only add worker_id when it is set.
        if self._worker_id:
            env_dict['WORKER_ID'] = self._worker_id
        with fn_api_runner.SUBPROCESS_LOCK:
            p = subprocess.Popen(
                self._worker_command_line,
                shell=True,
                env=env_dict)
        try:
            p.wait()
            if p.returncode:
                raise RuntimeError(
                    'Worker subprocess exited with return code %s' % p.returncode)
        finally:
            # Tear down the subprocess and the logging server even if
            # wait() is interrupted.
            if p.poll() is None:
                p.kill()
            logging_server.stop(0)
class BeamJob(abstract_job_service.AbstractBeamJob):
    """This class handles running and managing a single pipeline.

    The current state of the pipeline is available as self.state.
    """

    def __init__(self,
                 job_id,
                 pipeline,
                 options,
                 provision_info,
                 artifact_staging_endpoint):
        super(BeamJob, self).__init__(
            job_id, provision_info.provision_info.job_name, pipeline, options)
        self._provision_info = provision_info
        self._artifact_staging_endpoint = artifact_staging_endpoint
        self._state = None
        # Queues that fan state changes / log messages out to the streams.
        self._state_queues = []
        self._log_queues = []
        self.state = beam_job_api_pb2.JobState.STOPPED
        self.daemon = True
        self.result = None

    @property
    def state(self):
        """The current JobState of the pipeline."""
        return self._state

    @state.setter
    def state(self, new_state):
        # Inform consumers of the new state. (Loop variable renamed: it
        # previously shadowed the `queue` module.)
        for consumer in self._state_queues:
            consumer.put(new_state)
        self._state = new_state

    def get_state(self):
        return self.state

    def prepare(self):
        # Nothing to prepare for a local job.
        pass

    def artifact_staging_endpoint(self):
        return self._artifact_staging_endpoint

    def run(self):
        """Start executing the pipeline on a background thread."""
        self.state = beam_job_api_pb2.JobState.STARTING
        self._run_thread = threading.Thread(target=self._run_job)
        self._run_thread.start()

    def _run_job(self):
        # Runs on the background thread started by run().
        self.state = beam_job_api_pb2.JobState.RUNNING
        with JobLogHandler(self._log_queues):
            try:
                result = fn_api_runner.FnApiRunner(
                    provision_info=self._provision_info).run_via_runner_api(
                        self._pipeline_proto)
                _LOGGER.info('Successfully completed job.')
                self.state = beam_job_api_pb2.JobState.DONE
                self.result = result
            except:  # pylint: disable=bare-except
                # BUG FIX: a second call previously logged the `traceback`
                # *module* object; _LOGGER.exception already records the
                # active traceback.
                _LOGGER.exception('Error running pipeline.')
                self.state = beam_job_api_pb2.JobState.FAILED
                raise

    def cancel(self):
        if not self.is_terminal_state(self.state):
            self.state = beam_job_api_pb2.JobState.CANCELLING
            # TODO(robertwb): Actually cancel...
            self.state = beam_job_api_pb2.JobState.CANCELLED

    def get_state_stream(self):
        """Yield the current state followed by every state change."""
        # Register for any new state changes.
        state_queue = queue.Queue()
        self._state_queues.append(state_queue)
        yield self.state
        while True:
            current_state = state_queue.get(block=True)
            yield current_state
            if self.is_terminal_state(current_state):
                break

    def get_message_stream(self):
        """Yield log messages and state changes until a terminal state."""
        # Register for any new messages.
        log_queue = queue.Queue()
        self._log_queues.append(log_queue)
        self._state_queues.append(log_queue)
        current_state = self.state
        yield current_state
        while not self.is_terminal_state(current_state):
            msg = log_queue.get(block=True)
            yield msg
            # State changes arrive on the same queue as plain ints.
            if isinstance(msg, int):
                current_state = msg
class BeamFnLoggingServicer(beam_fn_api_pb2_grpc.BeamFnLoggingServicer):
    """Receives log bundles from SDK workers and forwards them to _LOGGER."""

    def Logging(self, log_bundles, context=None):
        for bundle in log_bundles:
            for entry in bundle.log_entries:
                # Collapse newlines so each worker log is a single line.
                _LOGGER.info('Worker: %s', str(entry).replace('\n', ' '))
        return iter([])
class JobLogHandler(logging.Handler):
    """Captures logs to be returned via the Beam Job API.

    Enabled via the with statement."""

    # Mapping from logging levels to LogEntry levels.
    LOG_LEVEL_MAP = {
        logging.FATAL: beam_job_api_pb2.JobMessage.JOB_MESSAGE_ERROR,
        logging.CRITICAL: beam_job_api_pb2.JobMessage.JOB_MESSAGE_ERROR,
        logging.ERROR: beam_job_api_pb2.JobMessage.JOB_MESSAGE_ERROR,
        logging.WARNING: beam_job_api_pb2.JobMessage.JOB_MESSAGE_WARNING,
        logging.INFO: beam_job_api_pb2.JobMessage.JOB_MESSAGE_BASIC,
        logging.DEBUG: beam_job_api_pb2.JobMessage.JOB_MESSAGE_DEBUG,
    }

    def __init__(self, log_queues):
        super(JobLogHandler, self).__init__()
        self._last_id = 0
        self._logged_thread = None
        self._log_queues = log_queues

    def __enter__(self):
        # Remember the current thread to demultiplex the logs of concurrently
        # running pipelines (as Python log handlers are global).
        self._logged_thread = threading.current_thread()
        logging.getLogger().addHandler(self)

    def __exit__(self, *args):
        self._logged_thread = None
        self.close()

    def _next_id(self):
        # Monotonically increasing message id, returned as a string.
        self._last_id += 1
        return str(self._last_id)

    def emit(self, record):
        # Only forward records emitted from the pipeline's own thread.
        if self._logged_thread is threading.current_thread():
            msg = beam_job_api_pb2.JobMessage(
                message_id=self._next_id(),
                time=time.strftime('%Y-%m-%d %H:%M:%S.',
                                   time.localtime(record.created)),
                # Custom log levels not in the map previously raised
                # KeyError; default them to BASIC.
                importance=self.LOG_LEVEL_MAP.get(
                    record.levelno,
                    beam_job_api_pb2.JobMessage.JOB_MESSAGE_BASIC),
                message_text=self.format(record))
            # Inform all message consumers. (Loop variable renamed: it
            # previously shadowed the `queue` module.)
            for consumer in self._log_queues:
                consumer.put(msg)
|
robot_driver.py | import rospy
import time
import actionlib
import threading
import argparse
from control_msgs.msg import FollowJointTrajectoryAction
from control_msgs.msg import FollowJointTrajectoryFeedback
from sensor_msgs.msg import JointState
from std_msgs.msg import String
from hiwin_robot_interface import HiwinRobotInterface
JOINTS_NAMES = ['joint_1', 'joint_2', 'joint_3',
'joint_4', 'joint_5', 'joint_6']
last_joint_states_lock = threading.Lock()
DEBUG = True # Set True to show debug log, False to hide it.
class HiwinRobotStatesPublisher(object):
    """Reads robot joints' states and publish them on the joint_state topic."""

    def __init__(self, robot_interface):
        # type: (HiwinRobotInterface) -> None
        """Initialize Robot's state publisher.

        :param
            robot_interface: HiwinRobotInterface used to connect to the robot
        """
        self.robot_interface = robot_interface
        self.robot_name = self.robot_interface.name
        self.pub_joint_states = rospy.Publisher('/joint_states',
                                                JointState,
                                                queue_size=100)
        self.ip = None
        self.robot_handler = None
        # Flag polled by the publisher loop; clear it to stop the thread.
        self.__keep_running = False
        self.__thread = None

    def start(self):
        """Begin the thread that runs the self.__run() function."""
        self.__keep_running = True
        self.__thread = threading.Thread(name="HiwinRobotJointStatePublisher",
                                         target=self.__run)
        # Daemon thread: must not keep the process alive on shutdown.
        self.__thread.daemon = True
        self.__thread.start()

    def __run(self):
        """Read the robot's axis, publish on the joint_states topic."""
        global last_joint_states_lock
        while self.__keep_running:
            # NOTE(review): `result` is never checked — `joints` may be
            # invalid when the read fails; confirm the interface contract.
            result, joints = self.robot_interface.get_current_joints()
            msg = JointState()
            msg.header.stamp = rospy.get_rostime()
            # Prefix each joint name with the robot name: "<name>/joint_N".
            msg.name = [self.robot_name+"/".join(["", joint_name]) for joint_name in JOINTS_NAMES]
            msg.position = joints
            self.pub_joint_states.publish(msg)
            rospy.sleep(0.01)
            # NOTE(review): acquiring the lock with an empty body only
            # synchronizes with other holders; purpose unclear — confirm.
            with last_joint_states_lock:
                pass
class HiwinDIOPublisher(object):
    """Publishes the robot's digital inputs and outputs on String topics."""

    def __init__(self, robot_interface):
        # type: (HiwinRobotInterface) -> None
        """Initialize the digital I/O publisher.

        :param
            robot_interface: HiwinRobotInterface used to connect to the robot
        """
        self.robot_interface = robot_interface
        self.robot_name = self.robot_interface.name
        self.pub_dig_in = rospy.Publisher(self.robot_name + '/digital_input',
                                          String, queue_size=10)
        self.pub_dig_out = rospy.Publisher(self.robot_name + '/digital_output',
                                           String, queue_size=10)
        self.ip = None
        self.robot_handler = None
        # Flag polled by both publisher loops; clear it to stop them.
        self.__keep_running = False
        self.__thread_i = None
        self.__thread_o = None

    def start(self):
        """Begin the threads that run __run_i() and __run_o()."""
        self.__keep_running = True
        self.__thread_i = threading.Thread(name="HiwinDIPublisher",
                                           target=self.__run_i)
        self.__thread_i.daemon = True
        self.__thread_i.start()
        self.__thread_o = threading.Thread(name="HiwinDOPublisher",
                                           target=self.__run_o)
        self.__thread_o.daemon = True
        self.__thread_o.start()

    def __run_i(self):
        """Poll the digital inputs and publish them at ~10 Hz."""
        global last_joint_states_lock
        while self.__keep_running:
            inputs = self.robot_interface.get_current_digital_inputs()
            msg = String()
            # Space-separated values, e.g. "1 0 1 " (note trailing space).
            msg.data = "".join([str(i)+" " for i in inputs])
            self.pub_dig_in.publish(msg)
            rospy.sleep(0.1)
            # NOTE(review): lock acquired with empty body; purpose unclear.
            with last_joint_states_lock:
                pass

    def __run_o(self):
        """Poll the digital outputs and publish them at ~10 Hz."""
        global last_joint_states_lock
        while self.__keep_running:
            outputs = self.robot_interface.get_current_digital_outputs()
            msg = String()
            # Space-separated values, e.g. "1 0 1 " (note trailing space).
            msg.data = "".join([str(o)+" " for o in outputs])
            self.pub_dig_out.publish(msg)
            rospy.sleep(0.1)
            # NOTE(review): lock acquired with empty body; purpose unclear.
            with last_joint_states_lock:
                pass
class HiwinDOSetter(object):
    """Applies digital-output commands received on <robot>/set_digital_output."""

    def __init__(self, robot_interface):
        # type: (HiwinRobotInterface) -> None
        """Initialize the digital-output setter.

        :param
            robot_interface: HiwinRobotInterface used to connect to the robot
        """
        self.robot_interface = robot_interface
        self.robot_name = self.robot_interface.name
        self.set_dig_out = rospy.Subscriber(self.robot_name +
                                            '/set_digital_output',
                                            String, callback=self.__set_do,
                                            queue_size=10)
        self.ip = None
        self.robot_handler = None
        self.__keep_running = False
        self.__thread_i = None
        self.__thread_o = None

    def __set_do(self, msg):  # type: (String) -> None
        """Parse "DO_INDEX DO_VALUE" from *msg* and set the output."""
        if not self.robot_interface.is_connected():
            self.robot_interface.reconnect()
        try:
            do_index, do_value = msg.data.split(" ")
            # `int(do_value) == 1` is already a bool; the previous
            # bool(...) wrapper was redundant.
            self.robot_interface.set_io_value(int(do_index), int(do_value) == 1)
        except Exception:
            # BUG FIX: narrowed from a bare `except:` so KeyboardInterrupt
            # and SystemExit are no longer swallowed.
            rospy.logwarn_once("Set DO Message format is wrong, expected 'DO_INDEX DO_VALUE' (e.g. '12 1') but got {}".format(msg.data))
class HiwinRobotTrajectoryFollower(object):
    """Class used to make the robot follow the trajectory given by MoveIt!"""

    # Period (seconds) of the _update timer.
    RATE = 0.02

    def __init__(self, robot_interface):
        # type: (HiwinRobotInterface) -> None
        self.robot_interface = robot_interface
        self.robot_name = self.robot_interface.name
        # Action server receiving FollowJointTrajectory goals from MoveIt!.
        self.server = actionlib.ActionServer(self.robot_name +
                                             "/follow_joint_trajectory",
                                             FollowJointTrajectoryAction,
                                             self.on_goal, self.on_cancel,
                                             auto_start=False)
        rospy.Subscriber("manipulator/taskstart", String,
                         callback=self.taskstart_topic_callback, queue_size=10)
        self.pub_feedback_states = rospy.Publisher(
            '/feedback_states',
            FollowJointTrajectoryFeedback,
            queue_size=10 )
        self.goal_handle = None
        self.start_time = 0
        self.received_trajectory = None  # Store last received trajectory
        self.trajectory_to_execute = None  # Once executed, it will be deleted
        self.target = None
        # Periodic timer driving execution and feedback publishing.
        self.update_timer = rospy.Timer(rospy.Duration(self.RATE),
                                        self._update)
        self.__keep_running = False
        self.__thread = None

    def start(self):
        """Begin the thread that runs the self.server.start function."""
        self.__keep_running = True
        self.__thread = threading.Thread(name="HiwinRobotControlNode",
                                         target=self.server.start)
        self.__thread.daemon = True
        self.__thread.start()
        # NOTE(review): __keep_running is cleared immediately after
        # starting the thread; nothing reads it here — confirm intent.
        self.__keep_running = False

    def on_goal(self, goal_handle):
        """When a trajectory has been received from move_group, execute it.

        Main idea: the trajectory is composed by many points. We move point to
        point (PTP) from each point to the next one. Note: as today, we
        do not take care of the time execution of the trajectory. For
        example, we do not care about the velocity of the trajectory that
        has been received. In future it is needed to add this possibility.
        """
        # Check if there is another goal on the go
        if self.goal_handle:
            self.on_cancel(None)
        # Communicate that the goal has been accepted
        self.goal_handle = goal_handle
        self.goal_handle.set_accepted()
        rospy.loginfo("Trajectory received and accepted")
        # Read the trajectory
        self.start_time = rospy.Time.now()
        self.received_trajectory = goal_handle.get_goal().trajectory
        self.trajectory_to_execute = goal_handle.get_goal().trajectory
        # Print out the target final point
        self.target = self.received_trajectory.points[-1].positions
        rospy.loginfo("The trajectory has a total of {} points."
                      .format(len(self.received_trajectory.points)))

    def on_cancel(self, _):
        """When a trajectory is canceled, stop the robot."""
        rospy.logwarn("Trajectory was canceled before reaching goal.")
        # Stop the Robot in the current position
        self.robot_interface.stop_motion()
        self.trajectory_to_execute = None
        self.target = None
        self.goal_handle.set_canceled()
        self.goal_handle = None

    def _update(self, event):
        """Manage trajectory goal_handle and publish the feedback"""
        # Publish the current feedback states of the robot
        self.publish_feedback()
        # If there is no goal pending, do nothing.
        if not self.goal_handle:
            return
        # If the goal has been reached, set goal successfully reached.
        if self.target is not None:
            if self.goal_reached(self.target):
                rospy.loginfo("Trajectory completed. Goal reached!")
                self.goal_handle.set_succeeded()
                self.goal_handle = None
                self.trajectory_to_execute = None
                self.target = None
                return
        # If there's no position trajectory to be executed, do nothing.
        if self.trajectory_to_execute is None:
            return
        # If a position trajectory need to be executed, execute it:
        # for each point in the trajectory execute Point-To-Point movement
        # (note that the first point is always the starting point)
        for point in self.trajectory_to_execute.points[1:]:
            # Read the target values of the joints
            target_joints = [joint for joint in point.positions]
            # Make sure the goal has not been canceled meanwhile
            if not self.goal_handle:
                break
            # Move to the target joints
            self.robot_interface.move_ptp(target_axis_values=target_joints)
        # The trajectory has been executed, forget it.
        self.trajectory_to_execute = None

    def goal_reached(self, target_joint_states):
        # type: (list[float]) -> bool
        """Returns True if the robot has achieved the goal, False otherwise.

        To reach the goal means:
            1. Be near the goal
            2. Not moving (being in IDLE state)

        :param
            target_joint_states: The goal states (in radians)
        """
        state_reached = self.robot_interface.is_in_state(target_joint_states)
        not_moving = self.robot_interface.is_in_idle()
        return state_reached and not_moving

    def publish_feedback(self):
        """Publishes the position feedback of the robot.

        The feedback is the difference between the desired states and the
        current ones.
        """
        # If there is no trajectory, there is nothing to do
        if self.received_trajectory is None:
            return
        # Get the current states of the joints
        success, current_joints_states = \
            self.robot_interface.get_current_joints()
        if not success:  # Couldn't get the current joints' state
            rospy.logwarn("Could not publish on feedback_states:"
                          "current states are unknown. Assuming all to 0.")
            current_joints_states = [0 for _ in range(6)]
        # Get the desired position. What should the robot joints be right now?
        time_from_start = rospy.Time.now() - self.start_time
        # Find which point represents the current desired position
        # (first point whose time_from_start has not yet elapsed; falls
        # back to the last point once the trajectory time is exceeded).
        for point in self.received_trajectory.points:
            if time_from_start > point.time_from_start:
                continue
            break
        desired_point = point
        # Make sure the length of the current states and the target states
        # is exactly the length of the joints
        assert len(JOINTS_NAMES) == len(current_joints_states) and \
            len(JOINTS_NAMES) == len(desired_point.positions), \
            "Target and current states have different length. " \
            "Expected {} joints, got {} (target) and {} (current)".format(
                len(JOINTS_NAMES), len(desired_point.positions),
                len(current_joints_states)
            )
        # Create the message to be published
        msg = FollowJointTrajectoryFeedback()
        msg.header.frame_id = ""
        msg.header.stamp = rospy.get_rostime()
        msg.joint_names = JOINTS_NAMES
        # Set the goal states
        msg.desired.positions = desired_point.positions
        msg.desired.velocities = []
        msg.desired.accelerations = []
        msg.desired.effort = []
        msg.desired.time_from_start = desired_point.time_from_start
        # Set the actual states
        msg.actual.positions = current_joints_states
        msg.actual.velocities = []
        msg.actual.accelerations = []
        msg.actual.effort = []
        msg.actual.time_from_start = desired_point.time_from_start
        # Calculate the error (desired - actual; the non-position zips
        # are over empty lists and therefore produce empty errors).
        position_error = [goal - current for goal, current in zip(
            msg.desired.positions, msg.actual.positions
        )]
        velocity_error = [goal - current for goal, current in zip(
            msg.desired.velocities, msg.actual.velocities
        )]
        acceleration_error = [goal - current for goal, current in zip(
            msg.desired.accelerations,
            msg.actual.accelerations
        )]
        effort_error = [goal - current for goal, current in zip(
            msg.desired.effort, msg.actual.effort
        )]
        # Set the errors
        msg.error.positions = position_error
        msg.error.velocities = velocity_error
        msg.error.accelerations = acceleration_error
        msg.error.effort = effort_error
        msg.error.time_from_start = desired_point.time_from_start
        # Publish the message on /feedback_states topic
        self.pub_feedback_states.publish(msg)

    def taskstart_topic_callback(self, msg):  # type: (String) -> None
        """Stop the running task and start the task named in *msg*."""
        if not self.robot_interface.is_connected():
            self.robot_interface.reconnect(trials=1, sec_between_trials=1)
        self.robot_interface.stop_task()
        if not self.robot_interface.is_connected():
            self.robot_interface.reconnect(trials=1, sec_between_trials=1)
        # NOTE(review): the `== 0` comparison result is discarded — the
        # return code of start_task is not actually checked; confirm.
        self.robot_interface.start_task(msg.data) == 0
        return
def myhook():
    """rospy shutdown hook: release the robot connection, then log the exit."""
    robot_ctr.close()
    print("shutdown time!")
if __name__ == '__main__':
    # ------------------------------------------------------------------
    # Command-line arguments
    # ------------------------------------------------------------------
    arg_parser = argparse.ArgumentParser("Driver Node")
    arg_parser.add_argument("--robot_ip", help="IP addr of the robot",
                            type=str)
    arg_parser.add_argument("--robot_name", help="Name of the robot", type=str)
    # BUGFIX: this flag used `type=bool`, which is broken with argparse --
    # bool("0") is True, so `--control_mode 0` still enabled control.
    # Parse it as an int instead (1 = control the robot, 0 = monitor only).
    arg_parser.add_argument("--control_mode", help="Default is 1, set it to 0 if you do not want to control the robot, but only to monitor its state.",
                            type=int, default=1, required=False)
    # NOTE: ERROR is also accepted below, even though the help text lists two levels.
    arg_parser.add_argument("--log_level", help="Logging level: INFO, DEBUG",
                            type=str, default="INFO", required=False)
    # Positional placeholders that swallow the ROS remapping arguments
    # (__name:=..., __log:=...) that roslaunch appends to the command line.
    arg_parser.add_argument("__name")
    arg_parser.add_argument("__log")
    args = arg_parser.parse_args()
    # Extract the necessary arguments
    robot_ip = args.robot_ip
    robot_name = args.robot_name
    control_mode = int(args.control_mode)
    # Map the textual log level onto the rospy constant; default to INFO.
    if args.log_level == "DEBUG":
        log_level = rospy.DEBUG
    elif args.log_level == "ERROR":
        log_level = rospy.ERROR
    else:
        log_level = rospy.INFO
    # ------------------------------------------------------------------
    # ROS node and robot connections
    # ------------------------------------------------------------------
    rospy.init_node('hiwin_robot_sdk_'+robot_name,
                    log_level=log_level,
                    disable_signals=True)
    if rospy.get_param("use_sim_time", False):
        rospy.logwarn("use_sim_time is set!!!")
    # Control connection -- shared by the action server, the DO setter and
    # the shutdown hook (myhook closes robot_ctr).
    robot_ctr = HiwinRobotInterface(robot_ip=robot_ip, connection_level=control_mode,
                                    name=robot_name)
    robot_ctr.connect()
    # Highest priority: start the controllers for the robot.
    # (If the controllers don't start in time, MoveIt! won't work.)
    # Start arm controller
    arm_action_server = HiwinRobotTrajectoryFollower(robot_ctr)
    arm_action_server.start()
    set_do_thread = HiwinDOSetter(robot_ctr)
    # Separate monitor-only connection (connection_level=0) for the
    # state/IO publishers, so monitoring never competes for control.
    robot_mtr = HiwinRobotInterface(robot_ip=robot_ip, connection_level=0,
                                    name=robot_name)
    # Start joint states publisher for the robot
    hiwin_states_publisher = HiwinRobotStatesPublisher(robot_mtr)
    hiwin_states_publisher.start()
    # Start Digital I/O publisher for the robot
    hiwin_dio_publisher = HiwinDIOPublisher(robot_mtr)
    hiwin_dio_publisher.start()
    try:
        # Close the robot connection cleanly when ROS shuts down.
        rospy.on_shutdown(myhook)
        rospy.spin()
    except KeyboardInterrupt:
        pass
# NOTE(review): the three lines below are dataset-viewer boilerplate that was
# accidentally captured when this file was extracted; they are not part of the
# driver source. Commented out so the module remains valid Python.
# Subsets and Splits
# No community queries yet
# The top public SQL queries from the community will appear here once available.