code stringlengths 3 1.05M | repo_name stringlengths 5 104 | path stringlengths 4 251 | language stringclasses 1 value | license stringclasses 15 values | size int64 3 1.05M |
|---|---|---|---|---|---|
from test.parser.pattern.matching.base import PatternMatcherBaseClass
class PatternMatcherISetTests(PatternMatcherBaseClass):
    """Pattern-matcher tests for the <iset> (inline set) pattern element."""

    def _assert_matches(self, sentence, expected_template):
        # Match a sentence against the graph and verify it resolved to the
        # pattern carrying the expected template word.
        match_context = self.match_sentence(sentence, topic="*", that="*")
        self.assertIsNotNone(match_context)
        self.assertIsNotNone(match_context.template_node())
        self.assertEqual(expected_template, match_context.template_node().template.word)

    def test_basic_iset_match(self):
        # A single <iset> should match any one of its alternatives.
        self.add_pattern_to_graph(pattern="I AM A <iset>MAN, WOMAN</iset>", topic="*", that="*", template="1")
        self._assert_matches("I AM A MAN", "1")
        self._assert_matches("I AM A WOMAN", "1")

    def test_multiple_iset_match(self):
        # Two patterns sharing an identical <iset> must still resolve to the
        # template of the pattern whose trailing words match the sentence.
        self.add_pattern_to_graph(pattern="I LIKE TO <iset>PARTY, DRINK, SLEEP</iset> DURING THE DAY", topic="*", that="*", template="1")
        self.add_pattern_to_graph(pattern="I LIKE TO <iset>PARTY, DRINK, SLEEP</iset> DURING THE NIGHT", topic="*", that="*", template="2")
        self._assert_matches("I LIKE TO PARTY DURING THE DAY", "1")
        self._assert_matches("I LIKE TO PARTY DURING THE NIGHT", "2")
| dkamotsky/program-y | src/test/parser/pattern/matching/test_iset.py | Python | mit | 1,582 |
__author__ = 'jdaniel'
from Algorithms import serial
from GaiaSolve.algorithm import Algorithm
class SERIAL(Algorithm):
    """Wrapper exposing the serial NSGA-II implementation through the
    GaiaSolve :class:`Algorithm` interface."""

    def __init__(self):
        """
        Wrapped version of the serial NSGA-2 algorithm
        :return: None
        """
        super(SERIAL, self).__init__()

    def run(self):
        """
        Run the algorithm.

        Registers the model's design variables, optional constraints and
        objectives with the serial optimizer, forwards any user options,
        then executes it.

        :return: tuple (soln, meta, gen, arch, hist) as produced by the
                 underlying serial optimizer's run()
        """
        opt = serial.Serial(self.objective_function)
        # Add the design variables
        for i in xrange(self.model.number_of_design_variables):
            name = self.model.decision_variable_names[i]
            lower = self.model.lower_bound[i]
            upper = self.model.upper_bound[i]
            opt.register_variable(name, lower, upper)
        # Equality ('e') and inequality ('i') constraints are optional.
        if self.model.has_equality_constraints:
            for i in xrange(self.model.number_of_equality_constraints):
                name = self.model.equality_constraint_variable_names[i]
                opt.register_constraint(name, 'e')
        if self.model.has_inequality_constraints:
            for i in xrange(self.model.number_of_inequality_constraints):
                name = self.model.inequality_constraint_variable_names[i]
                opt.register_constraint(name, 'i')
        for i in xrange(self.model.number_of_objectives):
            name = self.model.objective_variable_names[i]
            opt.register_objective(name)
        # Forward user-supplied solver options.  (was:
        # zip(self.options.keys(), self.options.values()) -- .items()
        # yields the same pairs without building two intermediate lists)
        for key, value in self.options.items():
            opt.set_options(key, value)
        soln, meta, gen, arch, hist = opt.run()
        return soln, meta, gen, arch, hist

    def objective_function(self, x):
        """
        Objective function for evaluating the model.

        :param x: design-variable vector to assign to the model
        :return: tuple (f, h, g) of objective values, equality-constraint
                 values and inequality-constraint values
        """
        self.model.x = x
        f = self.model.obj
        h = self.model.eqcon
        g = self.model.neqcon
        return f, h, g
if __name__ == '__main__':
    # Simple smoke check: construct the wrapper and report its class.
    opt = SERIAL()
    print (opt.__class__)
| jldaniel/Gaia | GaiaSolve/Algorithms/_serial.py | Python | mit | 1,949 |
"""Implements nose test program and collector.
"""
from __future__ import generators
import logging
import os
import sys
import time
import unittest
from nose.config import Config, all_config_files
from nose.loader import defaultTestLoader
from nose.plugins.manager import PluginManager, DefaultPluginManager, \
RestrictedPluginManager
from nose.result import TextTestResult
from nose.suite import FinalizingSuiteWrapper
from nose.util import isclass, tolist
# Module-level logger for nose's core machinery.
log = logging.getLogger('nose.core')
# True when running on Python 2.4 or newer (kept for backwards compatibility).
compat_24 = sys.version_info >= (2, 4)
__all__ = ['TestProgram', 'main', 'run', 'run_exit', 'runmodule', 'collector',
           'TextTestRunner']
class TextTestRunner(unittest.TextTestRunner):
    """Test runner that uses nose's TextTestResult to enable errorClasses,
    as well as providing hooks for plugins to override or replace the test
    output stream, results, and the test case itself.
    """
    def __init__(self, stream=sys.stderr, descriptions=1, verbosity=1,
                 config=None):
        # Fall back to a default Config so the runner is usable stand-alone.
        self.config = config if config is not None else Config()
        unittest.TextTestRunner.__init__(self, stream, descriptions, verbosity)

    def _makeResult(self):
        # Nose's result class understands errorClasses and plugin hooks.
        return TextTestResult(self.stream, self.descriptions, self.verbosity,
                              self.config)

    def run(self, test):
        """Overrides to provide plugin hooks and defer all output to
        the test result class.
        """
        # Give plugins a chance to decorate or replace the test...
        plug_test = self.config.plugins.prepareTest(test)
        if plug_test is not None:
            test = plug_test
        # ...and to capture or decorate the output stream.
        plug_stream = self.config.plugins.setOutputStream(self.stream)
        if plug_stream is not None:
            self.stream = plug_stream
        result = self._makeResult()
        started_at = time.time()
        test(result)
        finished_at = time.time()
        result.printErrors()
        result.printSummary(started_at, finished_at)
        self.config.plugins.finalize(result)
        return result
class TestProgram(unittest.TestProgram):
    """Collect and run tests, returning success or failure.
    The arguments to TestProgram() are the same as to
    :func:`main()` and :func:`run()`:
    * module: All tests are in this module (default: None)
    * defaultTest: Tests to load (default: '.')
    * argv: Command line arguments (default: None; sys.argv is read)
    * testRunner: Test runner instance (default: None)
    * testLoader: Test loader instance (default: None)
    * env: Environment; ignored if config is provided (default: None;
      os.environ is read)
    * config: :class:`nose.config.Config` instance (default: None)
    * suite: Suite or list of tests to run (default: None). Passing a
      suite or lists of tests will bypass all test discovery and
      loading. *ALSO NOTE* that if you pass a unittest.TestSuite
      instance as the suite, context fixtures at the class, module and
      package level will not be used, and many plugin hooks will not
      be called. If you want normal nose behavior, either pass a list
      of tests, or a fully-configured :class:`nose.suite.ContextSuite`.
    * exit: Exit after running tests and printing report (default: True)
    * plugins: List of plugins to use; ignored if config is provided
      (default: load plugins with DefaultPluginManager)
    * addplugins: List of **extra** plugins to use. Pass a list of plugin
      instances in this argument to make custom plugins available while
      still using the DefaultPluginManager.
    """
    # Default verbosity; overridden by configuration during parseArgs().
    verbosity = 1
    def __init__(self, module=None, defaultTest='.', argv=None,
                 testRunner=None, testLoader=None, env=None, config=None,
                 suite=None, exit=True, plugins=None, addplugins=None):
        if env is None:
            env = os.environ
        if config is None:
            config = self.makeConfig(env, plugins)
        if addplugins:
            config.plugins.addPlugins(addplugins)
        self.config = config
        self.suite = suite
        self.exit = exit
        extra_args = {}
        # unittest.TestProgram grew an 'exit' argument in Python 2.7;
        # only forward it where supported.
        if sys.version_info[0:2] >= (2,7):
            extra_args['exit'] = exit
        unittest.TestProgram.__init__(
            self, module=module, defaultTest=defaultTest,
            argv=argv, testRunner=testRunner, testLoader=testLoader,
            **extra_args)
    def makeConfig(self, env, plugins=None):
        """Load a Config, pre-filled with user config files if any are
        found.
        """
        cfg_files = all_config_files()
        # An explicit plugin list bypasses default plugin discovery.
        if plugins:
            manager = PluginManager(plugins=plugins)
        else:
            manager = DefaultPluginManager()
        return Config(
            env=env, files=cfg_files, plugins=manager)
    def parseArgs(self, argv):
        """Parse argv and env and configure running environment.
        """
        self.config.configure(argv, doc=self.usage())
        log.debug("configured %s", self.config)
        # quick outs: version, plugins (optparse would have already
        # caught and exited on help)
        if self.config.options.version:
            from nose import __version__
            # restore stdout in case a plugin already captured it
            sys.stdout = sys.__stdout__
            print "%s version %s" % (os.path.basename(sys.argv[0]), __version__)
            sys.exit(0)
        if self.config.options.showPlugins:
            self.showPlugins()
            sys.exit(0)
        # Resolve the test loader: default, instantiate a class, then let
        # plugins wrap or replace it.
        if self.testLoader is None:
            self.testLoader = defaultTestLoader(config=self.config)
        elif isclass(self.testLoader):
            self.testLoader = self.testLoader(config=self.config)
        plug_loader = self.config.plugins.prepareTestLoader(self.testLoader)
        if plug_loader is not None:
            self.testLoader = plug_loader
        log.debug("test loader is %s", self.testLoader)
        # FIXME if self.module is a string, add it to self.testNames? not sure
        if self.config.testNames:
            self.testNames = self.config.testNames
        else:
            self.testNames = tolist(self.defaultTest)
        log.debug('defaultTest %s', self.defaultTest)
        log.debug('Test names are %s', self.testNames)
        if self.config.workingDir is not None:
            os.chdir(self.config.workingDir)
        self.createTests()
    def createTests(self):
        """Create the tests to run. If a self.suite
        is set, then that suite will be used. Otherwise, tests will be
        loaded from the given test names (self.testNames) using the
        test loader.
        """
        log.debug("createTests called with %s", self.suite)
        if self.suite is not None:
            # We were given an explicit suite to run. Make sure it's
            # loaded and wrapped correctly.
            self.test = self.testLoader.suiteClass(self.suite)
        else:
            self.test = self.testLoader.loadTestsFromNames(self.testNames)
    def runTests(self):
        """Run Tests. Returns true on success, false on failure, and sets
        self.success to the same value.
        """
        log.debug("runTests called")
        if self.testRunner is None:
            self.testRunner = TextTestRunner(stream=self.config.stream,
                                             verbosity=self.config.verbosity,
                                             config=self.config)
        # Plugins may substitute their own runner.
        plug_runner = self.config.plugins.prepareTestRunner(self.testRunner)
        if plug_runner is not None:
            self.testRunner = plug_runner
        result = self.testRunner.run(self.test)
        self.success = result.wasSuccessful()
        if self.exit:
            # Exit status 0 on success, 1 on failure.
            sys.exit(not self.success)
        return self.success
    def showPlugins(self):
        """Print list of available plugins.
        """
        import textwrap
        # Minimal optparse-parser stand-in that just records added options
        # so their help text can be printed below.
        class DummyParser:
            def __init__(self):
                self.options = []
            def add_option(self, *arg, **kw):
                self.options.append((arg, kw.pop('help', '')))
        v = self.config.verbosity
        self.config.plugins.sort()
        for p in self.config.plugins:
            print "Plugin %s" % p.name
            if v >= 2:
                print " score: %s" % p.score
                print '\n'.join(textwrap.wrap(p.help().strip(),
                                              initial_indent=' ',
                                              subsequent_indent=' '))
                if v >= 3:
                    print
                    print " Options:"
                    parser = DummyParser()
                    p.addOptions(parser)
                    for opts, help in parser.options:
                        print ' %s' % (', '.join(opts))
                        if help:
                            print '\n'.join(
                                textwrap.wrap(help.strip(),
                                              initial_indent=' ',
                                              subsequent_indent=' '))
                print
    def usage(cls):
        """Return the usage text, reading it from usage.txt next to this
        module (or from inside the zipfile when nose is zip-imported)."""
        import nose
        if hasattr(nose, '__loader__'):
            ld = nose.__loader__
            if hasattr(ld, 'zipfile'):
                # nose was imported from a zipfile
                return ld.get_data(
                    os.path.join(ld.prefix, 'nose', 'usage.txt'))
        return open(os.path.join(
            os.path.dirname(__file__), 'usage.txt'), 'r').read()
    usage = classmethod(usage)
# backwards compatibility: nose historically exposed these entry points
# as aliases of TestProgram
run_exit = main = TestProgram
def run(*arg, **kw):
    """Collect and run tests, returning success or failure.

    Accepts the same arguments as :func:`main` / TestProgram
    (module, defaultTest, argv, testRunner, testLoader, env, config,
    suite, plugins, addplugins -- see TestProgram for their meaning),
    except that ``exit`` is always forced to False so the calling
    process keeps running after the tests finish.

    Returns True when the test run succeeded, False otherwise.
    """
    # Never let TestProgram call sys.exit(); report success instead.
    kw['exit'] = False
    program = TestProgram(*arg, **kw)
    return program.success
def runmodule(name='__main__', **kw):
    """Collect and run the tests of a single module (default: __main__).

    Extra keyword arguments are forwarded unchanged to TestProgram.
    """
    main(defaultTest=name, **kw)
def collector():
    """TestSuite replacement entry point. Use anywhere you might use a
    unittest.TestSuite. By default the collector loads options from all
    config files and runs loader.loadTestsFromNames() on the configured
    testNames, falling back to '.' when none are configured.
    """
    # Plugins implementing any of these hooks are disabled: an external
    # runner (e.g. setuptools) drives the tests, so the hooks cannot run.
    # finalize() is not called either, but plugins that use it stay
    # enabled, because capture needs it.
    incompatible = ('report', 'prepareTest',
                    'prepareTestLoader', 'prepareTestRunner',
                    'setOutputStream')
    manager = RestrictedPluginManager(exclude=incompatible)
    conf = Config(files=all_config_files(), plugins=manager)
    conf.configure(argv=['collector'])
    loader = defaultTestLoader(conf)
    names = conf.testNames if conf.testNames else ('.',)
    suite = loader.loadTestsFromNames(names)
    return FinalizingSuiteWrapper(suite, manager.finalize)
if __name__ == '__main__':
    # Running `python core.py` behaves like the `nosetests` script.
    main()
| jokajak/itweb | data/env/lib/python2.6/site-packages/nose-0.11.4-py2.6.egg/nose/core.py | Python | gpl-3.0 | 12,663 |
#!/usr/bin/python
"""
Delete Snapshots: Script to delete system snapshots.
This script using the XMLRPC APIs will connect to the Satellite and
list or delete system snapshots based on the parameters given by the user.
Copyright (c) 2009--2015 Red Hat, Inc. Distributed under GPL.
Author: Brad Buckingham <bbuckingham@redhat.com>
"""
import os
import sys
import xmlrpclib
from time import strptime
from datetime import datetime
# Make the Spacewalk/RHN python packages importable when the script is
# run on a standard Satellite install.
_topdir = '/usr/share/rhn'
if _topdir not in sys.path:
    sys.path.append(_topdir)
from optparse import OptionParser, Option
from spacewalk.common.cli import getUsernamePassword, xmlrpc_login, xmlrpc_logout
# XML-RPC proxy to the Satellite server; created in main().
client = None
# Command-line options accepted by this script (optparse Option objects).
options_table = [
    Option("-v", "--verbose", action="count",
           help="Increase verbosity"),
    Option("-u", "--username", action="store",
           help="Username"),
    Option("-p", "--password", action="store",
           help="Password"),
    Option("-d", "--delete", action="count",
           help="Delete snapshots."),
    Option("-l", "--list", action="count",
           help="List snapshot summary."),
    Option("-L", "--long-list", action="count",
           help="Display comprehensive snapshot list."),
    Option("-a", "--all", action="count",
           help="Include all snapshots based on criteria provided."),
    Option("--start-date", action="store",
           help="Include only snapshots taken on or after this date. Must be in the format 'YYYYMMDDHH24MISS'."),
    Option("--end-date", action="store",
           help="Include only snapshots taken on or before this date. Must be in the format 'YYYYMMDDHH24MISS'."),
    Option("--satellite", action="store",
           help="Server."),
    Option("--system-id", action="append",
           help="System Id."),
    Option("--snapshot-id", action="append",
           help="Snapshot Id."),
    ]
# Parsed option values; filled in by main() and read by every helper.
options = None
def main():
    """Entry point: parse options, log in to the Satellite, dispatch to the
    list/delete helper matching the given criteria, then log out."""
    global client, options
    parser = OptionParser(option_list=options_table)
    (options, _args) = parser.parse_args()
    processCommandLine()
    satellite_url = "http://%s/rpc/api" % options.satellite
    if options.verbose:
        print "start date=", options.start_date
        print "end date=", options.end_date
        print "connecting to %s" % satellite_url
    client = xmlrpclib.Server(satellite_url, verbose=0)
    username, password = getUsernamePassword(options.username,
                                             options.password)
    sessionKey = xmlrpc_login(client, username, password, options.verbose)
    # Dispatch by selection criteria: --all, --system-id or --snapshot-id,
    # optionally narrowed by --start-date / --end-date.
    if options.all:
        if options.start_date and options.end_date:
            deleteAllBetweenDates(sessionKey, options.start_date,
                                  options.end_date)
        elif options.start_date:
            deleteAllAfterDate(sessionKey, options.start_date)
        else:
            deleteAll(sessionKey)
    elif options.system_id:
        if options.start_date and options.end_date:
            deleteBySystemBetweenDates(sessionKey, options.system_id,
                                       options.start_date, options.end_date)
        elif options.start_date:
            deleteBySystemAfterDate(sessionKey, options.system_id,
                                    options.start_date)
        else:
            deleteBySystem(sessionKey, options.system_id)
    elif options.snapshot_id:
        deleteBySnapshotId(sessionKey, options.snapshot_id)
    if options.verbose:
        print "Delete Snapshots Completed successfully"
    xmlrpc_logout(client, sessionKey, options.verbose)
def deleteAllBetweenDates(sessionKey, startDate, endDate):
"""
Delete all snapshots where the snapshot was created either on or between
the dates provided.
"""
if options.verbose:
print "...executing deleteAllBetweenDates..."
systems = client.system.listSystems(sessionKey)
for system in systems:
snapshots = client.system.provisioning.snapshot.listSnapshots(
sessionKey, system.get('id'), {"startDate": startDate,
"endDate": endDate})
if options.list:
listSnapshots(system.get('id'), snapshots)
elif options.long_list:
listSnapshotsLong(system.get('id'), snapshots)
else:
client.system.provisioning.snapshot.deleteSnapshots(sessionKey,
{"startDate": startDate, "endDate": endDate})
def deleteAllAfterDate(sessionKey, startDate):
"""
Delete all snapshots where the snapshot was created either on or after
the date provided.
"""
if options.verbose:
print "...executing deleteAllAfterDate..."
systems = client.system.listSystems(sessionKey)
for system in systems:
snapshots = client.system.provisioning.snapshot.listSnapshots(
sessionKey, system.get('id'), {"startDate": startDate})
if options.list:
listSnapshots(system.get('id'), snapshots)
elif options.long_list:
listSnapshotsLong(system.get('id'), snapshots)
else:
client.system.provisioning.snapshot.deleteSnapshots(sessionKey,
{"startDate": startDate})
def deleteAll(sessionKey):
"""
Delete all snapshots across all systems that the user has access to.
"""
if options.verbose:
print "...executing deleteAll..."
systems = client.system.listSystems(sessionKey)
for system in systems:
snapshots = client.system.provisioning.snapshot.listSnapshots(
sessionKey, system.get('id'), {})
if options.list:
listSnapshots(system.get('id'), snapshots)
elif options.long_list:
listSnapshotsLong(system.get('id'), snapshots)
else:
client.system.provisioning.snapshot.deleteSnapshots(sessionKey,
{})
def deleteBySystemBetweenDates(sessionKey, systemIds, startDate, endDate):
    """
    Delete the snapshots for the systems provided where the snapshot was
    created either on or between the dates provided.

    In --list / --long-list mode, only print the matching snapshots;
    nothing is deleted.
    """
    if options.verbose:
        print "...executing deleteBySystemBetweenDates..."
    for systemId in systemIds:
        # --system-id values arrive as strings from optparse.
        systemId = int(systemId)
        try:
            snapshots = client.system.provisioning.snapshot.listSnapshots(
                sessionKey, systemId, {"startDate": startDate,
                                       "endDate": endDate})
            if options.list:
                listSnapshots(systemId, snapshots)
            elif options.long_list:
                listSnapshotsLong(systemId, snapshots)
            else:
                client.system.provisioning.snapshot.deleteSnapshots(
                    sessionKey, systemId,
                    {"startDate": startDate, "endDate": endDate})
        except xmlrpclib.Fault, e:
            # print an error and go to the next system
            sys.stderr.write("Error: %s\n" % e.faultString)
def deleteBySystemAfterDate(sessionKey, systemIds, startDate):
    """
    Delete the snapshots for the systems provided where the snapshot was
    created either on or after the date provided.

    In --list / --long-list mode, only print the matching snapshots;
    nothing is deleted.
    """
    if options.verbose:
        print "...executing deleteBySystemAfterDate..."
    for systemId in systemIds:
        # --system-id values arrive as strings from optparse.
        systemId = int(systemId)
        try:
            snapshots = client.system.provisioning.snapshot.listSnapshots(
                sessionKey, systemId, {"startDate": startDate})
            if options.list:
                listSnapshots(systemId, snapshots)
            elif options.long_list:
                listSnapshotsLong(systemId, snapshots)
            else:
                client.system.provisioning.snapshot.deleteSnapshots(
                    sessionKey, systemId, {"startDate": startDate})
        except xmlrpclib.Fault, e:
            # print an error and go to the next system
            sys.stderr.write("Error: %s\n" % e.faultString)
def deleteBySystem(sessionKey, systemIds):
    """
    Delete all snapshots for the systems provided.

    In --list / --long-list mode, only print each system's snapshots;
    nothing is deleted.
    """
    if options.verbose:
        print "...executing deleteBySystem..."
    for systemId in systemIds:
        # --system-id values arrive as strings from optparse.
        systemId = int(systemId)
        try:
            snapshots = client.system.provisioning.snapshot.listSnapshots(
                sessionKey, systemId, {})
            if options.list:
                listSnapshots(systemId, snapshots)
            elif options.long_list:
                listSnapshotsLong(systemId, snapshots)
            else:
                client.system.provisioning.snapshot.deleteSnapshots(
                    sessionKey, systemId, {})
        except xmlrpclib.Fault, e:
            # print an error and go to the next system
            sys.stderr.write("Error: %s\n" % e.faultString)
def deleteBySnapshotId(sessionKey, snapshotIds):
    """
    Delete the list of snapshots provided. If the user does not have
    access to one or more of those snapshots, they will be ignored.
    """
    if options.verbose:
        print "...executing deleteBySnapshotId..."
    for snapshotId in snapshotIds:
        try:
            if options.list:
                # List mode: only the id is available without a system id.
                print "snapshotId: ", snapshotId
            else:
                client.system.provisioning.snapshot.deleteSnapshot(sessionKey,
                                                                   int(snapshotId))
        except xmlrpclib.Fault, e:
            # print an error and go to the next system
            sys.stderr.write("Error: %s\n" % e.faultString)
def listSnapshots(systemId, snapshots):
    """
    List to stdout the snapshot summaries for the system provided.
    This will include:
    system id, # snapshots, date of oldest snapshot, date of newest snapshot
    """
    if len(snapshots) > 0:
        # obtain the dates of the oldest and newest snapshot...
        #
        # the dates will be in dateTime.iso8601 format
        # (e.g. 20090325T13:18:11); therefore, convert them to a
        # friendlier format (e.g. 2009-03-25 13:18:11) for output
        # NOTE(review): assumes the API returns snapshots newest-first
        # (index 0 newest, last element oldest) -- confirm ordering.
        newest = snapshots[0].get('created')
        newest = datetime(*(strptime(newest.value, "%Y%m%dT%H:%M:%S")[0:6]))
        oldest = snapshots[len(snapshots) - 1].get('created')
        oldest = datetime(*(strptime(oldest.value, "%Y%m%dT%H:%M:%S")[0:6]))
        print "systemId: %d, snapshots: %d, oldest: %s, newest: %s" \
            % (systemId, len(snapshots), oldest, newest)
def listSnapshotsLong(systemId, snapshots):
    """
    List to stdout the comprehensive summaries of snapshots for the system provided.
    One line per snapshot: system id, snapshot id, creation time and reason.
    """
    for snapshot in snapshots:
        print "systemId: %d, snapshotId: %d, created: %s, reason: %s" % \
            (systemId,
             snapshot['id'],
             datetime(*(strptime(snapshot['created'].value, "%Y%m%dT%H:%M:%S")[0:6])),
             snapshot['reason'])
def processCommandLine():
    """Validate the parsed command-line options.

    Defaults --satellite to the local hostname, rejects invalid option
    combinations (exiting with status 1), and converts the --start-date /
    --end-date strings into xmlrpclib.DateTime values for the API.
    """
    if not options.satellite:
        # Default to this host's name when no server was given.
        options.satellite = os.uname()[1]
    if not options.delete and not options.list and not options.long_list:
        # (message fixed: "a command options" -> "a command option")
        sys.stderr.write("Must include a command option (--list, --long-list, --delete)\n")
        sys.exit(1)
    if not options.all and not options.system_id and not options.snapshot_id:
        # (message fixed: closing parenthesis was missing)
        sys.stderr.write("Must include one of the required parameters (--all, --system-id or --snapshot-id)\n")
        sys.exit(1)
    if options.snapshot_id and (options.start_date or options.end_date):
        sys.stderr.write("--start-date and --end-date options do not apply when specifying --snapshot-id\n")
        sys.exit(1)
    if options.end_date and not options.start_date:
        sys.stderr.write("--end-date must be used with --start-date.\n")
        sys.exit(1)
    if options.list and options.long_list:
        sys.stderr.write("-l (--list) and -L (--long-list) are mutually exclusive.\n")
        sys.exit(1)
    # convert the start / end dates to a format that is usable by the xmlrpc api
    if options.start_date:
        options.start_date = datetime(*(strptime(options.start_date, "%Y%m%d%H%M%S")[0:6]))
        options.start_date = xmlrpclib.DateTime(options.start_date.timetuple())
    if options.end_date:
        options.end_date = datetime(*(strptime(options.end_date, "%Y%m%d%H%M%S")[0:6]))
        options.end_date = xmlrpclib.DateTime(options.end_date.timetuple())
if __name__ == '__main__':
    # main() returns None on success; map that to exit status 0.
    sys.exit(main() or 0)
| xkollar/spacewalk | utils/systemSnapshot.py | Python | gpl-2.0 | 12,601 |
import os
import time
from .util import do_commit
def delete_file_taggings(cursor, file_id):
    """Remove every tagging attached to the file with id *file_id*.

    Returns
    ========
    The number of affected rows (number of taggings removed)
    """
    cursor.execute('DELETE FROM file_tag WHERE file_id = ?', (file_id,))
    removed = cursor.rowcount
    do_commit(cursor)
    return removed
def file_info(path):
    """Split *path* into a (dirname, filename) pair.

    The pair is suitable for looking up the file's id in the `file` table
    (using 'SELECT id FROM file WHERE directory=?, name=?').

    Notes
    ======
    The resulting path is absolute but not fully normalized: symlinks are
    left untouched.  Apply os.path.realpath() before calling file_info()
    if you need symlinks resolved.
    """
    absolute = os.path.abspath(path)
    return os.path.split(absolute)
def file_mtime(path):
    """Return the file's mtime, as a string suitable for storing in the database.

    We cannot return a datetime object, because datetimes
    do not support nanosecond resolution.

    Trailing zeros are truncated, to match TMSU's implementation.
    If nanoseconds == 0, the decimal part is omitted entirely.
    """
    st = os.stat(path)
    # Derive both the whole seconds and the fraction from the same integer
    # nanosecond counter.  The previous code took seconds from the float
    # st_mtime, which can round up and disagree with the st_mtime_ns
    # remainder by a whole second.
    seconds, nano = divmod(st.st_mtime_ns, 1000000000)
    t = time.gmtime(seconds)
    if nano > 0:
        # Zero-pad to 9 digits before stripping trailing zeros: previously
        # e.g. 5 ns was rendered as ".5" (i.e. half a second) because the
        # leading zeros of the fraction were lost.
        frac = '.' + ('%09d' % nano).rstrip('0')
    else:
        frac = ''
    return time.strftime('%Y-%m-%d %H:%M:%S', t) + frac
def file_id(cursor, path):
    """Return the file.id of the given path.

    If the path is not yet tagged, return None.

    Raises
    =======
    ValueError if more than one `file` row matches the path, which would
    indicate a corrupt database.
    """
    # Removed leftover debug print statements that polluted stdout (and
    # peeked at the private connection._relpath attribute).
    if hasattr(cursor.connection, 'normalize_path'):
        # Some connections provide their own path normalization; honour it.
        dirname, filename = os.path.split(
            cursor.connection.normalize_path(path))
    else:
        dirname, filename = file_info(path)
    results = list(cursor.execute('SELECT id FROM file'
                                  ' WHERE directory=? AND name=?',
                                  (dirname, filename)))
    if len(results) > 1:
        raise ValueError('Duplicate entry in file table')
    if results:
        return results[0][0]
    return None
def file_ids(cursor, paths):
    """Return a dict mapping each given path to its file.id (or None)."""
    # XXX is slow? -- issues one query per path
    mapping = {}
    for path in paths:
        mapping[path] = file_id(cursor, path)
    return mapping
def dir_contains(path, querypath):
    """Return True when *querypath* lies strictly inside directory *path*.

    This is a purely lexical test: neither path is checked against the
    filesystem, and a path never contains itself.

    Examples
    =========
    >>> dir_contains('/foo/bar', '/foo/bar/1/2/3/4')
    True
    >>> dir_contains('/foo/bar', '/foo/ba')
    False
    >>> dir_contains('/foo/bar', '/foo/bar')
    False
    """
    prefix = os.path.abspath(path) + os.path.sep
    return os.path.abspath(querypath).startswith(prefix)
def rename_path(cursor, oldpath, newpath, update_only=False):
    """Rename a path, updating the database to match.

    If the specified path is a directory, then info for all files
    contained in that directory branch will also be updated.
    This is equivalent to
    `mv "$OLDPATH" "$NEWPATH"; tmsu repair --manual "$OLDPATH" "$NEWPATH"`.

    NOTE(review): this function is clearly work-in-progress -- the
    directory branch only computes and prints the planned changes
    (see the XXX below), and the file branch is dead code behind an
    unconditional sys.exit(1).

    Parameters
    ===========
    update_only   Do not modify the filesystem, only the database.
                  Useful when you need to fix the database to conform
                  to a rename that's already happened.

    Returns
    ========
    n   The number of `file` table rows affected.
        1 for renaming a file, 1+N for renaming a directory,
        where N is the number of tagged files/directories
        within that directory, recursively.
        (Note that N is specifically -not- the number of files
        /directories within that directory; only the ones that
        are currently tagged.)

    Raises
    =======
    KeyError   If there is no record of this path in the database.
    OSError    If the database records that the given path is a directory
               but the OS says it is not, or vice versa.
    OSError    If the destination path doesn't exist (eg. you are trying
               to rename /foo/bar.png to /foo/baz/bar.png, but /foo/baz
               doesn't exist.)
    FileNotFoundError  If oldpath doesn't exist on disk, and you haven't
               specified update_only=True.
    """
    if (not update_only) and (not os.path.exists(oldpath)):
        raise FileNotFoundError(oldpath)
    oldpath = os.path.abspath(oldpath)
    newpath = os.path.abspath(newpath)
    # A symlink to a directory also counts as a directory here.
    isdir = os.path.isdir(oldpath) or os.path.isdir(os.path.realpath(oldpath))
    db_isdir = cursor.execute('SELECT is_dir FROM file'
                              ' WHERE directory = ? AND name = ?',
                              file_info(oldpath)).fetchone()
    if db_isdir:
        db_isdir = db_isdir[0]
    else:
        # No DB record: fall back to the filesystem's answer so the
        # consistency check below cannot fail spuriously.
        db_isdir = isdir
    if isdir != db_isdir:
        raise OSError('OS reports isdir=%r,'
                      ' but database reports isdir=%r' % (isdir, db_isdir))
    if isdir:
        if not os.path.exists(os.path.dirname(newpath)):
            raise OSError('Attempt to move {} into a nonexistent'
                          ' directory {} with name {}'.format(
                              oldpath,
                              os.path.dirname(newpath),
                              os.path.basename(newpath)))
        id = cursor.execute('SELECT id FROM file'
                            ' WHERE directory=? AND name = ?',
                            file_info(oldpath)).fetchone()
        if id:
            id = id[0]
        # SQL LIKE pattern matching every path strictly below oldpath.
        pattern = oldpath + os.path.sep + '%'
        # Maps file.id -> (old directory value, rewritten directory value).
        idmap = {}
        for id, directory in cursor.execute('SELECT id, directory FROM file'
                                            ' WHERE directory=?'
                                            ' OR directory like ?',
                                            (oldpath, pattern)):
            if dir_contains(oldpath, directory):
                # Rewrite the oldpath prefix of this entry to newpath.
                tmp = directory.split(oldpath, 1)
                if len(tmp) != 2:
                    raise ValueError('Attempted to split %r by %r,'
                                     ' but got %r!' % (directory,
                                                       oldpath,
                                                       tmp))
                tmp[0] = newpath
                rewritten = "".join(tmp)
                import sys
                # Debug trace of each planned rewrite.
                print ('%r -> %r' % (directory, rewritten), file=sys.stderr)
                idmap[id] = (directory, rewritten)
            elif directory == oldpath:
                idmap[id] = (oldpath, newpath)
        print ('idmap: %r' % (idmap,))
        # XXX actually make changes.
    else:
        import sys
        # NOTE(review): unconditional exit -- everything below is
        # unreachable until the file-rename path is finished.
        sys.exit(1)
        # blocked off for now.
        id = cursor.execute('SELECT id FROM file'
                            ' WHERE directory=? AND name = ?',
                            file_info(oldpath)).fetchone()
        if not id:
            raise KeyError('No record referring to %r found.' % (oldpath,))
        id = id[0]
        if not update_only:
            newdir = os.path.dirname(newpath)
            if not os.path.exists(newpath):
                raise OSError('Attempt to move {} into a nonexistent'
                              ' directory {}'.format(oldpath, newdir))
            os.rename(oldpath, newpath)
        cursor.execute('UPDATE file SET directory=?, name=? WHERE id=?',
                       file_info(newpath) + (id,))
        # NOTE(review): do_commit() is called without the cursor argument
        # here, unlike do_commit(cursor) in delete_file_taggings() --
        # confirm the intended signature before enabling this branch.
        do_commit()
def move_paths(cursor, paths, destdir):
    """Move every path in *paths* into *destdir*, updating the database.

    Paths with no current taggings are simply moved on disk, with no
    database updates.

    Raises
    =======
    ValueError  If one path points at a directory while other paths point
                at files inside that directory -- move_files() never does
                recursive moves; use rename_path() on the parent directory
                for that.
    """
    # Stub: not implemented yet.
    raise NotImplementedError('tmsoup.file.move_paths()')
def parse_args(args):
    """Parse command-line *args* and return the resulting namespace."""
    from argparse import ArgumentParser
    parser = ArgumentParser(
        description='Rename or get info about files/directories')
    subparsers = parser.add_subparsers(dest='cmd')
    subparsers.required = True
    rename_parser = subparsers.add_parser(
        'rename', help='Rename a file or directory')
    rename_parser.add_argument('oldname')
    rename_parser.add_argument('newname')
    return parser.parse_args(args)
def main(argv):
    """CLI entry point: open the tmsu database and perform a rename."""
    import sqlite3
    from tmsoup.core import get_db_path
    parsed = parse_args(argv)
    connection = sqlite3.connect(get_db_path())
    db_cursor = connection.cursor()
    if not os.path.isdir(parsed.oldname):
        raise ValueError('directories only, for now.')
    rename_path(db_cursor, parsed.oldname, parsed.newname)
# Public API of this module.
# NOTE(review): several module-level helpers (dir_contains, rename_path,
# move_paths) are not listed here -- confirm whether that is intentional.
__all__ = ('delete_file_taggings', 'file_id', 'file_ids', 'file_mtime',
           'file_info')
if __name__ == '__main__':
    # Allow running as a script: forward the CLI arguments to main().
    import sys
    main(sys.argv[1:])
| 0ion9/tmsoup | tmsoup/file.py | Python | lgpl-3.0 | 9,291 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
# Standard Ansible module metadata: community-supported preview module.
ANSIBLE_METADATA = {'metadata_version': '1.0',
                    'status': ['preview'],
                    'supported_by': 'community'}
# Fix: the module's argument spec declares `host_name` (see main()), but the
# docs previously advertised `hostname`, which the module would reject.
DOCUMENTATION = """
---
module: "vyos_system"
version_added: "2.3"
author: "Nathaniel Case (@qalthos)"
short_description: Run `set system` commands on VyOS devices
description:
  - Runs one or more commands on remote devices running VyOS.
    This module can also be introspected to validate key parameters before
    returning successfully.
extends_documentation_fragment: vyos
options:
  host_name:
    description:
      - Configure the device hostname parameter. This option takes an ASCII string value.
  domain_name:
    description:
      - The new domain name to apply to the device.
  name_server:
    description:
      - A list of name servers to use with the device. Mutually exclusive with
        I(domain_search)
    required: false
    default: null
  domain_search:
    description:
      - A list of domain names to search. Mutually exclusive with
        I(name_server)
  state:
    description:
      - Whether to apply (C(present)) or remove (C(absent)) the settings.
    default: present
    choices: ['present', 'absent']
"""
# Fix: the sample previously showed `set system hostname`, but the module
# emits `set system host-name` (see spec_key_to_device_key).
RETURN = """
commands:
  description: The list of configuration mode commands to send to the device
  returned: always
  type: list
  sample:
    - set system host-name vyos01
    - set system domain-name foo.example.com
"""
# Fix: the first example used `hostname:`, which is not a valid option of
# this module's argument spec; the real option is `host_name` (see main()).
EXAMPLES = """
- name: configure hostname and domain-name
  vyos_system:
    host_name: vyos01
    domain_name: test.example.com
- name: remove all configuration
  vyos_system:
    state: absent
- name: configure name servers
  vyos_system:
    name_server:
      - 8.8.8.8
      - 8.8.4.4
- name: configure domain search suffixes
  vyos_system:
    domain_search:
      - sub1.example.com
      - sub2.example.com
"""
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.vyos import get_config, load_config
from ansible.module_utils.vyos import vyos_argument_spec, check_args
def spec_key_to_device_key(key):
    """Map an argument-spec key (snake_case) to its VyOS config keyword."""
    device_key = key.replace('_', '-')
    # The search-domain list lives one level deeper in the config tree.
    return device_key + ' domain' if device_key == 'domain-search' else device_key


def config_to_dict(module):
    """Parse the device's running `set system` config into a settings dict."""
    config = {'domain_search': [], 'name_server': []}
    for line in get_config(module).split('\n'):
        # Slice offsets skip the command prefix plus the opening quote,
        # and [-1] drops the closing quote around the configured value.
        if line.startswith('set system host-name'):
            config['host_name'] = line[22:-1]
        elif line.startswith('set system domain-name'):
            config['domain_name'] = line[24:-1]
        elif line.startswith('set system domain-search domain'):
            config['domain_search'].append(line[33:-1])
        elif line.startswith('set system name-server'):
            config['name_server'].append(line[24:-1])
    return config


def spec_to_commands(want, have):
    """Build `set system` / `delete system` commands from desired vs. actual.

    Note: pops 'state' out of `want` (mutates the caller's dict), matching
    the module's single-use flow.
    """
    commands = []
    state = want.pop('state')

    # A bare state=absent with no individual settings wipes everything.
    if state == 'absent' and not any(value is not None for value in want.values()):
        commands.extend('delete system %s' % spec_key_to_device_key(key)
                        for key in have)

    for key, proposed in want.items():
        if proposed is None:
            continue
        current = have.get(key)
        device_key = spec_key_to_device_key(key)

        if key in ('domain_search', 'name_server'):
            # List-valued settings are reconciled entry by entry.
            if not proposed:
                # Empty list was passed, delete all values
                commands.append("delete system %s" % device_key)
            for entry in proposed:
                on_device = entry in current
                if state == 'absent' and on_device:
                    commands.append("delete system %s '%s'" % (device_key, entry))
                elif state == 'present' and not on_device:
                    commands.append("set system %s '%s'" % (device_key, entry))
        else:
            # Scalar settings: delete when present, set when changed.
            if state == 'absent' and current and proposed:
                commands.append('delete system %s' % device_key)
            elif state == 'present' and proposed and proposed != current:
                commands.append("set system %s '%s'" % (device_key, proposed))

    return commands
def map_param_to_obj(module):
    """Collect the `set system` related module parameters into a plain dict."""
    wanted = ('host_name', 'domain_name', 'domain_search', 'name_server',
              'state')
    return {name: module.params[name] for name in wanted}
def main():
    """Entry point for the vyos_system Ansible module."""
    # Module options; must stay in sync with DOCUMENTATION above.
    argument_spec = dict(
        host_name=dict(type='str'),
        domain_name=dict(type='str'),
        domain_search=dict(type='list'),
        name_server=dict(type='list'),
        state=dict(type='str', default='present', choices=['present', 'absent']),
    )
    argument_spec.update(vyos_argument_spec)
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
        mutually_exclusive=[('domain_name', 'domain_search')],
    )
    warnings = list()
    check_args(module, warnings)
    result = {'changed': False, 'warnings': warnings}
    # Diff desired settings (task args) against the device's running config.
    want = map_param_to_obj(module)
    have = config_to_dict(module)
    commands = spec_to_commands(want, have)
    result['commands'] = commands
    if commands:
        # In check mode, build the config but do not commit it.
        commit = not module.check_mode
        response = load_config(module, commands, commit=commit)
        result['changed'] = True
    module.exit_json(**result)
if __name__ == '__main__':
    main()
| dav1x/ansible | lib/ansible/modules/network/vyos/vyos_system.py | Python | gpl-3.0 | 6,270 |
import unittest
from vodem.api import standby_dns_manual
class TestStandbyDnsManual(unittest.TestCase):
    """Unit test for the vodem `standby_dns_manual` API call."""
    @classmethod
    def setUpClass(cls):
        # The modem is expected to answer with an empty value for this key.
        cls.valid_response = {
            'standby_dns_manual': '',
        }
    def test_call(self):
        # NOTE(review): calls the real API wrapper; assumes a reachable modem
        # or a stubbed transport underneath -- confirm the test environment.
        resp = standby_dns_manual()
        self.assertEqual(self.valid_response, resp)
| alzeih/python-vodem-vodafone-K4607-Z | test/unit/api/test_standby_dns_manual.py | Python | mit | 343 |
"""
Support for the Unitymedia Horizon HD Recorder.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/media_player.horizon/
"""
from datetime import timedelta
import logging
import voluptuous as vol
from homeassistant import util
from homeassistant.components.media_player import (
MEDIA_TYPE_CHANNEL, PLATFORM_SCHEMA, SUPPORT_NEXT_TRACK, SUPPORT_PAUSE,
SUPPORT_PLAY, SUPPORT_PLAY_MEDIA, SUPPORT_PREVIOUS_TRACK, SUPPORT_TURN_OFF,
SUPPORT_TURN_ON, MediaPlayerDevice)
from homeassistant.const import (
CONF_HOST, CONF_NAME, CONF_PORT, STATE_OFF, STATE_PAUSED, STATE_PLAYING)
from homeassistant.exceptions import PlatformNotReady
import homeassistant.helpers.config_validation as cv
# Third-party client library used to talk to the Horizon box.
REQUIREMENTS = ['einder==0.3.1']
_LOGGER = logging.getLogger(__name__)
DEFAULT_NAME = 'Horizon'
DEFAULT_PORT = 5900
# Rate limits applied to update() polling via util.Throttle below.
MIN_TIME_BETWEEN_FORCED_SCANS = timedelta(seconds=1)
MIN_TIME_BETWEEN_SCANS = timedelta(seconds=10)
# Feature bitmask advertised to Home Assistant for this media player.
SUPPORT_HORIZON = SUPPORT_NEXT_TRACK | SUPPORT_PAUSE | SUPPORT_PLAY | \
    SUPPORT_PLAY_MEDIA | SUPPORT_PREVIOUS_TRACK | SUPPORT_TURN_ON | \
    SUPPORT_TURN_OFF
# Configuration schema: host is mandatory; name and port have defaults.
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Required(CONF_HOST): cv.string,
    vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
    vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
})
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up the Horizon platform."""
    from einder import Client, keys
    from einder.exceptions import AuthenticationError
    host = config[CONF_HOST]
    name = config[CONF_NAME]
    port = config[CONF_PORT]
    try:
        client = Client(host, port=port)
    except AuthenticationError as msg:
        # Bad credentials: give up, do not retry setup.
        _LOGGER.error("Authentication to %s at %s failed: %s", name, host, msg)
        return
    except OSError as msg:
        # occurs if horizon box is offline
        # PlatformNotReady tells Home Assistant to retry the setup later.
        _LOGGER.error("Connection to %s at %s failed: %s", name, host, msg)
        raise PlatformNotReady
    _LOGGER.info("Connection to %s at %s established", name, host)
    # Second argument True requests an immediate state update on add.
    add_entities([HorizonDevice(client, name, keys)], True)
class HorizonDevice(MediaPlayerDevice):
    """Representation of a Horizon HD Recorder."""
    def __init__(self, client, name, keys):
        """Initialize the remote."""
        self._client = client  # einder client connected to the box
        self._name = name
        self._state = None  # unknown until the first update()
        self._keys = keys  # einder key-code constants
    @property
    def name(self):
        """Return the name of the remote."""
        return self._name
    @property
    def state(self):
        """Return the state of the device."""
        return self._state
    @property
    def supported_features(self):
        """Flag media player features that are supported."""
        return SUPPORT_HORIZON
    @util.Throttle(MIN_TIME_BETWEEN_SCANS, MIN_TIME_BETWEEN_FORCED_SCANS)
    def update(self):
        """Update State using the media server running on the Horizon."""
        # Only the power state can be queried; play/pause is tracked
        # optimistically by the command methods below.
        if self._client.is_powered_on():
            self._state = STATE_PLAYING
        else:
            self._state = STATE_OFF
    def turn_on(self):
        """Turn the device on."""
        # The box only has a power toggle, so guard on current state.
        if self._state is STATE_OFF:
            self._send_key(self._keys.POWER)
    def turn_off(self):
        """Turn the device off."""
        if self._state is not STATE_OFF:
            self._send_key(self._keys.POWER)
    def media_previous_track(self):
        """Channel down."""
        self._send_key(self._keys.CHAN_DOWN)
        self._state = STATE_PLAYING
    def media_next_track(self):
        """Channel up."""
        self._send_key(self._keys.CHAN_UP)
        self._state = STATE_PLAYING
    def media_play(self):
        """Send play command."""
        # The remote exposes a single play/pause toggle key.
        self._send_key(self._keys.PAUSE)
        self._state = STATE_PLAYING
    def media_pause(self):
        """Send pause command."""
        self._send_key(self._keys.PAUSE)
        self._state = STATE_PAUSED
    def media_play_pause(self):
        """Send play/pause command."""
        self._send_key(self._keys.PAUSE)
        # Flip the optimistic state to mirror the toggle.
        if self._state == STATE_PAUSED:
            self._state = STATE_PLAYING
        else:
            self._state = STATE_PAUSED
    def play_media(self, media_type, media_id, **kwargs):
        """Play media / switch to channel."""
        if MEDIA_TYPE_CHANNEL == media_type:
            try:
                self._select_channel(int(media_id))
                self._state = STATE_PLAYING
            except ValueError:
                _LOGGER.error("Invalid channel: %s", media_id)
        else:
            _LOGGER.error("Invalid media type %s. Supported type: %s",
                          media_type, MEDIA_TYPE_CHANNEL)
    def _select_channel(self, channel):
        """Select a channel (taken from einder library, thx)."""
        self._send(channel=channel)
    def _send_key(self, key):
        """Send a key to the Horizon device."""
        self._send(key=key)
    def _send(self, key=None, channel=None):
        """Send a key to the Horizon device."""
        from einder.exceptions import AuthenticationError
        try:
            if key:
                self._client.send_key(key)
            elif channel:
                self._client.select_channel(channel)
        except OSError as msg:
            _LOGGER.error("%s disconnected: %s. Trying to reconnect...",
                          self._name, msg)
            # for reconnect, first gracefully disconnect
            self._client.disconnect()
            try:
                self._client.connect()
                self._client.authorize()
            except AuthenticationError as msg:
                _LOGGER.error("Authentication to %s failed: %s", self._name,
                              msg)
                return
            except OSError as msg:
                # occurs if horizon box is offline
                _LOGGER.error("Reconnect to %s failed: %s", self._name, msg)
                return
            # Retry the original command once after a successful reconnect.
            self._send(key=key, channel=channel)
| persandstrom/home-assistant | homeassistant/components/media_player/horizon.py | Python | apache-2.0 | 5,979 |
from __future__ import unicode_literals
from future.builtins import filter, str
try:
from urllib.parse import urljoin
except ImportError: # Python 2
from urlparse import urljoin
from django.core.urlresolvers import resolve, reverse
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from django.utils.translation import ugettext_lazy as _
from mezzanine.conf import settings
from mezzanine.core.models import Displayable, Orderable, RichText
from mezzanine.pages.fields import MenusField
from mezzanine.pages.managers import PageManager
from mezzanine.utils.urls import path_to_slug
class BasePage(Orderable, Displayable):
    """
    Exists solely to store ``PageManager`` as the main manager.
    If it's defined on ``Page``, a concrete model, then each
    ``Page`` subclass loses the custom manager.
    """
    # Custom manager inherited as the default by all Page subclasses.
    objects = PageManager()
    class Meta:
        abstract = True
@python_2_unicode_compatible
class Page(BasePage):
    """
    A page in the page tree. This is the base class that custom content types
    need to subclass.
    """
    parent = models.ForeignKey("Page", blank=True, null=True,
                               related_name="children")
    in_menus = MenusField(_("Show in menus"), blank=True, null=True)
    # Denormalised " / "-joined chain of titles up to the root; rebuilt in save().
    titles = models.CharField(editable=False, max_length=1000, null=True)
    # Lower-cased model name of the concrete subclass; set on first save().
    content_model = models.CharField(editable=False, max_length=50, null=True)
    login_required = models.BooleanField(_("Login required"), default=False,
        help_text=_("If checked, only logged in users can view this page"))
    class Meta:
        verbose_name = _("Page")
        verbose_name_plural = _("Pages")
        ordering = ("titles",)
        order_with_respect_to = "parent"
    def __str__(self):
        return self.titles
    def get_absolute_url(self):
        """
        URL for a page - for ``Link`` page types, simply return its
        slug since these don't have an actual URL pattern. Also handle
        the special case of the homepage being a page object.
        """
        slug = self.slug
        if self.content_model == "link":
            # Ensure the URL is absolute.
            slug = urljoin('/', slug)
            return slug
        if slug == "/":
            return reverse("home")
        else:
            return reverse("page", kwargs={"slug": slug})
    def save(self, *args, **kwargs):
        """
        Create the titles field using the titles up the parent chain
        and set the initial value for ordering.
        """
        if self.id is None:
            self.content_model = self._meta.object_name.lower()
        titles = [self.title]
        parent = self.parent
        while parent is not None:
            titles.insert(0, parent.title)
            parent = parent.parent
        self.titles = " / ".join(titles)
        super(Page, self).save(*args, **kwargs)
    def description_from_content(self):
        """
        Override ``Displayable.description_from_content`` to load the
        content type subclass for when ``save`` is called directly on a
        ``Page`` instance, so that all fields defined on the subclass
        are available for generating the description.
        """
        if self.__class__ == Page:
            content_model = self.get_content_model()
            if content_model:
                return content_model.description_from_content()
        return super(Page, self).description_from_content()
    def get_ascendants(self, for_user=None):
        """
        Returns the ascendants for the page. Ascendants are cached in
        the ``_ascendants`` attribute, which is populated when the page
        is loaded via ``Page.objects.with_ascendants_for_slug``.
        """
        if not self.parent_id:
            # No parents at all, bail out.
            return []
        if not hasattr(self, "_ascendants"):
            # _ascendants has not been assigned by either
            # page.get_ascendants or Page.objects.with_ascendants_for_slug,
            # so run it to see if we can retrieve all parents in a single
            # query, which will occur if the slugs for each of the pages
            # have not been customised.
            if self.slug:
                kwargs = {"for_user": for_user}
                pages = Page.objects.with_ascendants_for_slug(self.slug,
                                                              **kwargs)
                self._ascendants = pages[0]._ascendants
            else:
                self._ascendants = []
        if not self._ascendants:
            # Page has a parent but with_ascendants_for_slug failed to
            # find them due to custom slugs, so retrieve the parents
            # recursively.
            child = self
            while child.parent_id is not None:
                self._ascendants.append(child.parent)
                child = child.parent
        return self._ascendants
    @classmethod
    def get_content_models(cls):
        """
        Return all Page subclasses.
        """
        is_content_model = lambda m: m is not Page and issubclass(m, Page)
        return list(filter(is_content_model, models.get_models()))
    def get_content_model(self):
        """
        Provides a generic method of retrieving the instance of the custom
        content type's model for this page.
        """
        return getattr(self, self.content_model, None)
    def get_slug(self):
        """
        Recursively build the slug from the chain of parents.
        """
        slug = super(Page, self).get_slug()
        if self.parent is not None:
            return "%s/%s" % (self.parent.slug, slug)
        return slug
    def set_slug(self, new_slug):
        """
        Changes this page's slug, and all other pages whose slugs
        start with this page's slug.
        """
        for page in Page.objects.filter(slug__startswith=self.slug):
            if not page.overridden():
                page.slug = new_slug + page.slug[len(self.slug):]
                page.save()
        self.slug = new_slug
    def set_parent(self, new_parent):
        """
        Change the parent of this page, changing this page's slug to match
        the new parent if necessary.
        """
        self_slug = self.slug
        old_parent_slug = self.parent.slug if self.parent else ""
        new_parent_slug = new_parent.slug if new_parent else ""
        # Make sure setting the new parent won't cause a cycle.
        parent = new_parent
        while parent is not None:
            if parent.pk == self.pk:
                raise AttributeError("You can't set a page or its child as"
                                     " a parent.")
            parent = parent.parent
        self.parent = new_parent
        self.save()
        if self_slug:
            # Rewrite this page's (and descendants') slugs under the new parent.
            if not old_parent_slug:
                self.set_slug("/".join((new_parent_slug, self.slug)))
            elif self.slug.startswith(old_parent_slug):
                new_slug = self.slug.replace(old_parent_slug,
                                             new_parent_slug, 1)
                self.set_slug(new_slug.strip("/"))
    def overridden(self):
        """
        Returns ``True`` if the page's slug has an explicitly defined
        urlpattern and is therefore considered to be overridden.
        """
        from mezzanine.pages.views import page
        page_url = reverse("page", kwargs={"slug": self.slug})
        resolved_view = resolve(page_url)[0]
        return resolved_view != page
    def can_add(self, request):
        """
        Dynamic ``add`` permission for content types to override.
        """
        return self.slug != "/"
    def can_change(self, request):
        """
        Dynamic ``change`` permission for content types to override.
        """
        return True
    def can_delete(self, request):
        """
        Dynamic ``delete`` permission for content types to override.
        """
        return True
    def set_helpers(self, context):
        """
        Called from the ``page_menu`` template tag and assigns a
        handful of properties based on the current page, that are used
        within the various types of menus.
        """
        current_page = context["_current_page"]
        current_page_id = getattr(current_page, "id", None)
        current_parent_id = getattr(current_page, "parent_id", None)
        # Am I a child of the current page?
        self.is_current_child = self.parent_id == current_page_id
        self.is_child = self.is_current_child  # Backward compatibility
        # Is my parent the same as the current page's?
        self.is_current_sibling = self.parent_id == current_parent_id
        # Am I the current page?
        try:
            request = context["request"]
        except KeyError:
            # No request context, most likely when tests are run.
            self.is_current = False
        else:
            self.is_current = self.slug == path_to_slug(request.path_info)
        # Is the current page me or any page up the parent chain?
        def is_c_or_a(page_id):
            parent_id = context.get("_parent_page_ids", {}).get(page_id)
            return self.id == page_id or (parent_id and is_c_or_a(parent_id))
        self.is_current_or_ascendant = lambda: bool(is_c_or_a(current_page_id))
        self.is_current_parent = self.id == current_parent_id
        # Am I a primary page?
        self.is_primary = self.parent_id is None
        # What's an ID I can use in HTML?
        self.html_id = self.slug.replace("/", "-")
        # Default branch level - gets assigned in the page_menu tag.
        self.branch_level = 0
    def in_menu_template(self, template_name):
        # A null in_menus means "show in all menus" -- only an explicit
        # exclusion for this template hides the page.
        if self.in_menus is not None:
            for i, l, t in settings.PAGE_MENU_TEMPLATES:
                if not str(i) in self.in_menus and t == template_name:
                    return False
        return True
    def get_template_name(self):
        """
        Subclasses can implement this to provide a template to use
        in ``mezzanine.pages.views.page``.
        """
        return None
class RichTextPage(Page, RichText):
    """
    Implements the default type of page with a single Rich Text
    content field.
    """
    class Meta:
        verbose_name = _("Rich text page")
        verbose_name_plural = _("Rich text pages")
class Link(Page):
    """
    A general content type for creating external links in the page
    menu.
    """
    # No fields of its own: get_absolute_url() treats content_model "link"
    # specially and returns the slug directly.
    class Meta:
        verbose_name = _("Link")
        verbose_name_plural = _("Links")
| cccs-web/mezzanine | mezzanine/pages/models.py | Python | bsd-2-clause | 10,568 |
#####################################################################################
#
# Copyright (c) Microsoft Corporation. All rights reserved.
#
# This source code is subject to terms and conditions of the Apache License, Version 2.0. A
# copy of the license can be found in the License.html file at the root of this distribution. If
# you cannot locate the Apache License, Version 2.0, please send an email to
# ironpy@microsoft.com. By using this source code in any fashion, you are agreeing to be bound
# by the terms of the Apache License, Version 2.0.
#
# You must not remove this notice, or any other, from this software.
#
#
#####################################################################################
##
## Test the datetime module
##
from iptest.assert_util import *
import datetime
def test_date():
    """Exercise datetime.date: construction/validation, timedelta
    arithmetic, comparisons, hashing, formatting, conversions and
    class-level attributes (min/max/resolution/today)."""
    #--------------------------------------------------------------------------
    #basic sanity checks
    x = datetime.date(2005,3,22)
    AreEqual(x.year, 2005)
    AreEqual(x.month, 3)
    AreEqual(x.day, 22)
    datetime.date(1,1,1)
    datetime.date(9999, 12, 31)
    datetime.date(2004, 2, 29)
    # Out-of-range components must raise ValueError.
    AssertError(ValueError, datetime.date, 2005, 4,31)
    AssertError(ValueError, datetime.date, 2005, 3,32)
    AssertError(ValueError, datetime.datetime, 2006, 2, 29)
    AssertError(ValueError, datetime.datetime, 2006, 9, 31)
    AssertError(ValueError, datetime.date, 0, 1, 1)
    AssertError(ValueError, datetime.date, 1, 0, 1)
    AssertError(ValueError, datetime.date, 1, 1, 0)
    AssertError(ValueError, datetime.date, 0, 0, 0)
    AssertError(ValueError, datetime.date, -1, 1, 1)
    AssertError(ValueError, datetime.date, 1, -1, 1)
    AssertError(ValueError, datetime.date, 1, 1, -1)
    AssertError(ValueError, datetime.date, -1, -1, -1)
    AssertError(ValueError, datetime.date, -10, -10, -10)
    AssertError(ValueError, datetime.date, 10000, 12, 31)
    AssertError(ValueError, datetime.date, 9999, 13, 31)
    AssertError(ValueError, datetime.date, 9999, 12, 32)
    AssertError(ValueError, datetime.date, 10000, 13, 32)
    AssertError(ValueError, datetime.date, 100000, 130, 320)
    #add
    x = datetime.date(2005, 3, 22) + datetime.timedelta(1)
    AreEqual(x.year, 2005)
    AreEqual(x.month, 3)
    AreEqual(x.day, 23)
    x = datetime.date(2005, 3, 22) + datetime.timedelta(0)
    AreEqual(x.year, 2005)
    AreEqual(x.month, 3)
    AreEqual(x.day, 22)
    #right add
    x = datetime.timedelta(1) + datetime.date(2005, 3, 22)
    AreEqual(x.year, 2005)
    AreEqual(x.month, 3)
    AreEqual(x.day, 23)
    x = datetime.timedelta(0) + datetime.date(2005, 3, 22)
    AreEqual(x.year, 2005)
    AreEqual(x.month, 3)
    AreEqual(x.day, 22)
    #subtract
    x = datetime.date(2005, 3, 22) - datetime.timedelta(1)
    AreEqual(x.year, 2005)
    AreEqual(x.month, 3)
    AreEqual(x.day, 21)
    x = datetime.date(2005, 3, 22) - datetime.timedelta(0)
    AreEqual(x.year, 2005)
    AreEqual(x.month, 3)
    AreEqual(x.day, 22)
    #equality
    x = datetime.date(2005, 3, 22)
    y = x
    Assert(x==y)
    y = datetime.date(2005, 3, 23)
    Assert(not(x==y))
    Assert(x==datetime.date(2005, 3, 22))
    #inequality
    x = datetime.date(2005, 3, 22)
    y = None
    Assert(x!=y)
    y = datetime.date(2005, 3, 23)
    Assert(x!=y)
    #ge
    Assert(datetime.date(2005, 3, 22) >= datetime.date(2005, 3, 22))
    Assert(datetime.date(2005, 3, 23) >= datetime.date(2005, 3, 22))
    Assert(datetime.date(2005, 3, 24) >= datetime.date(2005, 3, 22))
    Assert(not (datetime.date(2005, 3, 21) >= datetime.date(2005, 3, 22)))
    Assert(not (datetime.date(2005, 3, 20) >= datetime.date(2005, 3, 22)))
    #le
    Assert(datetime.date(2005, 3, 22) <= datetime.date(2005, 3, 22))
    Assert(datetime.date(2005, 3, 22) <= datetime.date(2005, 3, 23))
    Assert(datetime.date(2005, 3, 22) <= datetime.date(2005, 3, 24))
    Assert(not (datetime.date(2005, 3, 22) <= datetime.date(2005, 3, 21)))
    Assert(not (datetime.date(2005, 3, 22) <= datetime.date(2005, 3, 20)))
    #gt
    Assert(not (datetime.date(2005, 3, 22) > datetime.date(2005, 3, 22)))
    Assert(datetime.date(2005, 3, 23) > datetime.date(2005, 3, 22))
    Assert(datetime.date(2005, 3, 24) > datetime.date(2005, 3, 22))
    Assert(not (datetime.date(2005, 3, 21) > datetime.date(2005, 3, 22)))
    Assert(not (datetime.date(2005, 3, 20) > datetime.date(2005, 3, 22)))
    #lt
    Assert(not(datetime.date(2005, 3, 22) < datetime.date(2005, 3, 22)))
    Assert(datetime.date(2005, 3, 22) < datetime.date(2005, 3, 23))
    Assert(datetime.date(2005, 3, 22) < datetime.date(2005, 3, 24))
    Assert(not (datetime.date(2005, 3, 22) < datetime.date(2005, 3, 21)))
    Assert(not (datetime.date(2005, 3, 22) < datetime.date(2005, 3, 20)))
    #hash
    x = datetime.date(2005, 3, 22)
    Assert(x.__hash__()!=None)
    #reduce
    x = datetime.date(2005, 3, 22)
    Assert(x.__reduce__()[0] == datetime.date)
    AreEqual(type(x.__reduce__()[1]), tuple)
    #repr
    AreEqual(repr(datetime.date(2005, 3, 22)), 'datetime.date(2005, 3, 22)')
    #str
    AreEqual(str(datetime.date(2005, 3, 22)), '2005-03-22')
    #ctime
    AreEqual(datetime.date(2005, 3, 22).ctime(), 'Tue Mar 22 00:00:00 2005')
    #isocalendar
    x = datetime.date(2005, 3, 22).isocalendar()
    AreEqual(x[0], 2005)
    AreEqual(x[1], 12)
    AreEqual(x[2], 2)
    #isoformat
    x = datetime.date(2005, 3, 22).isoformat()
    AreEqual(x, "2005-03-22")
    #isoweekday
    AreEqual(datetime.date(2005, 3, 22).isoweekday(), 2)
    #replace
    # replace() must return a modified copy, leaving the original intact.
    x = datetime.date(2005, 3, 22)
    y = datetime.date(2005, 3, 22)
    z = x.replace(year=1000)
    AreEqual(y, x)
    AreEqual(z.year, 1000)
    z = x.replace(month=5)
    AreEqual(y, x)
    AreEqual(z.month, 5)
    z = x.replace(day=25)
    AreEqual(y, x)
    AreEqual(z.day, 25)
    z = x.replace(year=1000, month=5)
    AreEqual(y, x)
    AreEqual(z.year, 1000)
    AreEqual(z.month, 5)
    z = x.replace(year=1000, day=25)
    AreEqual(y, x)
    AreEqual(z.year, 1000)
    AreEqual(z.day, 25)
    z = x.replace(day=25, month=5)
    AreEqual(y, x)
    AreEqual(z.day, 25)
    AreEqual(z.month, 5)
    z = x.replace(day=25, month=5, year=1000)
    AreEqual(y, x)
    AreEqual(z.day, 25)
    AreEqual(z.month, 5)
    AreEqual(z.year, 1000)
    #strftime
    AreEqual(x.strftime("%y-%a-%b"), "05-Tue-Mar")
    AreEqual(x.strftime("%Y-%A-%B"), "2005-Tuesday-March")
    AreEqual(x.strftime("%Y%m%d"), '20050322')
    #timetuple
    AreEqual(datetime.date(2005, 3, 22).timetuple(), (2005, 3, 22, 0, 0, 0, 1, 81, -1))
    #toordinal
    AreEqual(datetime.date(2005, 3, 22).toordinal(), 732027)
    #weekday
    AreEqual(datetime.date(2005, 3, 22).weekday(), 1)
    #fromordinal
    x = datetime.date.fromordinal(1234567)
    AreEqual(x.year, 3381)
    AreEqual(x.month, 2)
    AreEqual(x.day, 16)
    #fromtimestamp
    x = datetime.date.fromtimestamp(1000000000.0)
    AreEqual(x.year, 2001)
    AreEqual(x.month, 9)
    AreEqual(x.day, 8)
    #max
    x = datetime.date.max
    AreEqual(x.year, 9999)
    AreEqual(x.month, 12)
    AreEqual(x.day, 31)
    #min
    x = datetime.date.min
    AreEqual(x.year, 1)
    AreEqual(x.month, 1)
    AreEqual(x.day, 1)
    #resolution
    AreEqual(repr(datetime.date.resolution), 'datetime.timedelta(1)')
    #today
    datetime.date.today()
    #boolean op
    Assert(datetime.date(2005, 3, 22))
def test_datetime():
x = datetime.datetime(2006,4,11,2,28,3,99,datetime.tzinfo())
AreEqual(x.year, 2006)
AreEqual(x.month, 4)
AreEqual(x.day, 11)
AreEqual(x.hour, 2)
AreEqual(x.minute, 28)
AreEqual(x.second, 3)
AreEqual(x.microsecond, 99)
datetime.datetime(2006,4,11)
datetime.datetime(2006,4,11,2)
datetime.datetime(2006,4,11,2,28)
datetime.datetime(2006,4,11,2,28,3)
datetime.datetime(2006,4,11,2,28,3,99)
datetime.datetime(2006,4,11,2,28,3,99, None)
datetime.datetime(2006,4,11,hour=2)
datetime.datetime(2006,4,11,hour=2,minute=28)
datetime.datetime(2006,4,11,hour=2,minute=28,second=3)
datetime.datetime(2006,4,11,hour=2,minute=28,second=3, microsecond=99)
datetime.datetime(2006,4,11,hour=2,minute=28,second=3, microsecond=99, tzinfo=None)
datetime.datetime(1, 1, 1, 0, 0, 0, 0) #min
datetime.datetime(9999, 12, 31, 23, 59, 59, 999999) #max
datetime.datetime(2004, 2, 29, 16, 20, 22, 262000) #leapyear
AssertError(ValueError, datetime.datetime, 2006, 2, 29, 16, 20, 22, 262000) #bad leapyear
AssertError(ValueError, datetime.datetime, 2006, 9, 31, 16, 20, 22, 262000) #bad number of days
AssertError(ValueError, datetime.datetime, 0, 1, 1, 0, 0, 0, 0)
AssertError(ValueError, datetime.datetime, 1, 0, 1, 0, 0, 0, 0)
AssertError(ValueError, datetime.datetime, 1, 1, 0, 0, 0, 0, 0)
AssertError(ValueError, datetime.datetime, -1, 1, 1, 0, 0, 0, 0)
AssertError(ValueError, datetime.datetime, 1, -1, 1, 0, 0, 0, 0)
AssertError(ValueError, datetime.datetime, 1, 1, -1, 0, 0, 0, 0)
AssertError(ValueError, datetime.datetime, 1, 1, 1, -1, 0, 0, 0)
AssertError(ValueError, datetime.datetime, 1, 1, 1, 0, -1, 0, 0)
AssertError(ValueError, datetime.datetime, 1, 1, 1, 0, 0, -1, 0)
AssertError(ValueError, datetime.datetime, 1, 1, 1, 0, 0, 0, -1)
AssertError(ValueError, datetime.datetime, -10, -10, -10, -10, -10, -10, -10)
AssertError(ValueError, datetime.datetime, 10000, 12, 31, 23, 59, 59, 999999)
AssertError(ValueError, datetime.datetime, 9999, 13, 31, 23, 59, 59, 999999)
AssertError(ValueError, datetime.datetime, 9999, 12, 32, 23, 59, 59, 999999)
AssertError(ValueError, datetime.datetime, 9999, 12, 31, 24, 59, 59, 999999)
AssertError(ValueError, datetime.datetime, 9999, 12, 31, 23, 60, 59, 999999)
AssertError(ValueError, datetime.datetime, 9999, 12, 31, 23, 59, 60, 999999)
AssertError(ValueError, datetime.datetime, 9999, 12, 31, 23, 59, 59, 1000000)
AssertError(ValueError, datetime.datetime, 10000, 13, 32, 24, 60, 60, 1000000)
AssertError(ValueError, datetime.datetime, 100000, 130, 320, 240, 600, 600, 10000000)
AssertError(TypeError, datetime.datetime, 2006, 4, 11, 2, 28, 3, 99, 1)
#--------------------------------------------------------------------------
#--Test subtraction datetime
test_data = { ((2006, 9, 29, 15, 37, 28, 686000), (2006, 9, 29, 15, 37, 28, 686000)) : ((0, 0, 0),(0, 0, 0)),
((2006, 9, 29, 15, 37, 28, 686000), (2007, 9, 29, 15, 37, 28, 686000)) : ((365, 0, 0),(-365, 0, 0)),
((2006, 9, 29, 15, 37, 28, 686000), (2006,10, 29, 15, 37, 28, 686000)) : ((30, 0, 0),(-30, 0, 0)),
((2006, 9, 29, 15, 37, 28, 686000), (2006, 9, 30, 15, 37, 28, 686000)) : ((1, 0, 0),(-1, 0, 0)),
((2006, 9, 29, 15, 37, 28, 686000), (2006, 9, 29, 16, 37, 28, 686000)) : ((0, 3600, 0),(-1, 82800, 0)),
((2006, 9, 29, 15, 37, 28, 686000), (2006, 9, 29, 15, 38, 28, 686000)) : ((0, 60, 0),(-1, 86340, 0)),
((2006, 9, 29, 15, 37, 28, 686000), (2006, 9, 29, 15, 37, 29, 686000)) : ((0, 1, 0),(-1, 86399, 0)),
((2006, 9, 29, 15, 37, 28, 686000), (2006, 9, 29, 15, 37, 28, 686001)) : ((0, 0, 1),(-1, 86399, 999999)),
((1, 1, 1, 0, 0, 0, 0), (1, 1, 1, 0, 0, 0, 0)) : ((0, 0, 0),(0, 0, 0)),
((9999, 12, 31, 23, 59, 59, 999999), (9999, 12, 31, 23, 59, 59, 999999)) : ((0, 0, 0),(0, 0, 0))
}
for key, (value0, value1) in test_data.iteritems():
dt1 = datetime.datetime(*key[1])
dt0 = datetime.datetime(*key[0])
x = dt1 - dt0
AreEqual(x.days, value0[0])
AreEqual(x.seconds, value0[1])
AreEqual(x.microseconds, value0[2])
y = dt0 - dt1
AreEqual(y.days, value1[0])
AreEqual(y.seconds, value1[1])
AreEqual(y.microseconds, value1[2])
#--------------------------------------------------------------------------
#--Test subtraction - timedelta
test_data = { ((2006, 9, 29, 15, 37, 28, 686000), (0, 0, 0)) : (2006, 9, 29, 15, 37, 28, 686000),
((2006, 9, 29, 15, 37, 28, 686000), (1, 0, 0)) : (2006, 9, 28, 15, 37, 28, 686000),
((2006, 9, 29, 15, 37, 28, 686000), (0, 1, 0)) : (2006, 9, 29, 15, 37, 27, 686000),
((2006, 9, 29, 15, 37, 28, 686000), (0, 0, 1)) : (2006, 9, 29, 15, 37, 28, 685999),
((2006, 9, 29, 15, 37, 28, 686000), (1, 1, 1)) : (2006, 9, 28, 15, 37, 27, 685999),
((2006, 9, 29, 15, 37, 28, 686000), (-1, 0, 0)) : (2006, 9, 30, 15, 37, 28, 686000),
((2006, 9, 29, 15, 37, 28, 686000), (0, -1, 0)) : (2006, 9, 29, 15, 37, 29, 686000),
((2006, 9, 29, 15, 37, 28, 686000), (0, 0, -1)) : (2006, 9, 29, 15, 37, 28, 686001),
((2006, 9, 29, 15, 37, 28, 686000), (-1, -1, -1)) : (2006, 9, 30, 15, 37, 29, 686001),
((9999, 12, 31, 23, 59, 59, 999999), (1, 1, 1)) : (9999, 12, 30, 23, 59, 58, 999998),
((9999, 12, 31, 23, 59, 59, 999999), (9999*365, 0, 0)) : (7, 8, 21, 23, 59, 59, 999999),
((9999, 12, 31, 23, 59, 59, 999999), (0, 0, 0)) : (9999, 12, 31, 23, 59, 59, 999999),
}
for (dt1, td0), value in test_data.iteritems():
x = datetime.datetime(*dt1) - datetime.timedelta(*td0)
AreEqual(x.year,value[0])
AreEqual(x.month, value[1])
AreEqual(x.day, value[2])
AreEqual(x.hour, value[3])
AreEqual(x.minute, value[4])
AreEqual(x.second, value[5])
AreEqual(x.microsecond, value[6])
#--------------------------------------------------------------------------
#--Test addition
test_data = { ((2006, 9, 29, 15, 37, 28, 686000), (0, 0, 0)) : (2006, 9, 29, 15, 37, 28, 686000),
((2006, 9, 29, 15, 37, 28, 686000), (1, 0, 0)) : (2006, 9, 30, 15, 37, 28, 686000),
((2006, 9, 29, 15, 37, 28, 686000), (0, 1, 0)) : (2006, 9, 29, 15, 37, 29, 686000),
((2006, 9, 29, 15, 37, 28, 686000), (0, 0, 1)) : (2006, 9, 29, 15, 37, 28, 686001),
((2006, 9, 29, 15, 37, 28, 686000), (1, 1, 1)) : (2006, 9, 30, 15, 37, 29, 686001),
((2006, 9, 29, 15, 37, 28, 686000), (-1, 0, 0)) : (2006, 9, 28, 15, 37, 28, 686000),
((2006, 9, 29, 15, 37, 28, 686000), (0, -1, 0)) : (2006, 9, 29, 15, 37, 27, 686000),
((2006, 9, 29, 15, 37, 28, 686000), (0, 0, -1)) : (2006, 9, 29, 15, 37, 28, 685999),
((2006, 9, 29, 15, 37, 28, 686000), (-1, -1, -1)) : (2006, 9, 28, 15, 37, 27, 685999),
((9999, 12, 31, 23, 59, 59, 999999), (-1, -1, -1)) : (9999, 12, 30, 23, 59, 58, 999998),
((9999, 12, 31, 23, 59, 59, 999999), (-9999*365, 0, 0)) : (7, 8, 21, 23, 59, 59, 999999),
((9999, 12, 31, 23, 59, 59, 999999), (0, 0, 0)) : (9999, 12, 31, 23, 59, 59, 999999),
}
for (dt1, td0), value in test_data.iteritems():
x = datetime.datetime(*dt1) + datetime.timedelta(*td0)
AreEqual(x.year,value[0])
AreEqual(x.month, value[1])
AreEqual(x.day, value[2])
AreEqual(x.hour, value[3])
AreEqual(x.minute, value[4])
AreEqual(x.second, value[5])
AreEqual(x.microsecond, value[6])
#CodePlex Work Item 4861
x = datetime.timedelta(*td0) + datetime.datetime(*dt1)
AreEqual(x.year,value[0])
AreEqual(x.month, value[1])
AreEqual(x.day, value[2])
AreEqual(x.hour, value[3])
AreEqual(x.minute, value[4])
AreEqual(x.second, value[5])
AreEqual(x.microsecond, value[6])
#--------------------------------------------------------------------------
#today
t0 = datetime.datetime.today()
t1 = datetime.datetime.today()
AreEqual(type(t0), datetime.datetime)
Assert(t0<=t1)
#now
from time import sleep
t0 = datetime.datetime.now()
sleep(1)
t1 = datetime.datetime.now()
AreEqual(type(t0), datetime.datetime)
Assert(t1>t0)
datetime.datetime.now(None)
#now
t0 = datetime.datetime.utcnow()
sleep(1)
t1 = datetime.datetime.utcnow()
AreEqual(type(t0), datetime.datetime)
Assert(t1>t0)
#fromtimestamp
#Merlin Work Item 148717
x = datetime.datetime.fromtimestamp(1000000000.0)
AreEqual(x.year, 2001)
AreEqual(x.month, 9)
AreEqual(x.day, 8)
#CodePlex 4862
#AreEqual(x.hour, 18)
AreEqual(x.minute, 46)
AreEqual(x.second, 40)
x = datetime.datetime.fromtimestamp(1000000000)
AreEqual(x.year, 2001)
AreEqual(x.month, 9)
AreEqual(x.day, 8)
#CodePlex Work Item 4862
#AreEqual(x.hour, 18)
AreEqual(x.minute, 46)
AreEqual(x.second, 40)
#fromordinal
x = datetime.datetime.fromordinal(1234567)
AreEqual(x.year, 3381)
AreEqual(x.month, 2)
AreEqual(x.day, 16)
#fromtimestamp
x = datetime.datetime.utcfromtimestamp(1000000000.0)
AreEqual(x.year, 2001)
AreEqual(x.month, 9)
AreEqual(x.day, 9)
#CodePlex 4862
#AreEqual(x.hour, 1)
AreEqual(x.minute, 46)
AreEqual(x.second, 40)
#combine
x = datetime.datetime.combine(datetime.date(2005, 3, 22), datetime.time(2,28,3,99))
y = datetime.datetime(2005, 3, 22, 2, 28, 3, 99)
AreEqual(x, y)
#strptime - new for Python 2.5. No need to test
#min
x = datetime.datetime.min
AreEqual(x.year, datetime.MINYEAR)
AreEqual(x.month, 1)
AreEqual(x.day, 1)
AreEqual(x.hour, 0)
AreEqual(x.minute, 0)
AreEqual(x.second, 0)
AreEqual(x.microsecond, 0)
#CodePlex Work Item 4863
y = datetime.datetime(datetime.MINYEAR, 1, 1)
AreEqual(x, y)
#max
#CodePlex Work Item 4864
x = datetime.datetime.max
AreEqual(x.year, datetime.MAXYEAR)
AreEqual(x.month, 12)
AreEqual(x.day, 31)
AreEqual(x.hour, 23)
AreEqual(x.minute, 59)
AreEqual(x.second, 59)
AreEqual(x.microsecond, 999999)
#CodePlex Work Item 4865
y = datetime.datetime(datetime.MAXYEAR, 12, 31, 23, 59, 59, 999999, None)
AreEqual(x, y)
#resolution
#CodePlex Work Item 4866
x = datetime.datetime.resolution
AreEqual(x.days, 0)
AreEqual(x.seconds, 0)
AreEqual(x.microseconds, 1)
#equality
x = datetime.datetime(2005, 3, 22)
y = x
Assert(x==y)
y = datetime.datetime(2005, 3, 23)
Assert(not(x==y))
Assert(x==datetime.datetime(2005, 3, 22))
#inequality
x = datetime.datetime(2005, 3, 22)
y = None
Assert(x!=y)
y = datetime.datetime(2005, 3, 23)
Assert(x!=y)
#ge/le/gt/lt/eq/ne
#CodePlex Work Item 4860
#x_args = [2006, 9, 29, 15, 37, 28, 686000]
x_args = [2006, 9, 29, 15, 37, 28]
x = datetime.datetime(*x_args)
for i in range(len(x_args)):
y_args = x_args[0:]
y_args [i] = y_args[i]+1
y = datetime.datetime(*y_args)
x_copy = datetime.datetime(*x_args)
Assert(y>=x)
Assert(x<=y)
Assert(x<y)
Assert(y>x)
Assert(not x>y)
Assert(not x>y)
Assert(not x==y)
Assert(x!=y)
Assert(x==x_copy)
Assert(x_copy >= x)
Assert(x_copy <= x)
#date and time
date = datetime.date(2005, 3, 23)
time = datetime.time(2,28,3,99)
x = datetime.datetime(2005, 3, 23, 2, 28, 3, 99)
AreEqual(x.date(), date)
#CodePlex Work Item 4860
AreEqual(x.time(), time)
#timetz
#CodePlex Work Item 4860
x = datetime.datetime(2005, 3, 23, 2, 28, 3, 99)
time = datetime.time(2,28,3,99)
AreEqual(x.timetz(), time)
#replace
x = datetime.datetime(2005, 3, 23, 2, 28, 3, 99)
y = datetime.datetime(2005, 3, 23, 2, 28, 3, 99)
z = x.replace(year=1000)
AreEqual(y, x)
AreEqual(z.year, 1000)
z = x.replace(month=7)
AreEqual(y, x)
AreEqual(z.month, 7)
z = x.replace(day=12)
AreEqual(y, x)
AreEqual(z.day, 12)
z = x.replace(hour=3)
AreEqual(y, x)
AreEqual(z.hour, 3)
z = x.replace(minute=5)
AreEqual(y, x)
AreEqual(z.minute, 5)
z = x.replace(second=25)
AreEqual(y, x)
AreEqual(z.second, 25)
#CodePlex Work Item 4860
z = x.replace(microsecond=250)
AreEqual(y, x)
AreEqual(z.microsecond, 250)
z = x.replace(year=1000, month=7, day=12, hour=3, minute=5, second=25, microsecond=250)
AreEqual(y, x)
AreEqual(z.year, 1000)
AreEqual(z.month, 7)
AreEqual(z.day, 12)
AreEqual(z.hour, 3)
AreEqual(z.minute, 5)
AreEqual(z.second, 25)
#CodePlex Work Item 4860
AreEqual(z.microsecond, 250)
#astimezone
#TODO
#x = datetime.datetime(2005, 3, 23, 2, 28, 3, 99)
#x.astimezone(...)
#utcoffset
x = datetime.datetime(2005, 3, 23, 2,28,3,99,None)
AreEqual(x.utcoffset(), None)
#dst
x = datetime.datetime(2005, 3, 23,2,28,3,99,None)
AreEqual(x.dst(), None)
#tzname
x = datetime.datetime(2005, 3, 23, 2,28,3,99,None)
AreEqual(x.tzname(), None)
#timetuple
AreEqual(datetime.datetime(2005, 3, 23,2,28,3,99,None).timetuple(), (2005, 3, 23, 2, 28, 3, 2, 82, -1))
#utctimetuple
AreEqual(datetime.datetime(2005, 3, 23,2,28,3,99,None).utctimetuple(), (2005, 3, 23, 2, 28, 3, 2, 82, 0))
#toordinal
AreEqual(datetime.datetime(2005, 3, 23,2,28,3,99,None).toordinal(), 732028)
#weekday
AreEqual(datetime.datetime(2005, 3, 23,2,28,3,99,None).weekday(), 2)
#isocalendar
x = datetime.datetime(2005, 3, 22,2,28,3,99,None).isocalendar()
AreEqual(x[0], 2005)
AreEqual(x[1], 12)
AreEqual(x[2], 2)
#isoformat
x = datetime.datetime(2005, 3, 22, 2,28,3,99,None).isoformat()
AreEqual(x, '2005-03-22T02:28:03.000099')
#isoweekday
AreEqual(datetime.datetime(2005, 3, 22, 2,28,3,99,None).isoweekday(), 2)
#ctime
AreEqual(datetime.datetime(2005, 3, 22, 2,28,3,99,None).ctime(), 'Tue Mar 22 02:28:03 2005')
#strftime
x = datetime.datetime(2005, 3, 22, 2,28,3,99,None)
AreEqual(x.strftime("%y-%a-%b"), "05-Tue-Mar")
AreEqual(x.strftime("%Y-%A-%B"), "2005-Tuesday-March")
AreEqual(x.strftime("%Y%m%d"), '20050322')
AreEqual(x.strftime("%I:%M:%S"), "02:28:03")
#Similar to Merlin Work Item 148470
AreEqual(x.strftime("%H:%M:%S"), "02:28:03")
#Similar to Merlin Work Item 148470
if not is_silverlight:
AreEqual(x.strftime("%a %A %b %B %c %d %H %I %j %m %M %p %S %U %w %W %x %X %y %Y %Z %%"), "Tue Tuesday Mar March 03/22/05 02:28:03 22 02 02 081 03 28 AM 03 12 2 12 03/22/05 02:28:03 05 2005 %")
def test_timedelta():
    """Exercise datetime.timedelta end to end: construction (positional,
    keyword, float and negative components), normalization, the
    min/max/resolution class attributes, overflow limits, arithmetic,
    comparison operators, hashing, and truth value.

    Note: timedelta normalizes so that 0 <= seconds < 24*3600 and
    0 <= microseconds < 10**6; only days may be negative. Fractional
    seconds/microseconds are rounded to the nearest whole microsecond.
    """
    #CodePlex Work Item 4871
    # weeks/hours/minutes/milliseconds keyword positions exercised
    # positionally: (days, seconds, microseconds, milliseconds, minutes,
    # hours, weeks) all fold into (days, seconds, microseconds).
    x = datetime.timedelta(1, 2, 3, 4, 5, 6, 7)
    AreEqual(x.days, 50)
    AreEqual(x.seconds, 21902)
    AreEqual(x.microseconds, 4003)

    x = datetime.timedelta(days=1, seconds=2, microseconds=3)
    AreEqual(x.days, 1)
    AreEqual(x.seconds, 2)
    AreEqual(x.microseconds, 3)

    x = datetime.timedelta(1, 2, 3)
    AreEqual(x.days, 1)
    AreEqual(x.seconds, 2)
    AreEqual(x.microseconds, 3)

    x = datetime.timedelta(1, 2)
    AreEqual(x.days, 1)
    AreEqual(x.seconds, 2)
    AreEqual(x.microseconds, 0)

    # fractional microseconds round to the nearest integer microsecond
    x = datetime.timedelta(1, 2, 3.14)
    AreEqual(x.days, 1)
    AreEqual(x.seconds, 2)
    AreEqual(x.microseconds, 3)

    #CodePlex WorkItem 5132
    x = datetime.timedelta(1, 2, 3.74)
    AreEqual(x.days, 1)
    AreEqual(x.seconds, 2)
    AreEqual(x.microseconds, 4)

    #CodePlex Work Item 5149
    # fractional seconds contribute to microseconds before rounding
    x = datetime.timedelta(1, 2.0000003, 3.33)
    AreEqual(x.days, 1)
    AreEqual(x.seconds, 2)
    AreEqual(x.microseconds, 4)

    #CodePlex WorkItem 5133
    # negative values normalize: borrow from days
    x = datetime.timedelta(microseconds=-1)
    AreEqual(x.days, -1)
    AreEqual(x.seconds, 86399)
    AreEqual(x.microseconds, 999999)

    #CodePlex Work Item 5136
    datetime.timedelta(-99999999, 0, 0) #min
    datetime.timedelta(0, 0, 0)
    #CodePlex Work Item 5136
    datetime.timedelta(999999999, 3600*24-1, 1000000-1) #max
    AssertError(OverflowError, datetime.timedelta, -1000000000, 0, 0)

    #CodePlex WorkItem 5133
    x = datetime.timedelta(0, -1, 0)
    AreEqual(x.days, -1)
    AreEqual(x.seconds, 3600*24-1)
    AreEqual(x.microseconds, 0)

    x = datetime.timedelta(0, 0, -1)
    AreEqual(x.days, -1)
    AreEqual(x.seconds, 3600*24-1)
    AreEqual(x.microseconds, 999999)

    # anything past +/-999999999 days overflows
    AssertError(OverflowError, datetime.timedelta, 1000000000, 0, 0)
    AssertError(OverflowError, datetime.timedelta, 999999999, 3600*24, 0)
    AssertError(OverflowError, datetime.timedelta, 999999999, 3600*24-1, 1000000)

    #min
    x = datetime.timedelta.min
    AreEqual(x.days, -999999999)
    AreEqual(x.seconds, 0)
    AreEqual(x.microseconds, 0)

    #max
    x = datetime.timedelta.max
    AreEqual(x.days, 999999999)
    AreEqual(x.seconds, 3600*24-1)
    AreEqual(x.microseconds, 999999)

    #CodePlex Work Item 5136
    x = datetime.timedelta.resolution
    AreEqual(x.days, 0)
    AreEqual(x.seconds, 0)
    AreEqual(x.microseconds, 1)

    #--------------------------------------------------------------------------
    #--Test addition
    # key: ((days, secs, usecs), (days, secs, usecs)); value: expected sum
    test_data = { ((37, 28, 686000), (37, 28, 686000)) : (74, 57, 372000),
                  ((37, 28, 686000), (38, 28, 686000)) : (75, 57, 372000),
                  ((37, 28, 686000), (37, 29, 686000)) : (74, 58, 372000),
                  ((37, 28, 686000), (37, 28, 686001)) : (74, 57, 372001),
                  ((0, 0, 0), (0, 0, 0)) : (0, 0, 0),
                  #Related to CodePlex Work Item 5135
                  ((999999999, 0, 0), (0, 0, 0)) : (999999999, 0, 0),
                  ((37, 28, 686000), (-1, -1, -1)) : (36, 27, 685999),
                  ((-1, -1, -1), (37, 28, 686000)) : (36, 27, 685999),
                  }
    for key, value0 in test_data.iteritems():
        dt1 = datetime.timedelta(*key[1])
        dt0 = datetime.timedelta(*key[0])
        x = dt1 + dt0
        AreEqual(x.days, value0[0])
        AreEqual(x.seconds, value0[1])
        AreEqual(x.microseconds, value0[2])

    #--------------------------------------------------------------------------
    #--Test subtraction
    # value is a pair: (key[1]-key[0], key[0]-key[1])
    test_data = { ((37, 28, 686000), (37, 28, 686000)) : ((0, 0, 0),(0, 0, 0)),
                  ((37, 28, 686000), (38, 28, 686000)) : ((1, 0, 0),(-1, 0, 0)),
                  ((37, 28, 686000), (-1, -1, -1)) : ((-39, 86370, 313999),(38, 29, 686001)),
                  ((37, 28, 686000), (37, 29, 686000)) : ((0, 1, 0),(-1, 86399, 0)),
                  ((37, 28, 686000), (37, 28, 686001)) : ((0, 0, 1),(-1, 86399, 999999)),
                  ((0, 0, 0), (0, 0, 0)) : ((0, 0, 0),(0, 0, 0)),
                  #CodePlex Work Item 5135
                  ((999999999, 0, 0), (999999999, 0, 0)) : ((0, 0, 0),(0, 0, 0))
                  }
    for key, (value0, value1) in test_data.iteritems():
        dt1 = datetime.timedelta(*key[1])
        dt0 = datetime.timedelta(*key[0])
        x = dt1 - dt0
        AreEqual(x.days, value0[0])
        AreEqual(x.seconds, value0[1])
        AreEqual(x.microseconds, value0[2])
        y = dt0 - dt1
        AreEqual(y.days, value1[0])
        AreEqual(y.seconds, value1[1])
        AreEqual(y.microseconds, value1[2])

    #multiply
    x = datetime.timedelta(37, 28, 686000) * 12
    AreEqual(x.days, 444)
    AreEqual(x.seconds, 344)
    AreEqual(x.microseconds, 232000)

    #division
    x = datetime.timedelta(37, 28, 686000) // 3
    AreEqual(x.days, 12)
    AreEqual(x.seconds, 3600*8+9)
    AreEqual(x.microseconds, 562000)

    #+
    x = datetime.timedelta(37, 28, 686000)
    y = +x
    Assert(y==x)

    #-
    x = datetime.timedelta(37, 28, 686000)
    y = -x
    AreEqual(y.days, -38)
    AreEqual(y.seconds, 86371)
    AreEqual(y.microseconds, 314000)

    #absolute
    x = datetime.timedelta(37, 28, 686000)
    AreEqual(abs(x), x)
    y = datetime.timedelta(-1, 0, 0)
    AreEqual(abs(y), datetime.timedelta(1, 0, 0))

    #equality
    x = datetime.timedelta(1, 2, 33)
    y = x
    Assert(x==y)
    y = datetime.timedelta(1, 2, 34)
    Assert(not(x==y))
    Assert(x==datetime.timedelta(1, 2, 33))

    #inequality
    x = datetime.timedelta(1, 2, 33)
    y = None
    Assert(x!=y)
    y = datetime.timedelta(1, 2, 34)
    Assert(x!=y)

    #ge
    Assert(datetime.timedelta(1, 2, 33) >= datetime.timedelta(1, 2, 33))
    Assert(datetime.timedelta(1, 2, 34) >= datetime.timedelta(1, 2, 33))
    Assert(datetime.timedelta(1, 2, 35) >= datetime.timedelta(1, 2, 33))
    Assert(not (datetime.timedelta(1, 2, 32) >= datetime.timedelta(1, 2, 33)))
    Assert(not (datetime.timedelta(1, 2, 31) >= datetime.timedelta(1, 2, 33)))

    #le
    Assert(datetime.timedelta(1, 2, 33) <= datetime.timedelta(1, 2, 33))
    Assert(datetime.timedelta(1, 2, 33) <= datetime.timedelta(1, 2, 34))
    Assert(datetime.timedelta(1, 2, 33) <= datetime.timedelta(1, 2, 35))
    Assert(not (datetime.timedelta(1, 2, 33) <= datetime.timedelta(1, 2, 32)))
    Assert(not (datetime.timedelta(1, 2, 33) <= datetime.timedelta(1, 2, 31)))

    #gt
    Assert(not (datetime.timedelta(1, 2, 33) > datetime.timedelta(1, 2, 33)))
    Assert(datetime.timedelta(1, 2, 34) > datetime.timedelta(1, 2, 33))
    Assert(datetime.timedelta(1, 2, 35) > datetime.timedelta(1, 2, 33))
    Assert(not (datetime.timedelta(1, 2, 32) > datetime.timedelta(1, 2, 33)))
    Assert(not (datetime.timedelta(1, 2, 31) > datetime.timedelta(1, 2, 33)))

    #lt
    Assert(not(datetime.timedelta(1, 2, 33) < datetime.timedelta(1, 2, 33)))
    Assert(datetime.timedelta(1, 2, 33) < datetime.timedelta(1, 2, 34))
    Assert(datetime.timedelta(1, 2, 33) < datetime.timedelta(1, 2, 35))
    Assert(not (datetime.timedelta(1, 2, 33) < datetime.timedelta(1, 2, 32)))
    Assert(not (datetime.timedelta(1, 2, 33) < datetime.timedelta(1, 2, 31)))

    #hash
    x = datetime.timedelta(1, 2, 33)
    Assert(x.__hash__()!=None)

    #bool -- a zero delta is falsy, any non-zero delta is truthy
    Assert(datetime.timedelta(0, 0, 1))
    Assert(not(datetime.timedelta(0, 0, 0)))
def test_time():
    """Exercise datetime.time: construction with decreasing argument counts,
    range validation, min/max/resolution class attributes, comparison
    operators, hashing, truth value, replace(), and formatting helpers.
    """
    #basic sanity checks
    x = datetime.time(2,28,3,99,datetime.tzinfo())
    AreEqual(x.hour, 2)
    AreEqual(x.minute, 28)
    AreEqual(x.second, 3)
    AreEqual(x.microsecond, 99)

    x = datetime.time(2,28,3,99,None)
    AreEqual(x.hour, 2)
    AreEqual(x.minute, 28)
    AreEqual(x.second, 3)
    AreEqual(x.microsecond, 99)

    x = datetime.time(2,28,3,99)
    AreEqual(x.hour, 2)
    AreEqual(x.minute, 28)
    AreEqual(x.second, 3)
    AreEqual(x.microsecond, 99)

    x = datetime.time(2,28,3)
    AreEqual(x.hour, 2)
    AreEqual(x.minute, 28)
    AreEqual(x.second, 3)
    AreEqual(x.microsecond, 0)

    x = datetime.time(2,28)
    AreEqual(x.hour, 2)
    AreEqual(x.minute, 28)
    AreEqual(x.second, 0)
    AreEqual(x.microsecond, 0)

    x = datetime.time(2)
    AreEqual(x.hour, 2)
    AreEqual(x.minute, 0)
    AreEqual(x.second, 0)
    AreEqual(x.microsecond, 0)

    x = datetime.time()
    AreEqual(x.hour, 0)
    AreEqual(x.minute, 0)
    AreEqual(x.second, 0)
    AreEqual(x.microsecond, 0)

    # boundary values must be accepted without raising
    datetime.time(0, 0, 0, 0) #min
    datetime.time(23, 59, 59, 999999) #max

    #negative cases
    #CodePlex Work Item 5143
    AssertError(ValueError, datetime.time, -1, 0, 0, 0)
    AssertError(ValueError, datetime.time, 0, -1, 0, 0)
    AssertError(ValueError, datetime.time, 0, 0, -1, 0)
    AssertError(ValueError, datetime.time, 0, 0, 0, -1)
    AssertError(ValueError, datetime.time, -10, -10, -10, -10)

    #CodePlex Work Item 5143
    # each field one past its maximum must raise
    AssertError(ValueError, datetime.time, 24, 59, 59, 999999)
    AssertError(ValueError, datetime.time, 23, 60, 59, 999999)
    AssertError(ValueError, datetime.time, 23, 59, 60, 999999)
    AssertError(ValueError, datetime.time, 23, 59, 59, 1000000)
    AssertError(ValueError, datetime.time, 24, 60, 60, 1000000)
    AssertError(ValueError, datetime.time, 240, 600, 600, 10000000)

    #min
    x = datetime.time.min
    AreEqual(x.hour, 0)
    AreEqual(x.minute, 0)
    AreEqual(x.second, 0)
    AreEqual(x.microsecond, 0)

    #max
    x = datetime.time.max
    AreEqual(x.hour, 23)
    AreEqual(x.minute, 59)
    AreEqual(x.second, 59)
    AreEqual(x.microsecond, 999999)

    #resolution
    #CodePlex Work Item 5145
    x = datetime.time.resolution
    AreEqual(repr(x), 'datetime.timedelta(0, 0, 1)')

    #equality
    x = datetime.time(1, 2, 33, 444)
    y = x
    Assert(x==y)
    y = datetime.time(1, 2, 33, 445)
    Assert(not(x==y))
    Assert(x==datetime.time(1, 2, 33, 444))

    #inequality
    x = datetime.time(1, 2, 33, 444)
    y = None
    Assert(x!=y)
    y = datetime.time(1, 2, 33, 445)
    Assert(x!=y)

    #ge
    Assert(datetime.time(1, 2, 33, 444) >= datetime.time(1, 2, 33, 444))
    Assert(datetime.time(1, 2, 33, 445) >= datetime.time(1, 2, 33, 444))
    Assert(datetime.time(1, 2, 33, 446) >= datetime.time(1, 2, 33, 444))
    Assert(not (datetime.time(1, 2, 33, 443) >= datetime.time(1, 2, 33, 444)))
    Assert(not (datetime.time(1, 2, 33, 442) >= datetime.time(1, 2, 33, 444)))

    #le
    Assert(datetime.time(1, 2, 33, 444) <= datetime.time(1, 2, 33, 444))
    Assert(datetime.time(1, 2, 33, 444) <= datetime.time(1, 2, 33, 445))
    Assert(datetime.time(1, 2, 33, 444) <= datetime.time(1, 2, 33, 446))
    Assert(not (datetime.time(1, 2, 33, 444) <= datetime.time(1, 2, 33, 443)))
    Assert(not (datetime.time(1, 2, 33, 444) <= datetime.time(1, 2, 33, 442)))

    #gt
    Assert(not (datetime.time(1, 2, 33, 444) > datetime.time(1, 2, 33, 444)))
    Assert(datetime.time(1, 2, 33, 445) > datetime.time(1, 2, 33, 444))
    Assert(datetime.time(1, 2, 33, 446) > datetime.time(1, 2, 33, 444))
    Assert(not (datetime.time(1, 2, 33, 443) > datetime.time(1, 2, 33, 444)))
    Assert(not (datetime.time(1, 2, 33, 442) > datetime.time(1, 2, 33, 444)))

    #lt
    Assert(not(datetime.time(1, 2, 33, 444) < datetime.time(1, 2, 33, 444)))
    Assert(datetime.time(1, 2, 33, 444) < datetime.time(1, 2, 33, 445))
    Assert(datetime.time(1, 2, 33, 444) < datetime.time(1, 2, 33, 446))
    Assert(not (datetime.time(1, 2, 33, 444) < datetime.time(1, 2, 33, 443)))
    Assert(not (datetime.time(1, 2, 33, 444) < datetime.time(1, 2, 33, 442)))

    #hash
    x = datetime.time(1, 2, 33, 444)
    Assert(x.__hash__()!=None)

    #bool
    Assert(datetime.time(0, 0, 0, 1))
    #CodePlex Work Item 5139
    Assert(not (datetime.time(0, 0, 0, 0)))

    #replace -- returns a modified copy, leaving the original untouched
    x = datetime.time(1, 2, 33, 444)
    y = datetime.time(1, 2, 33, 444)

    z = x.replace(hour=3)
    AreEqual(y, x)
    AreEqual(z.hour, 3)

    z = x.replace(minute=5)
    AreEqual(y, x)
    AreEqual(z.minute, 5)

    z = x.replace(second=25)
    AreEqual(y, x)
    AreEqual(z.second, 25)

    z = x.replace(microsecond=250)
    AreEqual(y, x)
    AreEqual(z.microsecond, 250)

    z = x.replace(hour=3, minute=5, second=25, microsecond=250)
    AreEqual(y, x)
    AreEqual(z.hour, 3)
    AreEqual(z.minute, 5)
    AreEqual(z.second, 25)
    AreEqual(z.microsecond, 250)

    #isoformat
    #CodePlex Work Item 5146
    x = datetime.time(2,28,3,99).isoformat()
    AreEqual(x, '02:28:03.000099')

    #strftime -- %I is 12-hour clock, %H is 24-hour clock
    x = datetime.time(13,28,3,99)
    AreEqual(x.strftime("%I:%M:%S"), "01:28:03")
    AreEqual(x.strftime("%H:%M:%S"), "13:28:03")

    #utcoffset
    #CodePlex Work Item 5147
    x = datetime.time(2,28,3,99,None)
    AreEqual(x.utcoffset(), None)

    #dst
    x = datetime.time(2,28,3,99,None)
    AreEqual(x.dst(), None)

    #tzname
    #CodePlex Work Item 5148
    x = datetime.time(2,28,3,99,None)
    AreEqual(x.tzname(), None)
@skip("win32")
def test_tzinfo():
    """The datetime.tzinfo base class is abstract: every interface method
    raises NotImplementedError until overridden by a subclass."""
    base = datetime.tzinfo()
    for method in (base.utcoffset, base.dst, base.tzname, base.fromutc):
        AssertError(NotImplementedError, method, None)
def test_invariant():
    """Algebraic invariants of datetime arithmetic: adding then subtracting
    the same timedelta is an identity, and n * delta equals delta summed
    n times, across several orders of magnitude."""
    for usec in range(0, 10000, 99):
        moment = datetime.datetime.now()
        offset = datetime.timedelta(microseconds=usec)
        AreEqual(moment, moment + offset - offset)
        AreEqual(moment, -offset + moment + offset)

    power = 1
    for _ in range(10):
        power = 10 * power
        for adjust in [-9, -1, 0, 1, 49, 99]:
            offset = datetime.timedelta(microseconds= power + adjust)
            AreEqual(offset * 2, offset + offset)
            offset = datetime.timedelta(microseconds= -power - adjust)
            AreEqual(offset * 3, offset + offset + offset)
@skip("win32")
def test_cli_interop():
    """IronPython interop: a Python datetime can be constructed directly
    from a .NET System.DateTime, preserving year/month/day."""
    import System
    # Capture a single System.DateTime and build the Python datetime from
    # that same value.  The original code called System.DateTime.Now a
    # second time inside the constructor, so the two values could straddle
    # a day boundary and make the comparison flaky.
    now = System.DateTime.Now
    pyNow = datetime.datetime(now)
    AreEqual(pyNow.month, now.Month)
    AreEqual(pyNow.day, now.Day)
    AreEqual(pyNow.year, now.Year)
def test_cp13704():
    '''
    Verify the exact TypeError messages raised for wrong datetime.datetime
    arities (CodePlex 13704).  CPython and IronPython word the "too few
    arguments" errors differently, hence the branch on is_cli/is_silverlight.

    TODO:
    - extend this for datetime.date, datetime.time, etc
    - splatted args, keyword args, etc
    '''
    # datetime.datetime requires at least (year, month, day)
    min_args = 3
    if not (is_cli or is_silverlight):
        # CPython wording
        AssertErrorWithMessage(TypeError, "Required argument 'year' (pos 1) not found",
                               datetime.datetime)
        AssertErrorWithMessage(TypeError, "an integer is required",
                               datetime.datetime, None, None)
        AssertErrorWithMessage(TypeError, "function takes at most 8 arguments (9 given)",
                               datetime.datetime, None, None, None, None, None, None, None, None, None)
        AssertErrorWithMessage(TypeError, "function takes at most 8 arguments (10 given)",
                               datetime.datetime, None, None, None, None, None, None, None, None, None, None)
    else: #http://ironpython.codeplex.com/WorkItem/View.aspx?WorkItemId=13704
        # IronPython wording
        AssertErrorWithMessage(TypeError, "function takes at least %d arguments (0 given)" % min_args,
                               datetime.datetime)
        AssertErrorWithMessage(TypeError, "function takes at least %d arguments (2 given)" % min_args,
                               datetime.datetime, None, None)
        AssertErrorWithMessage(TypeError, "function takes at most 8 arguments (9 given)",
                               datetime.datetime, None, None, None, None, None, None, None, None, None)
        AssertErrorWithMessage(TypeError, "function takes at most 8 arguments (10 given)",
                               datetime.datetime, None, None, None, None, None, None, None, None, None, None)
def test_pickle():
    """A datetime.datetime instance survives a cPickle round-trip intact."""
    import cPickle
    original = datetime.datetime.now()
    restored = cPickle.loads(cPickle.dumps(original))
    AreEqual(original, restored)
@skip("silverlight")
def test_datetime_datetime_pickled_by_cpy():
    """Interop regression test (CodePlex 18666): pickles produced by CPython
    must unpickle to equal datetime values here."""
    import cPickle
    # A naive datetime pickled by CPython and stored as a fixture file.
    with open(r"pickles\cp18666.pickle", "rb") as f:
        expected_dt = datetime.datetime(2009, 8, 6, 8, 42, 38, 196000)
        pickled_cpy_dt = cPickle.load(f)
        AreEqual(expected_dt, pickled_cpy_dt)
    # A tz-aware datetime whose pickle payload (protocol-0 text, embedded
    # below) references this module's mytzinfo class by name.
    expected_dt = datetime.datetime(2009, 8, 6, 8, 42, 38, 196000, mytzinfo())
    pickled_cpy_dt = cPickle.loads("cdatetime\ndatetime\np1\n(S'\\x07\\xd9\\x01\\x02\\x03\\x04\\x05\\x00\\x00\\x06'\nc" + __name__ + "\nmytzinfo\np2\n(tRp3\ntRp4\n.")
    AreEqual(expected_dt.tzinfo, pickled_cpy_dt.tzinfo)
class mytzinfo(datetime.tzinfo):
    """Trivial tzinfo used by the repr and pickle tests; two instances
    compare equal exactly when they are of the same class."""

    def __repr__(self):
        return 'hello'

    def __eq__(self, other):
        return type(self) == type(other)
def test_datetime_repr():
    """repr() of a tz-aware datetime embeds the tzinfo's own repr after
    the 'tzinfo=' keyword."""
    dt = datetime.datetime(2009, 1, 2, 3, 4, 5, 6, mytzinfo())
    expected = "datetime.datetime(2009, 1, 2, 3, 4, 5, 6, tzinfo=hello)"
    AreEqual(repr(dt), expected)
#--MAIN------------------------------------------------------------------------
# Hand control to the IronPython test driver, which discovers and runs
# every test_* function defined above.
run_test(__name__)
| tempbottle/ironpython3 | Tests/modules/misc/datetime_test.py | Python | apache-2.0 | 40,263 |
import logging
from sqp_project import settings
# Route every message at DEBUG level and above to the project-wide log
# file named by settings.LOG_FILENAME.
logging.basicConfig(filename=settings.LOG_FILENAME,level=logging.DEBUG,)
logging.debug('Started logging.')
| recsm/SQP | sqp/log.py | Python | mit | 158 |
import sys
import platform
from numpy.testing import *
import numpy.core.umath as ncu
import numpy as np
# TODO: branch cuts (use Pauli code)
# TODO: conj 'symmetry'
# TODO: FPU exceptions
# At least on Windows the results of many complex functions are not conforming
# to the C99 standard. See ticket 1574.
# Ditto for Solaris (ticket 1642) and OS X on PowerPC.
# Probe two representative special values to decide whether this platform's
# complex math follows C99 Annex G; FP warnings are silenced while probing
# and the previous error state is always restored.
olderr = np.seterr(all='ignore')
try:
    functions_seem_flaky = ((np.exp(complex(np.inf, 0)).imag != 0)
                            or (np.log(complex(np.NZERO, 0)).imag != np.pi))
finally:
    np.seterr(**olderr)
# TODO: replace with a check on whether platform-provided C99 funcs are used
skip_complex_tests = (not sys.platform.startswith('linux') or functions_seem_flaky)
def platform_skip(func):
    """Decorator: skip *func* when the platform's complex math does not
    follow C99 Annex G (see skip_complex_tests above).

    Note: the original message was built from adjacent string literals with
    no separating spaces, producing "yourplatform's" and "accordingto";
    the spaces are restored here.
    """
    return dec.skipif(skip_complex_tests,
        "Numpy is using complex functions (e.g. sqrt) provided by your "
        "platform's C library. However, they do not seem to behave according "
        "to C99 -- so C99 tests are skipped.")(func)
class TestCexp(object):
    """C99 Annex G (Sec G.6.3.1) conformance checks for np.exp on complex
    inputs, driven as nose-style yield tests via check_complex_value."""

    def test_simple(self):
        """exp at a few ordinary finite points (real, imaginary, mixed)."""
        check = check_complex_value
        f = np.exp

        yield check, f, 1, 0, np.exp(1), 0, False
        yield check, f, 0, 1, np.cos(1), np.sin(1), False

        ref = np.exp(1) * np.complex(np.cos(1), np.sin(1))
        yield check, f, 1, 1, ref.real, ref.imag, False

    @platform_skip
    def test_special_values(self):
        """C99 special values (Section G 6.3.1) for cexp."""
        check = check_complex_value
        f = np.exp

        # cexp(+-0 + 0i) is 1 + 0i
        yield check, f, np.PZERO, 0, 1, 0, False
        yield check, f, np.NZERO, 0, 1, 0, False

        # cexp(x + infi) is nan + nani for finite x and raises 'invalid' FPU
        # exception
        yield check, f, 1, np.inf, np.nan, np.nan
        yield check, f, -1, np.inf, np.nan, np.nan
        yield check, f, 0, np.inf, np.nan, np.nan

        # cexp(inf + 0i) is inf + 0i
        yield check, f, np.inf, 0, np.inf, 0

        # cexp(-inf + yi) is +0 * (cos(y) + i sin(y)) for finite y
        ref = np.complex(np.cos(1.), np.sin(1.))
        yield check, f, -np.inf, 1, np.PZERO, np.PZERO

        ref = np.complex(np.cos(np.pi * 0.75), np.sin(np.pi * 0.75))
        yield check, f, -np.inf, 0.75 * np.pi, np.NZERO, np.PZERO

        # cexp(inf + yi) is +inf * (cos(y) + i sin(y)) for finite y
        ref = np.complex(np.cos(1.), np.sin(1.))
        yield check, f, np.inf, 1, np.inf, np.inf

        ref = np.complex(np.cos(np.pi * 0.75), np.sin(np.pi * 0.75))
        yield check, f, np.inf, 0.75 * np.pi, -np.inf, np.inf

        # cexp(-inf + inf i) is +-0 +- 0i (signs unspecified)
        def _check_ninf_inf(dummy):
            msgform = "cexp(-inf, inf) is (%f, %f), expected (+-0, +-0)"
            err = np.seterr(invalid='ignore')
            try:
                z = f(np.array(np.complex(-np.inf, np.inf)))
                if z.real != 0 or z.imag != 0:
                    raise AssertionError(msgform %(z.real, z.imag))
            finally:
                np.seterr(**err)

        yield _check_ninf_inf, None

        # cexp(inf + inf i) is +-inf + NaNi and raised invalid FPU ex.
        def _check_inf_inf(dummy):
            msgform = "cexp(inf, inf) is (%f, %f), expected (+-inf, nan)"
            err = np.seterr(invalid='ignore')
            try:
                z = f(np.array(np.complex(np.inf, np.inf)))
                if not np.isinf(z.real) or not np.isnan(z.imag):
                    raise AssertionError(msgform % (z.real, z.imag))
            finally:
                np.seterr(**err)

        yield _check_inf_inf, None

        # cexp(-inf + nan i) is +-0 +- 0i
        def _check_ninf_nan(dummy):
            msgform = "cexp(-inf, nan) is (%f, %f), expected (+-0, +-0)"
            err = np.seterr(invalid='ignore')
            try:
                z = f(np.array(np.complex(-np.inf, np.nan)))
                if z.real != 0 or z.imag != 0:
                    raise AssertionError(msgform % (z.real, z.imag))
            finally:
                np.seterr(**err)

        yield _check_ninf_nan, None

        # cexp(inf + nan i) is +-inf + nan
        def _check_inf_nan(dummy):
            msgform = "cexp(-inf, nan) is (%f, %f), expected (+-inf, nan)"
            err = np.seterr(invalid='ignore')
            try:
                z = f(np.array(np.complex(np.inf, np.nan)))
                if not np.isinf(z.real) or not np.isnan(z.imag):
                    raise AssertionError(msgform % (z.real, z.imag))
            finally:
                np.seterr(**err)

        yield _check_inf_nan, None

        # cexp(nan + yi) is nan + nani for y != 0 (optional: raises invalid FPU
        # ex)
        yield check, f, np.nan, 1, np.nan, np.nan
        yield check, f, np.nan, -1, np.nan, np.nan

        yield check, f, np.nan, np.inf, np.nan, np.nan
        yield check, f, np.nan, -np.inf, np.nan, np.nan

        # cexp(nan + nani) is nan + nani
        yield check, f, np.nan, np.nan, np.nan, np.nan

    @dec.knownfailureif(True, "cexp(nan + 0I) is wrong on most implementations")
    def test_special_values2(self):
        # XXX: most implementations get it wrong here (including glibc <= 2.10)
        # BUGFIX: `check` and `f` were referenced here without ever being
        # defined in this method's scope (NameError when executed); bind
        # them locally as the sibling tests do.
        check = check_complex_value
        f = np.exp

        # cexp(nan + 0i) is nan + 0i
        yield check, f, np.nan, 0, np.nan, 0
class TestClog(TestCase):
    """C99 (Sec 6.3.2 / Annex G) conformance checks for np.log on complex
    inputs.  Special-value cases collect each (input, expected) pair into
    xl/yl so the conjugate symmetry clog(conj(z)) == conj(clog(z)) can be
    verified over all of them at the end."""

    def test_simple(self):
        """log at ordinary points matches log|z| + i*arg(z)."""
        x = np.array([1+0j, 1+2j])
        y_r = np.log(np.abs(x)) + 1j * np.angle(x)
        y = np.log(x)
        for i in range(len(x)):
            assert_almost_equal(y[i], y_r[i])

    @platform_skip
    def test_special_values(self):
        """C99 special values for clog, plus conjugate symmetry."""
        xl = []
        yl = []

        # From C99 std (Sec 6.3.2)
        # XXX: check exceptions raised
        # --- raise for invalid fails.

        # clog(-0 + i0) returns -inf + i pi and raises the 'divide-by-zero'
        # floating-point exception.
        err = np.seterr(divide='raise')
        try:
            x = np.array([np.NZERO], dtype=np.complex)
            y = np.complex(-np.inf, np.pi)
            self.assertRaises(FloatingPointError, np.log, x)
            # re-evaluate with the exception silenced to check the value
            np.seterr(divide='ignore')
            assert_almost_equal(np.log(x), y)
        finally:
            np.seterr(**err)
        xl.append(x)
        yl.append(y)

        # clog(+0 + i0) returns -inf + i0 and raises the 'divide-by-zero'
        # floating-point exception.
        err = np.seterr(divide='raise')
        try:
            x = np.array([0], dtype=np.complex)
            y = np.complex(-np.inf, 0)
            self.assertRaises(FloatingPointError, np.log, x)
            np.seterr(divide='ignore')
            assert_almost_equal(np.log(x), y)
        finally:
            np.seterr(**err)
        xl.append(x)
        yl.append(y)

        # clog(x + i inf returns +inf + i pi /2, for finite x.
        x = np.array([complex(1, np.inf)], dtype=np.complex)
        y = np.complex(np.inf, 0.5 * np.pi)
        assert_almost_equal(np.log(x), y)
        xl.append(x)
        yl.append(y)

        x = np.array([complex(-1, np.inf)], dtype=np.complex)
        assert_almost_equal(np.log(x), y)
        xl.append(x)
        yl.append(y)

        # clog(x + iNaN) returns NaN + iNaN and optionally raises the
        # 'invalid' floating- point exception, for finite x.
        err = np.seterr(invalid='raise')
        try:
            x = np.array([complex(1., np.nan)], dtype=np.complex)
            y = np.complex(np.nan, np.nan)
            #self.assertRaises(FloatingPointError, np.log, x)
            np.seterr(invalid='ignore')
            assert_almost_equal(np.log(x), y)
        finally:
            np.seterr(**err)
        xl.append(x)
        yl.append(y)

        err = np.seterr(invalid='raise')
        try:
            x = np.array([np.inf + 1j * np.nan], dtype=np.complex)
            #self.assertRaises(FloatingPointError, np.log, x)
            np.seterr(invalid='ignore')
            assert_almost_equal(np.log(x), y)
        finally:
            np.seterr(**err)
        xl.append(x)
        yl.append(y)

        # clog(- inf + iy) returns +inf + ipi , for finite positive-signed y.
        x = np.array([-np.inf + 1j], dtype=np.complex)
        y = np.complex(np.inf, np.pi)
        assert_almost_equal(np.log(x), y)
        xl.append(x)
        yl.append(y)

        # clog(+ inf + iy) returns +inf + i0, for finite positive-signed y.
        x = np.array([np.inf + 1j], dtype=np.complex)
        y = np.complex(np.inf, 0)
        assert_almost_equal(np.log(x), y)
        xl.append(x)
        yl.append(y)

        # clog(- inf + i inf) returns +inf + i3pi /4.
        x = np.array([complex(-np.inf, np.inf)], dtype=np.complex)
        y = np.complex(np.inf, 0.75 * np.pi)
        assert_almost_equal(np.log(x), y)
        xl.append(x)
        yl.append(y)

        # clog(+ inf + i inf) returns +inf + ipi /4.
        x = np.array([complex(np.inf, np.inf)], dtype=np.complex)
        y = np.complex(np.inf, 0.25 * np.pi)
        assert_almost_equal(np.log(x), y)
        xl.append(x)
        yl.append(y)

        # clog(+/- inf + iNaN) returns +inf + iNaN.
        x = np.array([complex(np.inf, np.nan)], dtype=np.complex)
        y = np.complex(np.inf, np.nan)
        assert_almost_equal(np.log(x), y)
        xl.append(x)
        yl.append(y)

        x = np.array([complex(-np.inf, np.nan)], dtype=np.complex)
        assert_almost_equal(np.log(x), y)
        xl.append(x)
        yl.append(y)

        # clog(NaN + iy) returns NaN + iNaN and optionally raises the
        # 'invalid' floating-point exception, for finite y.
        x = np.array([complex(np.nan, 1)], dtype=np.complex)
        y = np.complex(np.nan, np.nan)
        assert_almost_equal(np.log(x), y)
        xl.append(x)
        yl.append(y)

        # clog(NaN + i inf) returns +inf + iNaN.
        x = np.array([complex(np.nan, np.inf)], dtype=np.complex)
        y = np.complex(np.inf, np.nan)
        assert_almost_equal(np.log(x), y)
        xl.append(x)
        yl.append(y)

        # clog(NaN + iNaN) returns NaN + iNaN.
        x = np.array([complex(np.nan, np.nan)], dtype=np.complex)
        y = np.complex(np.nan, np.nan)
        assert_almost_equal(np.log(x), y)
        xl.append(x)
        yl.append(y)

        # clog(conj(z)) = conj(clog(z)).
        # NOTE(review): ya is built but never used below.
        xa = np.array(xl, dtype=np.complex)
        ya = np.array(yl, dtype=np.complex)
        err = np.seterr(divide='ignore')
        try:
            for i in range(len(xa)):
                assert_almost_equal(np.log(np.conj(xa[i])), np.conj(np.log(xa[i])))
        finally:
            np.seterr(**err)
class TestCsqrt(object):
    """C99 Annex G (Sec G.6.4.2) conformance checks for np.sqrt on complex
    inputs, driven as nose-style yield tests via check_complex_value."""

    def test_simple(self):
        """sqrt at a few ordinary points: 1, i and -1."""
        # sqrt(1)
        yield check_complex_value, np.sqrt, 1, 0, 1, 0

        # sqrt(1i)
        yield check_complex_value, np.sqrt, 0, 1, 0.5*np.sqrt(2), 0.5*np.sqrt(2), False

        # sqrt(-1)
        yield check_complex_value, np.sqrt, -1, 0, 0, 1

    def test_simple_conjugate(self):
        """sqrt(conj(z)) at 1+1j equals conj(sqrt(z))."""
        ref = np.conj(np.sqrt(np.complex(1, 1)))

        def f(z):
            return np.sqrt(np.conj(z))

        yield check_complex_value, f, 1, 1, ref.real, ref.imag, False

    #def test_branch_cut(self):
    #    _check_branch_cut(f, -1, 0, 1, -1)

    @platform_skip
    def test_special_values(self):
        """C99 special values (Sec G 6.4.2) for csqrt.

        (The original version initialized unused accumulator lists
        ``x, y = [], []``; those dead locals have been removed.)
        """
        check = check_complex_value
        f = np.sqrt

        # csqrt(+-0 + 0i) is 0 + 0i
        yield check, f, np.PZERO, 0, 0, 0
        yield check, f, np.NZERO, 0, 0, 0

        # csqrt(x + infi) is inf + infi for any x (including NaN)
        yield check, f, 1, np.inf, np.inf, np.inf
        yield check, f, -1, np.inf, np.inf, np.inf

        yield check, f, np.PZERO, np.inf, np.inf, np.inf
        yield check, f, np.NZERO, np.inf, np.inf, np.inf
        yield check, f, np.inf, np.inf, np.inf, np.inf
        yield check, f, -np.inf, np.inf, np.inf, np.inf
        yield check, f, -np.nan, np.inf, np.inf, np.inf

        # csqrt(x + nani) is nan + nani for any finite x
        yield check, f, 1, np.nan, np.nan, np.nan
        yield check, f, -1, np.nan, np.nan, np.nan
        yield check, f, 0, np.nan, np.nan, np.nan

        # csqrt(-inf + yi) is +0 + infi for any finite y > 0
        yield check, f, -np.inf, 1, np.PZERO, np.inf

        # csqrt(inf + yi) is +inf + 0i for any finite y > 0
        yield check, f, np.inf, 1, np.inf, np.PZERO

        # csqrt(-inf + nani) is nan +- infi (both +i infi are valid)
        def _check_ninf_nan(dummy):
            msgform = "csqrt(-inf, nan) is (%f, %f), expected (nan, +-inf)"
            z = np.sqrt(np.array(np.complex(-np.inf, np.nan)))
            #Fixme: ugly workaround for isinf bug.
            err = np.seterr(invalid='ignore')
            try:
                if not (np.isnan(z.real) and np.isinf(z.imag)):
                    raise AssertionError(msgform % (z.real, z.imag))
            finally:
                np.seterr(**err)

        yield _check_ninf_nan, None

        # csqrt(+inf + nani) is inf + nani
        yield check, f, np.inf, np.nan, np.inf, np.nan

        # csqrt(nan + yi) is nan + nani for any finite y (infinite handled in x
        # + nani)
        yield check, f, np.nan, 0, np.nan, np.nan
        yield check, f, np.nan, 1, np.nan, np.nan
        yield check, f, np.nan, np.nan, np.nan, np.nan

        # XXX: check for conj(csqrt(z)) == csqrt(conj(z)) (need to fix branch
        # cuts first)
class TestCpow(TestCase):
    """Checks for complex exponentiation (``**`` and np.power), including
    the C99-sanctioned (inf, nan) result for inf ** 2."""

    def setUp(self):
        # inf/nan operands legitimately raise the 'invalid' FPU flag;
        # silence it for the duration of each test.
        self.olderr = np.seterr(invalid='ignore')

    def tearDown(self):
        np.seterr(**self.olderr)

    def test_simple(self):
        """np.power(x, 2) agrees elementwise with x ** 2."""
        base = np.array([1+1j, 0+2j, 1+2j, np.inf, np.nan])
        expected = base ** 2
        actual = np.power(base, 2)
        for got, want in zip(actual, expected):
            assert_almost_equal(got, want)

    def test_scalar(self):
        """Scalar ** matches Python's builtin complex ** (modulo C99 inf)."""
        x = np.array([1, 1j, 2, 2.5+.37j, np.inf, np.nan])
        y = np.array([1, 1j, -0.5+1.5j, -0.5+1.5j, 2, 3])
        indices = range(len(x))
        # Reference values computed with Python's builtin complex type.
        expected = [complex(x[i]) ** complex(y[i]) for i in indices]
        # inf ** 2: C99 also permits (inf, nan), which is what numpy yields.
        expected[4] = complex(np.inf, np.nan)
        # Same computation with numpy complex scalars.
        actual = [x[i] ** y[i] for i in indices]
        for i in indices:
            assert_almost_equal(actual[i], expected[i], err_msg='Loop %d\n' % i)

    def test_array(self):
        """Whole-array ** matches Python's builtin complex ** (modulo C99 inf)."""
        x = np.array([1, 1j, 2, 2.5+.37j, np.inf, np.nan])
        y = np.array([1, 1j, -0.5+1.5j, -0.5+1.5j, 2, 3])
        indices = range(len(x))
        expected = [complex(x[i]) ** complex(y[i]) for i in indices]
        expected[4] = complex(np.inf, np.nan)
        actual = x ** y
        for i in indices:
            assert_almost_equal(actual[i], expected[i], err_msg='Loop %d\n' % i)
class TestCabs(object):
    """Tests for complex absolute value (np.abs), per C99's cabs rules."""
    def setUp(self):
        # inf/nan arithmetic legitimately triggers 'invalid' FPU warnings.
        self.olderr = np.seterr(invalid='ignore')
    def tearDown(self):
        np.seterr(**self.olderr)
    def test_simple(self):
        x = np.array([1+1j, 0+2j, 1+2j, np.inf, np.nan])
        y_r = np.array([np.sqrt(2.), 2, np.sqrt(5), np.inf, np.nan])
        y = np.abs(x)
        for i in range(len(x)):
            assert_almost_equal(y[i], y_r[i])
    def test_fabs(self):
        # Test that np.abs(x +- 0j) == np.abs(x) (as mandated by C99 for cabs).
        # Uses builtin ``complex`` and -0.0 rather than the ``np.complex`` and
        # ``np.NZERO`` aliases (removed in NumPy 1.24 / 2.0); both were exact
        # aliases of the builtins, so behavior is unchanged.
        x = np.array([1+0j], dtype=complex)
        assert_array_equal(np.abs(x), np.real(x))
        x = np.array([complex(1, -0.0)], dtype=complex)
        assert_array_equal(np.abs(x), np.real(x))
        x = np.array([complex(np.inf, -0.0)], dtype=complex)
        assert_array_equal(np.abs(x), np.real(x))
        x = np.array([complex(np.nan, -0.0)], dtype=complex)
        assert_array_equal(np.abs(x), np.real(x))
    def test_cabs_inf_nan(self):
        x, y = [], []
        # cabs(+-nan + nani) returns nan
        x.append(np.nan)
        y.append(np.nan)
        yield check_real_value, np.abs,  np.nan, np.nan, np.nan
        x.append(np.nan)
        y.append(-np.nan)
        yield check_real_value, np.abs, -np.nan, np.nan, np.nan
        # According to C99 standard, if exactly one of the real/imaginary
        # parts is inf and the other nan, then cabs should return inf
        x.append(np.inf)
        y.append(np.nan)
        yield check_real_value, np.abs,  np.inf, np.nan, np.inf
        x.append(-np.inf)
        y.append(np.nan)
        yield check_real_value, np.abs, -np.inf, np.nan, np.inf
        # cabs(conj(z)) == conj(cabs(z)) (= cabs(z))
        def f(a):
            return np.abs(np.conj(a))
        def g(a, b):
            return np.abs(complex(a, b))
        xa = np.array(x, dtype=complex)
        # Bug fix: ``ya`` was previously built from ``x``; it must mirror the
        # imaginary parts collected in ``y``.
        ya = np.array(y, dtype=complex)
        for i in range(len(xa)):
            ref = g(x[i], y[i])
            yield check_real_value, f, x[i], y[i], ref
class TestCarg(object):
    """Tests for the complex argument helper ``ncu._arg`` against the
    special-value behavior mandated by C99 Annex G for carg()."""
    def test_simple(self):
        check_real_value(ncu._arg, 1, 0, 0, False)
        check_real_value(ncu._arg, 0, 1, 0.5*np.pi, False)
        check_real_value(ncu._arg, 1, 1, 0.25*np.pi, False)
        check_real_value(ncu._arg, np.PZERO, np.PZERO, np.PZERO)
    # Signed-zero handling is known to be broken on most platforms, so this
    # whole test is registered as a known failure and never actually runs.
    @dec.knownfailureif(True,
        "Complex arithmetic with signed zero is buggy on most implementation")
    def test_zero(self):
        # carg(-0 +- 0i) returns +- pi
        yield check_real_value, ncu._arg, np.NZERO, np.PZERO, np.pi, False
        yield check_real_value, ncu._arg, np.NZERO, np.NZERO, -np.pi, False
        # carg(+0 +- 0i) returns +- 0
        yield check_real_value, ncu._arg, np.PZERO, np.PZERO, np.PZERO
        yield check_real_value, ncu._arg, np.PZERO, np.NZERO, np.NZERO
        # carg(x +- 0i) returns +- 0 for x > 0
        yield check_real_value, ncu._arg, 1, np.PZERO, np.PZERO, False
        yield check_real_value, ncu._arg, 1, np.NZERO, np.NZERO, False
        # carg(x +- 0i) returns +- pi for x < 0
        yield check_real_value, ncu._arg, -1, np.PZERO, np.pi, False
        yield check_real_value, ncu._arg, -1, np.NZERO, -np.pi, False
        # carg(+- 0 + yi) returns pi/2 for y > 0
        yield check_real_value, ncu._arg, np.PZERO, 1, 0.5 * np.pi, False
        yield check_real_value, ncu._arg, np.NZERO, 1, 0.5 * np.pi, False
        # carg(+- 0 + yi) returns -pi/2 for y < 0
        # NOTE(review): the expected value on the next line looks like it
        # should be -0.5 * np.pi per the comment above; the test is
        # knownfail-skipped so this never executes -- confirm before enabling.
        yield check_real_value, ncu._arg, np.PZERO, -1, 0.5 * np.pi, False
        yield check_real_value, ncu._arg, np.NZERO, -1,-0.5 * np.pi, False
    #def test_branch_cuts(self):
    #    _check_branch_cut(ncu._arg, -1, 1j, -1, 1)
    def test_special_values(self):
        # carg(-np.inf +- yi) returns +-pi for finite y > 0
        yield check_real_value, ncu._arg, -np.inf, 1, np.pi, False
        yield check_real_value, ncu._arg, -np.inf, -1, -np.pi, False
        # carg(np.inf +- yi) returns +-0 for finite y > 0
        yield check_real_value, ncu._arg, np.inf, 1, np.PZERO, False
        yield check_real_value, ncu._arg, np.inf, -1, np.NZERO, False
        # carg(x +- np.infi) returns +-pi/2 for finite x
        yield check_real_value, ncu._arg, 1, np.inf, 0.5 * np.pi, False
        yield check_real_value, ncu._arg, 1, -np.inf, -0.5 * np.pi, False
        # carg(-np.inf +- np.infi) returns +-3pi/4
        yield check_real_value, ncu._arg, -np.inf, np.inf, 0.75 * np.pi, False
        yield check_real_value, ncu._arg, -np.inf, -np.inf, -0.75 * np.pi, False
        # carg(np.inf +- np.infi) returns +-pi/4
        yield check_real_value, ncu._arg, np.inf, np.inf, 0.25 * np.pi, False
        yield check_real_value, ncu._arg, np.inf, -np.inf, -0.25 * np.pi, False
        # carg(x + yi) returns np.nan if x or y is nan
        yield check_real_value, ncu._arg, np.nan, 0, np.nan, False
        yield check_real_value, ncu._arg, 0, np.nan, np.nan, False
        yield check_real_value, ncu._arg, np.nan, np.inf, np.nan, False
        yield check_real_value, ncu._arg, np.inf, np.nan, np.nan, False
def check_real_value(f, x1, y1, x, exact=True):
    """Apply ``f`` to a one-element array holding x1 + y1*1j and compare
    the (real) result against ``x`` -- exactly when ``exact`` is true,
    approximately otherwise."""
    arg = np.array([complex(x1, y1)])
    compare = assert_equal if exact else assert_almost_equal
    compare(f(arg), x)
def check_complex_value(f, x1, y1, x2, y2, exact=True):
    """Apply ``f`` to a one-element array holding x1 + y1*1j and compare the
    result against the complex number x2 + y2*1j.

    The comparison is exact (assert_equal) when ``exact`` is true, otherwise
    approximate (assert_almost_equal).  Invalid-value FPU warnings are
    suppressed for the duration of the check and the previous error state is
    always restored.
    """
    err = np.seterr(invalid='ignore')
    z1 = np.array([complex(x1, y1)])
    # Builtin ``complex`` replaces the removed ``np.complex`` alias
    # (deprecated in NumPy 1.20, removed in 1.24); the alias was the builtin,
    # so this is behavior-identical on every NumPy version.
    z2 = complex(x2, y2)
    try:
        if exact:
            assert_equal(f(z1), z2)
        else:
            assert_almost_equal(f(z1), z2)
    finally:
        np.seterr(**err)
# Allow this test module to be executed directly via the nose-style runner.
if __name__ == "__main__":
    run_module_suite()
| dwf/numpy | numpy/core/tests/test_umath_complex.py | Python | bsd-3-clause | 20,513 |
'''
Author: Jason.Parks
Created: Jan 17, 2012
Module: THQ_common.thq_perforce.p426.win64.__init__
Purpose: to import win64 perforce module
'''
print "THQ_common.thq_perforce.p426.win64.__init__ imported" | CountZer0/PipelineConstructionSet | python/common/perforce/p426/win64/__init__.py | Python | bsd-3-clause | 216 |
# -*- coding: utf-8 -*-
"""
***************************************************************************
GdalUtils.py
---------------------
Date : August 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'
import os
import subprocess
import platform
import re
import warnings
import psycopg2
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
from osgeo import ogr
from qgis.core import (Qgis,
QgsApplication,
QgsVectorFileWriter,
QgsProcessingFeedback,
QgsProcessingUtils,
QgsMessageLog,
QgsSettings,
QgsCredentials,
QgsDataSourceUri)
from processing.core.ProcessingConfig import ProcessingConfig
from processing.tools.system import isWindows, isMac
# GDAL is an optional dependency: probe for it once at import time and
# record the result so callers can degrade gracefully instead of crashing.
# NOTE(review): the bare ``except`` also swallows KeyboardInterrupt/SystemExit;
# narrowing it to ImportError would be safer -- confirm nothing relies on this.
try:
    with warnings.catch_warnings():
        warnings.filterwarnings("ignore", category=DeprecationWarning)
        from osgeo import gdal  # NOQA
    gdalAvailable = True
except:
    gdalAvailable = False
class GdalUtils:
    """Static helpers shared by the GDAL Processing algorithms: running GDAL
    command lines, querying raster driver capabilities, and deriving OGR
    connection strings / driver names from QGIS layers."""

    GDAL_HELP_PATH = 'GDAL_HELP_PATH'

    # Lazily-built caches of {driver short name: [extensions]}; populated on
    # first call to getSupportedRasters().
    supportedRasters = None
    supportedOutputRasters = None

    @staticmethod
    def runGdal(commands, feedback=None):
        """Run a GDAL command line (list of arguments), streaming its console
        output into ``feedback`` and the QGIS message log.  Retries up to 5
        times when reading the child's output raises IOError."""
        if feedback is None:
            feedback = QgsProcessingFeedback()
        envval = os.getenv('PATH')
        # We need to give some extra hints to get things picked up on OS X
        isDarwin = False
        try:
            isDarwin = platform.system() == 'Darwin'
        except IOError: # https://travis-ci.org/m-kuhn/QGIS#L1493-L1526
            pass
        if isDarwin and os.path.isfile(os.path.join(QgsApplication.prefixPath(), "bin", "gdalinfo")):
            # Looks like there's a bundled gdal. Let's use it.
            os.environ['PATH'] = "{}{}{}".format(os.path.join(QgsApplication.prefixPath(), "bin"), os.pathsep, envval)
            os.environ['DYLD_LIBRARY_PATH'] = os.path.join(QgsApplication.prefixPath(), "lib")
        else:
            # Other platforms should use default gdal finder codepath
            settings = QgsSettings()
            path = settings.value('/GdalTools/gdalPath', '')
            if not path.lower() in envval.lower().split(os.pathsep):
                envval += '{}{}'.format(os.pathsep, path)
                os.putenv('PATH', envval)
        fused_command = ' '.join([str(c) for c in commands])
        QgsMessageLog.logMessage(fused_command, 'Processing', Qgis.Info)
        feedback.pushInfo('GDAL command:')
        feedback.pushCommandInfo(fused_command)
        feedback.pushInfo('GDAL command output:')
        success = False
        retry_count = 0
        while not success:
            loglines = []
            loglines.append('GDAL execution console output')
            try:
                with subprocess.Popen(
                    fused_command,
                    shell=True,
                    stdout=subprocess.PIPE,
                    stdin=subprocess.DEVNULL,
                    stderr=subprocess.STDOUT,
                    universal_newlines=True,
                ) as proc:
                    for line in proc.stdout:
                        feedback.pushConsoleInfo(line)
                        loglines.append(line)
                    success = True
            except IOError as e:
                if retry_count < 5:
                    retry_count += 1
                else:
                    raise IOError(
                        str(e) + u'\nTried 5 times without success. Last iteration stopped after reading {} line(s).\nLast line(s):\n{}'.format(
                            len(loglines), u'\n'.join(loglines[-10:])))
        QgsMessageLog.logMessage('\n'.join(loglines), 'Processing', Qgis.Info)
        GdalUtils.consoleOutput = loglines

    @staticmethod
    def getConsoleOutput():
        # NOTE(review): GdalUtils.consoleOutput is only ever assigned inside
        # runGdal(), so calling this before any run raises AttributeError.
        return GdalUtils.consoleOutput

    @staticmethod
    def getSupportedRasters():
        """Return (and cache) the readable raster drivers as a mapping of
        driver short name to its list of file extensions."""
        if not gdalAvailable:
            return {}
        if GdalUtils.supportedRasters is not None:
            return GdalUtils.supportedRasters
        if gdal.GetDriverCount() == 0:
            gdal.AllRegister()
        GdalUtils.supportedRasters = {}
        GdalUtils.supportedOutputRasters = {}
        # GTiff is always assumed available and writable.
        GdalUtils.supportedRasters['GTiff'] = ['tif']
        GdalUtils.supportedOutputRasters['GTiff'] = ['tif']
        for i in range(gdal.GetDriverCount()):
            driver = gdal.GetDriver(i)
            if driver is None:
                continue
            shortName = driver.ShortName
            metadata = driver.GetMetadata()
            if gdal.DCAP_RASTER not in metadata \
                    or metadata[gdal.DCAP_RASTER] != 'YES':
                continue
            if gdal.DMD_EXTENSION in metadata:
                extensions = metadata[gdal.DMD_EXTENSION].split('/')
                if extensions:
                    GdalUtils.supportedRasters[shortName] = extensions
            # Only creatable rasters can be referenced in output rasters
            # NOTE(review): this check sits outside the DMD_EXTENSION branch
            # above, so ``extensions`` may be unbound (or left over from a
            # previous driver) when a driver has no DMD_EXTENSION metadata --
            # upstream QGIS nests it inside that branch; confirm intended.
            if ((gdal.DCAP_CREATE in metadata and
                    metadata[gdal.DCAP_CREATE] == 'YES') or
                    (gdal.DCAP_CREATECOPY in metadata and
                     metadata[gdal.DCAP_CREATECOPY] == 'YES')):
                GdalUtils.supportedOutputRasters[shortName] = extensions
        return GdalUtils.supportedRasters

    @staticmethod
    def getSupportedOutputRasters():
        """Return (and cache) the writable raster drivers; builds both caches
        via getSupportedRasters() when needed."""
        if not gdalAvailable:
            return {}
        if GdalUtils.supportedOutputRasters is not None:
            return GdalUtils.supportedOutputRasters
        else:
            GdalUtils.getSupportedRasters()
        return GdalUtils.supportedOutputRasters

    @staticmethod
    def getSupportedRasterExtensions():
        """Return all readable raster extensions, 'tif' first."""
        allexts = ['tif']
        for exts in list(GdalUtils.getSupportedRasters().values()):
            for ext in exts:
                if ext not in allexts and ext != '':
                    allexts.append(ext)
        return allexts

    @staticmethod
    def getSupportedOutputRasterExtensions():
        """Return all writable raster extensions, 'tif' first."""
        allexts = ['tif']
        for exts in list(GdalUtils.getSupportedOutputRasters().values()):
            for ext in exts:
                if ext not in allexts and ext != '':
                    allexts.append(ext)
        return allexts

    @staticmethod
    def getVectorDriverFromFileName(filename):
        """Return the OGR driver name matching the file's extension, falling
        back to 'ESRI Shapefile'."""
        ext = os.path.splitext(filename)[1]
        if ext == '':
            return 'ESRI Shapefile'
        formats = QgsVectorFileWriter.supportedFiltersAndFormats()
        for format in formats:
            if ext in format.filterString:
                return format.driverName
        return 'ESRI Shapefile'

    @staticmethod
    def getFormatShortNameFromFilename(filename):
        """Return the raster driver short name matching the file's extension,
        falling back to 'GTiff'."""
        ext = filename[filename.rfind('.') + 1:]
        supported = GdalUtils.getSupportedRasters()
        for name in list(supported.keys()):
            exts = supported[name]
            if ext in exts:
                return name
        return 'GTiff'

    @staticmethod
    def escapeAndJoin(strList):
        """Join command-line tokens into one string, double-quoting (and
        backslash-escaping) tokens that contain spaces, except option flags."""
        joined = ''
        for s in strList:
            if not isinstance(s, str):
                s = str(s)
            # Leading '-' marks an option flag; those are never quoted.
            if s and s[0] != '-' and ' ' in s:
                escaped = '"' + s.replace('\\', '\\\\').replace('"', '\\"') \
                    + '"'
            else:
                escaped = s
            if escaped is not None:
                joined += escaped + ' '
        return joined.strip()

    @staticmethod
    def version():
        """Return the GDAL version as an integer, e.g. 3040200."""
        return int(gdal.VersionInfo('VERSION_NUM'))

    @staticmethod
    def readableVersion():
        """Return the GDAL version as a human-readable string."""
        return gdal.VersionInfo('RELEASE_NAME')

    @staticmethod
    def ogrConnectionStringFromLayer(layer):
        """Generates OGR connection string from a layer
        """
        return GdalUtils.ogrConnectionStringAndFormatFromLayer(layer)[0]

    @staticmethod
    def ogrConnectionStringAndFormat(uri, context):
        """Generates OGR connection string and format string from layer source
        Returned values are a tuple of the connection string and format string
        """
        ogrstr = None
        format = None

        layer = QgsProcessingUtils.mapLayerFromString(uri, context, False)
        if layer is None:
            # Not a project layer: infer the driver from the file extension.
            path, ext = os.path.splitext(uri)
            format = QgsVectorFileWriter.driverForExtension(ext)
            return uri, '"' + format + '"'
        return GdalUtils.ogrConnectionStringAndFormatFromLayer(layer)

    @staticmethod
    def ogrConnectionStringAndFormatFromLayer(layer):
        """Build a (connection string, quoted OGR format name) pair for the
        layer, handling spatialite/postgres/mssql/oracle/WFS specially."""
        provider = layer.dataProvider().name()
        if provider == 'spatialite':
            # dbname='/geodata/osm_ch.sqlite' table="places" (Geometry) sql=
            regex = re.compile("dbname='(.+)'")
            r = regex.search(str(layer.source()))
            ogrstr = r.groups()[0]
            format = 'SQLite'
        elif provider == 'postgres':
            # dbname='ktryjh_iuuqef' host=spacialdb.com port=9999
            # user='ktryjh_iuuqef' password='xyqwer' sslmode=disable
            # key='gid' estimatedmetadata=true srid=4326 type=MULTIPOLYGON
            # table="t4" (geom) sql=
            dsUri = QgsDataSourceUri(layer.dataProvider().dataSourceUri())
            conninfo = dsUri.connectionInfo()
            conn = None
            ok = False
            # Keep prompting for credentials until a connection succeeds or
            # the user cancels.
            while not conn:
                try:
                    conn = psycopg2.connect(dsUri.connectionInfo())
                except psycopg2.OperationalError:
                    (ok, user, passwd) = QgsCredentials.instance().get(conninfo, dsUri.username(), dsUri.password())
                    if not ok:
                        break
                    dsUri.setUsername(user)
                    dsUri.setPassword(passwd)
            if not conn:
                raise RuntimeError('Could not connect to PostgreSQL database - check connection info')
            if ok:
                # Cache the credentials that finally worked.
                QgsCredentials.instance().put(conninfo, user, passwd)
            ogrstr = "PG:%s" % dsUri.connectionInfo()
            format = 'PostgreSQL'
        elif provider == 'mssql':
            #'dbname=\'db_name\' host=myHost estimatedmetadata=true
            # srid=27700 type=MultiPolygon table="dbo"."my_table"
            # #(Shape) sql='
            dsUri = layer.dataProvider().uri()
            ogrstr = 'MSSQL:'
            ogrstr += 'database={0};'.format(dsUri.database())
            ogrstr += 'server={0};'.format(dsUri.host())
            if dsUri.username() != "":
                ogrstr += 'uid={0};'.format(dsUri.username())
            else:
                ogrstr += 'trusted_connection=yes;'
            if dsUri.password() != '':
                ogrstr += 'pwd={0};'.format(dsUri.password())
            ogrstr += 'tables={0}'.format(dsUri.table())
            format = 'MSSQL'
        elif provider == "oracle":
            # OCI:user/password@host:port/service:table
            dsUri = QgsDataSourceUri(layer.dataProvider().dataSourceUri())
            ogrstr = "OCI:"
            if dsUri.username() != "":
                ogrstr += dsUri.username()
                if dsUri.password() != "":
                    ogrstr += "/" + dsUri.password()
                delim = "@"
            if dsUri.host() != "":
                ogrstr += delim + dsUri.host()
                delim = ""
                if dsUri.port() != "" and dsUri.port() != '1521':
                    ogrstr += ":" + dsUri.port()
                ogrstr += "/"
                if dsUri.database() != "":
                    ogrstr += dsUri.database()
            elif dsUri.database() != "":
                ogrstr += delim + dsUri.database()
            if ogrstr == "OCI:":
                raise RuntimeError('Invalid oracle data source - check connection info')
            ogrstr += ":"
            if dsUri.schema() != "":
                ogrstr += dsUri.schema() + "."
            ogrstr += dsUri.table()
            format = 'OCI'
        elif provider.lower() == "wfs":
            uri = QgsDataSourceUri(layer.source())
            baseUrl = uri.param('url').split('?')[0]
            ogrstr = "WFS:{}".format(baseUrl)
            format = 'WFS'
        else:
            # File-based source: strip any |layername=... suffix and infer the
            # driver from the extension.
            ogrstr = str(layer.source()).split("|")[0]
            path, ext = os.path.splitext(ogrstr)
            format = QgsVectorFileWriter.driverForExtension(ext)
        return ogrstr, '"' + format + '"'

    @staticmethod
    def ogrOutputLayerName(uri):
        """Return the bare layer name (basename without extension) of an
        output URI."""
        uri = uri.strip('"')
        return os.path.basename(os.path.splitext(uri)[0])

    @staticmethod
    def ogrLayerName(uri):
        """Extract the layer name from an OGR-style URI: database table
        references, |layername=/|layerid= suffixes, or the data source's
        first layer."""
        uri = uri.strip('"')
        if ' table=' in uri:
            # table="schema"."table"
            re_table_schema = re.compile(' table="([^"]*)"\\."([^"]*)"')
            r = re_table_schema.search(uri)
            if r:
                return r.groups()[0] + '.' + r.groups()[1]
            # table="table"
            re_table = re.compile(' table="([^"]*)"')
            r = re_table.search(uri)
            if r:
                return r.groups()[0]
        elif 'layername' in uri:
            regex = re.compile('(layername=)([^|]*)')
            r = regex.search(uri)
            return r.groups()[1]

        fields = uri.split('|')
        basePath = fields[0]
        fields = fields[1:]
        layerid = 0
        for f in fields:
            if f.startswith('layername='):
                # Make sure to consider the last layername= in case both are provided
                return f.split('=')[1]
            if f.startswith('layerid='):
                layerid = int(f.split('=')[1])

        # Fall back to opening the data source and asking OGR.
        ds = ogr.Open(basePath)
        if not ds:
            return None

        ly = ds.GetLayer(layerid)
        if not ly:
            return None

        name = ly.GetName()
        ds = None
        return name

    @staticmethod
    def parseCreationOptions(value):
        """Split a '|'-separated option string into ['-co', opt, ...] pairs."""
        parts = value.split('|')
        options = []
        for p in parts:
            options.extend(['-co', p])
        return options

    @staticmethod
    def writeLayerParameterToTextFile(filename, alg, parameters, parameter_name, context, quote=True, executing=False):
        """Write the sources of a multi-layer parameter to a temp text file
        (one per line, optionally quoted) and return the file's path.  The
        file is only populated when ``executing`` is True."""
        listFile = QgsProcessingUtils.generateTempFilename(filename)
        if executing:
            layers = []
            for l in alg.parameterAsLayerList(parameters, parameter_name, context):
                if quote:
                    layers.append('"' + l.source() + '"')
                else:
                    layers.append(l.source())
            with open(listFile, 'w') as f:
                f.write('\n'.join(layers))
        return listFile

    @staticmethod
    def gdal_crs_string(crs):
        """
        Converts a QgsCoordinateReferenceSystem to a string understandable
        by GDAL
        :param crs: crs to convert
        :return: gdal friendly string
        """
        if crs.authid().upper().startswith('EPSG:'):
            return crs.authid()
        # fallback to proj4 string, stripping out newline characters
        return crs.toProj4().replace('\n', ' ').replace('\r', ' ')
| tudorbarascu/QGIS | python/plugins/processing/algs/gdal/GdalUtils.py | Python | gpl-2.0 | 15,861 |
"""
Autotest scheduler watcher main library.
"""
import os, sys, signal, time, subprocess, logging
from optparse import OptionParser
try:
import autotest.common as common
except ImportError:
import common
from autotest.scheduler import watcher_logging_config
from autotest.client.shared import error, global_config, utils
from autotest.client.shared import logging_manager
from autotest.scheduler import scheduler_logging_config
from autotest.scheduler import monitor_db
# Seconds between liveness checks of the scheduler process.
PAUSE_LENGTH = 60
# If the scheduler log has not grown for this long, assume it stalled.
STALL_TIMEOUT = 2*60*60
# Paths derived from this module's location in the autotest tree.
autodir = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
results_dir = os.path.join(autodir, 'results')
monitor_db_path = os.path.join(autodir, 'scheduler/autotest-scheduler')
def run_banner_output(cmd):
    """Returns ------ CMD ------\nCMD_OUTPUT in a string"""
    # Banner: the command centered in a 60-character rule of dashes,
    # followed by a placeholder for the command's combined output.
    template = '%s\n%%s\n\n' % cmd.center(60, '-')
    output = ''
    try:
        result = utils.run(cmd, ignore_status=True, timeout=30)
        output = result.stdout + result.stderr
    except error.CmdError:
        output = 'Timed out'
    return template % output
def kill_monitor():
    """Stop the scheduler: request a clean shutdown with SIGINT first,
    then escalate to a hard kill if it is still alive after a grace period."""
    logging.info("Killing scheduler")
    # try shutdown first
    utils.signal_program(monitor_db.PID_FILE_PREFIX, sig=signal.SIGINT)
    if utils.program_is_alive(monitor_db.PID_FILE_PREFIX): # was it killed?
        # give it some time to shutdown
        time.sleep(30)
        # kill it
        utils.signal_program(monitor_db.PID_FILE_PREFIX)
def handle_sigterm(signum, frame):
    """SIGTERM handler: shut the scheduler down, remove the watcher's pid
    file and exit with a non-zero status."""
    logging.info('Caught SIGTERM')
    kill_monitor()
    utils.delete_pid_file_if_exists(monitor_db.WATCHER_PID_FILE_PREFIX)
    sys.exit(1)
# Install the SIGTERM handler at import time so the watcher can always shut
# the scheduler down cleanly when terminated.
signal.signal(signal.SIGTERM, handle_sigterm)

# Optional site-specific mixin for MonitorProc; falls back to ``object``
# when no site customization module is installed.
SiteMonitorProc = utils.import_site_class(
    __file__, 'autotest.scheduler.site_monitor_db_watcher',
    'SiteMonitorProc', object)
class MonitorProc(SiteMonitorProc):
    """Wrapper around a single scheduler (monitor_db) child process: starts
    it, watches its log for activity and collects diagnostics on a stall."""
    def __init__(self, do_recovery=False):
        # ``do_recovery`` adds --recover-hosts to the scheduler command line.
        args = [monitor_db_path]
        if do_recovery:
            args.append("--recover-hosts")
        args.append(results_dir)
        # Any previously-running scheduler must be stopped before we start
        # a new one.
        kill_monitor()
        scheduler_config = scheduler_logging_config.SchedulerLoggingConfig
        log_name = scheduler_config.get_log_name()
        os.environ['AUTOTEST_SCHEDULER_LOG_NAME'] = log_name
        scheduler_log_dir = scheduler_config.get_server_log_dir()
        # Stall detection state: log path, last observed size and timestamp.
        self.log_path = os.path.join(scheduler_log_dir, log_name)
        self.log_size = 0
        self.last_log_change = time.time()
        logging.info("Starting scheduler with log file %s" % self.log_path)
        self.args = args
        # Allow site specific code to run, set environment variables and
        # modify self.args if desired.
        super(MonitorProc, self).__init__()
    def start(self):
        """Launch the scheduler subprocess with stdout discarded."""
        devnull = open(os.devnull, 'w')
        self.proc = subprocess.Popen(self.args, stdout=devnull)
    def is_running(self):
        """Return True while the scheduler is alive and its log is growing;
        False when it died or stalled (triggering diagnostics collection)."""
        if self.proc.poll() is not None:
            logging.info("Scheduler died")
            return False
        old_size = self.log_size
        new_size = os.path.getsize(self.log_path)
        if old_size != new_size:
            logging.info("Log was touched")
            self.log_size = new_size
            self.last_log_change = time.time()
        elif self.last_log_change + STALL_TIMEOUT < time.time():
            logging.info("Scheduler stalled")
            self.collect_stalled_info()
            return False
        return True
    def collect_stalled_info(self):
        """Dump system diagnostics (and, if configured, the MySQL process
        list) into <log_path>.stall_info for post-mortem analysis."""
        INFO_TO_COLLECT = ['uptime',
                           'ps auxwww',
                           'iostat -k -x 2 4',
                           ]
        db_cmd = '/usr/bin/mysqladmin --verbose processlist -u%s -p%s'
        config = global_config.global_config
        try:
            user = config.get_config_value("BACKUP", "user")
            password = config.get_config_value("BACKUP", "password")
            db_cmd %= (user, password)
            INFO_TO_COLLECT.append(db_cmd)
        except global_config.ConfigError:
            # No backup credentials configured; skip the DB process list.
            pass
        stall_log_path = self.log_path + '.stall_info'
        log = open(stall_log_path, "w")
        for cmd in INFO_TO_COLLECT:
            log.write(run_banner_output(cmd))
        log.close()
def main():
    """Watcher entry point: parse options, refuse to run as root or twice,
    optionally daemonize, then restart the scheduler forever whenever it
    dies or stalls."""
    parser = OptionParser()
    parser.add_option("-r", action="store_true", dest="recover",
                      help=("run recovery mode (implicit after any crash)"))
    parser.add_option("--background", dest="background", action="store_true",
                      default=False, help=("runs the scheduler monitor on "
                                           "background"))
    (options, args) = parser.parse_args()
    recover = (options.recover == True)
    if len(args) != 0:
        parser.print_help()
        sys.exit(1)
    if os.getuid() == 0:
        logging.critical("Running as root, aborting!")
        sys.exit(1)
    if utils.program_is_alive(monitor_db.WATCHER_PID_FILE_PREFIX):
        logging.critical("autotest-monitor-watcher already running, aborting!")
        sys.exit(1)
    utils.write_pid(monitor_db.WATCHER_PID_FILE_PREFIX)
    if options.background:
        logging_manager.configure_logging(
            watcher_logging_config.WatcherLoggingConfig(use_console=False))
        # Double fork - see http://code.activestate.com/recipes/66012/
        try:
            pid = os.fork()
            if (pid > 0):
                sys.exit(0) # exit from first parent
        except OSError, e:
            sys.stderr.write("fork #1 failed: (%d) %s\n" %
                             (e.errno, e.strerror))
            sys.exit(1)
        # Decouple from parent environment
        os.chdir("/")
        os.umask(0)
        os.setsid()
        # Second fork
        try:
            pid = os.fork()
            if (pid > 0):
                sys.exit(0) # exit from second parent
        except OSError, e:
            sys.stderr.write("fork #2 failed: (%d) %s\n" %
                             (e.errno, e.strerror))
            sys.exit(1)
    else:
        logging_manager.configure_logging(
            watcher_logging_config.WatcherLoggingConfig())
    # Supervision loop: (re)start the scheduler, poll it, and restart when
    # it dies or stalls.  Recovery mode only applies to the first start.
    while True:
        proc = MonitorProc(do_recovery=recover)
        proc.start()
        time.sleep(PAUSE_LENGTH)
        while proc.is_running():
            logging.info("Tick")
            time.sleep(PAUSE_LENGTH)
        recover = False
| nacc/autotest | scheduler/monitor_db_watcher.py | Python | gpl-2.0 | 6,407 |
from urllib.parse import urljoin
from pulsar import as_coroutine, task
from pulsar.utils.httpurl import Headers
from pulsar.utils.log import LocalMixin, local_property
from pulsar.apps.wsgi import Route, wsgi_request
from pulsar.apps.http import HttpClient
# WSGI environ keys that carry header values without the HTTP_ prefix.
ENVIRON_HEADERS = ('content-type', 'content-length')
class Proxy(LocalMixin):
    '''Proxy requests to another server
    '''
    def __init__(self, route, url):
        # ``route`` is the local URL pattern to intercept; ``url`` is the
        # upstream base address requests are forwarded to.
        self.route = Route(route)
        self.url = url

    @local_property
    def http_client(self):
        '''The :class:`.HttpClient` used by this proxy middleware for
accessing upstream resources'''
        return HttpClient(decompress=False, store_cookies=False)

    def __call__(self, environ, start_response):
        # WSGI entry point: forward matching requests upstream, otherwise
        # return None so the next middleware handles them.
        request = wsgi_request(environ)
        path = request.path
        match = self.route.match(path[1:])
        if match is not None:
            query = request.get('QUERY_STRING', '')
            # Re-attach the unmatched remainder of the path (and the query
            # string) to the upstream base URL.
            path = urljoin(self.url, match.pop('__remaining__', ''))
            if query:
                path = '%s?%s' % (path, query)
            return self._call(request, path, start_response)

    @task
    def _call(self, request, path, start_response):
        # Coroutine doing the actual upstream round-trip, streaming the
        # client's body/files through and mirroring the upstream response.
        data, files = yield from as_coroutine(request.data_and_files())
        response = yield from self.http_client.request(
            request.method, path, data=data, files=files,
            headers=self.request_headers(request.environ),
            version=request.get('SERVER_PROTOCOL'))
        response.raise_for_status()
        start_response(response.get_status(), list(response.headers))
        return [response.get_content()]

    def request_headers(self, environ):
        '''Fill request headers from the environ dictionary and
        modify them via the list of :attr:`headers_middleware`.

        The returned headers will be sent to the target uri.
        '''
        headers = Headers(kind='client')
        for k in environ:
            if k.startswith('HTTP_'):
                head = k[5:].replace('_', '-')
                headers[head] = environ[k]
        # content-type/content-length live in the environ without the
        # HTTP_ prefix and must be copied explicitly.
        for head in ENVIRON_HEADERS:
            k = head.replace('-', '_').upper()
            v = environ.get(k)
            if v:
                headers[head] = v
        return headers
| ymero/pulsar | pulsar/apps/proxy/__init__.py | Python | bsd-3-clause | 2,279 |
from django.db import models
class Post(models.Model):
    """A blog post (attached to a senator via Appointment)"""
    # The senate appointment (senator + position) that authored the post.
    author = models.ForeignKey('senate.Appointment')
    title = models.CharField(max_length=80)
    body = models.TextField()
    posted = models.DateTimeField(editable=True)
    # URL slug; normalized to lowercase in save().
    slug = models.SlugField()

    class Meta:
        # Oldest posts first.
        ordering = ['posted']

    def __unicode__(self):
        # Human-readable label used in the admin and shell (Python 2 Django).
        return u"{0} by {1} ({2}) on {3}".format(
            self.title,
            self.author.name,
            self.author.position.title,
            self.posted,
        )

    @models.permalink
    def get_absolute_url(self):
        # Reverses the 'post_detail' URL from year, month abbreviation, slug.
        return ('post_detail', [
            self.posted.strftime("%Y"),
            self.posted.strftime("%b"),
            self.slug])

    def save(self, *args, **kwargs):
        # Enforce lowercase slugs regardless of what was entered.
        self.slug = self.slug.lower()
        super(Post, self).save(*args, **kwargs)
import re
from debris.asset import encode
from debris.asset import decode
class Memory(object):
    """In-memory key/value cache backend for Assets.

    Stored values are expected to expose a ``destroy(reasons)`` method,
    which is invoked when an individual key is removed.
    """

    def __init__(self, config=None):
        # ``config`` is accepted for interface parity with other backends
        # but is currently unused.
        self.cache = {}

    def get(self, key):
        """Return the cached value for ``key``.

        Raises:
            LookupError: if the key is not present.
        """
        if key in self.cache:
            return self.cache[key]
        raise LookupError("Key not found in memory, %s" % key)

    def set(self, key, data):
        """Store ``data`` under ``key``, overwriting any existing entry."""
        self.cache[key] = data

    def keys(self, search=None):
        """Return all keys, or only those matching ``search``.

        ``search`` is a regular expression matched case-insensitively
        anywhere within each key.
        """
        if search:
            # Bug fix: the IGNORECASE flag must be given to re.compile --
            # passing a flags argument to re.search together with an
            # already-compiled pattern raises ValueError.
            rc = re.compile(search, re.I)
            return [key for key in self.cache.keys() if rc.search(key)]
        else:
            return self.cache.keys()

    def remove(self, key, **reasons):
        """Remove ``key`` (or every entry when ``key`` is ``'*'``).

        ``reasons`` are forwarded to the asset's ``destroy`` method.
        Returns True when a key was removed, False when it was absent,
        and None after a full flush.
        """
        if key == '*':
            self.cache = {}
            return None
        elif key in self.cache:
            self.cache[key].destroy(reasons)
            del self.cache[key]
            return True
        return False

    def empty(self, tags, **reasons):
        """Destroy Assets based on the tags
        provide additional `**reasons` to inform
        the assets why they will be destroyed
        """
        # TODO: tag-based eviction is currently disabled; the intended
        # implementation is sketched below.
        pass
        # tags = set(tags)
        # for key, asset in self.cache.items():
        #     if tags & asset.tags:
        #         asset.destroy(reasons)
        #         del self.cache[key]
| stevepeak/debris | debris/addons/memory.py | Python | apache-2.0 | 1,282 |
##########################################################################
#
# Copyright (c) 2019, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import inspect
import six
import unittest
import imath
import IECore
import Gaffer
import GafferTest
class SpreadsheetTest( GafferTest.TestCase ) :
	def testConstructor( self ) :
		# A new Spreadsheet has a default name, a single built-in "default"
		# row (enabled, with an empty name selector) and no columns at all.
		self.assertEqual( Gaffer.Spreadsheet().getName(), "Spreadsheet" )
		s = Gaffer.Spreadsheet( "s" )
		self.assertEqual( s.getName(), "s" )
		# Check default row
		self.assertEqual( len( s["rows"] ), 1 )
		self.assertEqual( s["rows"][0].getName(), "default" )
		self.assertEqual( s["rows"][0]["name"].getValue(), "" )
		self.assertEqual( s["rows"][0]["enabled"].getValue(), True )
		self.assertIsInstance( s["rows"][0], Gaffer.Spreadsheet.RowPlug )
		# Check we have no columns
		self.assertEqual( len( s["rows"][0]["cells"] ), 0 )
		self.assertEqual( len( s["out"] ), 0 )
	def testEditColumnsAndRows( self ) :
		# Adding a column creates a matching cell on every row and a matching
		# output plug; added rows inherit the full set of cells (with the
		# column defaults); removing a column removes both again.
		s = Gaffer.Spreadsheet()
		columnIndex = s["rows"].addColumn( Gaffer.IntPlug( "myInt" ) )
		self.assertEqual( columnIndex, 0 )
		self.assertEqual( len( s["rows"][0]["cells"] ), 1 )
		self.assertIsInstance( s["rows"][0]["cells"][0], Gaffer.Spreadsheet.CellPlug )
		self.assertEqual( s["rows"][0]["cells"][0].getName(), "myInt" )
		self.assertIsInstance( s["rows"][0]["cells"][0]["value"], Gaffer.IntPlug )
		self.assertEqual( s["rows"][0]["cells"][0]["enabled"].getValue(), True )
		self.assertEqual( s["rows"][0]["cells"][0]["value"].getValue(), 0 )
		self.assertEqual( len( s["out"] ), 1 )
		self.assertEqual( s["out"][0].getName(), "myInt" )
		self.assertIsInstance( s["out"][0], Gaffer.IntPlug )
		self.assertEqual( s["out"][0].direction(), Gaffer.Plug.Direction.Out )
		columnIndex = s["rows"].addColumn( Gaffer.FloatPlug( "myFloat", defaultValue = 1 ) )
		self.assertEqual( columnIndex, 1 )
		self.assertEqual( len( s["rows"][0]["cells"] ), 2 )
		self.assertIsInstance( s["rows"][0]["cells"][1], Gaffer.Spreadsheet.CellPlug )
		self.assertEqual( s["rows"][0]["cells"][1].getName(), "myFloat" )
		self.assertIsInstance( s["rows"][0]["cells"][1]["value"], Gaffer.FloatPlug )
		self.assertEqual( s["rows"][0]["cells"][1]["enabled"].getValue(), True )
		self.assertEqual( s["rows"][0]["cells"][1]["value"].getValue(), 1 )
		self.assertEqual( len( s["out"] ), 2 )
		self.assertEqual( s["out"][1].getName(), "myFloat" )
		self.assertIsInstance( s["out"][1], Gaffer.FloatPlug )
		self.assertEqual( s["out"][1].direction(), Gaffer.Plug.Direction.Out )
		row = s["rows"].addRow()
		self.assertIsInstance( row, Gaffer.Spreadsheet.RowPlug )
		self.assertEqual( row.parent(), s["rows"] )
		self.assertEqual( row.getName(), "row1" )
		self.assertEqual( len( row["cells"] ), 2 )
		self.assertEqual( row["cells"][0].getName(), "myInt" )
		self.assertEqual( row["cells"][1].getName(), "myFloat" )
		self.assertIsInstance( row["cells"][0], Gaffer.Spreadsheet.CellPlug )
		self.assertIsInstance( row["cells"][1], Gaffer.Spreadsheet.CellPlug )
		self.assertEqual( row["cells"][0]["enabled"].getValue(), True )
		self.assertEqual( row["cells"][0]["value"].getValue(), 0 )
		self.assertEqual( row["cells"][1]["enabled"].getValue(), True )
		self.assertEqual( row["cells"][1]["value"].getValue(), 1 )
		s["rows"].removeColumn( columnIndex )
		self.assertEqual( len( s["rows"][0]["cells"] ), 1 )
		self.assertEqual( s["rows"][0]["cells"][0].getName(), "myInt" )
		self.assertEqual( len( s["out"] ), 1 )
		self.assertEqual( s["out"][0].getName(), "myInt" )
	def testRemoveMiddleColumn( self ) :
		# Removing a column from the middle must preserve the surrounding
		# columns on every row and on the output, and must round-trip
		# correctly through undo/redo.
		s = Gaffer.ScriptNode()
		s["s"] = Gaffer.Spreadsheet()
		s["s"]["rows"].addRow()
		c1 = s["s"]["rows"].addColumn( Gaffer.IntPlug( "c1" ) )
		c2 = s["s"]["rows"].addColumn( Gaffer.FloatPlug( "c2" ) )
		c3 = s["s"]["rows"].addColumn( Gaffer.StringPlug( "c3" ) )
		def assertPreconditions() :
			for row in s["s"]["rows"] :
				self.assertEqual( row["cells"].keys(), [ "c1", "c2", "c3" ] )
				self.assertIsInstance( row["cells"]["c1"]["value"], Gaffer.IntPlug )
				self.assertIsInstance( row["cells"]["c2"]["value"], Gaffer.FloatPlug )
				self.assertIsInstance( row["cells"]["c3"]["value"], Gaffer.StringPlug )
			self.assertEqual( s["s"]["out"].keys(), [ "c1", "c2", "c3" ] )
			self.assertIsInstance( s["s"]["out"]["c1"], Gaffer.IntPlug )
			self.assertIsInstance( s["s"]["out"]["c2"], Gaffer.FloatPlug )
			self.assertIsInstance( s["s"]["out"]["c3"], Gaffer.StringPlug )
		assertPreconditions()
		with Gaffer.UndoScope( s ) :
			s["s"]["rows"].removeColumn( c2 )
		def assertPostConditions() :
			for row in s["s"]["rows"] :
				self.assertEqual( row["cells"].keys(), [ "c1", "c3" ] )
				self.assertIsInstance( row["cells"]["c1"]["value"], Gaffer.IntPlug )
				self.assertIsInstance( row["cells"]["c3"]["value"], Gaffer.StringPlug )
			self.assertEqual( s["s"]["out"].keys(), [ "c1", "c3" ] )
			self.assertIsInstance( s["s"]["out"]["c1"], Gaffer.IntPlug )
			self.assertIsInstance( s["s"]["out"]["c3"], Gaffer.StringPlug )
		assertPostConditions()
		# Exercise the undo stack twice to catch state corruption on redo.
		s.undo()
		assertPreconditions()
		s.redo()
		assertPostConditions()
		s.undo()
		assertPreconditions()
		s.redo()
		assertPostConditions()
def testOutput( self ) :
	# The "out" plug must reflect the cells of the row matched by "selector",
	# falling back to the default row for an unmatched selector, and always
	# using the default row when the spreadsheet is disabled.
	s = Gaffer.Spreadsheet()
	s["rows"].addColumn( Gaffer.IntPlug( "column1" ) )
	s["rows"].addColumn( Gaffer.IntPlug( "column2" ) )
	defaultRow = s["rows"].defaultRow()
	row1 = s["rows"].addRow()
	row1["name"].setValue( "row1" )
	row2 = s["rows"].addRow()
	row2["name"].setValue( "row2" )
	defaultRow["cells"]["column1"]["value"].setValue( 1 )
	defaultRow["cells"]["column2"]["value"].setValue( 2 )
	row1["cells"]["column1"]["value"].setValue( 3 )
	row1["cells"]["column2"]["value"].setValue( 4 )
	row2["cells"]["column1"]["value"].setValue( 5 )
	row2["cells"]["column2"]["value"].setValue( 6 )
	# "" and "woteva" match no row, so should yield the default row's values.
	for selector in ( "", "woteva", "row1", "row2" ) :
		s["selector"].setValue( selector )
		expectedRow = s["rows"].getChild( selector ) or s["rows"].defaultRow()
		for out in s["out"] :
			s["enabled"].setValue( True )
			self.assertEqual( out.getValue(), expectedRow["cells"][out.getName()]["value"].getValue() )
			# Disabled spreadsheets always output the default row.
			s["enabled"].setValue( False )
			self.assertEqual( out.getValue(), s["rows"].defaultRow()["cells"][out.getName()]["value"].getValue() )
def testSerialisation( self ) :
	# Serialising and re-executing a script must preserve every row name,
	# enabled state and cell value, and the serialisation should be compact
	# (one addChild, one addColumn per column, a single addRows call).
	s = Gaffer.ScriptNode()
	s["s"] = Gaffer.Spreadsheet()
	s["s"]["rows"].addColumn( Gaffer.IntPlug( "column1" ) )
	s["s"]["rows"].addColumn( Gaffer.IntPlug( "column2" ) )
	s["s"]["rows"].addRow()
	s["s"]["rows"].addRow()
	s["s"]["rows"][0]["cells"]["column1"]["value"].setValue( 10 )
	s["s"]["rows"][1]["cells"]["column1"]["value"].setValue( 20 )
	s["s"]["rows"][1]["cells"]["column1"]["enabled"].setValue( False )
	s["s"]["rows"][1]["name"].setValue( "rrr" )
	s["s"]["rows"][2]["name"].setValue( "zzz" )
	s["s"]["rows"][2]["cells"]["column1"]["value"].setValue( 30 )
	s["s"]["rows"][2]["cells"]["column2"]["value"].setValue( 40 )
	ss = s.serialise()
	# The rows should be batched into a single `addRows()` call rather than
	# one `addRow()` per row.
	self.assertEqual( ss.count( "addChild" ), 1 )
	self.assertEqual( ss.count( "addColumn" ), 2 )
	self.assertEqual( ss.count( "addRows" ), 1 )
	s2 = Gaffer.ScriptNode()
	s2.execute( ss )
	self.assertEqual( s2["s"]["rows"].keys(), s["s"]["rows"].keys() )
	for r in s2["s"]["rows"].keys() :
		self.assertEqual( s2["s"]["rows"][r]["name"].getValue(), s["s"]["rows"][r]["name"].getValue() )
		self.assertEqual( s2["s"]["rows"][r]["enabled"].getValue(), s["s"]["rows"][r]["enabled"].getValue() )
		self.assertEqual( s2["s"]["rows"][r]["cells"].keys(), s["s"]["rows"][r]["cells"].keys() )
		for c in s2["s"]["rows"][r]["cells"].keys() :
			self.assertEqual( s2["s"]["rows"][r]["cells"][c]["enabled"].getValue(), s["s"]["rows"][r]["cells"][c]["enabled"].getValue() )
			self.assertEqual( s2["s"]["rows"][r]["cells"][c]["value"].getValue(), s["s"]["rows"][r]["cells"][c]["value"].getValue() )
def testNestedPlugs( self ) :
	# Compound column types (here TransformPlug) must work : leaf components
	# of "out" map back to the corresponding leaf of the active row's cell.
	s = Gaffer.Spreadsheet()
	s["rows"].addColumn( Gaffer.TransformPlug( "transform" ) )
	r = s["rows"].addRow()
	self.assertEqual(
		s.correspondingInput( s["out"]["transform"]["translate"]["x"] ),
		s["rows"][0]["cells"]["transform"]["value"]["translate"]["x"]
	)
	r["name"].setValue( "n" )
	r["cells"]["transform"]["value"]["translate"].setValue( imath.V3f( 1, 2, 3 ) )
	# Row not selected yet, so the default row's value wins.
	self.assertEqual( s["out"]["transform"]["translate"].getValue(), imath.V3f( 0 ) )
	s["selector"].setValue( "n" )
	self.assertEqual( s["out"]["transform"]["translate"].getValue(), imath.V3f( 1, 2, 3 ) )
def testDirtyPropagation( self ) :
	# Dirtiness must propagate only to the affected components of "out" :
	# editing `v.x` must not dirty `v.z` or the unrelated `f` column, while
	# structural edits (row add/remove, enable toggles) dirty everything.
	s = Gaffer.Spreadsheet()
	s["rows"].addColumn( Gaffer.V3fPlug( "v" ) )
	s["rows"].addColumn( Gaffer.FloatPlug( "f" ) )
	r = s["rows"].addRow()
	cs = GafferTest.CapturingSlot( s.plugDirtiedSignal() )
	s["enabled"].setValue( False )
	self.assertTrue( set( s["out"].children() ).issubset( { x[0] for x in cs } ) )
	del cs[:]
	# Leaf edit : only the matching leaf of "out" is dirtied.
	r["cells"]["v"]["value"]["x"].setValue( 2 )
	self.assertIn( s["out"]["v"]["x"], { x[0] for x in cs } )
	self.assertNotIn( s["out"]["v"]["z"], { x[0] for x in cs } )
	self.assertNotIn( s["out"]["f"], { x[0] for x in cs } )
	del cs[:]
	# Disabling a whole cell dirties that column but not others.
	r["cells"]["v"]["enabled"].setValue( False )
	self.assertTrue( set( s["out"]["v"].children() ).issubset( { x[0] for x in cs } ) )
	self.assertNotIn( s["out"]["f"], { x[0] for x in cs } )
	del cs[:]
	# Adding or removing rows dirties all outputs.
	s["rows"].addRow()
	self.assertTrue( set( s["out"].children() ).issubset( { x[0] for x in cs } ) )
	del cs[:]
	s["rows"].removeChild( s["rows"][-1] )
	self.assertTrue( set( s["out"].children() ).issubset( { x[0] for x in cs } ) )
	del cs[:]
def testDisablingRows( self ) :
	# A disabled row must be skipped by the selector, causing the
	# default row's value to be output instead.
	sheet = Gaffer.Spreadsheet()
	sheet["selector"].setValue( "a" )
	sheet["rows"].addColumn( Gaffer.IntPlug( "i" ) )
	row = sheet["rows"].addRow()
	row["name"].setValue( "a" )
	row["cells"]["i"]["value"].setValue( 2 )
	# Enabled and matched : the row's value is output.
	self.assertEqual( sheet["out"]["i"].getValue(), 2 )
	# Disabled : fall back to the default row's value.
	row["enabled"].setValue( False )
	self.assertEqual( sheet["out"]["i"].getValue(), 0 )
def testCorrespondingInput( self ) :
	# `correspondingInput()` maps a column of "out" to the default row's
	# cell value, and returns None for the "out" plug itself.
	sheet = Gaffer.Spreadsheet()
	sheet["rows"].addColumn( Gaffer.IntPlug( "column1" ) )
	sheet["rows"].addRows( 2 )
	defaultCell = sheet["rows"].defaultRow()["cells"]["column1"]["value"]
	self.assertEqual( sheet.correspondingInput( sheet["out"]["column1"] ), defaultCell )
	self.assertEqual( sheet.correspondingInput( sheet["out"] ), None )
def testPromotion( self ) :
	# Promoting the "rows" plug to a Box must mirror all rows/cells on the
	# promoted plug, keep "out" valid, survive column add/remove on the
	# promoted plug, and round-trip through serialisation.
	def assertCellEqual( cellPlug1, cellPlug2 ) :
		self.assertEqual( cellPlug1.getName(), cellPlug2.getName() )
		self.assertIsInstance( cellPlug1, Gaffer.Spreadsheet.CellPlug )
		self.assertIsInstance( cellPlug2, Gaffer.Spreadsheet.CellPlug )
		self.assertEqual( cellPlug1["enabled"].getValue(), cellPlug2["enabled"].getValue() )
		self.assertEqual( cellPlug1["value"].getValue(), cellPlug2["value"].getValue() )
	def assertRowEqual( rowPlug1, rowPlug2 ) :
		self.assertEqual( rowPlug1.getName(), rowPlug2.getName() )
		self.assertIsInstance( rowPlug1, Gaffer.Spreadsheet.RowPlug )
		self.assertIsInstance( rowPlug2, Gaffer.Spreadsheet.RowPlug )
		self.assertEqual( rowPlug1["name"].getValue(), rowPlug2["name"].getValue() )
		self.assertEqual( rowPlug1["enabled"].getValue(), rowPlug2["enabled"].getValue() )
		self.assertEqual( rowPlug1["cells"].keys(), rowPlug2["cells"].keys() )
		for k in rowPlug1["cells"].keys() :
			assertCellEqual( rowPlug1["cells"][k], rowPlug2["cells"][k] )
	def assertRowsEqual( rowsPlug1, rowsPlug2 ) :
		self.assertIsInstance( rowsPlug1, Gaffer.Spreadsheet.RowsPlug )
		self.assertIsInstance( rowsPlug2, Gaffer.Spreadsheet.RowsPlug )
		self.assertEqual( rowsPlug1.keys(), rowsPlug2.keys() )
		for k in rowsPlug1.keys() :
			assertRowEqual( rowsPlug1[k], rowsPlug2[k] )
	def assertOutputsValid( spreadsheet ) :
		# Every output column must map back to the default row's cell value.
		self.assertEqual( spreadsheet["rows"].defaultRow()["cells"].keys(), spreadsheet["out"].keys() )
		for o in spreadsheet["out"] :
			self.assertEqual(
				spreadsheet.correspondingInput( o ),
				spreadsheet["rows"].defaultRow()["cells"][o.getName()]["value"]
			)
	s = Gaffer.ScriptNode()
	s["b"] = Gaffer.Box()
	# Make a Spreadsheet with some existing cells
	# and promote the "rows" plug.
	s["b"]["s1"] = Gaffer.Spreadsheet()
	s["b"]["s1"]["rows"].addColumn( Gaffer.IntPlug( "i" ) )
	s["b"]["s1"]["rows"].addRow()["cells"][0]["value"].setValue( 10 )
	s["b"]["s1"]["rows"].addRow()["cells"][0]["value"].setValue( 20 )
	p1 = Gaffer.PlugAlgo.promote( s["b"]["s1"]["rows"] )
	assertRowsEqual( p1, s["b"]["s1"]["rows"] )
	assertOutputsValid( s["b"]["s1"] )
	self.assertTrue( Gaffer.PlugAlgo.isPromoted( s["b"]["s1"]["rows"] ) )
	# Promote the "rows" plug on an empty spreadsheet,
	# and add some cells.
	s["b"]["s2"] = Gaffer.Spreadsheet()
	p2 = Gaffer.PlugAlgo.promote( s["b"]["s2"]["rows"] )
	assertRowsEqual( p2, s["b"]["s2"]["rows"] )
	assertOutputsValid( s["b"]["s2"] )
	self.assertTrue( Gaffer.PlugAlgo.isPromoted( s["b"]["s2"]["rows"] ) )
	p2.addColumn( Gaffer.IntPlug( "i" ) )
	p2.addRow()["cells"][0]["value"].setValue( 10 )
	p2.addRow()["cells"][0]["value"].setValue( 20 )
	assertRowsEqual( p2, s["b"]["s2"]["rows"] )
	assertOutputsValid( s["b"]["s2"] )
	self.assertTrue( Gaffer.PlugAlgo.isPromoted( s["b"]["s2"]["rows"] ) )
	p2.addColumn( Gaffer.IntPlug( "j" ) )
	assertRowsEqual( p2, s["b"]["s2"]["rows"] )
	assertOutputsValid( s["b"]["s2"] )
	self.assertTrue( Gaffer.PlugAlgo.isPromoted( s["b"]["s2"]["rows"] ) )
	# Remove a column
	p2.removeColumn( 0 )
	assertRowsEqual( p2, s["b"]["s2"]["rows"] )
	assertOutputsValid( s["b"]["s2"] )
	self.assertTrue( Gaffer.PlugAlgo.isPromoted( s["b"]["s2"]["rows"] ) )
	# Serialise and reload, and check all is well
	s2 = Gaffer.ScriptNode()
	s2.execute( s.serialise() )
	assertRowsEqual( s2["b"]["s1"]["rows"], s["b"]["s1"]["rows"] )
	assertRowsEqual( s2["b"]["s2"]["rows"], s["b"]["s2"]["rows"] )
	assertOutputsValid( s["b"]["s1"] )
	assertOutputsValid( s["b"]["s2"] )
def testNoRedundantSetInputCalls( self ) :
	# The serialisation of a promoted "rows" plug should contain a single
	# `setInput()` call, not one per descendant plug.
	s = Gaffer.ScriptNode()
	s["b"] = Gaffer.Box()
	s["b"]["s"] = Gaffer.Spreadsheet()
	s["b"]["s"]["rows"].addColumn( Gaffer.IntPlug( "i" ) )
	s["b"]["s"]["rows"].addRows( 2 )
	Gaffer.PlugAlgo.promote( s["b"]["s"]["rows"] )
	# We should only need a single `setInput()` call
	# on `rows` to serialise all the connections
	# for the entire spreadsheet.
	ss = s.serialise()
	self.assertEqual( ss.count( "setInput" ), 1 )
	s2 = Gaffer.ScriptNode()
	s2.execute( ss )
	self.assertEqual( s2["b"]["s"]["rows"].getInput(), s2["b"]["rows"] )
def testActiveRowNames( self ) :
	# "activeRowNames" lists the names of enabled rows in order, and
	# tracks both enable toggles and renames.
	sheet = Gaffer.Spreadsheet()
	for name in ( "1", "2", "3" ) :
		sheet["rows"].addRow()["name"].setValue( name )
	self.assertEqual( sheet["activeRowNames"].getValue(), IECore.StringVectorData( [ "1", "2", "3" ] ) )
	# Disabling a row removes its name from the list.
	sheet["rows"][1]["enabled"].setValue( False )
	self.assertEqual( sheet["activeRowNames"].getValue(), IECore.StringVectorData( [ "2", "3" ] ) )
	# Renaming a row is reflected immediately.
	sheet["rows"][2]["name"].setValue( "two" )
	self.assertEqual( sheet["activeRowNames"].getValue(), IECore.StringVectorData( [ "two", "3" ] ) )
def testAddColumnUsingDynamicPlug( self ) :
	# Columns added from plugs carrying the Dynamic flag must serialise
	# identically to non-dynamic ones, and re-serialising the loaded script
	# must be byte-identical (no flag-related drift).
	s = Gaffer.ScriptNode()
	s["s"] = Gaffer.Spreadsheet()
	s["s"]["rows"].addColumn( Gaffer.Color3fPlug( "c", flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic ) )
	s["s"]["rows"].addColumn( Gaffer.IntPlug( "i", flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic ) )
	s2 = Gaffer.ScriptNode()
	s2.execute( s.serialise() )
	self.assertEqual( s2["s"]["rows"][0]["cells"].keys(), s["s"]["rows"][0]["cells"].keys() )
	self.assertEqual( s2["s"]["out"].keys(), s["s"]["out"].keys() )
	self.assertEqual( s2.serialise(), s.serialise() )
def testActiveInput( self ) :
	# `activeInPlug()` must return the cell value plug of the row selected
	# by the (context-substituted) selector, at any depth of the output
	# hierarchy, falling back to the default row when nothing matches.
	s = Gaffer.Spreadsheet()
	s["rows"].addColumn( Gaffer.V3fPlug( "v" ) )
	s["rows"].addColumn( Gaffer.IntPlug( "i" ) )
	s["rows"].addRow()["name"].setValue( "a" )
	s["rows"].addRow()["name"].setValue( "b" )
	# Selector is driven by a context variable.
	s["selector"].setValue( "${testSelector}" )
	with Gaffer.Context() as c :
		# Variable unset : default row is active.
		self.assertEqual( s.activeInPlug( s["out"]["v"] ), s["rows"].defaultRow()["cells"]["v"]["value"] )
		self.assertEqual( s.activeInPlug( s["out"]["v"]["x"] ), s["rows"].defaultRow()["cells"]["v"]["value"]["x"] )
		self.assertEqual( s.activeInPlug( s["out"]["i"] ), s["rows"].defaultRow()["cells"]["i"]["value"] )
		c["testSelector"] = "a"
		self.assertEqual( s.activeInPlug( s["out"]["v"] ), s["rows"][1]["cells"]["v"]["value"] )
		self.assertEqual( s.activeInPlug( s["out"]["v"]["x"] ), s["rows"][1]["cells"]["v"]["value"]["x"] )
		self.assertEqual( s.activeInPlug( s["out"]["i"] ), s["rows"][1]["cells"]["i"]["value"] )
		c["testSelector"] = "b"
		self.assertEqual( s.activeInPlug( s["out"]["v"] ), s["rows"][2]["cells"]["v"]["value"] )
		self.assertEqual( s.activeInPlug( s["out"]["v"]["x"] ), s["rows"][2]["cells"]["v"]["value"]["x"] )
		self.assertEqual( s.activeInPlug( s["out"]["i"] ), s["rows"][2]["cells"]["i"]["value"] )
		# Unmatched value : back to the default row.
		c["testSelector"] = "x"
		self.assertEqual( s.activeInPlug( s["out"]["v"] ), s["rows"].defaultRow()["cells"]["v"]["value"] )
		self.assertEqual( s.activeInPlug( s["out"]["v"]["x"] ), s["rows"].defaultRow()["cells"]["v"]["value"]["x"] )
		self.assertEqual( s.activeInPlug( s["out"]["i"] ), s["rows"].defaultRow()["cells"]["i"]["value"] )
def testAddColumnWithName( self ) :
	# The `name` argument to `addColumn()` overrides the name of the plug
	# being added, for both the cells and the "out" plug.
	s = Gaffer.Spreadsheet()
	columnIndex = s["rows"].addColumn( Gaffer.IntPlug( "x" ), name = "y" )
	# `addColumn()` returns the index of the new column; previously this
	# return value was assigned but never checked.
	self.assertEqual( columnIndex, 0 )
	self.assertEqual( s["rows"].defaultRow()["cells"][columnIndex].getName(), "y" )
	self.assertEqual( s["out"][columnIndex].getName(), "y" )
def testAddColumnCopiesCurrentValue( self ) :
	# `addColumn()` must copy the full state of the prototype plug to every
	# row : default value, min/max, and the current (non-default) value.
	p = Gaffer.IntPlug( defaultValue = 1, minValue = -10, maxValue = 10 )
	p.setValue( 3 )
	s = Gaffer.Spreadsheet()
	s["rows"].addRow()
	s["rows"].addColumn( p )
	for row in s["rows"] :
		self.assertEqual( row["cells"][0]["value"].defaultValue(), p.defaultValue() )
		self.assertEqual( row["cells"][0]["value"].minValue(), p.minValue() )
		self.assertEqual( row["cells"][0]["value"].maxValue(), p.maxValue() )
		self.assertEqual( row["cells"][0]["value"].getValue(), p.getValue() )
def testRemoveRow( self ) :
	# `removeRow()` must refuse to remove the default row or a row owned by
	# another spreadsheet, and must remove only the requested row otherwise.
	s = Gaffer.Spreadsheet()
	s2 = Gaffer.Spreadsheet( "other" )
	defaultRow = s["rows"].defaultRow()
	row1 = s["rows"].addRow()
	row2 = s["rows"].addRow()
	otherRow = s2["rows"].addRow()
	self.assertEqual( len( s["rows"] ), 3 )
	# The default row can never be removed.
	with six.assertRaisesRegex( self, RuntimeError, 'Cannot remove default row from "Spreadsheet.rows"' ) :
		s["rows"].removeRow( defaultRow )
	self.assertEqual( len( s["rows"] ), 3 )
	# Nor can a row belonging to a different RowsPlug.
	with six.assertRaisesRegex( self, RuntimeError, 'Row "other.rows.row1" is not a child of "Spreadsheet.rows"' ) :
		s["rows"].removeRow( otherRow )
	self.assertEqual( len( s["rows"] ), 3 )
	s["rows"].removeRow( row1 )
	self.assertEqual( s["rows"].children(), ( defaultRow, row2 ) )
def testComputeDuringColumnAdditionandDeletion( self ) :
	# Output plugs must be computable from within childAdded/childRemoved
	# signal handlers while columns are added/removed (including via
	# undo/redo), without raising.
	script = Gaffer.ScriptNode()
	script["sheet"] = Gaffer.Spreadsheet()
	values = []
	exceptions = []
	def outputAddedOrRemoved( parent, child ) :
		try :
			values.append( child.getValue() )
		except Exception as e :
			exceptions.append( e )
	script["sheet"]["out"].childAddedSignal().connect( outputAddedOrRemoved, scoped = False )
	script["sheet"]["out"].childRemovedSignal().connect( outputAddedOrRemoved, scoped = False )
	with Gaffer.UndoScope( script ) :
		script["sheet"]["rows"].addColumn( Gaffer.StringPlug( "column1" ) )
	with Gaffer.UndoScope( script ) :
		script["sheet"]["rows"].removeColumn( 0 )
	script.undo()
	script.undo()
	script.redo()
	script.redo()
	# 6 structural events (add, remove, 2 undos, 2 redos), none of which
	# should have thrown during `getValue()`.
	self.assertEqual( len( values ), 6 )
	self.assertEqual( len( exceptions ), 0 )
def testDeleteRowsAndSerialise( self ) :
	# Deleting a row and then serialising must preserve the remaining rows'
	# values under their new indices.
	s = Gaffer.ScriptNode()
	s["s"] = Gaffer.Spreadsheet()
	s["s"]["rows"].addRows( 3 )
	s["s"]["rows"].addColumn( Gaffer.IntPlug( "v" ) )
	for i in range( 0, 4 ) :
		s["s"]["rows"][i]["cells"]["v"]["value"].setValue( i )
	s["s"]["rows"].removeRow( s["s"]["rows"][1] )
	ss = Gaffer.ScriptNode()
	ss.execute( s.serialise() )
	# Row 1 (value 1) was removed; the survivors shift down by one index.
	for i, v in [
		( 0, 0 ),
		( 1, 2 ),
		( 2, 3 ),
	] :
		self.assertEqual( ss["s"]["rows"][i]["cells"]["v"]["value"].getValue(), v )
def testWildcards( self ) :
	# Row names may contain wildcards, matched against the selector; when
	# several rows match, the first matching row wins.
	s = Gaffer.Spreadsheet()
	s["rows"].addColumn( Gaffer.IntPlug( "v" ) )
	row1 = s["rows"].addRow()
	row2 = s["rows"].addRow()
	row1["cells"]["v"]["value"].setValue( 1 )
	row2["cells"]["v"]["value"].setValue( 2 )
	s["selector"].setValue( "cat" )
	# No row named yet : default row value.
	self.assertEqual( s["out"]["v"].getValue(), 0 )
	row1["name"].setValue( "cat" )
	self.assertEqual( s["out"]["v"].getValue(), 1 )
	# Both rows match : first one wins.
	row2["name"].setValue( "cat" )
	self.assertEqual( s["out"]["v"].getValue(), 1 )
	# Wildcard match counts the same as a literal match.
	row1["name"].setValue( "ca*" )
	self.assertEqual( s["out"]["v"].getValue(), 1 )
	row1["name"].setValue( "dog" )
	self.assertEqual( s["out"]["v"].getValue(), 2 )
	row2["name"].setValue( "ca*" )
	self.assertEqual( s["out"]["v"].getValue(), 2 )
def testSelectorVariablesRemovedFromRowNameContext( self ) :
	# Context variables used only by the selector must not leak into the
	# context used to evaluate row names (an expression driving a row name
	# must not see them).
	s = Gaffer.ScriptNode()
	s["s"] = Gaffer.Spreadsheet()
	s["s"]["rows"].addColumn( Gaffer.StringPlug( "v" ) )
	# `row` will only be selected if the context is not
	# cleaned up when evaluating the row name. If it is
	# selected then the spreadsheet will output "unexpectedVariable"
	# instead of the "" we expect.
	row = s["s"]["rows"].addRow()
	row["cells"]["v"]["value"].setValue( "unexpectedVariable" )
	s["e"] = Gaffer.Expression()
	s["e"].setExpression( inspect.cleandoc(
		"""
		a = context.get( "A" )
		b = context.get( "B" )
		parent["s"]["rows"]["row1"]["name"] = "*" if ( a or b ) else ""
		"""
	) )
	s["s"]["selector"].setValue( "${A}" )
	with Gaffer.Context() as c :
		c["A"] = "${B}"
		c["B"] = "boo"
		# Evaluated twice deliberately : the second call exercises the
		# cached result as well as the initial compute.
		self.assertEqual( s["s"]["out"]["v"].getValue(), "" )
		self.assertEqual( s["s"]["out"]["v"].getValue(), "" )
def testRowAccessor( self ) :
	# `RowsPlug.row( name )` must track row additions and renames, and the
	# internal name->row index must be restored correctly by undo.
	s = Gaffer.ScriptNode()
	s["s"] = Gaffer.Spreadsheet()
	self.assertEqual( s["s"]["rows"].row( "" ), None )
	self.assertEqual( s["s"]["rows"].row( "x" ), None )
	self.assertEqual( s["s"]["rows"].row( "y" ), None )
	self.assertEqual( s["s"]["rows"].row( "z" ), None )
	with Gaffer.UndoScope( s ) :
		row1 = s["s"]["rows"].addRow()
		row1["name"].setValue( "x" )
		row2 = s["s"]["rows"].addRow()
		row2["name"].setValue( "y" )
	self.assertEqual( s["s"]["rows"].row( "" ), None )
	self.assertEqual( s["s"]["rows"].row( "x" ), row1 )
	self.assertEqual( s["s"]["rows"].row( "y" ), row2 )
	self.assertEqual( s["s"]["rows"].row( "z" ), None )
	with Gaffer.UndoScope( s ) :
		row2["name"].setValue( "z" )
	self.assertEqual( s["s"]["rows"].row( "" ), None )
	self.assertEqual( s["s"]["rows"].row( "x" ), row1 )
	self.assertEqual( s["s"]["rows"].row( "y" ), None )
	self.assertEqual( s["s"]["rows"].row( "z" ), row2 )
	with Gaffer.UndoScope( s ) :
		row3 = s["s"]["rows"].addRow()
		row3["name"].setValue( "y" )
	self.assertEqual( s["s"]["rows"].row( "" ), None )
	self.assertEqual( s["s"]["rows"].row( "x" ), row1 )
	self.assertEqual( s["s"]["rows"].row( "y" ), row3 )
	self.assertEqual( s["s"]["rows"].row( "z" ), row2 )
	# Undo each of the three scopes in turn, checking that the accessor
	# reverts to each earlier state.
	s.undo()
	self.assertEqual( s["s"]["rows"].row( "" ), None )
	self.assertEqual( s["s"]["rows"].row( "x" ), row1 )
	self.assertEqual( s["s"]["rows"].row( "y" ), None )
	self.assertEqual( s["s"]["rows"].row( "z" ), row2 )
	s.undo()
	self.assertEqual( s["s"]["rows"].row( "" ), None )
	self.assertEqual( s["s"]["rows"].row( "x" ), row1 )
	self.assertEqual( s["s"]["rows"].row( "y" ), row2 )
	self.assertEqual( s["s"]["rows"].row( "z" ), None )
	s.undo()
	self.assertEqual( s["s"]["rows"].row( "" ), None )
	self.assertEqual( s["s"]["rows"].row( "x" ), None )
	self.assertEqual( s["s"]["rows"].row( "y" ), None )
	self.assertEqual( s["s"]["rows"].row( "z" ), None )
def testRowAccessorWithEmptyName( self ) :
	# `row( "" )` only finds a row while one actually has the empty name.
	sheet = Gaffer.Spreadsheet()
	self.assertIsNone( sheet["rows"].row( "" ) )
	newRow = sheet["rows"].addRow()
	# Freshly added rows have an empty name.
	self.assertEqual( sheet["rows"].row( "" ), newRow )
	newRow["name"].setValue( "n" )
	self.assertIsNone( sheet["rows"].row( "" ) )
def testRowAccessorWithDuplicateNames( self ) :
	# With duplicate names the accessor returns the first matching row,
	# and falls through to the next as earlier rows are removed.
	sheet = Gaffer.Spreadsheet()
	rows = [ sheet["rows"].addRow() for unused in range( 0, 3 ) ]
	self.assertEqual( sheet["rows"].row( "" ), rows[0] )
	sheet["rows"].removeRow( rows[0] )
	self.assertEqual( sheet["rows"].row( "" ), rows[1] )
	sheet["rows"].removeRow( rows[1] )
	self.assertEqual( sheet["rows"].row( "" ), rows[2] )
def testRowAccessorWithNoParent( self ) :
	# The accessor must also work on a free-standing RowsPlug that is not
	# parented to a Spreadsheet node, and must track renames.
	rows = Gaffer.Spreadsheet.RowsPlug()
	rowA = rows.addRow()
	rowB = rows.addRow()
	rowA["name"].setValue( "x" )
	rowB["name"].setValue( "y" )
	self.assertEqual( rows.row( "x" ), rowA )
	self.assertEqual( rows.row( "y" ), rowB )
	# Rename both rows; the old names must no longer match.
	rowA["name"].setValue( "z" )
	rowB["name"].setValue( "w" )
	self.assertIsNone( rows.row( "x" ) )
	self.assertIsNone( rows.row( "y" ) )
	self.assertEqual( rows.row( "z" ), rowA )
	self.assertEqual( rows.row( "w" ), rowB )
def testRowAccessorWithPromotion( self ) :
	# The accessor must work on a promoted RowsPlug, and rows found on the
	# internal plug must be the sources of those found on the promoted one.
	box = Gaffer.Box()
	box["s"] = Gaffer.Spreadsheet()
	rows = Gaffer.PlugAlgo.promote( box["s"]["rows"] )
	row1 = rows.addRow()
	row2 = rows.addRow()
	row1["name"].setValue( "x" )
	row2["name"].setValue( "y" )
	self.assertEqual( rows.row( "x" ), row1 )
	self.assertEqual( rows.row( "y" ), row2 )
	self.assertEqual( rows.row( "z" ), None )
	# Looking up on the internal plug finds the connected promoted rows.
	self.assertEqual( box["s"]["rows"].row( "x" ).source(), row1 )
	self.assertEqual( box["s"]["rows"].row( "y" ).source(), row2 )
	self.assertEqual( box["s"]["rows"].row( "z" ), None )
def testRowAccessorWithComputedNames( self ) :
	# Rows whose names are driven by a connection (computed) are not
	# matched by the accessor.
	n = GafferTest.StringInOutNode()
	n["in"].setValue( "x" )
	s = Gaffer.Spreadsheet()
	r = s["rows"].addRow()
	r["name"].setInput( n["out"] )
	self.assertEqual( s["rows"].row( "x" ), None )
def testRowAccessorWithWildcards( self ) :
	# `row()` matches names literally : wildcard characters in a row name
	# are not expanded during the lookup.
	sheet = Gaffer.Spreadsheet()
	row = sheet["rows"].addRow()
	pattern = "abc*[0-9]"
	row["name"].setValue( pattern )
	self.assertEqual( sheet["rows"].row( pattern ), row )
def testAdoptedEnabledPlug( self ) :
	# With `adoptEnabledPlug = True` the cell has no "enabled" plug of its
	# own : `enabledPlug()` is the value's nested "enabled" plug, and the
	# output "enabled" follows the default row's setting, while disabling
	# a cell falls the value back to the default row.
	s = Gaffer.Spreadsheet()
	s["rows"].addColumn( Gaffer.NameValuePlug( "name", "defaultValue", defaultEnabled = True ), name = "c1", adoptEnabledPlug = True )
	row1 = s["rows"].addRow()
	row1["name"].setValue( "row1" )
	row2 = s["rows"].addRow()
	row2["name"].setValue( "row2" )
	for row in Gaffer.Spreadsheet.RowPlug.Range( s["rows"] ) :
		# No separate "enabled" child : the NameValuePlug's own is adopted.
		self.assertNotIn( "enabled", row["cells"]["c1"] )
		self.assertEqual( row["cells"]["c1"].enabledPlug(), row["cells"]["c1"]["value"]["enabled"] )
		self.assertEqual( row["cells"]["c1"].enabledPlug().getValue(), True )
	row1["cells"]["c1"]["value"]["value"].setValue( "row1Value" )
	row2["cells"]["c1"]["value"]["value"].setValue( "row2Value" )
	s["selector"].setValue( "row1" )
	self.assertEqual( s["out"]["c1"]["value"].getValue(), "row1Value" )
	self.assertEqual( s["out"]["c1"]["enabled"].getValue(), True )
	s["selector"].setValue( "row2" )
	self.assertEqual( s["out"]["c1"]["value"].getValue(), "row2Value" )
	self.assertEqual( s["out"]["c1"]["enabled"].getValue(), True )
	s["selector"].setValue( "notARow" )
	self.assertEqual( s["out"]["c1"]["value"].getValue(), "defaultValue" )
	self.assertEqual( s["out"]["c1"]["enabled"].getValue(), True )
	# Disabling the row's cell falls back to the default row's value, but
	# the output "enabled" still reflects the default row's enabled state.
	s["selector"].setValue( "row1" )
	row1["cells"]["c1"].enabledPlug().setValue( False )
	self.assertEqual( s["out"]["c1"]["value"].getValue(), "defaultValue" )
	self.assertEqual( s["out"]["c1"]["enabled"].getValue(), True )
	s["rows"]["default"]["cells"]["c1"].enabledPlug().setValue( False )
	self.assertEqual( s["out"]["c1"]["value"].getValue(), "defaultValue" )
	self.assertEqual( s["out"]["c1"]["enabled"].getValue(), False )
def testAdoptedEnabledPlugChecks( self ) :
	# `adoptEnabledPlug` requires the value plug to actually have an
	# "enabled" child; otherwise `addColumn()` must raise.
	s = Gaffer.Spreadsheet()
	with six.assertRaisesRegex( self, RuntimeError, 'Value plug has no "enabled" plug to adopt' ) :
		s["rows"].addColumn( Gaffer.IntPlug(), adoptEnabledPlug = True )
def testAdoptedEnabledPlugSerialisation( self ) :
	# The adopted-enabled configuration must survive serialisation : the
	# reloaded cells still have no "enabled" child and keep adopting the
	# NameValuePlug's one.
	s = Gaffer.ScriptNode()
	s["s"] = Gaffer.Spreadsheet()
	s["s"]["rows"].addColumn( Gaffer.NameValuePlug( "n", "v", defaultEnabled = True ), name = "c1", adoptEnabledPlug = True )
	s["s"]["rows"].addRow()
	s2 = Gaffer.ScriptNode()
	s2.execute( s.serialise() )
	for row in Gaffer.Spreadsheet.RowPlug.Range( s2["s"]["rows"] ) :
		self.assertNotIn( "enabled", row["cells"]["c1"] )
		self.assertEqual( row["cells"]["c1"].enabledPlug(), row["cells"]["c1"]["value"]["enabled"] )
		self.assertEqual( row["cells"]["c1"].enabledPlug().getValue(), True )
def testDefaultValueSerialisation( self ) :
	# Plugs with modified defaults, modified values, or both must
	# round-trip exactly : both `defaultHash()` and `hash()` of the rows
	# must match after serialise/execute.
	s = Gaffer.ScriptNode()
	s["s"] = Gaffer.Spreadsheet()
	s["s"]["rows"].addColumn( Gaffer.V3iPlug( "c1", defaultValue = imath.V3i( 1, 2, 3 ) ) )
	s["s"]["rows"].addColumn( Gaffer.Box2iPlug( "c2", defaultValue = imath.Box2i( imath.V2i( 0 ), imath.V2i( 1 ) ) ) )
	s["s"]["rows"].addRows( 3 )
	# Change defaults for some plugs
	s["s"]["rows"][1]["name"].setValue( "testName" )
	s["s"]["rows"][1]["cells"]["c1"]["value"].setValue( imath.V3i( 4, 5, 6 ) )
	s["s"]["rows"][2]["enabled"].setValue( False )
	s["s"]["rows"][2]["cells"]["c1"]["value"]["x"].setValue( 10 )
	s["s"]["rows"][3]["cells"]["c1"]["enabled"].setValue( False )
	s["s"]["rows"][3]["cells"]["c2"]["value"].setValue( imath.Box2i( imath.V2i( 10 ), imath.V2i( 11 ) ) )
	s["s"]["rows"][1]["name"].resetDefault()
	# Change values for some plugs, some of which also had their
	# defaults changed.
	s["s"]["rows"][1]["name"].setValue( "testName2" )
	s["s"]["rows"][1]["cells"]["c1"]["value"]["x"].setValue( 7 )
	s["s"]["rows"][3]["enabled"].setValue( False )
	# Check that everything round-trips correctly through a serialisation and load.
	s2 = Gaffer.ScriptNode()
	s2.execute( s.serialise() )
	self.assertEqual( s["s"]["rows"].defaultHash(), s2["s"]["rows"].defaultHash() )
	self.assertEqual( s["s"]["rows"].hash(), s2["s"]["rows"].hash() )
def testResolvedRows( self ) :
	# "resolvedRows" outputs a CompoundObject keyed by active row name,
	# with per-cell values resolved against the default row : disabled
	# cells fall back to the default, disabled rows disappear, and
	# duplicate names keep only the first row.
	s = Gaffer.Spreadsheet()
	s["rows"].addColumn( Gaffer.IntPlug( "c1", defaultValue = 10 ) )
	s["rows"].addColumn( Gaffer.StringPlug( "c2", defaultValue = "" ) )
	s["rows"].addColumn( Gaffer.V2iPlug( "c3", defaultValue = imath.V2i( 0 ) ) )
	s["rows"].addRows( 3 )
	s["rows"][1]["name"].setValue( "row1" )
	s["rows"][2]["name"].setValue( "row2" )
	s["rows"][3]["name"].setValue( "row3" )
	def assertExpectedValues( expected ) :
		# `expected` maps row name -> [ c1, c2, c3 ] resolved values.
		resolved = s["resolvedRows"].getValue()
		self.assertIsInstance( resolved, IECore.CompoundObject )
		self.assertEqual( len( resolved ), len( expected ) )
		for name, expectedRow in expected.items() :
			self.assertEqual( resolved[name]["c1"].value, expectedRow[0] )
			self.assertEqual( resolved[name]["c2"].value, expectedRow[1] )
			self.assertEqual( resolved[name]["c3"].value, expectedRow[2] )
	assertExpectedValues( {
		"row1" : [ 10, "", imath.V2i( 0 ) ],
		"row2" : [ 10, "", imath.V2i( 0 ) ],
		"row3" : [ 10, "", imath.V2i( 0 ) ],
	} )
	s["rows"][1]["cells"]["c1"]["value"].setValue( 20 )
	s["rows"][2]["cells"]["c2"]["value"].setValue( "b" )
	s["rows"][3]["cells"]["c3"]["value"].setValue( imath.V2i( 10 ) )
	assertExpectedValues( {
		"row1" : [ 20, "", imath.V2i( 0 ) ],
		"row2" : [ 10, "b", imath.V2i( 0 ) ],
		"row3" : [ 10, "", imath.V2i( 10 ) ],
	} )
	# Disabling a cell resolves that cell back to the default value.
	s["rows"][1]["cells"]["c1"]["enabled"].setValue( False )
	assertExpectedValues( {
		"row1" : [ 10, "", imath.V2i( 0 ) ],
		"row2" : [ 10, "b", imath.V2i( 0 ) ],
		"row3" : [ 10, "", imath.V2i( 10 ) ],
	} )
	# Disabling a whole row removes it from the result.
	s["rows"][3]["enabled"].setValue( False )
	assertExpectedValues( {
		"row1" : [ 10, "", imath.V2i( 0 ) ],
		"row2" : [ 10, "b", imath.V2i( 0 ) ],
	} )
	# Duplicate names collapse to the first row with that name.
	s["rows"][2]["name"].setValue( "row1" )
	assertExpectedValues( {
		"row1" : [ 10, "", imath.V2i( 0 ) ],
	} )
def testReorderRows( self ) :
	# When several rows share a name, the first one in child order wins;
	# reordering the rows must change which one that is, for both "out"
	# and the `row()` accessor.
	spreadsheet = Gaffer.Spreadsheet()
	spreadsheet["rows"].addColumn( Gaffer.IntPlug( "c1", defaultValue = 10 ) )
	spreadsheet["selector"].setValue( "test" )
	# Two rows with the same name. The first one should win.
	spreadsheet["rows"].addRows( 2 )
	spreadsheet["rows"][1]["name"].setValue( "test" )
	spreadsheet["rows"][1]["cells"]["c1"]["value"].setValue( 20 )
	spreadsheet["rows"][2]["name"].setValue( "test" )
	spreadsheet["rows"][2]["cells"]["c1"]["value"].setValue( 30 )
	self.assertEqual( spreadsheet["out"]["c1"].getValue(), 20 )
	self.assertEqual( spreadsheet["rows"].row( "test" ), spreadsheet["rows"][1] )
	# And if you reorder them, the (new) first one should still win.
	spreadsheet["rows"].reorderChildren( [ spreadsheet["rows"][0], spreadsheet["rows"][2], spreadsheet["rows"][1] ] )
	self.assertEqual( spreadsheet["out"]["c1"].getValue(), 30 )
	self.assertEqual( spreadsheet["rows"].row( "test" ), spreadsheet["rows"][1] )
def testUnnamedRowsNeverMatch( self ) :
	# Rows with an empty name are treated as non-existent : they never
	# match any selector (even an empty one), and are excluded from
	# "activeRowNames" and "resolvedRows". A "*" name, by contrast,
	# matches an empty or missing selector value.
	s = Gaffer.Spreadsheet()
	s["rows"].addColumn( Gaffer.IntPlug( "i" ) )
	row = s["rows"].addRow()
	row["name"].setValue( "" )
	row["cells"]["i"]["value"].setValue( 1 )
	# Selector is "", but we shouldn't match it to the unnamed row because
	# that is unintuitive. As a general rule in Gaffer, if something
	# hasn't been given a name then it is treated as if it was disabled.
	self.assertEqual( s["out"]["i"].getValue(), 0 )
	# That should be reinforced by excluding the row from the `activeRows`
	# and `resolvedRows` outputs.
	self.assertEqual( s["activeRowNames"].getValue(), IECore.StringVectorData() )
	self.assertNotIn( "", s["resolvedRows"].getValue() )
	# The same should apply even when the selector receives the empty value
	# via a substitution.
	s["selector"].setValue( "${selector}" )
	with Gaffer.Context() as c :
		self.assertEqual( s["out"]["i"].getValue(), 0 )
		# If the variable exists but is empty, we _still_ don't want to
		# match the empty row. The existence of the variable is not what we
		# care about : the existence of the row is, and we treat unnamed
		# rows as non-existent.
		c["selector"] = ""
		self.assertEqual( s["out"]["i"].getValue(), 0 )
		self.assertEqual( s["activeRowNames"].getValue(), IECore.StringVectorData() )
		self.assertNotIn( "", s["resolvedRows"].getValue() )
		# But by that logic, a row named '*' _should_ match the empty
		# variable.
		row["name"].setValue( "*" )
		self.assertEqual( s["out"]["i"].getValue(), 1 )
		self.assertEqual( s["activeRowNames"].getValue(), IECore.StringVectorData( [ "*" ] ) )
		self.assertIn( "*", s["resolvedRows"].getValue() )
		# Even if the variable doesnt exist at all.
		del c["selector"]
		self.assertEqual( s["out"]["i"].getValue(), 1 )
		self.assertEqual( s["activeRowNames"].getValue(), IECore.StringVectorData( [ "*" ] ) )
		self.assertIn( "*", s["resolvedRows"].getValue() )
@GafferTest.TestRunner.PerformanceTestMethod()
def testAddRowPerformance( self ) :
	# Benchmark adding rows one at a time via `addRow()`.
	sheet = Gaffer.Spreadsheet()
	for unused in range( 0, 10000 ) :
		sheet["rows"].addRow()
@GafferTest.TestRunner.PerformanceTestMethod()
def testAddRowsPerformance( self ) :
	# Benchmark the batched `addRows()` call for comparison with `addRow()`.
	sheet = Gaffer.Spreadsheet()
	sheet["rows"].addRows( 10000 )
@GafferTest.TestRunner.PerformanceTestMethod()
def testSavePerformance( self ) :
	# Benchmark serialising a script containing a 10000-row spreadsheet.
	# Only the `serialise()` call itself is timed.
	s = Gaffer.ScriptNode()
	s["s"] = Gaffer.Spreadsheet()
	s["s"]["rows"].addColumn( Gaffer.IntPlug( "v" ) )
	for i in range( 0, 10000 ) :
		s["s"]["rows"].addRow()["cells"]["v"]["value"].setValue( i )
	with GafferTest.TestRunner.PerformanceScope() :
		s.serialise()
@GafferTest.TestRunner.PerformanceTestMethod()
def testLoadPerformance( self ) :
	# Benchmark executing the serialisation of a 10000-row spreadsheet.
	# Only the `execute()` call is timed; serialisation happens beforehand.
	s = Gaffer.ScriptNode()
	s["s"] = Gaffer.Spreadsheet()
	s["s"]["rows"].addColumn( Gaffer.IntPlug( "v" ) )
	for i in range( 0, 10000 ) :
		s["s"]["rows"].addRow()["cells"]["v"]["value"].setValue( i )
	serialised = s.serialise()
	s = Gaffer.ScriptNode()
	with GafferTest.TestRunner.PerformanceScope() :
		s.execute( serialised )
@GafferTest.TestRunner.PerformanceTestMethod()
def testRowIndexPerformance( self ) :
	# Benchmark selector-driven row lookup : evaluate "out" once per row
	# with the selector resolving to each row name in turn.
	s = Gaffer.Spreadsheet()
	s["selector"].setValue( "${index}" )
	numRows = 10000
	s["rows"].addColumn( Gaffer.IntPlug( "v" ) )
	for i in range( 0, numRows ) :
		row = s["rows"].addRow()
		row["name"].setValue( str( i ) )
		row["cells"]["v"]["value"].setValue( i )
	c = Gaffer.Context()
	out = s["out"]["v"]
	with c :
		with GafferTest.TestRunner.PerformanceScope() :
			for i in range( 0, numRows ) :
				c["index"] = i
				self.assertEqual( out.getValue(), i )
@GafferTest.TestRunner.PerformanceTestMethod()
def testRowAccessorPerformance( self ) :
	# Benchmark `row()` lookups interleaved with row additions; the whole
	# method is timed (no explicit PerformanceScope).
	s = Gaffer.Spreadsheet()
	rows = s["rows"]
	for i in range( 0, 10000 ) :
		name = "row{}".format( i )
		r = rows.addRow()
		r["name"].setValue( name )
		self.assertEqual( rows.row( name ), r )
# Allow this test file to be executed directly.
if __name__ == "__main__":
	unittest.main()
| andrewkaufman/gaffer | python/GafferTest/SpreadsheetTest.py | Python | bsd-3-clause | 38,226 |
t = int(raw_input())
for i in range(t):
n,k = map(int, raw_input().split())
a = map(int, raw_input().split())
b = map(int, raw_input().split())
maximum = (k/a[0])*b[0]
for j in range(n):
t = (k/a[j])*b[j]
if t > maximum:
maximum = t
print maximum | paramsingh/codechef-solutions | src/practice/chefstone.py | Python | mit | 260 |
class Sample(object):

    """
    A data point of the Metric.

    Optional statistics (``val``, ``min``, ``max``, ``avg``, ``sum``,
    ``cnt``) are only set as attributes when a non-None value is given,
    so absent statistics are omitted from any later serialisation of
    ``__dict__``.

    :param metricId: Metric FQN
    :type metricId: string
    :param timestamp: Timestamp for the sample
    :type timestamp: int
    :param val: Value of the sample
    :type val: float
    :param min: Minimum of the sample
    :type min: float
    :param max: Maximum of the sample
    :type max: float
    :param avg: Average of the sample
    :type avg: float
    :param sum: Sum of the sample
    :type sum: float
    :param cnt: Count of the sample
    :type cnt: float
    """

    def __init__(self,
                 metricId,
                 timestamp,
                 val=None,
                 min=None,
                 max=None,
                 avg=None,
                 sum=None,
                 cnt=None):
        self.metricId = metricId
        self.timestamp = timestamp
        # Only attach the statistics that were actually supplied.
        if val is not None:
            self.val = val
        if min is not None:
            self.min = min
        if max is not None:
            self.max = max
        if avg is not None:
            self.avg = avg
        if sum is not None:
            self.sum = sum
        if cnt is not None:
            self.cnt = cnt
| Netuitive/netuitive-client-python | netuitive/sample.py | Python | apache-2.0 | 1,267 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2012, Nachi Ueno, NTT MCL, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from contextlib import nested
import mock
from mock import call
import mox
from oslo.config import cfg
from neutron.agent import firewall as firewall_base
from neutron.agent.linux import iptables_manager
from neutron.agent import rpc as agent_rpc
from neutron.agent import securitygroups_rpc as sg_rpc
from neutron import context
from neutron.db import securitygroups_rpc_base as sg_db_rpc
from neutron.extensions import securitygroup as ext_sg
from neutron.openstack.common.rpc import proxy
from neutron.tests import base
from neutron.tests.unit import test_extension_security_group as test_sg
from neutron.tests.unit import test_iptables_firewall as test_fw
class FakeSGCallback(sg_db_rpc.SecurityGroupServerRpcCallbackMixin):
    """Server-side callback fake that resolves devices from a preset dict.

    Tests assign ``self.devices`` (device id -> port dict) before invoking
    the mixin's RPC entry points.
    """

    def get_port_from_device(self, device):
        """Return the port dict registered for *device*, normalized.

        The rule/source-group lists are reset to empty and ``fixed_ips``
        is flattened to bare IP address strings, mirroring what the real
        plugin-side lookup hands back to the mixin.
        """
        port = self.devices.get(device)
        if not port:
            # Unknown (or falsy) device: mimic a failed plugin lookup.
            return None
        port['security_group_rules'] = []
        port['security_group_source_groups'] = []
        port['fixed_ips'] = [fixed_ip['ip_address']
                             for fixed_ip in port['fixed_ips']]
        return port
class SGServerRpcCallBackMixinTestCase(test_sg.SecurityGroupDBTestCase):
    """End-to-end checks of security_group_rules_for_devices.

    Each test builds real security groups/rules/ports through the DB test
    API, registers the created port with the FakeSGCallback, and asserts
    the exact rule list the RPC callback returns for that device.
    """

    def setUp(self):
        super(SGServerRpcCallBackMixinTestCase, self).setUp()
        # Callback under test; tests populate self.rpc.devices per case.
        self.rpc = FakeSGCallback()

    def test_security_group_rules_for_devices_ipv4_ingress(self):
        """IPv4 ingress TCP rules (with and without a source prefix)."""
        fake_prefix = test_fw.FAKE_PREFIX['IPv4']
        with self.network() as n:
            with nested(self.subnet(n),
                        self.security_group()) as (subnet_v4,
                                                   sg1):
                sg1_id = sg1['security_group']['id']
                rule1 = self._build_security_group_rule(
                    sg1_id,
                    'ingress', 'tcp', '22',
                    '22')
                rule2 = self._build_security_group_rule(
                    sg1_id,
                    'ingress', 'tcp', '23',
                    '23', fake_prefix)
                rules = {
                    'security_group_rules': [rule1['security_group_rule'],
                                             rule2['security_group_rule']]}
                res = self._create_security_group_rule(self.fmt, rules)
                self.deserialize(self.fmt, res)
                self.assertEqual(res.status_int, 201)
                res1 = self._create_port(
                    self.fmt, n['network']['id'],
                    security_groups=[sg1_id])
                ports_rest1 = self.deserialize(self.fmt, res1)
                port_id1 = ports_rest1['port']['id']
                self.rpc.devices = {port_id1: ports_rest1['port']}
                # Unknown devices must be silently skipped by the callback.
                devices = [port_id1, 'no_exist_device']
                ctx = context.get_admin_context()
                ports_rpc = self.rpc.security_group_rules_for_devices(
                    ctx, devices=devices)
                port_rpc = ports_rpc[port_id1]
                # Default egress rules come first, then the created rules.
                expected = [{'direction': 'egress', 'ethertype': 'IPv4',
                             'security_group_id': sg1_id},
                            {'direction': 'egress', 'ethertype': 'IPv6',
                             'security_group_id': sg1_id},
                            {'direction': 'ingress',
                             'protocol': 'tcp', 'ethertype': 'IPv4',
                             'port_range_max': 22,
                             'security_group_id': sg1_id,
                             'port_range_min': 22},
                            {'direction': 'ingress', 'protocol': 'tcp',
                             'ethertype': 'IPv4',
                             'port_range_max': 23, 'security_group_id': sg1_id,
                             'port_range_min': 23,
                             'source_ip_prefix': fake_prefix},
                            ]
                self.assertEqual(port_rpc['security_group_rules'],
                                 expected)
                self._delete('ports', port_id1)

    def test_security_group_rules_for_devices_ipv4_egress(self):
        """IPv4 egress rules; the prefix rule reports dest_ip_prefix."""
        fake_prefix = test_fw.FAKE_PREFIX['IPv4']
        with self.network() as n:
            with nested(self.subnet(n),
                        self.security_group()) as (subnet_v4,
                                                   sg1):
                sg1_id = sg1['security_group']['id']
                rule1 = self._build_security_group_rule(
                    sg1_id,
                    'egress', 'tcp', '22',
                    '22')
                rule2 = self._build_security_group_rule(
                    sg1_id,
                    'egress', 'udp', '23',
                    '23', fake_prefix)
                rules = {
                    'security_group_rules': [rule1['security_group_rule'],
                                             rule2['security_group_rule']]}
                res = self._create_security_group_rule(self.fmt, rules)
                self.deserialize(self.fmt, res)
                self.assertEqual(res.status_int, 201)
                res1 = self._create_port(
                    self.fmt, n['network']['id'],
                    security_groups=[sg1_id])
                ports_rest1 = self.deserialize(self.fmt, res1)
                port_id1 = ports_rest1['port']['id']
                self.rpc.devices = {port_id1: ports_rest1['port']}
                devices = [port_id1, 'no_exist_device']
                ctx = context.get_admin_context()
                ports_rpc = self.rpc.security_group_rules_for_devices(
                    ctx, devices=devices)
                port_rpc = ports_rpc[port_id1]
                expected = [{'direction': 'egress', 'ethertype': 'IPv4',
                             'security_group_id': sg1_id},
                            {'direction': 'egress', 'ethertype': 'IPv6',
                             'security_group_id': sg1_id},
                            {'direction': 'egress',
                             'protocol': 'tcp', 'ethertype': 'IPv4',
                             'port_range_max': 22,
                             'security_group_id': sg1_id,
                             'port_range_min': 22},
                            {'direction': 'egress', 'protocol': 'udp',
                             'ethertype': 'IPv4',
                             'port_range_max': 23, 'security_group_id': sg1_id,
                             'port_range_min': 23,
                             'dest_ip_prefix': fake_prefix},
                            ]
                self.assertEqual(port_rpc['security_group_rules'],
                                 expected)
                self._delete('ports', port_id1)

    def test_security_group_rules_for_devices_ipv4_source_group(self):
        """A remote-group rule expands to the member port's /32 prefix."""
        with self.network() as n:
            with nested(self.subnet(n),
                        self.security_group(),
                        self.security_group()) as (subnet_v4,
                                                   sg1,
                                                   sg2):
                sg1_id = sg1['security_group']['id']
                sg2_id = sg2['security_group']['id']
                rule1 = self._build_security_group_rule(
                    sg1_id,
                    'ingress', 'tcp', '24',
                    '25', remote_group_id=sg2['security_group']['id'])
                rules = {
                    'security_group_rules': [rule1['security_group_rule']]}
                res = self._create_security_group_rule(self.fmt, rules)
                self.deserialize(self.fmt, res)
                self.assertEqual(res.status_int, 201)
                res1 = self._create_port(
                    self.fmt, n['network']['id'],
                    security_groups=[sg1_id,
                                     sg2_id])
                ports_rest1 = self.deserialize(self.fmt, res1)
                port_id1 = ports_rest1['port']['id']
                self.rpc.devices = {port_id1: ports_rest1['port']}
                devices = [port_id1, 'no_exist_device']
                # Second port is a member of sg2 only; its IP feeds the
                # source_ip_prefix expansion of the remote-group rule.
                res2 = self._create_port(
                    self.fmt, n['network']['id'],
                    security_groups=[sg2_id])
                ports_rest2 = self.deserialize(self.fmt, res2)
                port_id2 = ports_rest2['port']['id']
                ctx = context.get_admin_context()
                ports_rpc = self.rpc.security_group_rules_for_devices(
                    ctx, devices=devices)
                port_rpc = ports_rpc[port_id1]
                expected = [{'direction': 'egress', 'ethertype': 'IPv4',
                             'security_group_id': sg1_id},
                            {'direction': 'egress', 'ethertype': 'IPv6',
                             'security_group_id': sg1_id},
                            {'direction': 'egress', 'ethertype': 'IPv4',
                             'security_group_id': sg2_id},
                            {'direction': 'egress', 'ethertype': 'IPv6',
                             'security_group_id': sg2_id},
                            {'direction': u'ingress',
                             'source_ip_prefix': u'10.0.0.3/32',
                             'protocol': u'tcp', 'ethertype': u'IPv4',
                             'port_range_max': 25, 'port_range_min': 24,
                             'remote_group_id': sg2_id,
                             'security_group_id': sg1_id},
                            ]
                self.assertEqual(port_rpc['security_group_rules'],
                                 expected)
                self._delete('ports', port_id1)
                self._delete('ports', port_id2)

    def test_security_group_rules_for_devices_ipv6_ingress(self):
        """IPv6 ingress rules on an IPv6 subnet."""
        fake_prefix = test_fw.FAKE_PREFIX['IPv6']
        with self.network() as n:
            with nested(self.subnet(n,
                                    cidr=fake_prefix,
                                    ip_version=6),
                        self.security_group()) as (subnet_v6,
                                                   sg1):
                sg1_id = sg1['security_group']['id']
                rule1 = self._build_security_group_rule(
                    sg1_id,
                    'ingress', 'tcp', '22',
                    '22',
                    ethertype='IPv6')
                rule2 = self._build_security_group_rule(
                    sg1_id,
                    'ingress', 'udp', '23',
                    '23', fake_prefix,
                    ethertype='IPv6')
                rules = {
                    'security_group_rules': [rule1['security_group_rule'],
                                             rule2['security_group_rule']]}
                res = self._create_security_group_rule(self.fmt, rules)
                self.deserialize(self.fmt, res)
                self.assertEqual(res.status_int, 201)
                res1 = self._create_port(
                    self.fmt, n['network']['id'],
                    fixed_ips=[{'subnet_id': subnet_v6['subnet']['id']}],
                    security_groups=[sg1_id])
                ports_rest1 = self.deserialize(self.fmt, res1)
                port_id1 = ports_rest1['port']['id']
                self.rpc.devices = {port_id1: ports_rest1['port']}
                devices = [port_id1, 'no_exist_device']
                ctx = context.get_admin_context()
                ports_rpc = self.rpc.security_group_rules_for_devices(
                    ctx, devices=devices)
                port_rpc = ports_rpc[port_id1]
                expected = [{'direction': 'egress', 'ethertype': 'IPv4',
                             'security_group_id': sg1_id},
                            {'direction': 'egress', 'ethertype': 'IPv6',
                             'security_group_id': sg1_id},
                            {'direction': 'ingress',
                             'protocol': 'tcp', 'ethertype': 'IPv6',
                             'port_range_max': 22,
                             'security_group_id': sg1_id,
                             'port_range_min': 22},
                            {'direction': 'ingress', 'protocol': 'udp',
                             'ethertype': 'IPv6',
                             'port_range_max': 23, 'security_group_id': sg1_id,
                             'port_range_min': 23,
                             'source_ip_prefix': fake_prefix},
                            ]
                self.assertEqual(port_rpc['security_group_rules'],
                                 expected)
                self._delete('ports', port_id1)

    def test_security_group_rules_for_devices_ipv6_egress(self):
        """IPv6 egress rules on an IPv6 subnet."""
        fake_prefix = test_fw.FAKE_PREFIX['IPv6']
        with self.network() as n:
            with nested(self.subnet(n,
                                    cidr=fake_prefix,
                                    ip_version=6),
                        self.security_group()) as (subnet_v6,
                                                   sg1):
                sg1_id = sg1['security_group']['id']
                rule1 = self._build_security_group_rule(
                    sg1_id,
                    'egress', 'tcp', '22',
                    '22',
                    ethertype='IPv6')
                rule2 = self._build_security_group_rule(
                    sg1_id,
                    'egress', 'udp', '23',
                    '23', fake_prefix,
                    ethertype='IPv6')
                rules = {
                    'security_group_rules': [rule1['security_group_rule'],
                                             rule2['security_group_rule']]}
                res = self._create_security_group_rule(self.fmt, rules)
                self.deserialize(self.fmt, res)
                self.assertEqual(res.status_int, 201)
                res1 = self._create_port(
                    self.fmt, n['network']['id'],
                    fixed_ips=[{'subnet_id': subnet_v6['subnet']['id']}],
                    security_groups=[sg1_id])
                ports_rest1 = self.deserialize(self.fmt, res1)
                port_id1 = ports_rest1['port']['id']
                self.rpc.devices = {port_id1: ports_rest1['port']}
                devices = [port_id1, 'no_exist_device']
                ctx = context.get_admin_context()
                ports_rpc = self.rpc.security_group_rules_for_devices(
                    ctx, devices=devices)
                port_rpc = ports_rpc[port_id1]
                expected = [{'direction': 'egress', 'ethertype': 'IPv4',
                             'security_group_id': sg1_id},
                            {'direction': 'egress', 'ethertype': 'IPv6',
                             'security_group_id': sg1_id},
                            {'direction': 'egress',
                             'protocol': 'tcp', 'ethertype': 'IPv6',
                             'port_range_max': 22,
                             'security_group_id': sg1_id,
                             'port_range_min': 22},
                            {'direction': 'egress', 'protocol': 'udp',
                             'ethertype': 'IPv6',
                             'port_range_max': 23, 'security_group_id': sg1_id,
                             'port_range_min': 23,
                             'dest_ip_prefix': fake_prefix},
                            ]
                self.assertEqual(port_rpc['security_group_rules'],
                                 expected)
                self._delete('ports', port_id1)

    def test_security_group_rules_for_devices_ipv6_source_group(self):
        """IPv6 remote-group rule expands to the member's /128 prefix."""
        fake_prefix = test_fw.FAKE_PREFIX['IPv6']
        with self.network() as n:
            with nested(self.subnet(n,
                                    cidr=fake_prefix,
                                    ip_version=6),
                        self.security_group(),
                        self.security_group()) as (subnet_v6,
                                                   sg1,
                                                   sg2):
                sg1_id = sg1['security_group']['id']
                sg2_id = sg2['security_group']['id']
                rule1 = self._build_security_group_rule(
                    sg1_id,
                    'ingress', 'tcp', '24',
                    '25',
                    ethertype='IPv6',
                    remote_group_id=sg2['security_group']['id'])
                rules = {
                    'security_group_rules': [rule1['security_group_rule']]}
                res = self._create_security_group_rule(self.fmt, rules)
                self.deserialize(self.fmt, res)
                self.assertEqual(res.status_int, 201)
                res1 = self._create_port(
                    self.fmt, n['network']['id'],
                    fixed_ips=[{'subnet_id': subnet_v6['subnet']['id']}],
                    security_groups=[sg1_id,
                                     sg2_id])
                ports_rest1 = self.deserialize(self.fmt, res1)
                port_id1 = ports_rest1['port']['id']
                self.rpc.devices = {port_id1: ports_rest1['port']}
                devices = [port_id1, 'no_exist_device']
                res2 = self._create_port(
                    self.fmt, n['network']['id'],
                    fixed_ips=[{'subnet_id': subnet_v6['subnet']['id']}],
                    security_groups=[sg2_id])
                ports_rest2 = self.deserialize(self.fmt, res2)
                port_id2 = ports_rest2['port']['id']
                ctx = context.get_admin_context()
                ports_rpc = self.rpc.security_group_rules_for_devices(
                    ctx, devices=devices)
                port_rpc = ports_rpc[port_id1]
                expected = [{'direction': 'egress', 'ethertype': 'IPv4',
                             'security_group_id': sg1_id},
                            {'direction': 'egress', 'ethertype': 'IPv6',
                             'security_group_id': sg1_id},
                            {'direction': 'egress', 'ethertype': 'IPv4',
                             'security_group_id': sg2_id},
                            {'direction': 'egress', 'ethertype': 'IPv6',
                             'security_group_id': sg2_id},
                            {'direction': 'ingress',
                             'source_ip_prefix': 'fe80::3/128',
                             'protocol': 'tcp', 'ethertype': 'IPv6',
                             'port_range_max': 25, 'port_range_min': 24,
                             'remote_group_id': sg2_id,
                             'security_group_id': sg1_id},
                            ]
                self.assertEqual(port_rpc['security_group_rules'],
                                 expected)
                self._delete('ports', port_id1)
                self._delete('ports', port_id2)
class SGServerRpcCallBackMixinTestCaseXML(SGServerRpcCallBackMixinTestCase):
    """Re-run the callback tests with the XML wire format."""
    fmt = 'xml'
class SGAgentRpcCallBackMixinTestCase(base.BaseTestCase):
    """Verify agent-side RPC callbacks forward to the sg_agent."""

    def setUp(self):
        super(SGAgentRpcCallBackMixinTestCase, self).setUp()
        self.rpc = sg_rpc.SecurityGroupAgentRpcCallbackMixin()
        # The mixin delegates every notification to sg_agent; mock it so
        # the forwarded calls can be asserted.
        self.rpc.sg_agent = mock.Mock()

    def test_security_groups_rule_updated(self):
        """rule_updated passes the security group list through."""
        self.rpc.security_groups_rule_updated(None,
                                              security_groups=['fake_sgid'])
        self.rpc.sg_agent.assert_has_calls(
            [call.security_groups_rule_updated(['fake_sgid'])])

    def test_security_groups_member_updated(self):
        """member_updated passes the security group list through."""
        self.rpc.security_groups_member_updated(None,
                                                security_groups=['fake_sgid'])
        self.rpc.sg_agent.assert_has_calls(
            [call.security_groups_member_updated(['fake_sgid'])])

    def test_security_groups_provider_updated(self):
        """provider_updated is forwarded with no arguments."""
        self.rpc.security_groups_provider_updated(None)
        self.rpc.sg_agent.assert_has_calls(
            [call.security_groups_provider_updated()])
class SecurityGroupAgentRpcTestCase(base.BaseTestCase):
    """Behavioral tests of SecurityGroupAgentRpcMixin with a mock firewall.

    The firewall driver and plugin RPC are mocked; tests assert which
    firewall operations the mixin triggers for each notification.
    """

    def setUp(self):
        super(SecurityGroupAgentRpcTestCase, self).setUp()
        self.agent = sg_rpc.SecurityGroupAgentRpcMixin()
        self.agent.context = None
        self.addCleanup(mock.patch.stopall)
        # Prevent init_firewall from touching real iptables plumbing.
        mock.patch('neutron.agent.linux.iptables_manager').start()
        self.agent.root_helper = 'sudo'
        self.agent.init_firewall()
        self.firewall = mock.Mock()
        # Use the real defer_apply context manager so deferred-apply
        # sequencing is preserved while all other calls stay mocked.
        firewall_object = firewall_base.FirewallDriver()
        self.firewall.defer_apply.side_effect = firewall_object.defer_apply
        self.agent.firewall = self.firewall
        rpc = mock.Mock()
        self.agent.plugin_rpc = rpc
        # fake_device belongs to sgid1/sgid2 and sources from sgid2;
        # the not-updated tests rely on these memberships.
        self.fake_device = {'device': 'fake_device',
                            'security_groups': ['fake_sgid1', 'fake_sgid2'],
                            'security_group_source_groups': ['fake_sgid2'],
                            'security_group_rules': [{'security_group_id':
                                                      'fake_sgid1',
                                                      'remote_group_id':
                                                      'fake_sgid2'}]}
        fake_devices = {'fake_device': self.fake_device}
        self.firewall.ports = fake_devices
        rpc.security_group_rules_for_devices.return_value = fake_devices

    def test_prepare_and_remove_devices_filter(self):
        """prepare/remove each run inside a deferred-apply section."""
        self.agent.prepare_devices_filter(['fake_device'])
        self.agent.remove_devices_filter(['fake_device'])
        # ignore device which is not filtered
        self.firewall.assert_has_calls([call.defer_apply(),
                                        call.prepare_port_filter(
                                            self.fake_device),
                                        call.defer_apply(),
                                        call.remove_port_filter(
                                            self.fake_device),
                                        ])

    def test_security_groups_rule_updated(self):
        """A rule change in a member group refreshes the firewall."""
        self.agent.refresh_firewall = mock.Mock()
        self.agent.prepare_devices_filter(['fake_port_id'])
        self.agent.security_groups_rule_updated(['fake_sgid1', 'fake_sgid3'])
        self.agent.refresh_firewall.assert_has_calls(
            [call.refresh_firewall()])

    def test_security_groups_rule_not_updated(self):
        """Rule changes in unrelated groups do not refresh the firewall."""
        self.agent.refresh_firewall = mock.Mock()
        self.agent.prepare_devices_filter(['fake_port_id'])
        self.agent.security_groups_rule_updated(['fake_sgid3', 'fake_sgid4'])
        self.agent.refresh_firewall.assert_has_calls([])

    def test_security_groups_member_updated(self):
        """A membership change in a source group refreshes the firewall."""
        self.agent.refresh_firewall = mock.Mock()
        self.agent.prepare_devices_filter(['fake_port_id'])
        self.agent.security_groups_member_updated(['fake_sgid2', 'fake_sgid3'])
        self.agent.refresh_firewall.assert_has_calls(
            [call.refresh_firewall()])

    def test_security_groups_member_not_updated(self):
        """Membership changes in unrelated groups trigger no refresh."""
        self.agent.refresh_firewall = mock.Mock()
        self.agent.prepare_devices_filter(['fake_port_id'])
        self.agent.security_groups_member_updated(['fake_sgid3', 'fake_sgid4'])
        self.agent.refresh_firewall.assert_has_calls([])

    def test_security_groups_provider_updated(self):
        """A provider update always refreshes the firewall."""
        self.agent.refresh_firewall = mock.Mock()
        self.agent.security_groups_provider_updated()
        self.agent.refresh_firewall.assert_has_calls(
            [call.refresh_firewall()])

    def test_refresh_firewall(self):
        """refresh_firewall re-applies filters for every known device."""
        self.agent.prepare_devices_filter(['fake_port_id'])
        self.agent.refresh_firewall()
        calls = [call.defer_apply(),
                 call.prepare_port_filter(self.fake_device),
                 call.defer_apply(),
                 call.update_port_filter(self.fake_device)]
        self.firewall.assert_has_calls(calls)
class FakeSGRpcApi(agent_rpc.PluginApi,
                   sg_rpc.SecurityGroupServerRpcApiMixin):
    """Concrete composition of the plugin API and server RPC API mixin."""
    pass
class SecurityGroupServerRpcApiTestCase(base.BaseTestCase):
    """Verify the exact RPC envelope sent to the server-side API."""

    def setUp(self):
        super(SecurityGroupServerRpcApiTestCase, self).setUp()
        self.rpc = FakeSGRpcApi('fake_topic')
        # Capture the outgoing call instead of hitting a message bus.
        self.rpc.call = mock.Mock()

    def test_security_group_rules_for_devices(self):
        """Check method name, args, version and topic of the call."""
        self.rpc.security_group_rules_for_devices(None, ['fake_device'])
        self.rpc.call.assert_has_calls(
            [call(None,
                  {'args':
                   {'devices': ['fake_device']},
                   'method': 'security_group_rules_for_devices',
                   'namespace': None},
                  version=sg_rpc.SG_RPC_VERSION,
                  topic='fake_topic')])
class FakeSGNotifierAPI(proxy.RpcProxy,
                        sg_rpc.SecurityGroupAgentRpcApiMixin):
    """Concrete composition of RpcProxy and the agent notifier mixin."""
    pass
class SecurityGroupAgentRpcApiTestCase(base.BaseTestCase):
    """Verify fanout notifications emitted toward agents."""

    def setUp(self):
        super(SecurityGroupAgentRpcApiTestCase, self).setUp()
        self.notifier = FakeSGNotifierAPI(topic='fake',
                                          default_version='1.0')
        # Capture the fanout instead of publishing to a message bus.
        self.notifier.fanout_cast = mock.Mock()

    def test_security_groups_rule_updated(self):
        """rule_updated fans out on the security_group update topic."""
        self.notifier.security_groups_rule_updated(
            None, security_groups=['fake_sgid'])
        self.notifier.fanout_cast.assert_has_calls(
            [call(None,
                  {'args':
                   {'security_groups': ['fake_sgid']},
                   'method': 'security_groups_rule_updated',
                   'namespace': None},
                  version=sg_rpc.SG_RPC_VERSION,
                  topic='fake-security_group-update')])

    def test_security_groups_member_updated(self):
        """member_updated fans out on the security_group update topic."""
        self.notifier.security_groups_member_updated(
            None, security_groups=['fake_sgid'])
        self.notifier.fanout_cast.assert_has_calls(
            [call(None,
                  {'args':
                   {'security_groups': ['fake_sgid']},
                   'method': 'security_groups_member_updated',
                   'namespace': None},
                  version=sg_rpc.SG_RPC_VERSION,
                  topic='fake-security_group-update')])

    def test_security_groups_rule_not_updated(self):
        """An empty group list suppresses the rule notification."""
        self.notifier.security_groups_rule_updated(
            None, security_groups=[])
        self.assertEqual(False, self.notifier.fanout_cast.called)

    def test_security_groups_member_not_updated(self):
        """An empty group list suppresses the member notification."""
        self.notifier.security_groups_member_updated(
            None, security_groups=[])
        self.assertEqual(False, self.notifier.fanout_cast.called)
# Note(nati): shorthand used in the expected iptables dumps below:
#   bn -> binary_name (iptables chain-name prefix)
#   id -> device_id
PHYSDEV_RULE = '-m physdev --physdev-is-bridged'
IPTABLES_ARG = {'bn': iptables_manager.binary_name,
                'physdev': PHYSDEV_RULE}
# Expected chain layout of the nat table (independent of filtered ports).
CHAINS_NAT = 'OUTPUT|POSTROUTING|PREROUTING|float-snat|snat'
IPTABLES_ARG['chains'] = CHAINS_NAT
IPTABLES_NAT = """:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:neutron-postrouting-bottom - [0:0]
-A PREROUTING -j %(bn)s-PREROUTING
-A OUTPUT -j %(bn)s-OUTPUT
-A POSTROUTING -j %(bn)s-POSTROUTING
-A POSTROUTING -j neutron-postrouting-bottom
-A neutron-postrouting-bottom -j %(bn)s-snat
-A %(bn)s-snat -j %(bn)s-float-snat
""" % IPTABLES_ARG
# Expected filter-table chains: base set, plus per-port ingress/egress
# chains for one (CHAINS_1) or two (CHAINS_2) filtered ports.
CHAINS_EMPTY = 'FORWARD|INPUT|OUTPUT|local|sg-chain|sg-fallback'
CHAINS_1 = CHAINS_EMPTY + '|i_port1|o_port1'
CHAINS_2 = CHAINS_1 + '|i_port2|o_port2'
IPTABLES_ARG['chains'] = CHAINS_1
# Expected filter rules when only port1 is filtered (ssh + dhcp allowed in).
IPTABLES_FILTER_1 = """:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:neutron-filter-top - [0:0]
-A FORWARD -j neutron-filter-top
-A OUTPUT -j neutron-filter-top
-A neutron-filter-top -j %(bn)s-local
-A INPUT -j %(bn)s-INPUT
-A OUTPUT -j %(bn)s-OUTPUT
-A FORWARD -j %(bn)s-FORWARD
-A %(bn)s-sg-fallback -j DROP
-A %(bn)s-FORWARD %(physdev)s --physdev-INGRESS tap_port1 -j %(bn)s-sg-chain
-A %(bn)s-sg-chain %(physdev)s --physdev-INGRESS tap_port1 -j %(bn)s-i_port1
-A %(bn)s-i_port1 -m state --state INVALID -j DROP
-A %(bn)s-i_port1 -m state --state ESTABLISHED,RELATED -j RETURN
-A %(bn)s-i_port1 -j RETURN -p udp --dport 68 --sport 67 -s 10.0.0.2
-A %(bn)s-i_port1 -j RETURN -p tcp --dport 22
-A %(bn)s-i_port1 -j %(bn)s-sg-fallback
-A %(bn)s-FORWARD %(physdev)s --physdev-EGRESS tap_port1 -j %(bn)s-sg-chain
-A %(bn)s-sg-chain %(physdev)s --physdev-EGRESS tap_port1 -j %(bn)s-o_port1
-A %(bn)s-INPUT %(physdev)s --physdev-EGRESS tap_port1 -j %(bn)s-o_port1
-A %(bn)s-o_port1 -m mac ! --mac-source 12:34:56:78:9a:bc -j DROP
-A %(bn)s-o_port1 -p udp --sport 68 --dport 67 -j RETURN
-A %(bn)s-o_port1 ! -s 10.0.0.3 -j DROP
-A %(bn)s-o_port1 -p udp --sport 67 --dport 68 -j DROP
-A %(bn)s-o_port1 -m state --state INVALID -j DROP
-A %(bn)s-o_port1 -m state --state ESTABLISHED,RELATED -j RETURN
-A %(bn)s-o_port1 -j RETURN
-A %(bn)s-o_port1 -j %(bn)s-sg-fallback
-A %(bn)s-sg-chain -j ACCEPT
""" % IPTABLES_ARG
# Same as IPTABLES_FILTER_1 plus an extra ingress allow from 10.0.0.4
# (still only port1's chains exist).
IPTABLES_FILTER_1_2 = """:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:neutron-filter-top - [0:0]
-A FORWARD -j neutron-filter-top
-A OUTPUT -j neutron-filter-top
-A neutron-filter-top -j %(bn)s-local
-A INPUT -j %(bn)s-INPUT
-A OUTPUT -j %(bn)s-OUTPUT
-A FORWARD -j %(bn)s-FORWARD
-A %(bn)s-sg-fallback -j DROP
-A %(bn)s-FORWARD %(physdev)s --physdev-INGRESS tap_port1 -j %(bn)s-sg-chain
-A %(bn)s-sg-chain %(physdev)s --physdev-INGRESS tap_port1 -j %(bn)s-i_port1
-A %(bn)s-i_port1 -m state --state INVALID -j DROP
-A %(bn)s-i_port1 -m state --state ESTABLISHED,RELATED -j RETURN
-A %(bn)s-i_port1 -j RETURN -p udp --dport 68 --sport 67 -s 10.0.0.2
-A %(bn)s-i_port1 -j RETURN -p tcp --dport 22
-A %(bn)s-i_port1 -j RETURN -s 10.0.0.4
-A %(bn)s-i_port1 -j %(bn)s-sg-fallback
-A %(bn)s-FORWARD %(physdev)s --physdev-EGRESS tap_port1 -j %(bn)s-sg-chain
-A %(bn)s-sg-chain %(physdev)s --physdev-EGRESS tap_port1 -j %(bn)s-o_port1
-A %(bn)s-INPUT %(physdev)s --physdev-EGRESS tap_port1 -j %(bn)s-o_port1
-A %(bn)s-o_port1 -m mac ! --mac-source 12:34:56:78:9a:bc -j DROP
-A %(bn)s-o_port1 -p udp --sport 68 --dport 67 -j RETURN
-A %(bn)s-o_port1 ! -s 10.0.0.3 -j DROP
-A %(bn)s-o_port1 -p udp --sport 67 --dport 68 -j DROP
-A %(bn)s-o_port1 -m state --state INVALID -j DROP
-A %(bn)s-o_port1 -m state --state ESTABLISHED,RELATED -j RETURN
-A %(bn)s-o_port1 -j RETURN
-A %(bn)s-o_port1 -j %(bn)s-sg-fallback
-A %(bn)s-sg-chain -j ACCEPT
""" % IPTABLES_ARG
IPTABLES_ARG['chains'] = CHAINS_2
# Expected filter rules when port1 and port2 are both filtered; each
# allows ingress from the other port's IP (cross source-group rule).
IPTABLES_FILTER_2 = """:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:neutron-filter-top - [0:0]
-A FORWARD -j neutron-filter-top
-A OUTPUT -j neutron-filter-top
-A neutron-filter-top -j %(bn)s-local
-A INPUT -j %(bn)s-INPUT
-A OUTPUT -j %(bn)s-OUTPUT
-A FORWARD -j %(bn)s-FORWARD
-A %(bn)s-sg-fallback -j DROP
-A %(bn)s-FORWARD %(physdev)s --physdev-INGRESS tap_port1 -j %(bn)s-sg-chain
-A %(bn)s-sg-chain %(physdev)s --physdev-INGRESS tap_port1 -j %(bn)s-i_port1
-A %(bn)s-i_port1 -m state --state INVALID -j DROP
-A %(bn)s-i_port1 -m state --state ESTABLISHED,RELATED -j RETURN
-A %(bn)s-i_port1 -j RETURN -p udp --dport 68 --sport 67 -s 10.0.0.2
-A %(bn)s-i_port1 -j RETURN -p tcp --dport 22
-A %(bn)s-i_port1 -j RETURN -s 10.0.0.4
-A %(bn)s-i_port1 -j %(bn)s-sg-fallback
-A %(bn)s-FORWARD %(physdev)s --physdev-EGRESS tap_port1 -j %(bn)s-sg-chain
-A %(bn)s-sg-chain %(physdev)s --physdev-EGRESS tap_port1 -j %(bn)s-o_port1
-A %(bn)s-INPUT %(physdev)s --physdev-EGRESS tap_port1 -j %(bn)s-o_port1
-A %(bn)s-o_port1 -m mac ! --mac-source 12:34:56:78:9a:bc -j DROP
-A %(bn)s-o_port1 -p udp --sport 68 --dport 67 -j RETURN
-A %(bn)s-o_port1 ! -s 10.0.0.3 -j DROP
-A %(bn)s-o_port1 -p udp --sport 67 --dport 68 -j DROP
-A %(bn)s-o_port1 -m state --state INVALID -j DROP
-A %(bn)s-o_port1 -m state --state ESTABLISHED,RELATED -j RETURN
-A %(bn)s-o_port1 -j RETURN
-A %(bn)s-o_port1 -j %(bn)s-sg-fallback
-A %(bn)s-FORWARD %(physdev)s --physdev-INGRESS tap_port2 -j %(bn)s-sg-chain
-A %(bn)s-sg-chain %(physdev)s --physdev-INGRESS tap_port2 -j %(bn)s-i_port2
-A %(bn)s-i_port2 -m state --state INVALID -j DROP
-A %(bn)s-i_port2 -m state --state ESTABLISHED,RELATED -j RETURN
-A %(bn)s-i_port2 -j RETURN -p udp --dport 68 --sport 67 -s 10.0.0.2
-A %(bn)s-i_port2 -j RETURN -p tcp --dport 22
-A %(bn)s-i_port2 -j RETURN -s 10.0.0.3
-A %(bn)s-i_port2 -j %(bn)s-sg-fallback
-A %(bn)s-FORWARD %(physdev)s --physdev-EGRESS tap_port2 -j %(bn)s-sg-chain
-A %(bn)s-sg-chain %(physdev)s --physdev-EGRESS tap_port2 -j %(bn)s-o_port2
-A %(bn)s-INPUT %(physdev)s --physdev-EGRESS tap_port2 -j %(bn)s-o_port2
-A %(bn)s-o_port2 -m mac ! --mac-source 12:34:56:78:9a:bd -j DROP
-A %(bn)s-o_port2 -p udp --sport 68 --dport 67 -j RETURN
-A %(bn)s-o_port2 ! -s 10.0.0.4 -j DROP
-A %(bn)s-o_port2 -p udp --sport 67 --dport 68 -j DROP
-A %(bn)s-o_port2 -m state --state INVALID -j DROP
-A %(bn)s-o_port2 -m state --state ESTABLISHED,RELATED -j RETURN
-A %(bn)s-o_port2 -j RETURN
-A %(bn)s-o_port2 -j %(bn)s-sg-fallback
-A %(bn)s-sg-chain -j ACCEPT
""" % IPTABLES_ARG
# Two filtered ports: only port2 allows ingress from port1's IP.
IPTABLES_FILTER_2_2 = """:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:neutron-filter-top - [0:0]
-A FORWARD -j neutron-filter-top
-A OUTPUT -j neutron-filter-top
-A neutron-filter-top -j %(bn)s-local
-A INPUT -j %(bn)s-INPUT
-A OUTPUT -j %(bn)s-OUTPUT
-A FORWARD -j %(bn)s-FORWARD
-A %(bn)s-sg-fallback -j DROP
-A %(bn)s-FORWARD %(physdev)s --physdev-INGRESS tap_port1 -j %(bn)s-sg-chain
-A %(bn)s-sg-chain %(physdev)s --physdev-INGRESS tap_port1 -j %(bn)s-i_port1
-A %(bn)s-i_port1 -m state --state INVALID -j DROP
-A %(bn)s-i_port1 -m state --state ESTABLISHED,RELATED -j RETURN
-A %(bn)s-i_port1 -j RETURN -p udp --dport 68 --sport 67 -s 10.0.0.2
-A %(bn)s-i_port1 -j RETURN -p tcp --dport 22
-A %(bn)s-i_port1 -j %(bn)s-sg-fallback
-A %(bn)s-FORWARD %(physdev)s --physdev-EGRESS tap_port1 -j %(bn)s-sg-chain
-A %(bn)s-sg-chain %(physdev)s --physdev-EGRESS tap_port1 -j %(bn)s-o_port1
-A %(bn)s-INPUT %(physdev)s --physdev-EGRESS tap_port1 -j %(bn)s-o_port1
-A %(bn)s-o_port1 -m mac ! --mac-source 12:34:56:78:9a:bc -j DROP
-A %(bn)s-o_port1 -p udp --sport 68 --dport 67 -j RETURN
-A %(bn)s-o_port1 ! -s 10.0.0.3 -j DROP
-A %(bn)s-o_port1 -p udp --sport 67 --dport 68 -j DROP
-A %(bn)s-o_port1 -m state --state INVALID -j DROP
-A %(bn)s-o_port1 -m state --state ESTABLISHED,RELATED -j RETURN
-A %(bn)s-o_port1 -j RETURN
-A %(bn)s-o_port1 -j %(bn)s-sg-fallback
-A %(bn)s-FORWARD %(physdev)s --physdev-INGRESS tap_port2 -j %(bn)s-sg-chain
-A %(bn)s-sg-chain %(physdev)s --physdev-INGRESS tap_port2 -j %(bn)s-i_port2
-A %(bn)s-i_port2 -m state --state INVALID -j DROP
-A %(bn)s-i_port2 -m state --state ESTABLISHED,RELATED -j RETURN
-A %(bn)s-i_port2 -j RETURN -p udp --dport 68 --sport 67 -s 10.0.0.2
-A %(bn)s-i_port2 -j RETURN -p tcp --dport 22
-A %(bn)s-i_port2 -j RETURN -s 10.0.0.3
-A %(bn)s-i_port2 -j %(bn)s-sg-fallback
-A %(bn)s-FORWARD %(physdev)s --physdev-EGRESS tap_port2 -j %(bn)s-sg-chain
-A %(bn)s-sg-chain %(physdev)s --physdev-EGRESS tap_port2 -j %(bn)s-o_port2
-A %(bn)s-INPUT %(physdev)s --physdev-EGRESS tap_port2 -j %(bn)s-o_port2
-A %(bn)s-o_port2 -m mac ! --mac-source 12:34:56:78:9a:bd -j DROP
-A %(bn)s-o_port2 -p udp --sport 68 --dport 67 -j RETURN
-A %(bn)s-o_port2 ! -s 10.0.0.4 -j DROP
-A %(bn)s-o_port2 -p udp --sport 67 --dport 68 -j DROP
-A %(bn)s-o_port2 -m state --state INVALID -j DROP
-A %(bn)s-o_port2 -m state --state ESTABLISHED,RELATED -j RETURN
-A %(bn)s-o_port2 -j RETURN
-A %(bn)s-o_port2 -j %(bn)s-sg-fallback
-A %(bn)s-sg-chain -j ACCEPT
""" % IPTABLES_ARG
# Like IPTABLES_FILTER_2 but with an additional ICMP ingress allow on
# both ports.
IPTABLES_FILTER_2_3 = """:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:neutron-filter-top - [0:0]
-A FORWARD -j neutron-filter-top
-A OUTPUT -j neutron-filter-top
-A neutron-filter-top -j %(bn)s-local
-A INPUT -j %(bn)s-INPUT
-A OUTPUT -j %(bn)s-OUTPUT
-A FORWARD -j %(bn)s-FORWARD
-A %(bn)s-sg-fallback -j DROP
-A %(bn)s-FORWARD %(physdev)s --physdev-INGRESS tap_port1 -j %(bn)s-sg-chain
-A %(bn)s-sg-chain %(physdev)s --physdev-INGRESS tap_port1 -j %(bn)s-i_port1
-A %(bn)s-i_port1 -m state --state INVALID -j DROP
-A %(bn)s-i_port1 -m state --state ESTABLISHED,RELATED -j RETURN
-A %(bn)s-i_port1 -j RETURN -p udp --dport 68 --sport 67 -s 10.0.0.2
-A %(bn)s-i_port1 -j RETURN -p tcp --dport 22
-A %(bn)s-i_port1 -j RETURN -s 10.0.0.4
-A %(bn)s-i_port1 -j RETURN -p icmp
-A %(bn)s-i_port1 -j %(bn)s-sg-fallback
-A %(bn)s-FORWARD %(physdev)s --physdev-EGRESS tap_port1 -j %(bn)s-sg-chain
-A %(bn)s-sg-chain %(physdev)s --physdev-EGRESS tap_port1 -j %(bn)s-o_port1
-A %(bn)s-INPUT %(physdev)s --physdev-EGRESS tap_port1 -j %(bn)s-o_port1
-A %(bn)s-o_port1 -m mac ! --mac-source 12:34:56:78:9a:bc -j DROP
-A %(bn)s-o_port1 -p udp --sport 68 --dport 67 -j RETURN
-A %(bn)s-o_port1 ! -s 10.0.0.3 -j DROP
-A %(bn)s-o_port1 -p udp --sport 67 --dport 68 -j DROP
-A %(bn)s-o_port1 -m state --state INVALID -j DROP
-A %(bn)s-o_port1 -m state --state ESTABLISHED,RELATED -j RETURN
-A %(bn)s-o_port1 -j RETURN
-A %(bn)s-o_port1 -j %(bn)s-sg-fallback
-A %(bn)s-FORWARD %(physdev)s --physdev-INGRESS tap_port2 -j %(bn)s-sg-chain
-A %(bn)s-sg-chain %(physdev)s --physdev-INGRESS tap_port2 -j %(bn)s-i_port2
-A %(bn)s-i_port2 -m state --state INVALID -j DROP
-A %(bn)s-i_port2 -m state --state ESTABLISHED,RELATED -j RETURN
-A %(bn)s-i_port2 -j RETURN -p udp --dport 68 --sport 67 -s 10.0.0.2
-A %(bn)s-i_port2 -j RETURN -p tcp --dport 22
-A %(bn)s-i_port2 -j RETURN -s 10.0.0.3
-A %(bn)s-i_port2 -j RETURN -p icmp
-A %(bn)s-i_port2 -j %(bn)s-sg-fallback
-A %(bn)s-FORWARD %(physdev)s --physdev-EGRESS tap_port2 -j %(bn)s-sg-chain
-A %(bn)s-sg-chain %(physdev)s --physdev-EGRESS tap_port2 -j %(bn)s-o_port2
-A %(bn)s-INPUT %(physdev)s --physdev-EGRESS tap_port2 -j %(bn)s-o_port2
-A %(bn)s-o_port2 -m mac ! --mac-source 12:34:56:78:9a:bd -j DROP
-A %(bn)s-o_port2 -p udp --sport 68 --dport 67 -j RETURN
-A %(bn)s-o_port2 ! -s 10.0.0.4 -j DROP
-A %(bn)s-o_port2 -p udp --sport 67 --dport 68 -j DROP
-A %(bn)s-o_port2 -m state --state INVALID -j DROP
-A %(bn)s-o_port2 -m state --state ESTABLISHED,RELATED -j RETURN
-A %(bn)s-o_port2 -j RETURN
-A %(bn)s-o_port2 -j %(bn)s-sg-fallback
-A %(bn)s-sg-chain -j ACCEPT
""" % IPTABLES_ARG
IPTABLES_ARG['chains'] = CHAINS_EMPTY
# Expected IPv4 filter table when no port is filtered.
IPTABLES_FILTER_EMPTY = """:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:neutron-filter-top - [0:0]
-A FORWARD -j neutron-filter-top
-A OUTPUT -j neutron-filter-top
-A neutron-filter-top -j %(bn)s-local
-A INPUT -j %(bn)s-INPUT
-A OUTPUT -j %(bn)s-OUTPUT
-A FORWARD -j %(bn)s-FORWARD
-A %(bn)s-sg-fallback -j DROP
""" % IPTABLES_ARG
IPTABLES_ARG['chains'] = CHAINS_1
# Expected ip6tables filter table with one filtered port (ICMPv6 egress
# allowed; no per-rule IPv6 entries in these scenarios).
IPTABLES_FILTER_V6_1 = """:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:neutron-filter-top - [0:0]
-A FORWARD -j neutron-filter-top
-A OUTPUT -j neutron-filter-top
-A neutron-filter-top -j %(bn)s-local
-A INPUT -j %(bn)s-INPUT
-A OUTPUT -j %(bn)s-OUTPUT
-A FORWARD -j %(bn)s-FORWARD
-A %(bn)s-sg-fallback -j DROP
-A %(bn)s-FORWARD %(physdev)s --physdev-INGRESS tap_port1 -j %(bn)s-sg-chain
-A %(bn)s-sg-chain %(physdev)s --physdev-INGRESS tap_port1 -j %(bn)s-i_port1
-A %(bn)s-i_port1 -m state --state INVALID -j DROP
-A %(bn)s-i_port1 -m state --state ESTABLISHED,RELATED -j RETURN
-A %(bn)s-i_port1 -j %(bn)s-sg-fallback
-A %(bn)s-FORWARD %(physdev)s --physdev-EGRESS tap_port1 -j %(bn)s-sg-chain
-A %(bn)s-sg-chain %(physdev)s --physdev-EGRESS tap_port1 -j %(bn)s-o_port1
-A %(bn)s-INPUT %(physdev)s --physdev-EGRESS tap_port1 -j %(bn)s-o_port1
-A %(bn)s-o_port1 -m mac ! --mac-source 12:34:56:78:9a:bc -j DROP
-A %(bn)s-o_port1 -p icmpv6 -j RETURN
-A %(bn)s-o_port1 -m state --state INVALID -j DROP
-A %(bn)s-o_port1 -m state --state ESTABLISHED,RELATED -j RETURN
-A %(bn)s-o_port1 -j %(bn)s-sg-fallback
-A %(bn)s-sg-chain -j ACCEPT
""" % IPTABLES_ARG
IPTABLES_ARG['chains'] = CHAINS_2
# Expected ip6tables filter table with two filtered ports.
IPTABLES_FILTER_V6_2 = """:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:neutron-filter-top - [0:0]
-A FORWARD -j neutron-filter-top
-A OUTPUT -j neutron-filter-top
-A neutron-filter-top -j %(bn)s-local
-A INPUT -j %(bn)s-INPUT
-A OUTPUT -j %(bn)s-OUTPUT
-A FORWARD -j %(bn)s-FORWARD
-A %(bn)s-sg-fallback -j DROP
-A %(bn)s-FORWARD %(physdev)s --physdev-INGRESS tap_port1 -j %(bn)s-sg-chain
-A %(bn)s-sg-chain %(physdev)s --physdev-INGRESS tap_port1 -j %(bn)s-i_port1
-A %(bn)s-i_port1 -m state --state INVALID -j DROP
-A %(bn)s-i_port1 -m state --state ESTABLISHED,RELATED -j RETURN
-A %(bn)s-i_port1 -j %(bn)s-sg-fallback
-A %(bn)s-FORWARD %(physdev)s --physdev-EGRESS tap_port1 -j %(bn)s-sg-chain
-A %(bn)s-sg-chain %(physdev)s --physdev-EGRESS tap_port1 -j %(bn)s-o_port1
-A %(bn)s-INPUT %(physdev)s --physdev-EGRESS tap_port1 -j %(bn)s-o_port1
-A %(bn)s-o_port1 -m mac ! --mac-source 12:34:56:78:9a:bc -j DROP
-A %(bn)s-o_port1 -p icmpv6 -j RETURN
-A %(bn)s-o_port1 -m state --state INVALID -j DROP
-A %(bn)s-o_port1 -m state --state ESTABLISHED,RELATED -j RETURN
-A %(bn)s-o_port1 -j %(bn)s-sg-fallback
-A %(bn)s-FORWARD %(physdev)s --physdev-INGRESS tap_port2 -j %(bn)s-sg-chain
-A %(bn)s-sg-chain %(physdev)s --physdev-INGRESS tap_port2 -j %(bn)s-i_port2
-A %(bn)s-i_port2 -m state --state INVALID -j DROP
-A %(bn)s-i_port2 -m state --state ESTABLISHED,RELATED -j RETURN
-A %(bn)s-i_port2 -j %(bn)s-sg-fallback
-A %(bn)s-FORWARD %(physdev)s --physdev-EGRESS tap_port2 -j %(bn)s-sg-chain
-A %(bn)s-sg-chain %(physdev)s --physdev-EGRESS tap_port2 -j %(bn)s-o_port2
-A %(bn)s-INPUT %(physdev)s --physdev-EGRESS tap_port2 -j %(bn)s-o_port2
-A %(bn)s-o_port2 -m mac ! --mac-source 12:34:56:78:9a:bd -j DROP
-A %(bn)s-o_port2 -p icmpv6 -j RETURN
-A %(bn)s-o_port2 -m state --state INVALID -j DROP
-A %(bn)s-o_port2 -m state --state ESTABLISHED,RELATED -j RETURN
-A %(bn)s-o_port2 -j %(bn)s-sg-fallback
-A %(bn)s-sg-chain -j ACCEPT
""" % IPTABLES_ARG
IPTABLES_ARG['chains'] = CHAINS_EMPTY
# Expected ip6tables filter table when no port is filtered.
IPTABLES_FILTER_V6_EMPTY = """:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:neutron-filter-top - [0:0]
-A FORWARD -j neutron-filter-top
-A OUTPUT -j neutron-filter-top
-A neutron-filter-top -j %(bn)s-local
-A INPUT -j %(bn)s-INPUT
-A OUTPUT -j %(bn)s-OUTPUT
-A FORWARD -j %(bn)s-FORWARD
-A %(bn)s-sg-fallback -j DROP
""" % IPTABLES_ARG
# Import paths of the firewall drivers exercised by the iptables-based
# agent test cases below.
FIREWALL_BASE_PACKAGE = 'neutron.agent.linux.iptables_firewall.'
FIREWALL_IPTABLES_DRIVER = FIREWALL_BASE_PACKAGE + 'IptablesFirewallDriver'
FIREWALL_HYBRID_DRIVER = (FIREWALL_BASE_PACKAGE +
                          'OVSHybridIptablesFirewallDriver')
FIREWALL_NOOP_DRIVER = 'neutron.agent.firewall.NoopFirewallDriver'
def set_firewall_driver(firewall_driver):
    """Override the [SECURITYGROUP] firewall_driver config option for a test."""
    cfg.CONF.set_override('firewall_driver', firewall_driver,
                          group='SECURITYGROUP')
class TestSecurityGroupAgentWithIptables(base.BaseTestCase):
    """Exercise the security-group agent against a mocked iptables binary.

    ``iptables.execute`` is stubbed out with mox; each test records the
    exact sequence of iptables-save/iptables-restore invocations expected
    (via _replay_iptables) and verifies that driver operations produce
    precisely those calls, matching the restored rules against the
    IPTABLES_FILTER_* regex templates defined above.
    """
    FIREWALL_DRIVER = FIREWALL_IPTABLES_DRIVER
    # Bridge-port match direction flags substituted into the templates.
    PHYSDEV_INGRESS = 'physdev-out'
    PHYSDEV_EGRESS = 'physdev-in'
    def setUp(self):
        super(TestSecurityGroupAgentWithIptables, self).setUp()
        self.mox = mox.Mox()
        agent_opts = [
            cfg.StrOpt('root_helper', default='sudo'),
        ]
        cfg.CONF.register_opts(agent_opts, "AGENT")
        cfg.CONF.set_override(
            'firewall_driver',
            self.FIREWALL_DRIVER,
            group='SECURITYGROUP')
        self.addCleanup(mock.patch.stopall)
        self.addCleanup(self.mox.UnsetStubs)
        self.agent = sg_rpc.SecurityGroupAgentRpcMixin()
        self.agent.context = None
        self.root_helper = 'sudo'
        self.agent.root_helper = 'sudo'
        self.agent.init_firewall()
        # Stub the low-level command runner so no real iptables is invoked.
        self.iptables = self.agent.firewall.iptables
        self.mox.StubOutWithMock(self.iptables, "execute")
        self.rpc = mock.Mock()
        self.agent.plugin_rpc = self.rpc
        # rule1: base rule set (DHCP reply, SSH in, all egress).
        rule1 = [{'direction': 'ingress',
                  'protocol': 'udp',
                  'ethertype': 'IPv4',
                  'source_ip_prefix': '10.0.0.2',
                  'source_port_range_min': 67,
                  'source_port_range_max': 67,
                  'port_range_min': 68,
                  'port_range_max': 68},
                 {'direction': 'ingress',
                  'protocol': 'tcp',
                  'ethertype': 'IPv4',
                  'port_range_min': 22,
                  'port_range_max': 22},
                 {'direction': 'egress',
                  'ethertype': 'IPv4'}]
        # rule2-rule5: variations adding peer-IP and ICMP ingress rules.
        rule2 = rule1[:]
        rule2 += [{'direction': 'ingress',
                   'source_ip_prefix': '10.0.0.4',
                   'ethertype': 'IPv4'}]
        rule3 = rule2[:]
        rule3 += [{'direction': 'ingress',
                   'protocol': 'icmp',
                   'ethertype': 'IPv4'}]
        rule4 = rule1[:]
        rule4 += [{'direction': 'ingress',
                   'source_ip_prefix': '10.0.0.3',
                   'ethertype': 'IPv4'}]
        rule5 = rule4[:]
        rule5 += [{'direction': 'ingress',
                   'protocol': 'icmp',
                   'ethertype': 'IPv4'}]
        # Device maps returned by the mocked plugin RPC in the tests below.
        self.devices1 = {'tap_port1': self._device('tap_port1',
                                                   '10.0.0.3',
                                                   '12:34:56:78:9a:bc',
                                                   rule1)}
        self.devices2 = {'tap_port1': self._device('tap_port1',
                                                   '10.0.0.3',
                                                   '12:34:56:78:9a:bc',
                                                   rule2),
                         'tap_port2': self._device('tap_port2',
                                                   '10.0.0.4',
                                                   '12:34:56:78:9a:bd',
                                                   rule4)}
        self.devices3 = {'tap_port1': self._device('tap_port1',
                                                   '10.0.0.3',
                                                   '12:34:56:78:9a:bc',
                                                   rule3),
                         'tap_port2': self._device('tap_port2',
                                                   '10.0.0.4',
                                                   '12:34:56:78:9a:bd',
                                                   rule5)}
    def _device(self, device, ip, mac_address, rule):
        """Build the port dict the plugin RPC would return for *device*."""
        return {'device': device,
                'fixed_ips': [ip],
                'mac_address': mac_address,
                'security_groups': ['security_group1'],
                'security_group_rules': rule,
                'security_group_source_groups': [
                    'security_group1']}
    def _regex(self, value):
        """Turn an expected-rules template into a mox.Regex matcher."""
        value = value.replace('physdev-INGRESS', self.PHYSDEV_INGRESS)
        value = value.replace('physdev-EGRESS', self.PHYSDEV_EGRESS)
        value = value.replace('\n', '\\n')
        # Escape brackets so "[0:0]" counters match literally.
        value = value.replace('[', '\[')
        value = value.replace(']', '\]')
        return mox.Regex(value)
    def _replay_iptables(self, v4_filter, v6_filter):
        """Record one full save/restore round-trip (v4 filter, nat, v6)."""
        self.iptables.execute(
            ['iptables-save', '-t', 'filter'],
            root_helper=self.root_helper).AndReturn('')
        self.iptables.execute(
            ['iptables-restore'],
            process_input=self._regex(v4_filter),
            root_helper=self.root_helper).AndReturn('')
        self.iptables.execute(
            ['iptables-save', '-t', 'nat'],
            root_helper=self.root_helper).AndReturn('')
        self.iptables.execute(
            ['iptables-restore'],
            process_input=self._regex(IPTABLES_NAT),
            root_helper=self.root_helper).AndReturn('')
        self.iptables.execute(
            ['ip6tables-save', '-t', 'filter'],
            root_helper=self.root_helper).AndReturn('')
        self.iptables.execute(
            ['ip6tables-restore'],
            process_input=self._regex(v6_filter),
            root_helper=self.root_helper).AndReturn('')
    def test_prepare_remove_port(self):
        self.rpc.security_group_rules_for_devices.return_value = self.devices1
        self._replay_iptables(IPTABLES_FILTER_1, IPTABLES_FILTER_V6_1)
        self._replay_iptables(IPTABLES_FILTER_EMPTY, IPTABLES_FILTER_V6_EMPTY)
        self.mox.ReplayAll()
        self.agent.prepare_devices_filter(['tap_port1'])
        self.agent.remove_devices_filter(['tap_port1'])
        self.mox.VerifyAll()
    def test_security_group_member_updated(self):
        self.rpc.security_group_rules_for_devices.return_value = self.devices1
        self._replay_iptables(IPTABLES_FILTER_1, IPTABLES_FILTER_V6_1)
        self._replay_iptables(IPTABLES_FILTER_1_2, IPTABLES_FILTER_V6_1)
        self._replay_iptables(IPTABLES_FILTER_2, IPTABLES_FILTER_V6_2)
        self._replay_iptables(IPTABLES_FILTER_2_2, IPTABLES_FILTER_V6_2)
        self._replay_iptables(IPTABLES_FILTER_1, IPTABLES_FILTER_V6_1)
        self._replay_iptables(IPTABLES_FILTER_EMPTY, IPTABLES_FILTER_V6_EMPTY)
        self.mox.ReplayAll()
        self.agent.prepare_devices_filter(['tap_port1'])
        self.rpc.security_group_rules_for_devices.return_value = self.devices2
        self.agent.security_groups_member_updated(['security_group1'])
        self.agent.prepare_devices_filter(['tap_port2'])
        self.rpc.security_group_rules_for_devices.return_value = self.devices1
        self.agent.security_groups_member_updated(['security_group1'])
        self.agent.remove_devices_filter(['tap_port2'])
        self.agent.remove_devices_filter(['tap_port1'])
        self.mox.VerifyAll()
    # NOTE(review): "udpated" is a typo for "updated"; renaming would be
    # harmless (test runners discover by the test_ prefix) but is left
    # unchanged here to keep this a documentation-only change.
    def test_security_group_rule_udpated(self):
        self.rpc.security_group_rules_for_devices.return_value = self.devices2
        self._replay_iptables(IPTABLES_FILTER_2, IPTABLES_FILTER_V6_2)
        self._replay_iptables(IPTABLES_FILTER_2_3, IPTABLES_FILTER_V6_2)
        self.mox.ReplayAll()
        self.agent.prepare_devices_filter(['tap_port1', 'tap_port3'])
        self.rpc.security_group_rules_for_devices.return_value = self.devices3
        self.agent.security_groups_rule_updated(['security_group1'])
        self.mox.VerifyAll()
class SGNotificationTestMixin():
    """Mixin verifying that plugin CRUD operations emit agent notifications.

    Expects the host test case to provide the security-group/network/port
    context-manager helpers and a mock ``self.notifier``.
    """
    def test_security_group_rule_updated(self):
        name = 'webservers'
        description = 'my webservers'
        with self.security_group(name, description) as sg:
            with self.security_group(name, description) as sg2:
                security_group_id = sg['security_group']['id']
                direction = "ingress"
                remote_group_id = sg2['security_group']['id']
                protocol = 'tcp'
                port_range_min = 88
                port_range_max = 88
                with self.security_group_rule(security_group_id, direction,
                                              protocol, port_range_min,
                                              port_range_max,
                                              remote_group_id=remote_group_id
                                              ):
                    pass
            # One notification for rule creation, one for rule deletion
            # when the inner context manager exits.
            self.notifier.assert_has_calls(
                [call.security_groups_rule_updated(mock.ANY,
                                                   [security_group_id]),
                 call.security_groups_rule_updated(mock.ANY,
                                                   [security_group_id])])
    def test_security_group_member_updated(self):
        with self.network() as n:
            with self.subnet(n):
                with self.security_group() as sg:
                    security_group_id = sg['security_group']['id']
                    res = self._create_port(self.fmt, n['network']['id'])
                    port = self.deserialize(self.fmt, res)
                    # Attach the security group to the port via update.
                    data = {'port': {'fixed_ips': port['port']['fixed_ips'],
                                     'name': port['port']['name'],
                                     ext_sg.SECURITYGROUPS:
                                     [security_group_id]}}
                    req = self.new_update_request('ports', data,
                                                  port['port']['id'])
                    res = self.deserialize(self.fmt,
                                           req.get_response(self.api))
                    self.assertEqual(res['port'][ext_sg.SECURITYGROUPS][0],
                                     security_group_id)
                    self._delete('ports', port['port']['id'])
                # Member-updated fires on port update and on port delete.
                self.notifier.assert_has_calls(
                    [call.security_groups_member_updated(
                        mock.ANY, [mock.ANY]),
                     call.security_groups_member_updated(
                         mock.ANY, [security_group_id])])
class TestSecurityGroupAgentWithOVSIptables(
    TestSecurityGroupAgentWithIptables):
    """Same agent tests, run through the OVS hybrid firewall driver."""
    FIREWALL_DRIVER = FIREWALL_HYBRID_DRIVER
    def _regex(self, value):
        # Note(nati): tap is prefixed on the device
        # in the OVSHybridIptablesFirewallDriver
        substitutions = (('tap_port', 'taptap_port'),
                         ('o_port', 'otap_port'),
                         ('i_port', 'itap_port'))
        for pattern, replacement in substitutions:
            value = value.replace(pattern, replacement)
        return super(TestSecurityGroupAgentWithOVSIptables,
                     self)._regex(value)
class TestSecurityGroupExtensionControl(base.BaseTestCase):
    """Check extension advertising as a function of the firewall driver."""
    def test_firewall_enabled_noop_driver(self):
        set_firewall_driver(FIREWALL_NOOP_DRIVER)
        self.assertFalse(sg_rpc.is_firewall_enabled())
    def test_firewall_enabled_iptables_driver(self):
        set_firewall_driver(FIREWALL_IPTABLES_DRIVER)
        self.assertTrue(sg_rpc.is_firewall_enabled())
    def test_disable_security_group_extension_noop_driver(self):
        set_firewall_driver(FIREWALL_NOOP_DRIVER)
        aliases = ['dummy1', 'security-group', 'dummy2']
        sg_rpc.disable_security_group_extension_if_noop_driver(aliases)
        # The noop driver must strip 'security-group' from the alias list.
        self.assertEqual(aliases, ['dummy1', 'dummy2'])
    def test_disable_security_group_extension_iptables_driver(self):
        set_firewall_driver(FIREWALL_IPTABLES_DRIVER)
        aliases = ['dummy1', 'security-group', 'dummy2']
        sg_rpc.disable_security_group_extension_if_noop_driver(aliases)
        # A real firewall driver leaves the alias list untouched.
        self.assertEqual(aliases, ['dummy1', 'security-group', 'dummy2'])
| ykaneko/neutron | neutron/tests/unit/test_security_groups_rpc.py | Python | apache-2.0 | 56,457 |
#! /usr/bin/python3
# -*- coding:Utf-8 -*-
"""
MyNotes - Sticky notes/post-it
Copyright 2016-2019 Juliette Monsel <j_4321@protonmail.com>
MyNotes is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
MyNotes is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
Sticky note class
"""
import os
import re
from tkinter import Toplevel, StringVar, Menu, TclError
from tkinter.ttk import Style, Sizegrip, Entry, Label, Button, Frame
from time import strftime
from PIL.ImageTk import PhotoImage
from PIL import Image
from mynoteslib.constants import TEXT_COLORS, COLORS, EWMH, PATH_LATEX, LATEX,\
CONFIG, IM_LOCK, IM_CLIP, sorting, math_to_image, open_url, askopenfilename
from mynoteslib.autoscrollbar import AutoScrollbar
from mynoteslib.symbols import pick_symbol
from mynoteslib.mytext import MyText
from mynoteslib.messagebox import showerror, askokcancel
class Sticky(Toplevel):
"""Sticky note class."""
    def __init__(self, master, key, **kwargs):
        """
        Create a new sticky note.
        Arguments:
            master: main app
            key: key identifying this note in master.note_data
            kwargs: dictionary of the other arguments
            (title, txt, category, color, tags, geometry, locked, checkboxes, images, rolled)
        """
        Toplevel.__init__(self, master, class_='MyNotes')
        self.withdraw()
        # drag origin used by the start_move/move/stop_move handlers
        self.x = None
        self.y = None
        # --- window properties
        self.id = key
        self._date = kwargs.get('date', '??')
        # NOTE: deliberately inverted — self.lock() is called once at the
        # end of __init__ and *toggles* the state, restoring the persisted
        # locked/unlocked value.
        self.is_locked = not (kwargs.get("locked", False))
        self.images = []
        self.links_click_id = {} # delay click effect to avoid triggering <1> with <Double-1>
        self.files = {}
        self.files_click_id = {} # delay click effect to avoid triggering <1> with <Double-1>
        self.nb_links = 0
        self.nb_files = 0
        self.title('mynotes%s' % key)
        self.protocol("WM_DELETE_WINDOW", self.hide)
        if CONFIG.getboolean('General', 'splash_supported', fallback=True):
            self.attributes('-type', 'splash')
        else:
            self.attributes('-type', 'toolbar')
        self.attributes("-alpha", CONFIG.getint("General", "opacity") / 100)
        self.rowconfigure(1, weight=1)
        self.minsize(10, 10)
        # --- style
        self.style = Style(self)
        self.style.configure(self.id + ".TCheckbutton", selectbackground="red")
        selectbg = self.style.lookup('TEntry', 'selectbackground', ('focus',))
        # --- note elements
        # -------- titlebar
        self.titlebar = Frame(self, style=self.id + '.TFrame')
        # title
        font_title = "%s %s" % (CONFIG.get("Font", "title_family").replace(" ", "\ "),
                                CONFIG.get("Font", "title_size"))
        style = CONFIG.get("Font", "title_style").split(",")
        if style:
            font_title = font_title + " " + " ".join(style)
        self.title_var = StringVar(master=self,
                                   value=kwargs.get("title", _("Title")))
        self.title_label = Label(self.titlebar,
                                 textvariable=self.title_var,
                                 anchor="e",
                                 borderwidth=0,
                                 style=self.id + ".TLabel",
                                 font=font_title)
        self.date_label = Label(self.titlebar,
                                text="",
                                anchor="w",
                                borderwidth=0,
                                style=self.id + ".TLabel",
                                font=font_title)
        # in-place title editor, shown on double-click (see edit_title binding)
        self.title_entry = Entry(self.titlebar, textvariable=self.title_var,
                                 exportselection=False,
                                 justify="center", font=font_title)
        # buttons/icons
        self.roll = Label(self.titlebar, image="img_roll", style=self.id + ".TLabel")
        self.close = Label(self.titlebar, image="img_close", style=self.id + ".TLabel")
        self.im_lock = PhotoImage(master=self, file=IM_LOCK)
        # fully transparent image of the same size as the lock icon, so the
        # titlebar does not change width when the note is unlocked
        im_unlock = Image.new('RGBA',
                              (self.im_lock.width(), self.im_lock.height()),
                              (0, 0, 0, 0))
        self.im_unlock = PhotoImage(im_unlock, master=self)
        self.im_clip = PhotoImage(master=self, file=IM_CLIP)
        self.cadenas = Label(self.titlebar, style=self.id + ".TLabel")
        if CONFIG.get("General", "buttons_position") == "right":
            # right = lock icon - title - roll - close
            self.titlebar.columnconfigure(1, weight=1)
            self.titlebar.columnconfigure(2, weight=1)
            self.roll.grid(row=0, column=3, sticky="e")
            self.close.grid(row=0, column=4, sticky="e", padx=(0, 2))
            self.cadenas.grid(row=0, column=0, sticky="w")
            self.title_label.grid(row=0, column=1, sticky="ew", pady=(1, 0))
            self.date_label.grid(row=0, column=2, sticky="ew", pady=(1, 0))
        else:
            # left = close - roll - title - lock icon
            self.titlebar.columnconfigure(2, weight=1)
            self.titlebar.columnconfigure(3, weight=1)
            self.roll.grid(row=0, column=1, sticky="w")
            self.close.grid(row=0, column=0, sticky="w", padx=(2, 0))
            self.cadenas.grid(row=0, column=4, sticky="e")
            self.title_label.grid(row=0, column=2, sticky="ew", pady=(1, 0))
            self.date_label.grid(row=0, column=3, sticky="ew", pady=(1, 0))
        if CONFIG.getboolean('General', 'date_in_title', fallback=True):
            self.date_label.configure(text=' - ' + self._date)
        # -------- body
        # corner grip
        self.corner = Sizegrip(self, style=self.id + ".TSizegrip")
        # text
        self.scroll = AutoScrollbar(self, orient='vertical')
        self.txt = MyText(self, cb_style=self.id + ".TCheckbutton",
                          selectforeground='white',
                          inactiveselectbackground=selectbg,
                          selectbackground=selectbg,
                          yscrollcommand=self.scroll.set)
        self.scroll.configure(command=self.txt.yview)
        # --- menus
        # --- * menu on title
        self.menu = Menu(self, tearoff=False)
        # note color
        menu_note_color = Menu(self.menu, tearoff=False)
        colors = list(COLORS.keys())
        colors.sort()
        for coul in colors:
            menu_note_color.add_command(label=coul, image=self.master.im_color[coul],
                                        compound='left',
                                        command=lambda key=coul: self.change_color(key))
        # category
        self.category = StringVar(self, kwargs.get("category",
                                                   CONFIG.get("General",
                                                              "default_category")))
        self.menu_categories = Menu(self.menu, tearoff=False)
        categories = CONFIG.options("Categories")
        categories.sort()
        for cat in categories:
            self.menu_categories.add_radiobutton(label=cat.capitalize(),
                                                 value=cat,
                                                 variable=self.category,
                                                 command=self.change_category)
        # position: normal, always above, always below
        self.position = StringVar(self,
                                  kwargs.get("position",
                                             CONFIG.get("General", "position")))
        menu_position = Menu(self.menu, tearoff=False)
        menu_position.add_radiobutton(label=_("Always above"),
                                      value="above",
                                      variable=self.position,
                                      command=self.set_position_above)
        menu_position.add_radiobutton(label=_("Always below"),
                                      value="below",
                                      variable=self.position,
                                      command=self.set_position_below)
        menu_position.add_radiobutton(label=_("Normal"),
                                      value="normal",
                                      variable=self.position,
                                      command=self.set_position_normal)
        # mode: note, list, todo list
        menu_mode = Menu(self.menu, tearoff=False)
        self.mode = StringVar(self, kwargs.get("mode", "note"))
        menu_mode.add_radiobutton(label=_("Note"), value="note",
                                  variable=self.mode,
                                  command=self.set_mode_note)
        menu_mode.add_radiobutton(label=_("List"), value="list",
                                  variable=self.mode,
                                  command=self.set_mode_list)
        menu_mode.add_radiobutton(label=_("ToDo List"), value="todolist",
                                  variable=self.mode,
                                  command=self.set_mode_todolist)
        menu_mode.add_radiobutton(label=_("Enumeration"), value="enum",
                                  variable=self.mode,
                                  command=self.set_mode_enum)
        # NOTE: lock() calls self.menu.entryconfigure(3, ...) to flip the
        # "Lock"/"Unlock" label — keep the entry order below in sync.
        self.menu.add_command(label=_("Delete"), command=self.delete)
        self.menu.add_cascade(label=_("Category"), menu=self.menu_categories)
        self.menu.add_cascade(label=_("Color"), menu=menu_note_color)
        self.menu.add_command(label=_("Lock"), command=self.lock)
        self.menu.add_cascade(label=_("Position"), menu=menu_position)
        self.menu.add_cascade(label=_("Mode"), menu=menu_mode)
        # --- * menu on main text
        self.menu_txt = Menu(self.txt, tearoff=False)
        # style
        menu_style = Menu(self.menu_txt, tearoff=False)
        menu_style.add_command(label=_("Bold"),
                               command=lambda: self.txt.toggle_text_style("bold"),
                               accelerator='Ctrl+B')
        menu_style.add_command(label=_("Italic"),
                               command=lambda: self.txt.toggle_text_style("italic"),
                               accelerator='Ctrl+I')
        menu_style.add_command(label=_("Underline"),
                               command=self.txt.toggle_underline,
                               accelerator='Ctrl+U')
        menu_style.add_command(label=_("Overstrike"),
                               command=self.txt.toggle_overstrike)
        menu_style.add_command(label=_("Mono"),
                               command=lambda: self.txt.toggle_text_style("mono"),
                               accelerator='Ctrl+M')
        # text alignment
        menu_align = Menu(self.menu_txt, tearoff=False)
        menu_align.add_command(label=_("Left"),
                               command=lambda: self.txt.set_align("left"),
                               accelerator='Ctrl+L')
        menu_align.add_command(label=_("Right"),
                               command=lambda: self.txt.set_align("right"),
                               accelerator='Ctrl+R')
        menu_align.add_command(label=_("Center"),
                               command=lambda: self.txt.set_align("center"))
        # text color
        menu_colors = Menu(self.menu_txt, tearoff=False)
        colors = list(TEXT_COLORS.keys())
        colors.sort()
        for coul in colors:
            menu_colors.add_command(label=coul, image=self.master.im_text_color[coul],
                                    compound='left',
                                    command=lambda key=coul: self.txt.change_sel_color(TEXT_COLORS[key]))
        # insert
        menu_insert = Menu(self.menu_txt, tearoff=False)
        menu_insert.add_command(label=_("Symbols"), command=self.add_symbols,
                                accelerator='Ctrl+S')
        menu_insert.add_command(label=_("Checkbox"), command=self.add_checkbox,
                                accelerator='Ctrl+O')
        menu_insert.add_command(label=_("Image"), command=self.add_image)
        menu_insert.add_command(label=_("Date"), command=self.add_date,
                                accelerator='Ctrl+D')
        menu_insert.add_command(label=_("Link"), command=self.add_link,
                                accelerator='Ctrl+H')
        if LATEX:
            menu_insert.add_command(label="LaTeX", command=self.add_latex,
                                    accelerator='Ctrl+T')
        self.menu_txt.add_cascade(label=_("Style"), menu=menu_style)
        self.menu_txt.add_cascade(label=_("Alignment"), menu=menu_align)
        self.menu_txt.add_cascade(label=_("Color"), menu=menu_colors)
        self.menu_txt.add_cascade(label=_("Insert"), menu=menu_insert)
        # --- restore note content/appearence
        self.color = kwargs.get("color",
                                CONFIG.get("Categories", self.category.get()))
        self.txt.insert('1.0', kwargs.get("txt", ""))
        self.txt.edit_reset() # clear undo stack
        # restore inserted objects (images and checkboxes)
        # we need to restore objects with increasing index to avoid placement errors
        indexes = list(kwargs.get("inserted_objects", {}).keys())
        indexes.sort(key=sorting)
        latex_data = kwargs.get("latex", {})
        for index in indexes:
            kind, val = kwargs["inserted_objects"][index]
            if kind == "checkbox":
                if val:
                    state = ('selected', '!alternate')
                else:
                    state = ('!selected', '!alternate')
                self.txt.checkbox_create(index, state)
            elif kind == "image":
                if os.path.exists(val):
                    self.images.append(PhotoImage(master=self.txt, file=val))
                    self.txt.image_create(index,
                                          image=self.images[-1],
                                          align='bottom',
                                          name=val)
                else:
                    # missing image file: if it was a rendered LaTeX formula,
                    # re-render it from the stored LaTeX source
                    path, img = os.path.split(val)
                    if LATEX and path == PATH_LATEX and img in latex_data:
                        math_to_image(latex_data[img], val,
                                      fontsize=CONFIG.getint("Font", "text_size") - 2)
                        self.images.append(PhotoImage(file=val, master=self))
                        self.txt.image_create(index, image=self.images[-1],
                                              align='bottom', name=val)
                        self.txt.tag_add(img, index)
        # restore tags
        for tag, indices in kwargs.get("tags", {}).items():
            if indices:
                self.txt.tag_add(tag, *indices)
        # restore links
        links = kwargs.get("links", {})
        for link_nb, link in links.items():
            self.txt.links[link_nb] = link
            self.links_click_id[link_nb] = ""
            lid = "link#%i" % link_nb
            self.txt.tag_bind(lid,
                              "<Button-1>",
                              lambda e, lnb=link_nb: self.open_link(lnb))
            self.txt.tag_bind(lid,
                              "<Double-Button-1>",
                              lambda e, lnb=link_nb: self.edit_link(lnb))
        if links:
            self.nb_links = max(links)
        # restore latex
        for img, latex in latex_data.items():
            self.txt.latex[img] = latex
            if LATEX:
                self.txt.tag_bind(img, '<Double-Button-1>',
                                  lambda e, im=img: self.add_latex(im))
        mode = self.mode.get()
        if mode != "note":
            self.txt.tag_add(mode, "1.0", "end")
        self.txt.mode = mode
        # --- placement
        self.columnconfigure(0, weight=1)
        # titlebar
        self.titlebar.grid(row=0, column=0, columnspan=2, sticky='ew')
        # body
        self.txt.grid(row=1, column=0, sticky="ewsn",
                      pady=(1, 4), padx=4)
        self.scroll.grid(row=1, column=1, sticky='ns', pady=(2, 14))
        self.corner.lift(self.txt)
        self.corner.place(relx=1.0, rely=1.0, anchor="se")
        # --- bindings
        self.bind("<FocusOut>", self.save_note)
        self.bind('<Button-1>', self.change_focus, True)
        self.close.bind("<Button-1>", self.hide)
        self.close.bind("<Enter>", self.enter_close)
        self.close.bind("<Leave>", self.leave_close)
        self.roll.bind("<Button-1>", self.rollnote)
        self.roll.bind("<Enter>", self.enter_roll)
        # NOTE(review): stray space in "<Leave >" — apparently tolerated by
        # Tk's event-sequence parser, but should read "<Leave>"; confirm.
        self.roll.bind("<Leave >", self.leave_roll)
        self.title_label.bind("<Double-Button-1>", self.edit_title)
        self.title_label.bind("<ButtonPress-1>", self.start_move)
        self.title_label.bind("<ButtonRelease-1>", self.stop_move)
        self.title_label.bind("<B1-Motion>", self.move)
        self.title_label.bind('<Button-3>', self.show_menu)
        self.title_label.bind('<Button-4>', self.mouse_roll)
        self.title_label.bind('<Button-5>', self.mouse_roll)
        self.date_label.bind("<Double-Button-1>", self.edit_title)
        self.date_label.bind("<ButtonPress-1>", self.start_move)
        self.date_label.bind("<ButtonRelease-1>", self.stop_move)
        self.date_label.bind("<B1-Motion>", self.move)
        self.date_label.bind('<Button-3>', self.show_menu)
        self.date_label.bind('<Button-4>', self.mouse_roll)
        self.date_label.bind('<Button-5>', self.mouse_roll)
        self.title_entry.bind("<Return>", lambda e: self.title_entry.place_forget())
        self.title_entry.bind("<FocusOut>", lambda e: self.title_entry.place_forget())
        self.title_entry.bind("<Escape>", lambda e: self.title_entry.place_forget())
        self.txt.bind("<FocusOut>", self.save_note)
        self.txt.bind('<Button-3>', self.show_menu_txt)
        self.corner.bind('<ButtonRelease-1>', self.resize)
        # --- keyboard shortcuts
        self.txt.bind('<Control-b>', lambda e: self.txt.toggle_text_style('bold'))
        self.txt.bind('<Control-i>', lambda e: self.txt.toggle_text_style('italic'))
        self.txt.bind('<Control-m>', lambda e: self.txt.toggle_text_style('mono'))
        self.txt.bind('<Control-u>', lambda e: self.txt.toggle_underline())
        self.txt.bind('<Control-r>', lambda e: self.txt.set_align('right'))
        self.txt.bind('<Control-l>', lambda e: self.txt.set_align('left'))
        self.txt.bind('<Control-s>', lambda e: self.add_symbols())
        self.txt.bind('<Control-d>', self.add_date)
        self.txt.bind('<Control-o>', self.add_checkbox)
        self.txt.bind('<Control-h>', lambda e: self.add_link())
        if LATEX:
            self.txt.bind('<Control-t>', lambda e: self.add_latex())
        # --- window geometry
        self.update_idletasks()
        self.geometry(kwargs.get("geometry", '220x235'))
        self.save_geometry = kwargs.get("geometry", '220x235')
        self.deiconify()
        self.update_idletasks()
        self.focus_force()
        self.txt.focus_set()
        # toggles is_locked back to the persisted state (see the inverted
        # initialization of self.is_locked above)
        self.lock()
        if kwargs.get("rolled", False):
            self.rollnote()
        if self.position.get() == "above":
            self.set_position_above()
        elif self.position.get() == "below":
            self.set_position_below()
    def __setattr__(self, name, value):
        """Set attribute; assigning to ``color`` re-skins the whole note.

        Writing ``self.color`` propagates the new background to every
        per-note ttk style (labels, frame, checkbuttons, sizegrip,
        scrollbar) and to the plain-tk widgets (toplevel, text).
        """
        object.__setattr__(self, name, value)
        if name == "color":
            self.style.configure(self.id + ".TSizegrip",
                                 background=self.color)
            self.style.map(self.id + ".TSizegrip",
                           background=[('disabled', self.color)])
            self.style.configure(self.id + ".TLabel",
                                 background=self.color)
            self.style.configure(self.id + ".TFrame",
                                 background=self.color)
            self.style.configure("close" + self.id + ".TLabel",
                                 background=self.color)
            self.style.configure("roll" + self.id + ".TLabel",
                                 background=self.color)
            self.style.map(self.id + ".TLabel",
                           background=[("active", self.color)])
            self.style.configure(self.id + ".TCheckbutton",
                                 background=self.color)
            self.style.map(self.id + ".TCheckbutton",
                           background=[("active", self.color),
                                       ("disabled", self.color)])
            self.style.map("close" + self.id + ".TLabel",
                           background=[("active", self.color)])
            self.style.map("roll" + self.id + ".TLabel",
                           background=[("active", self.color)])
            self.scroll.configure(style='%s.Vertical.TScrollbar' % value)
            # non-ttk widgets take the color directly
            self.configure(bg=self.color)
            self.txt.configure(bg=self.color)
def delete(self, confirmation=True):
"""Delete this note."""
if confirmation:
rep = askokcancel(_("Confirmation"), _("Delete the note?"))
else:
rep = True
if rep:
del(self.master.note_data[self.id])
del(self.master.notes[self.id])
self.master.save()
self.destroy()
    def lock(self):
        """Toggle read-only mode (text, checkboxes, resize grip, title edit).

        Despite its name this method is a *toggle*: if the note is
        currently locked it unlocks it, and vice versa. ``__init__``
        relies on this by initializing ``is_locked`` inverted and calling
        ``lock()`` once.
        """
        if self.is_locked:
            # --- unlock: re-enable editing, selection colors and resizing
            selectbg = self.style.lookup('TEntry', 'selectbackground', ('focus',))
            self.txt.configure(state="normal",
                               selectforeground='white',
                               cursor='xterm',
                               selectbackground=selectbg,
                               inactiveselectbackground=selectbg)
            self.corner.state(('!disabled',))
            self.corner.configure(cursor='bottom_right_corner')
            self.style.configure("sel.%s.TCheckbutton" % self.id, background=selectbg)
            self.style.map("sel.%s.TCheckbutton" % self.id, background=[("active", selectbg)])
            self.is_locked = False
            for checkbox in self.txt.window_names():
                ch = self.txt.children[checkbox.split(".")[-1]]
                ch.configure(state="normal")
            # transparent icon keeps the titlebar layout stable
            self.cadenas.configure(image=self.im_unlock)
            # entry 3 of the title menu is the Lock/Unlock command
            self.menu.entryconfigure(3, label=_("Lock"))
            self.title_label.bind("<Double-Button-1>", self.edit_title)
            self.txt.bind('<Button-3>', self.show_menu_txt)
        else:
            # --- lock: disable editing, grey out selection, freeze layout
            self.txt.configure(state="disabled",
                               cursor='arrow',
                               selectforeground='black',
                               inactiveselectbackground='#c3c3c3',
                               selectbackground='#c3c3c3')
            self.style.configure("sel.%s.TCheckbutton" % self.id, background='#c3c3c3')
            self.style.map("sel.%s.TCheckbutton" % self.id, background=[("active", '#c3c3c3')])
            self.cadenas.configure(image=self.im_lock)
            self.corner.state(('disabled',))
            self.corner.configure(cursor='arrow')
            for checkbox in self.txt.window_names():
                ch = self.txt.children[checkbox.split(".")[-1]]
                ch.configure(state="disabled")
            self.is_locked = True
            self.menu.entryconfigure(3, label=_("Unlock"))
            self.title_label.unbind("<Double-Button-1>")
            self.txt.unbind('<Button-3>')
        self.save_note()
def save_info(self):
"""Return the dictionnary containing all the note data."""
data = {}
data["txt"] = self.txt.get("1.0", "end")[:-1]
data["tags"] = {}
for tag in self.txt.tag_names():
if tag not in ["sel", "todolist", "list", "enum"]:
data["tags"][tag] = [index.string for index in self.txt.tag_ranges(tag)]
data["title"] = self.title_var.get()
data["date"] = self._date
data["geometry"] = self.save_geometry
data["category"] = self.category.get()
data["color"] = self.color
data["locked"] = self.is_locked
data["mode"] = self.mode.get()
data["inserted_objects"] = {}
data["rolled"] = not self.txt.winfo_ismapped()
data["position"] = self.position.get()
data["links"] = {}
for i, link in self.txt.links.items():
if self.txt.tag_ranges("link#%i" % i):
data["links"][i] = link
data["latex"] = {}
for img, latex in self.txt.latex.items():
if self.txt.tag_ranges(img):
data["latex"][img] = latex
for image in self.txt.image_names():
data["inserted_objects"][self.txt.index(image)] = ("image",
image.split('#')[0])
for checkbox in self.txt.window_names():
ch = self.txt.children[checkbox.split(".")[-1]]
data["inserted_objects"][self.txt.index(checkbox)] = ("checkbox", "selected" in ch.state())
return data
    def change_color(self, key):
        """Change the color of the note.

        key -- name of a color in the COLORS palette.
        """
        self.color = COLORS[key]  # triggers the restyle in __setattr__
        self.save_note()
    def change_category(self, category=None):
        """Change the category of the note if provided and update its color.

        category -- new category name; if falsy, only the color is
                    refreshed from the current category's configuration.
        """
        if category:
            self.category.set(category)
        self.color = CONFIG.get("Categories", self.category.get())
        self.save_note()
    def set_position_above(self):
        """Make note always above the other windows."""
        self.attributes('-type', 'dock')
        self.focus_force()
        self.update_idletasks()
        # locate this note's X window: try the active window first, else
        # scan the window-manager client list for our WM_NAME
        w = EWMH.getActiveWindow()
        if w is None or w.get_wm_name() != 'mynotes%s' % self.id:
            cl = EWMH.getClientList()
            i = 0
            n = len(cl)
            while i < n and cl[i].get_wm_name() != 'mynotes%s' % self.id:
                i += 1
            if i < n:
                w = cl[i]
            else:
                w = None
        if w:
            # set ABOVE, clear BELOW, and flush so the WM applies it now
            EWMH.setWmState(w, 1, '_NET_WM_STATE_ABOVE')
            EWMH.setWmState(w, 0, '_NET_WM_STATE_BELOW')
            EWMH.display.flush()
        if not CONFIG.getboolean('General', 'splash_supported', fallback=True):
            # remap the window so the new -type attribute takes effect
            self.withdraw()
            self.deiconify()
        self.save_note()
    def set_position_below(self):
        """Make note always below the other windows."""
        self.attributes('-type', 'desktop')
        self.focus_force()
        self.update_idletasks()
        # locate this note's X window: try the active window first, else
        # scan the window-manager client list for our WM_NAME
        w = EWMH.getActiveWindow()
        if w is None or w.get_wm_name() != 'mynotes%s' % self.id:
            cl = EWMH.getClientList()
            i = 0
            n = len(cl)
            while i < n and cl[i].get_wm_name() != 'mynotes%s' % self.id:
                i += 1
            if i < n:
                w = cl[i]
            else:
                w = None
        if w:
            # clear ABOVE, set BELOW, and flush so the WM applies it now
            EWMH.setWmState(w, 0, '_NET_WM_STATE_ABOVE')
            EWMH.setWmState(w, 1, '_NET_WM_STATE_BELOW')
            EWMH.display.flush()
        if not CONFIG.getboolean('General', 'splash_supported', fallback=True):
            # remap the window so the new -type attribute takes effect
            self.withdraw()
            self.deiconify()
        self.save_note()
    def set_position_normal(self):
        """Make note be on top if active or behind the active window."""
        self.focus_force()
        self.update_idletasks()
        # locate this note's X window: try the active window first, else
        # scan the window-manager client list for our WM_NAME
        w = EWMH.getActiveWindow()
        if w is None or w.get_wm_name() != 'mynotes%s' % self.id:
            cl = EWMH.getClientList()
            i = 0
            n = len(cl)
            while i < n and cl[i].get_wm_name() != 'mynotes%s' % self.id:
                i += 1
            if i < n:
                w = cl[i]
            else:
                w = None
        if w:
            # clear both stacking hints so the WM manages the note normally
            EWMH.setWmState(w, 0, '_NET_WM_STATE_BELOW')
            EWMH.setWmState(w, 0, '_NET_WM_STATE_ABOVE')
            EWMH.display.flush()
        if CONFIG.getboolean('General', 'splash_supported', fallback=True):
            self.attributes('-type', 'splash')
        else:
            self.attributes('-type', 'toolbar')
        # remap the window so the new -type attribute takes effect
        self.withdraw()
        self.deiconify()
        self.save_note()
    def set_mode_note(self):
        """Set mode to note (classic text input).

        Strips the previous mode's per-line decoration (bullet, checkbox
        or enumeration number) from every line, inside a single undo step.
        """
        self.txt.add_undo_sep()
        self.txt.mode_change('note')
        tags = self.txt.tag_names('1.0')
        # number of the last line of the text widget
        end = int(self.txt.index("end").split(".")[0])
        if "list" in tags:
            self.txt.tag_remove_undoable("list", "1.0", "end")
            for i in range(1, end):
                # remove the leading "\t•\t" bullet
                if self.txt.get("%i.0" % i, "%i.3" % i) == "\t•\t":
                    self.txt.delete_undoable("%i.0" % i, "%i.3" % i)
        elif "todolist" in tags:
            self.txt.tag_remove_undoable("todolist", "1.0", "end")
            for i in range(1, end):
                try:
                    # remove the checkbox widget embedded at line start
                    ch = self.txt.window_cget("%i.0" % i, "window")
                    self.txt.delete_undoable("%i.0" % i)
                    self.txt.children[ch.split('.')[-1]].destroy()
                except TclError:
                    # no checkbox on this line
                    pass
        elif "enum" in tags:
            lines = self.txt.get("1.0", "end").splitlines()
            self.txt.tag_remove_undoable("enum", "1.0", "end")
            for i, l in zip(range(1, end), lines):
                # remove the leading "\t<number>.\t" enumeration prefix
                res = re.match('^\t[0-9]+\.\t', l)
                if res:
                    self.txt.delete_undoable("%i.0" % i, "%i.%i" % (i, res.end()))
        self.txt.add_undo_sep()
        self.save_note()
    def set_mode_list(self):
        """Set mode to list (bullet point list).

        Converts every line to start with "\t•\t", removing the previous
        mode's decoration first, inside a single undo step.
        """
        # number of the last line of the text widget
        end = int(self.txt.index("end").split(".")[0])
        self.txt.add_undo_sep()
        self.txt.mode_change('list')
        tags = self.txt.tag_names('1.0')
        if "list" in tags:
            # already in list mode: nothing to do
            return
        elif "todolist" in tags:
            self.txt.tag_remove_undoable("todolist", "1.0", "end")
            for i in range(1, end):
                # remove checkboxes
                try:
                    ch = self.txt.window_cget("%i.0" % i, "window")
                    self.txt.delete_undoable("%i.0" % i)
                    self.txt.children[ch.split('.')[-1]].destroy()
                except TclError:
                    # there is no checkbox
                    pass
                if self.txt.get("%i.0" % i, "%i.3" % i) != "\t•\t":
                    self.txt.insert_undoable("%i.0" % i, "\t•\t")
        elif "enum" in tags:
            lines = self.txt.get("1.0", "end").splitlines()
            self.txt.tag_remove_undoable("enum", "1.0", "end")
            for i, l in zip(range(1, end), lines):
                # remove enumeration
                res = re.match('^\t[0-9]+\.\t', l)
                if res:
                    self.txt.delete_undoable("%i.0" % i, "%i.%i" % (i, res.end()))
                if self.txt.get("%i.0" % i, "%i.3" % i) != "\t•\t":
                    self.txt.insert_undoable("%i.0" % i, "\t•\t")
        else:
            # coming from plain note mode: just prepend the bullets
            for i in range(1, end):
                if self.txt.get("%i.0" % i, "%i.3" % i) != "\t•\t":
                    self.txt.insert_undoable("%i.0" % i, "\t•\t")
        self.txt.tag_add_undoable("list", "1.0", "end")
        self.txt.add_undo_sep()
        self.save_note()
    def set_mode_enum(self):
        """Set mode to enum (enumeration).

        Convert the whole note to a numbered list: bullet or checkbox
        markup is removed first, then a "\t0.\t" placeholder is inserted on
        every unnumbered line and ``update_enum`` renumbers them. All edits
        go through the *_undoable wrappers, between two undo separators.
        """
        self.txt.add_undo_sep()
        self.txt.mode_change('enum')
        # number of the last line of the text widget
        end = int(self.txt.index("end").split(".")[0])
        lines = self.txt.get("1.0", "end").splitlines()
        # the mode tag on the first character identifies the current mode
        tags = self.txt.tag_names('1.0')
        if "enum" in tags:
            return  # already an enumeration, nothing to do
        elif "list" in tags:
            self.txt.tag_remove_undoable("list", "1.0", "end")
            for i, l in zip(range(1, end), lines):
                # drop the bullet prefix if present
                if self.txt.get("%i.0" % i, "%i.3" % i) == "\t•\t":
                    self.txt.delete_undoable("%i.0" % i, "%i.3" % i)
                if not re.match('^\t[0-9]+\.', l):
                    self.txt.insert_undoable("%i.0" % i, "\t0.\t")
        elif "todolist" in tags:
            self.txt.tag_remove_undoable("todolist", "1.0", "end")
            for i, l in zip(range(1, end), lines):
                # remove checkboxes
                try:
                    ch = self.txt.window_cget("%i.0" % i, "window")
                    self.txt.delete_undoable("%i.0" % i)
                    # destroy the embedded checkbox widget itself
                    self.txt.children[ch.split('.')[-1]].destroy()
                except TclError:
                    # no checkbox
                    pass
                if not re.match('^\t[0-9]+\.', l):
                    self.txt.insert_undoable("%i.0" % i, "\t0.\t")
        else:
            # plain text: number every line that is not numbered yet
            for i, l in zip(range(1, end), lines):
                if not re.match('^\t[0-9]+\.', l):
                    self.txt.insert_undoable("%i.0" % i, "\t0.\t")
        self.txt.tag_add_undoable("enum", "1.0", "end")
        # replace the "0." placeholders by the real sequence numbers
        self.txt.update_enum()
        self.txt.add_undo_sep()
        self.save_note()
    def set_mode_todolist(self):
        """Set mode to todolist (checkbox list).

        Convert the whole note to a checkbox list: bullet or enumeration
        markup is removed first, then a checkbox is created at the start of
        every line that does not already embed one. All edits go through
        the *_undoable wrappers, between two undo separators.
        """
        # number of the last line of the text widget
        end = int(self.txt.index("end").split(".")[0])
        self.txt.add_undo_sep()
        self.txt.mode_change('todolist')
        # the mode tag on the first character identifies the current mode
        tags = self.txt.tag_names('1.0')
        if "todolist" in tags:
            return  # already a todo list, nothing to do
        elif "list" in tags:
            self.txt.tag_remove_undoable("list", "1.0", "end")
            for i in range(1, end):
                # drop the bullet prefix if present
                if self.txt.get("%i.0" % i, "%i.3" % i) == "\t•\t":
                    self.txt.delete_undoable("%i.0" % i, "%i.3" % i)
                # window_cget raises TclError when the line has no
                # embedded widget yet -> create the checkbox then
                try:
                    self.txt.window_cget("%i.0" % i, "window")
                except TclError:
                    self.txt.checkbox_create_undoable("%i.0" % i)
        elif "enum" in tags:
            lines = self.txt.get("1.0", "end").splitlines()
            self.txt.tag_remove_undoable("enum", "1.0", "end")
            for i, l in zip(range(1, end), lines):
                # drop the enumeration prefix if present
                res = re.match('^\t[0-9]+\.\t', l)
                if res:
                    self.txt.delete_undoable("%i.0" % i, "%i.%i" % (i, res.end()))
                try:
                    self.txt.window_cget("%i.0" % i, "window")
                except TclError:
                    self.txt.checkbox_create_undoable("%i.0" % i)
        else:
            # plain text: add a checkbox on every line lacking one
            for i in range(1, end):
                try:
                    self.txt.window_cget("%i.0" % i, "window")
                except TclError:
                    self.txt.checkbox_create_undoable("%i.0" % i)
        self.txt.tag_add_undoable("todolist", "1.0", "end")
        self.txt.add_undo_sep()
        self.save_note()
# --- bindings
def enter_roll(self, event):
"""Mouse is over the roll icon."""
self.roll.configure(image="img_rollactive")
def leave_roll(self, event):
"""Mouse leaves the roll icon."""
self.roll.configure(image="img_roll")
def enter_close(self, event):
"""Mouse is over the close icon."""
self.close.configure(image="img_closeactive")
def leave_close(self, event):
"""Mouse leaves the close icon."""
self.close.configure(image="img_close")
def change_focus(self, event):
"""
Set focus on note.
Because of the use of window type "splash" (necessary to remove window
decoration), it is necessary to force the focus in order to write inside
the Text widget.
"""
if not self.is_locked:
event.widget.focus_force()
def show_menu(self, event):
"""Show main menu."""
self.menu.tk_popup(event.x_root, event.y_root)
def show_menu_txt(self, event):
"""Show text menu."""
self.txt.mark_set("insert", "current") # put insert cursor beneath mouse
self.menu_txt.tk_popup(event.x_root, event.y_root)
def resize(self, event):
"""Save new note geometry after resizing."""
self.save_geometry = self.geometry()
def edit_title(self, event):
"""Show entry to edit title."""
self.title_entry.place(x=self.title_label.winfo_x() + 5,
y=self.title_label.winfo_y(),
anchor="nw",
width=self.title_label.winfo_width() + self.date_label.winfo_width() - 10)
self.title_entry.selection_range(0, 'end')
self.title_entry.focus_set()
def start_move(self, event):
"""Start moving the note."""
if not self.is_locked:
self.x = event.x
self.y = event.y
self.configure(cursor='fleur')
def stop_move(self, event):
"""Stop moving the note."""
self.x = None
self.y = None
self.configure(cursor='')
def move(self, event):
"""Make note follow cursor motion."""
if self.x is not None and self.y is not None:
deltax = event.x - self.x
deltay = event.y - self.y
x = self.winfo_x() + deltax
y = self.winfo_y() + deltay
geo = "+%s+%s" % (x, y)
self.geometry(geo)
self.save_geometry = self.save_geometry.split("+")[0] + geo
def save_note(self, event=None):
"""Save note."""
data = self.save_info()
data["visible"] = True
self.master.note_data[self.id] = data
self.master.save()
def mouse_roll(self, event):
if event.num == 5 and not self.txt.winfo_ismapped():
self.txt.grid(row=1, columnspan=4,
column=0, sticky="ewsn", pady=(1, 4), padx=4)
self.corner.place(relx=1.0, rely=1.0, anchor="se")
self.geometry(self.save_geometry)
elif event.num == 4 and self.txt.winfo_ismapped():
self.txt.grid_forget()
self.corner.place_forget()
self.geometry("%sx22" % self.winfo_width())
self.save_note()
def rollnote(self, event=None):
"""Roll/unroll note."""
if self.txt.winfo_ismapped():
self.txt.grid_forget()
self.corner.place_forget()
self.geometry("%sx22" % self.winfo_width())
else:
self.txt.grid(row=1, columnspan=4,
column=0, sticky="ewsn", pady=(1, 4), padx=4)
self.corner.place(relx=1.0, rely=1.0, anchor="se")
self.geometry(self.save_geometry)
self.save_note()
def hide(self, event=None):
"""Hide note (can be displayed again via app menu)."""
cat = self.category.get()
self.master.add_note_to_menu(self.id, self.title_var.get().strip(), cat)
data = self.save_info()
data["visible"] = False
self.master.note_data[self.id] = data
del(self.master.notes[self.id])
self.master.save()
self.destroy()
# --- Settings update
    def update_position(self):
        """Apply the configured window type for the 'normal' position.

        'splash' removes the window decorations but is not supported by
        every window manager (cf. the 'splash_supported' option); fall back
        to 'toolbar' in that case. The withdraw/deiconify cycle makes the
        new type take effect.
        """
        # NOTE(review): other values of self.position appear to be handled
        # elsewhere — only 'normal' changes the window type here.
        if self.position.get() == 'normal':
            if CONFIG.getboolean('General', 'splash_supported', fallback=True):
                self.attributes('-type', 'splash')
            else:
                self.attributes('-type', 'toolbar')
        self.withdraw()
        self.deiconify()
def update_title_font(self):
"""Update title font after configuration change."""
font_title = "%s %s" % (CONFIG.get("Font", "title_family").replace(" ", "\ "),
CONFIG.get("Font", "title_size"))
style = CONFIG.get("Font", "title_style").split(",")
if style:
font_title = font_title + " " + " ".join(style)
self.title_label.configure(font=font_title)
self.date_label.configure(font=font_title)
def update_text_font(self):
"""Update text font after configuration change."""
self.txt.update_font()
def update_menu_cat(self, categories):
"""Update the category submenu."""
self.menu_categories.delete(0, "end")
for cat in categories:
self.menu_categories.add_radiobutton(label=cat.capitalize(), value=cat,
variable=self.category,
command=self.change_category)
    def update_titlebar(self):
        """Update title bar button order and the optional date display.

        Depending on the 'buttons_position' option the window buttons are
        laid out on the right (macOS-unlike) or on the left side of the
        title; the title/date columns get the stretch weight either way.
        """
        if CONFIG.get("General", "buttons_position") == "right":
            # right = lock icon - title - roll - close
            self.titlebar.columnconfigure(1, weight=1)
            self.titlebar.columnconfigure(2, weight=1)
            self.roll.grid(row=0, column=3, sticky="e")
            self.close.grid(row=0, column=4, sticky="e", padx=(0, 2))
            self.cadenas.grid(row=0, column=0, sticky="w")
            self.title_label.grid(row=0, column=1, sticky="ew", pady=(1, 0))
            self.date_label.grid(row=0, column=2, sticky="ew", pady=(1, 0))
        else:
            # left = close - roll - title - lock icon
            self.titlebar.columnconfigure(2, weight=1)
            self.titlebar.columnconfigure(3, weight=1)
            self.roll.grid(row=0, column=1, sticky="w")
            self.close.grid(row=0, column=0, sticky="w", padx=(2, 0))
            self.cadenas.grid(row=0, column=4, sticky="e")
            self.title_label.grid(row=0, column=2, sticky="ew", pady=(1, 0))
            self.date_label.grid(row=0, column=3, sticky="ew", pady=(1, 0))
        # show or clear the creation date next to the title
        if CONFIG.getboolean('General', 'date_in_title', fallback=True):
            self.date_label.configure(text=' - ' + self._date)
        else:
            self.date_label.configure(text='')
# --- Text edition
# ---* --Link
    def add_link(self, link_nb=None):
        """Insert link (URL or local file) in note.

        Opens a small dialog asking for the link target and the text to
        display. If ``link_nb`` is None a new link is created (using the
        current selection, if any, as display text); otherwise the existing
        link number ``link_nb`` is edited in place.
        """

        def local_file():
            # browse for a local file instead of typing a URL
            d, f = os.path.split(link.get())
            file = askopenfilename("", [], d, initialfile=f)
            if file:
                link.delete(0, 'end')
                link.insert(0, file)

        def ok(event=None):
            # validate the dialog: (re)insert the tagged link text
            lien = link.get()
            txt = text.get()
            if lien:
                self.txt.add_undo_sep()
                if not txt:
                    # fall back to the target itself as display text
                    txt = lien
                if link_nb is None:
                    # new link: allocate the next link number/tag id
                    self.nb_links += 1
                    lnb = self.nb_links
                    lid = "link#%i" % lnb
                else:
                    lnb = link_nb
                    lid = "link#%i" % lnb
                if sel:
                    # replace the selected (or previous link) text
                    index = sel[0]
                    self.txt.delete_undoable(*sel)
                else:
                    index = "insert"
                tags = self.txt.tag_names(index) + ("link", lid)
                self.txt.insert_undoable(index, txt, tags)
                self.txt.link_create_undoable(lnb, lien)
                self.txt.add_undo_sep()
                # single click opens the link, double click edits it
                self.txt.tag_bind(lid, "<Button-1>", lambda e: self.open_link(lnb))
                self.txt.tag_bind(lid, "<Double-Button-1>", lambda e: self.edit_link(lnb))
            top.destroy()
            self.txt.focus_force()

        if link_nb is None:
            # creating a new link: preload the current selection as text
            if self.txt.tag_ranges('sel'):
                txt = self.txt.get('sel.first', 'sel.last')
                sel = self.txt.index("sel.first"), self.txt.index("sel.last")
            else:
                txt = ''
                sel = ()
            link_txt = txt
        else:
            # editing an existing link: preload its text and target
            lid = "link#%i" % link_nb
            txt = self.txt.get('%s.first' % lid, '%s.last' % lid)
            link_txt = self.txt.links[link_nb]
            sel = self.txt.index('%s.first' % lid), self.txt.index('%s.last' % lid)
            self.txt.tag_add('sel', *sel)
        # build the dialog window next to the mouse pointer
        top = Toplevel(self, class_='MyNotes')
        top.withdraw()
        top.transient(self)
        top.update_idletasks()
        top.geometry("+%i+%i" % top.winfo_pointerxy())
        top.resizable(True, False)
        top.title(_("Link"))
        top.columnconfigure(1, weight=1)
        link = Entry(top)
        b_file = Button(top, image=self.im_clip, padding=0, command=local_file)
        text = Entry(top)
        text.insert(0, txt)
        text.icursor("end")
        link.insert(0, link_txt)
        link.icursor("end")
        Label(top, text=_("URL or file")).grid(row=0, column=0, sticky="e", padx=4, pady=4)
        Label(top, text=_("Text")).grid(row=1, column=0, sticky="e", padx=4, pady=4)
        link.grid(row=0, column=1, sticky="ew", padx=4, pady=4)
        b_file.grid(row=0, column=2, padx=4, pady=4)
        text.grid(row=1, column=1, sticky="ew", padx=4, pady=4)
        Button(top, text="Ok", command=ok).grid(row=2, columnspan=3, padx=4, pady=4)
        link.focus_set()
        text.bind("<Return>", ok)
        link.bind("<Return>", ok)
        top.bind('<Escape>', lambda e: top.destroy())
        top.deiconify()
        top.update_idletasks()
        top.grab_set()
def create_link(self, link):
self.nb_links += 1
lnb = self.nb_links
lid = "link#%i" % lnb
self.txt.link_create_undoable(lnb, link)
self.txt.tag_bind(lid, "<Button-1>", lambda e: self.open_link(lnb))
self.txt.tag_bind(lid, "<Double-Button-1>", lambda e: self.edit_link(lnb))
return lid
def open_link(self, link_nb):
"""Open link after small delay to avoid opening link on double click."""
lien = self.txt.links[link_nb]
self.links_click_id[link_nb] = self.after(500, lambda: open_url(lien))
def edit_link(self, link_nb):
"""Edit link number link_nb."""
# cancel link opening
self.after_cancel(self.links_click_id[link_nb])
if not self.is_locked:
self.add_link(link_nb)
# ---* --Add other objects
def add_checkbox(self, event=None):
"""Insert checkbox in note."""
index = self.txt.index("insert")
self.txt.add_undo_sep()
self.txt.checkbox_create_undoable(index)
self.txt.add_undo_sep()
def add_date(self, event=None):
"""Insert today's date in note."""
self.txt.add_undo_sep()
self.txt.insert_undoable("insert", strftime("%x"))
self.txt.add_undo_sep()
    def add_latex(self, img_name=None):
        """Insert image generated from latex expression given in the entry.

        If ``img_name`` is None a new image is created (initialised from the
        current selection, if any); otherwise the existing image
        ``img_name`` is re-rendered with the edited expression. Does nothing
        on a locked note.
        """
        if self.is_locked:
            return

        def ok(event):
            # render the expression to a PNG and insert it in the note
            latex = r'%s' % text.get()
            if latex:
                if img_name is None:
                    # next free file number: one past the largest existing one
                    l = [int(os.path.splitext(f)[0]) for f in os.listdir(PATH_LATEX)]
                    l.sort()
                    if l:
                        i = l[-1] + 1
                    else:
                        i = 0
                    img = "%i.png" % i
                    # double clicking the image re-opens this editor
                    self.txt.tag_bind(img, '<Double-Button-1>',
                                      lambda e: self.add_latex(img))
                else:
                    img = img_name
                im = os.path.join(PATH_LATEX, img)
                try:
                    math_to_image(latex, im, fontsize=CONFIG.getint("Font", "text_size") - 2)
                    self.images.append(PhotoImage(file=im, master=self))
                    self.txt.add_undo_sep()
                    if sel:
                        # replace the selected text by the image
                        index = sel[0]
                        self.txt.delete_undoable(*sel)
                    else:
                        if img_name:
                            # editing: replace the clicked image in place
                            index = self.txt.index("current")
                            self.txt.delete_undoable(index)
                        else:
                            index = self.txt.index("insert")
                    self.txt.latex_create_undoable(index, img, self.images[-1], latex)
                    self.txt.tag_add_undoable(img, index)
                    self.txt.add_undo_sep()
                    top.destroy()
                    self.txt.focus_force()
                except Exception as e:
                    # rendering can fail on invalid LaTeX: show the reason
                    showerror(_("Error"), str(e))

        # build the one-entry dialog next to the mouse pointer
        top = Toplevel(self, class_='MyNotes')
        top.withdraw()
        top.transient(self)
        top.update_idletasks()
        top.geometry("+%i+%i" % top.winfo_pointerxy())
        top.resizable(True, False)
        top.title("LaTeX")
        text = Entry(top, justify='center')
        if img_name is not None:
            # editing: preload the stored expression
            text.insert(0, self.txt.latex[img_name])
            sel = ()
        else:
            if self.txt.tag_ranges('sel'):
                # wrap the current selection in $...$
                sel = self.txt.index("sel.first"), self.txt.index("sel.last")
                text.insert(0, '$')
                text.insert('end', self.txt.get('sel.first', 'sel.last'))
                text.insert('end', '$')
            else:
                sel = ()
                text.insert(0, '$$')
                text.icursor(1)
        text.pack(fill='x', expand=True)
        text.bind('<Return>', ok)
        text.bind('<Escape>', lambda e: top.destroy())
        text.focus_set()
        top.deiconify()
        top.update_idletasks()
        top.grab_set()
def create_latex(self, latex, index):
l = [int(os.path.splitext(f)[0]) for f in os.listdir(PATH_LATEX)]
l.sort()
if l:
i = l[-1] + 1
else:
i = 0
img = "%i.png" % i
self.txt.tag_bind(img, '<Double-Button-1>',
lambda e: self.add_latex(img))
im = os.path.join(PATH_LATEX, img)
math_to_image(latex, im, fontsize=CONFIG.getint("Font", "text_size") - 2)
self.images.append(PhotoImage(file=im, master=self))
self.txt.latex_create_undoable(index, img, self.images[-1], latex)
self.txt.tag_add_undoable(img, index)
def add_image(self, event=None):
"""Insert image in note."""
fichier = askopenfilename(defaultextension="",
filetypes=[("PNG", "*.png"),
("JPEG", "*.jpg"),
("GIF", "*.gif"),
(_("All files"), "*")],
initialdir="",
initialfile="",
title=_('Select image'))
if os.path.exists(fichier):
try:
im = PhotoImage(master=self.txt, file=fichier)
except OSError:
showerror(_("Error"),
_("{file}: Unsupported image format.").format(file=fichier))
else:
self.images.append(im)
index = self.txt.index("insert")
self.txt.add_undo_sep()
self.txt.image_create_undoable(index,
align='bottom',
image=im,
name=fichier)
self.txt.add_undo_sep()
self.txt.focus_force()
elif fichier:
showerror(_("Error"), _("{file} does not exist.").format(file=fichier))
def add_symbols(self):
"""Insert symbol in note."""
symbols = pick_symbol(self,
CONFIG.get("Font", "text_family").replace(" ", "\ "),
CONFIG.get("General", "symbols"),
class_='MyNotes')
self.txt.add_undo_sep()
self.txt.insert_undoable("insert", symbols)
self.txt.add_undo_sep()
| j4321/MyNotes | mynoteslib/sticky.py | Python | gpl-3.0 | 52,023 |
import logging
import os
from pathlib import Path
from typing import Callable, List, Optional
from qgis.core import QgsCoordinateReferenceSystem, QgsProject
from qgis.gui import QgsProjectionSelectionWidget
from qgis.PyQt import uic
from qgis.PyQt.QtWidgets import (
QCheckBox,
QDialog,
QFileDialog,
QLineEdit,
QListWidget,
QListWidgetItem,
QPushButton,
QRadioButton,
QWidget,
)
from qgis.utils import pluginDirectory
from qkan import QKan, list_selected_items
from qkan.database.dbfunc import DBConnection
from qkan.database.qkan_utils import fehlermeldung
logger = logging.getLogger("QKan.mu.application_dialog")
EXPORT_CLASS, _ = uic.loadUiType(
os.path.join(os.path.dirname(__file__), "res", "mu_export_dialog_base.ui")
)
class _Dialog(QDialog):
    """Common base of the import/export dialogs.

    Loads the generated Qt UI, remembers the default directory used by the
    file pickers and stores the translation callable.
    """

    def __init__(
        self,
        default_dir: str,
        tr: Callable,
        parent: Optional[QWidget] = None,
    ):
        # noinspection PyArgumentList
        super().__init__(parent)
        self.setupUi(self)
        self.tr = tr
        self.default_dir = default_dir
        logger.debug(
            f"mu_porter.application_dialog._Dialog.__init__:"
            f"\nself.default_dir: {self.default_dir}"
        )
class ExportDialog(_Dialog, EXPORT_CLASS):  # type: ignore
    """Dialog configuring the export of QKan data to a Mike+ database."""

    # widgets created by the generated UI class (EXPORT_CLASS)
    tf_database: QLineEdit
    tf_template: QLineEdit
    tf_exportdb: QLineEdit
    cb_use_templatedir: QCheckBox
    pb_database: QPushButton
    pb_template: QPushButton
    pb_exportdb: QPushButton
    cb_haltungen: QCheckBox
    cb_schaechte: QCheckBox
    cb_auslaesse: QCheckBox
    cb_speicher: QCheckBox
    cb_pumpen: QCheckBox
    cb_wehre: QCheckBox
    cb_flaechen: QCheckBox
    cb_rohrprofile: QCheckBox
    cb_abflussparameter: QCheckBox
    cb_bodenklassen: QCheckBox
    cb_einleitdirekt: QCheckBox
    cb_aussengebiete: QCheckBox
    cb_einzugsgebiete: QCheckBox
    cb_tezg: QCheckBox
    rb_update: QRadioButton
    rb_append: QRadioButton
    lw_teilgebiete: QListWidget
    db_qkan: DBConnection

    # cb_export_schaechte: QCheckBox
    # cb_export_auslaesse: QCheckBox
    # cb_export_speicher: QCheckBox
    # cb_export_haltungen: QCheckBox
    # cb_export_pumpen: QCheckBox
    # cb_export_wehre: QCheckBox

    def __init__(
        self,
        default_dir: str,
        tr: Callable,
        parent: Optional[QWidget] = None,
    ):
        """Wire up the widget signals and restore the saved dialog state."""
        # noinspection PyArgumentList
        super().__init__(default_dir, tr, parent)
        self.default_dir = default_dir

        # Attach events
        # self.pb_database.clicked.connect(self.select_database)  # derived from the project
        self.pb_exportdb.clicked.connect(self.select_exportdb)
        self.pb_template.clicked.connect(self.select_template)
        # self.button_box.helpRequested.connect(self.click_help)

        # actions on lw_teilgebiete: QListWidget
        self.cb_selActive.stateChanged.connect(self.click_selection)
        self.lw_teilgebiete.itemClicked.connect(self.count_selection)
        self.lw_teilgebiete.itemClicked.connect(self.click_lw_teilgebiete)

        # Init fields
        # restore databases and templates from the configuration
        # self.tf_database.setText(QKan.config.mu.database)
        self.tf_exportdb.setText(QKan.config.mu.export_file)
        self.tf_template.setText(QKan.config.mu.template)

        # restore which tables are selected for export
        self.cb_haltungen.setChecked(QKan.config.check_export.haltungen)
        self.cb_schaechte.setChecked(QKan.config.check_export.schaechte)
        self.cb_auslaesse.setChecked(QKan.config.check_export.auslaesse)
        self.cb_speicher.setChecked(QKan.config.check_export.speicher)
        self.cb_pumpen.setChecked(QKan.config.check_export.pumpen)
        self.cb_wehre.setChecked(QKan.config.check_export.wehre)
        self.cb_flaechen.setChecked(QKan.config.check_export.flaechen)
        self.cb_rohrprofile.setChecked(QKan.config.check_export.rohrprofile)
        self.cb_abflussparameter.setChecked(QKan.config.check_export.abflussparameter)
        self.cb_bodenklassen.setChecked(QKan.config.check_export.bodenklassen)
        self.cb_einleitdirekt.setChecked(QKan.config.check_export.einleitdirekt)
        self.cb_aussengebiete.setChecked(QKan.config.check_export.aussengebiete)
        self.cb_einzugsgebiete.setChecked(QKan.config.check_export.einzugsgebiete)
        self.cb_tezg.setChecked(QKan.config.check_export.tezg)

        # export mode: update existing records or append new ones
        self.rb_append.setChecked(QKan.config.check_export.append)
        self.rb_update.setChecked(QKan.config.check_export.update)

    # disabled: the source database is derived from the loaded project
    # def select_database(self):
    #     # noinspection PyArgumentList,PyCallByClass
    #     filename, _ = QFileDialog.getOpenFileName(
    #         self,
    #         self.tr("Zu importierende SQLite-Datei"),
    #         self.default_dir,
    #         "*.sqlite",
    #     )
    #     if filename:
    #         self.tf_database.setText(filename)
    #         self.default_dir = os.path.dirname(filename)

    def select_template(self) -> None:
        """Let the user pick the template for the Mike+ file to create."""
        # noinspection PyArgumentList,PyCallByClass
        if self.cb_use_templatedir.isChecked():
            # TODO: Replace with QKan.config.project.template?
            searchdir = str(Path(pluginDirectory("qkan")) / "templates" / "Projekt.qgs")
        else:
            searchdir = self.default_dir
        # noinspection PyCallByClass,PyArgumentList
        filename, _ = QFileDialog.getOpenFileName(
            self,
            self.tr("Vorlage für die zu erstellende Mike+-Datei"),
            searchdir,
            "*.sqlite",
        )
        if filename:
            self.tf_template.setText(filename)
            # self.default_dir = os.path.dirname(filename)

    def select_exportdb(self) -> None:
        """Let the user choose the Mike+ database file to create."""
        # noinspection PyArgumentList,PyCallByClass
        filename, _ = QFileDialog.getSaveFileName(
            self,
            self.tr("Zu erstellende Mike+-Datei"),
            self.default_dir,
            "*.sqlite",
        )
        if filename:
            self.tf_exportdb.setText(filename)
            # self.default_dir = os.path.dirname(filename)

    def click_selection(self) -> None:
        """React to the checkbox that activates the sub-district selection."""
        # the checkbox already has its post-click state here
        if self.cb_selActive.isChecked():
            # nothing to do ...
            logger.debug("\nChecked = True")
        else:
            # deactivate the selection and reset the list
            anz = self.lw_teilgebiete.count()
            for i in range(anz):
                item = self.lw_teilgebiete.item(i)
                item.setSelected(False)
                # self.lw_teilgebiete.setItemSelected(item, False)
        # refresh the displayed counts
        self.count_selection()

    def click_lw_teilgebiete(self) -> None:
        """React to a click in the sub-district list."""
        self.cb_selActive.setChecked(True)
        self.count_selection()

    def count_selection(self) -> bool:
        """
        Count the areas, manholes and reaches affected by the current
        sub-district selection and display the numbers in the form.
        """
        if not self.db_qkan:
            logger.error("db_qkan is not initialized.")
            return False
        teilgebiete: List[str] = list_selected_items(self.lw_teilgebiete)
        # teilgebiete: List[str] = []  # TODO: re-enable

        # count the areas (flaechen) to be considered
        auswahl = ""
        if len(teilgebiete) != 0:
            auswahl = " WHERE flaechen.teilgebiet in ('{}')".format(
                "', '".join(teilgebiete)
            )

        sql = f"SELECT count(*) AS anzahl FROM flaechen {auswahl}"
        if not self.db_qkan.sql(sql, "QKan_ExportHE.application.countselection (1)"):
            return False
        daten = self.db_qkan.fetchone()
        if not (daten is None):
            self.lf_anzahl_flaechen.setText(str(daten[0]))
        else:
            self.lf_anzahl_flaechen.setText("0")

        # count the manholes (schaechte) to be considered
        auswahl = ""
        if len(teilgebiete) != 0:
            auswahl = " WHERE schaechte.teilgebiet in ('{}')".format(
                "', '".join(teilgebiete)
            )

        sql = f"SELECT count(*) AS anzahl FROM schaechte {auswahl}"
        if not self.db_qkan.sql(sql, "QKan_ExportHE.application.countselection (2) "):
            return False
        daten = self.db_qkan.fetchone()
        if not (daten is None):
            self.lf_anzahl_schaechte.setText(str(daten[0]))
        else:
            self.lf_anzahl_schaechte.setText("0")

        # count the reaches (haltungen) to be considered
        auswahl = ""
        if len(teilgebiete) != 0:
            auswahl = " WHERE haltungen.teilgebiet in ('{}')".format(
                "', '".join(teilgebiete)
            )

        sql = f"SELECT count(*) AS anzahl FROM haltungen {auswahl}"
        if not self.db_qkan.sql(sql, "QKan_ExportHE.application.countselection (3) "):
            return False
        daten = self.db_qkan.fetchone()
        if not (daten is None):
            self.lf_anzahl_haltungen.setText(str(daten[0]))
        else:
            self.lf_anzahl_haltungen.setText("0")
        return True

    def prepareDialog(self, db_qkan: DBConnection) -> bool:
        """Populate the selection lists of the dialog.

        Ensures every sub-district referenced by areas, manholes or reaches
        exists in table 'teilgebiete', then fills the list widget and
        preselects the entries chosen in a previous session.
        """
        self.db_qkan = db_qkan

        # make sure all sub-districts used in flaechen, schaechte and
        # haltungen are also contained in table "teilgebiete"
        sql = """INSERT INTO teilgebiete (tgnam)
                SELECT teilgebiet FROM flaechen
                WHERE teilgebiet IS NOT NULL AND
                teilgebiet NOT IN (SELECT tgnam FROM teilgebiete)
                GROUP BY teilgebiet"""
        if not self.db_qkan.sql(sql, "mu_porter.application_dialog.connectQKanDB (1) "):
            return False

        sql = """INSERT INTO teilgebiete (tgnam)
                SELECT teilgebiet FROM haltungen
                WHERE teilgebiet IS NOT NULL AND
                teilgebiet NOT IN (SELECT tgnam FROM teilgebiete)
                GROUP BY teilgebiet"""
        if not self.db_qkan.sql(sql, "mu_porter.application_dialog.connectQKanDB (2) "):
            return False

        sql = """INSERT INTO teilgebiete (tgnam)
                SELECT teilgebiet FROM schaechte
                WHERE teilgebiet IS NOT NULL AND
                teilgebiet NOT IN (SELECT tgnam FROM teilgebiete)
                GROUP BY teilgebiet"""
        if not self.db_qkan.sql(sql, "mu_porter.application_dialog.connectQKanDB (3) "):
            return False
        self.db_qkan.commit()

        # build the sub-district selection list
        # start from the selection stored in the configuration last time
        teilgebiete = QKan.config.selections.teilgebiete

        # query table teilgebiete for the distinct sub-district names
        sql = 'SELECT "tgnam" FROM "teilgebiete" GROUP BY "tgnam"'
        if not self.db_qkan.sql(sql, "mu_porter.application_dialog.connectQKanDB (4) "):
            return False
        daten = self.db_qkan.fetchall()
        self.lw_teilgebiete.clear()

        for ielem, elem in enumerate(daten):
            self.lw_teilgebiete.addItem(QListWidgetItem(elem[0]))
            try:
                # reselect the rows that were selected last time
                if elem[0] in teilgebiete:
                    self.lw_teilgebiete.setCurrentRow(ielem)
            except BaseException as err:
                fehlermeldung(
                    (
                        "mu_porter.application_dialog.connectQKanDB, "
                        f"Fehler in elem = {elem}\n"
                    ),
                    repr(err),
                )

        return True

    def connectHEDB(self, database_he: str) -> None:
        """Attach SQLite-Database with MU Data"""
        sql = f'ATTACH DATABASE "{database_he}" AS he'
        if self.db_qkan is None or not self.db_qkan.sql(
            sql, "MuPorter.run_export_to_mu Attach MU"
        ):
            return
IMPORT_CLASS, _ = uic.loadUiType(
os.path.join(os.path.dirname(__file__), "res", "mu_import_dialog_base.ui")
)
class ImportDialog(_Dialog, IMPORT_CLASS):  # type: ignore
    """Dialog configuring the import of a Mike+ database into QKan."""

    # widgets created by the generated UI class (IMPORT_CLASS)
    tf_database: QLineEdit
    tf_import: QLineEdit
    tf_project: QLineEdit
    pb_database: QPushButton
    pb_import: QPushButton
    pb_project: QPushButton
    pw_epsg: QgsProjectionSelectionWidget
    cb_haltungen: QCheckBox
    cb_schaechte: QCheckBox
    cb_auslaesse: QCheckBox
    cb_speicher: QCheckBox
    cb_pumpen: QCheckBox
    cb_wehre: QCheckBox
    cb_flaechen: QCheckBox
    cb_rohrprofile: QCheckBox
    cb_abflussparameter: QCheckBox
    cb_bodenklassen: QCheckBox
    cb_einleitdirekt: QCheckBox
    cb_aussengebiete: QCheckBox
    cb_einzugsgebiete: QCheckBox
    # cb_tezg_ef: QCheckBox
    cb_tezg_hf: QCheckBox
    # cb_tezg_tf: QCheckBox
    rb_update: QRadioButton
    rb_append: QRadioButton

    def __init__(
        self,
        default_dir: str,
        tr: Callable,
        parent: Optional[QWidget] = None,
    ):
        """Wire up the widget signals and restore the saved dialog state."""
        # noinspection PyCallByClass,PyArgumentList
        super().__init__(default_dir, tr, parent)
        self.default_dir = default_dir

        # Attach events
        self.pb_import.clicked.connect(self.select_import)
        self.pb_project.clicked.connect(self.select_project)
        self.pb_database.clicked.connect(self.select_database)
        # 'flaechen' and 'tezg_hf' are kept mutually exclusive
        self.cb_flaechen.clicked.connect(self.check_flaechen)
        self.cb_tezg_hf.clicked.connect(self.check_tezg_hf)
        # self.button_box.helpRequested.connect(self.click_help)

        # Init fields
        self.tf_database.setText(QKan.config.mu.database)
        self.tf_import.setText(QKan.config.mu.import_file)
        # noinspection PyCallByClass,PyArgumentList
        self.pw_epsg.setCrs(QgsCoordinateReferenceSystem.fromEpsgId(QKan.config.epsg))
        self.tf_project.setText(QKan.config.project.file)
        self.cb_haltungen.setChecked(QKan.config.check_import.haltungen)
        self.cb_schaechte.setChecked(QKan.config.check_import.schaechte)
        self.cb_auslaesse.setChecked(QKan.config.check_import.auslaesse)
        self.cb_speicher.setChecked(QKan.config.check_import.speicher)
        self.cb_pumpen.setChecked(QKan.config.check_import.pumpen)
        self.cb_wehre.setChecked(QKan.config.check_import.wehre)
        self.cb_flaechen.setChecked(QKan.config.check_import.flaechen)
        self.cb_tezg_hf.setChecked(QKan.config.check_import.tezg_hf)
        self.cb_rohrprofile.setChecked(QKan.config.check_import.rohrprofile)
        self.cb_abflussparameter.setChecked(QKan.config.check_import.abflussparameter)
        self.cb_bodenklassen.setChecked(QKan.config.check_import.bodenklassen)
        self.cb_einleitdirekt.setChecked(QKan.config.check_import.einleitdirekt)
        self.cb_aussengebiete.setChecked(QKan.config.check_import.aussengebiete)
        self.cb_einzugsgebiete.setChecked(QKan.config.check_import.einzugsgebiete)
        # self.cb_tezg_ef.setChecked(QKan.config.check_import.tezg_ef)
        # NOTE(review): duplicate of the cb_tezg_hf assignment above
        self.cb_tezg_hf.setChecked(QKan.config.check_import.tezg_hf)
        # self.cb_tezg_tf.setChecked(QKan.config.check_import.tezg_tf)
        self.rb_append.setChecked(QKan.config.check_import.append)
        self.rb_update.setChecked(QKan.config.check_import.update)

    def select_import(self) -> None:
        """Let the user pick the Mike+ file to import."""
        # noinspection PyArgumentList,PyCallByClass
        filename, _ = QFileDialog.getOpenFileName(
            self,
            self.tr("Zu importierende Mike+-Datei"),
            self.default_dir,
            "*.sqlite",
        )
        if filename:
            self.tf_import.setText(filename)
            self.default_dir = os.path.dirname(filename)

    def select_project(self) -> None:
        """Let the user choose the QGIS project file to create."""
        # noinspection PyArgumentList,PyCallByClass
        filename, _ = QFileDialog.getSaveFileName(
            self,
            self.tr("Zu erstellende Projektdatei"),
            self.default_dir,
            "*.qgs",
        )
        if filename:
            self.tf_project.setText(filename)
            self.default_dir = os.path.dirname(filename)

    def select_database(self) -> None:
        """Let the user choose the QKan SQLite database file to create."""
        # noinspection PyArgumentList,PyCallByClass
        filename, _ = QFileDialog.getSaveFileName(
            self,
            self.tr("Zu erstellende SQLite-Datei"),
            self.default_dir,
            "*.sqlite",
        )
        if filename:
            self.tf_database.setText(filename)
            self.default_dir = os.path.dirname(filename)

    def check_flaechen(self) -> None:
        """Uncheck 'tezg_hf' when 'flaechen' gets checked (mutually exclusive)."""
        # noinspection PyArgumentList,PyCallByClass
        if self.cb_flaechen.isChecked():
            QKan.config.check_import.tezg_hf = False
            self.cb_tezg_hf.setChecked(False)

    def check_tezg_hf(self) -> None:
        """Uncheck 'flaechen' when 'tezg_hf' gets checked (mutually exclusive)."""
        # noinspection PyArgumentList,PyCallByClass
        if self.cb_tezg_hf.isChecked():
            QKan.config.check_import.flaechen = False
            self.cb_flaechen.setChecked(False)
| hoettges/QKan | qkan/muporter/application_dialog.py | Python | gpl-3.0 | 17,035 |
from feature_format import featureFormat, targetFeatureSplit
import pickle
from sklearn.naive_bayes import GaussianNB
from sklearn.cross_validation import StratifiedShuffleSplit
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score
from sklearn.pipeline import Pipeline
from sklearn.feature_selection import SelectKBest
from sklearn.grid_search import GridSearchCV
import numpy as np
# loading the enron data dictionary
with open("final_project_dataset.pkl", "r") as data_file:
    data_dict = pickle.load(data_file)

# removing 'TOTAL' outlier
del data_dict['TOTAL']

# creating new features:
# ttl_pay_stock = total_payments + total_stock_value (0.0 when either is "NaN")
for name in data_dict:
    if data_dict[name]["total_payments"] != "NaN" and\
       data_dict[name]["total_stock_value"] != "NaN":
        data_dict[name]["ttl_pay_stock"] = \
            data_dict[name]["total_payments"] + \
            data_dict[name]["total_stock_value"]
    else:
        data_dict[name]["ttl_pay_stock"] = 0.0

# list containing all labels and features except email
# ('poi' must stay first: targetFeatureSplit treats column 0 as the label)
feat_list = ['poi',
             'salary',
             'to_messages',
             'deferral_payments',
             'total_payments',
             'exercised_stock_options',
             'bonus',
             'restricted_stock',
             'shared_receipt_with_poi',
             'restricted_stock_deferred',
             'total_stock_value',
             'expenses',
             'loan_advances',
             'from_messages',
             'other',
             'from_this_person_to_poi',
             'director_fees',
             'deferred_income',
             'long_term_incentive',
             'from_poi_to_this_person',
             'ttl_pay_stock']

# Selecting the best features using GridSearchCV
data = featureFormat(data_dict, feat_list, sort_keys = True)
labels, features = targetFeatureSplit(data)
# pipeline: univariate feature selection followed by Naive Bayes
pipe = Pipeline([('KBest', SelectKBest()),
                 ('clf', GaussianNB())])
K = [1,2,3,4,5]
param_grid = [{'KBest__k': K}]
gs = GridSearchCV(estimator=pipe, param_grid=param_grid, scoring='f1')
gs.fit(features, labels)
# refit SelectKBest with the best k to recover which features were chosen
kb = SelectKBest(k=gs.best_params_['KBest__k'])
kb.fit(features, labels)
# +1 maps feature indices back into feat_list, skipping the 'poi' label at 0
best_feat = list(kb.get_support(indices=True)+1)
print "Best f1 score:", gs.best_score_
print "No. of features used for the best f1 score:", gs.best_params_['KBest__k']
print "Names of features used:\n", [feat_list[i] for i in best_feat]

# features found by the selection above, hard-coded for the final evaluation
final_feat_list = ['poi',
                   'salary',
                   'exercised_stock_options',
                   'bonus',
                   'total_stock_value']

# Computing evaluation metrics using the selected features
final_data = featureFormat(data_dict, final_feat_list, sort_keys = True)
final_labels, final_features = targetFeatureSplit(final_data)
# 1000 stratified train/test splits; metrics are averaged over all of them
final_sss = StratifiedShuffleSplit(final_labels, 1000, random_state = 42)
accuracy = []
precision = []
recall = []
f1 = []
for train_indices, test_indices in final_sss:
    features_train = [final_features[i] for i in train_indices]
    features_test = [final_features[j] for j in test_indices]
    labels_train = [final_labels[i] for i in train_indices]
    labels_test = [final_labels[j] for j in test_indices]
    clf = GaussianNB()
    clf.fit(features_train, labels_train)
    pred = clf.predict(features_test)
    accuracy.append(accuracy_score(labels_test, pred))
    precision.append(precision_score(labels_test, pred))
    recall.append(recall_score(labels_test, pred))
    f1.append(f1_score(labels_test, pred))

print "Evaluation results of GaussianNB using best features:"
print "Mean Accuracy:", np.mean(accuracy)
print "Mean Precision:", np.mean(precision)
print "Mean Recall:", np.mean(recall)
print "Mean f1 score:", np.mean(f1)
| rjegankumar/enron_email_fraud_identification | nb_classifier.py | Python | mit | 3,699 |
#------------------------------------------------------------------------------
# Copyright (C) 2009 Richard Lincoln
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU Affero General Public License as published by the Free
# Software Foundation; version 2 dated June, 1991.
#
# This software is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANDABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#------------------------------------------------------------------------------
""" A tap changer that changes the voltage ratio impacting the voltage magnitude but not direclty the phase angle across the transformer..
"""
# <<< imports
# @generated
from ucte.wires.tap_changer import TapChanger
from google.appengine.ext import db
# >>> imports
class RatioTapChanger(TapChanger):
    """ A tap changer that changes the voltage ratio, impacting the voltage
    magnitude but not directly the phase angle across the transformer.
    """
    # <<< ratio_tap_changer.attributes
    # @generated
    # >>> ratio_tap_changer.attributes
    # <<< ratio_tap_changer.references
    # @generated
    # The transformer winding to which the ratio tap changer belongs.
    # NOTE(review): declared against db.Model rather than a concrete winding
    # class -- presumably to avoid a circular import between generated
    # modules; verify before tightening the reference type.
    transformer_winding = db.ReferenceProperty(db.Model,
        collection_name="_ratio_tap_changer_set") # ratio_tap_changer
    # >>> ratio_tap_changer.references
    # <<< ratio_tap_changer.operations
    # @generated
    # >>> ratio_tap_changer.operations
# EOF -------------------------------------------------------------------------
| rwl/openpowersystem | ucte/wires/ratio_tap_changer.py | Python | agpl-3.0 | 1,893 |
#########
# Copyright (c) 2015 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from setuptools import setup
# Pinned runtime dependencies; the cloudify-* pins track the same pre-release
# train (3.3a4 / 1.3a4) as this package's own version declared in setup().
install_requires = [
    'cloudify-plugins-common==3.3a4',
    'cloudify-rest-client==3.3a4',
    'cloudify-script-plugin==1.3a4',
    'cloudify-diamond-plugin==1.3a4',
    'click==4.0',
    'celery==3.1.17',
    'jinja2==2.7.2',
    'pywinrm==0.0.3',
    'fabric==1.8.3'
]
setup(
    name='cloudify-agent',
    version='3.3a4',
    author='Gigaspaces',
    author_email='cloudify@gigaspaces.com',
    packages=[
        'worker_installer',
        'windows_agent_installer',
        'plugin_installer',
        'windows_plugin_installer',
        'cloudify_agent',
        'cloudify_agent.api',
        'cloudify_agent.api.pm',
        'cloudify_agent.api.plugins',
        'cloudify_agent.installer',
        'cloudify_agent.installer.config',
        'cloudify_agent.installer.runners',
        'cloudify_agent.shell',
        'cloudify_agent.shell.commands'
    ],
    # Non-Python files that must ship inside the cloudify_agent package:
    # service-manager/crontab templates plus the bundled nssm.exe used on
    # Windows.
    package_data={
        'cloudify_agent': [
            'resources/disable-requiretty.sh',
            'resources/crontab/disable.sh.template',
            'resources/crontab/enable.sh.template',
            'resources/respawn.sh.template',
            'resources/pm/initd/initd.conf.template',
            'resources/pm/initd/initd.template',
            'resources/pm/detach/detach.conf.template',
            'resources/pm/detach/detach.template',
            'resources/pm/nssm/nssm.exe',
            'resources/pm/nssm/nssm.conf.template',
        ]
    },
    description='Cloudify Agent Implementation (Celery based)',
    install_requires=install_requires,
    license='LICENSE',
    # Installs the `cfy-agent` console command.
    entry_points={
        'console_scripts': [
            'cfy-agent = cloudify_agent.shell.main:main',
        ]
    }
)
| geokala/cloudify-agent | setup.py | Python | apache-2.0 | 2,354 |
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
# Copyright (c) 2011 CCI Connect asbl (http://www.cciconnect.be) All Rights Reserved.
# Philmer <philmer@cciconnect.be>
{
    'name': 'Accounting Consistency Tests',
    'version': '1.0',
    'category': 'Accounting/Accounting',
    'description': """
Asserts on accounting.
======================
With this module you can manually check consistencies and inconsistencies of accounting module from menu Reporting/Accounting/Accounting Tests.
You can write a query in order to create Consistency Test and you will get the result of the test
in PDF format which can be accessed by Menu Reporting -> Accounting Tests, then select the test
and print the report from Print button in header area.
""",
    'depends': ['account'],
    # Loaded in declaration order: access rules first, then views, report
    # actions, seed data and the report template.
    'data': [
        'security/ir.model.access.csv',
        'views/accounting_assert_test_views.xml',
        'report/accounting_assert_test_reports.xml',
        'data/accounting_assert_test_data.xml',
        'report/report_account_test_templates.xml',
    ],
    'installable': True,
    'license': 'LGPL-3',
}
| jeremiahyan/odoo | addons/account_test/__manifest__.py | Python | gpl-3.0 | 1,166 |
#!/usr/bin/env python
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# TODO(slightlyoff): move to using shared version of this script.
'''This script makes it easy to combine libs and object files to a new lib,
optionally removing some of the object files in the input libs by regular
expression matching.
For usage information, run the script with a --help argument.
'''
import optparse
import os
import re
import subprocess
import sys
def Shell(*args):
  '''Runs the program and args in args, returns the output from the program.

  stderr is interleaved into stdout exactly as the tool printed it.
  Raises RuntimeError if the program exits with a non-zero status.
  '''
  proc = subprocess.Popen(args,
                          stdin=None,
                          stdout=subprocess.PIPE,
                          stderr=subprocess.STDOUT)
  lines = proc.stdout.readlines()
  proc.wait()
  if proc.returncode != 0:
    raise RuntimeError('%s exited with status %d' % (args[0], proc.returncode))
  return lines
def CollectRemovals(remove_re, inputs):
  '''Returns a list of all object files in inputs that match remove_re.'''
  matched = []
  for lib in inputs:
    # Ask lib.exe which object files this library contains.
    for raw_line in Shell('lib.exe', '/list', lib):
      entry = raw_line.rstrip()
      if remove_re.search(entry):
        matched.append(entry)
  return matched
def CombineLibraries(output, remove_re, inputs):
  '''Combines all the libraries and objects in inputs, while removing any
  object files that match remove_re.

  Args:
    output: path of the library for lib.exe to write.
    remove_re: compiled regex matched against contained object-file names,
        or None/falsy to remove nothing.
    inputs: list of .lib/.obj paths passed straight through to lib.exe.
  '''
  removals = []
  if remove_re:
    removals = CollectRemovals(remove_re, inputs)
  if len(removals) > 0:
    print 'Removals: ', removals
  # Build one lib.exe invocation: /remove flags first, then all inputs.
  args = ['lib.exe', '/out:%s' % output]
  args += ['/remove:%s' % obj for obj in removals]
  args += inputs
  Shell(*args)
# Usage text for the optparse parser built in GetOptionParser();
# optparse substitutes %prog with the script name.
USAGE = '''usage: %prog [options] <lib or obj>+
Combines input libraries or objects into an output library, while removing
any object file (in the input libraries) that matches a given regular
expression.
'''
def GetOptionParser():
  '''Builds the command-line parser understood by Main().'''
  opt_parser = optparse.OptionParser(USAGE)
  opt_parser.add_option('-o', '--output', dest='output',
                        help='write to this output library')
  opt_parser.add_option('-r', '--remove', dest='remove',
                        help='object files matching this regexp will be '
                             'removed from the output library')
  return opt_parser
def Main():
  '''Main function for this script'''
  option_parser = GetOptionParser()
  options, inputs = option_parser.parse_args()
  # parser.error() prints the message and exits the process.
  if not options.output:
    option_parser.error('You must specify an output file')
  if not inputs:
    option_parser.error('You must specify at least one object or library')
  out_lib = options.output.strip()
  remove_pattern = options.remove.strip() if options.remove else options.remove
  remove_re = None
  if remove_pattern:
    try:
      remove_re = re.compile(options.remove)
    except:
      option_parser.error('%s is not a valid regular expression' %
                          options.remove)
  if sys.platform not in ('win32', 'cygwin'):
    option_parser.error('this script only works on Windows for now')
  # If this is set, we can't capture lib.exe's output.
  if 'VS_UNICODE_OUTPUT' in os.environ:
    del os.environ['VS_UNICODE_OUTPUT']
  CombineLibraries(out_lib, remove_re, inputs)
  return 0


if __name__ == '__main__':
  sys.exit(Main())
| kuscsik/chromiumembedded | tools/combine_libs.py | Python | bsd-3-clause | 3,327 |
# -*- coding: utf-8 -*-
# Copyright © 2015-2017 AvanzOSC
# License AGPL-3 - See http://www.gnu.org/licenses/agpl-3.0.html
from . import stock_picking_wave
| oihane/temp-addons | stock_picking_wave_package_label/models/__init__.py | Python | agpl-3.0 | 157 |
"""
Instructor API endpoint urls.
"""
from django.conf.urls import patterns, url
# URL-name -> view mapping for the instructor dashboard API. All views live
# in instructor.views.api and are referenced as dotted strings (pre-Django-1.8
# `patterns()` style), so each entry is addressable via reverse(name).
urlpatterns = patterns(
    '',
    url(r'^students_update_enrollment$',
        'instructor.views.api.students_update_enrollment', name="students_update_enrollment"),
    url(r'^register_and_enroll_students$',
        'instructor.views.api.register_and_enroll_students', name="register_and_enroll_students"),
    url(r'^list_course_role_members$',
        'instructor.views.api.list_course_role_members', name="list_course_role_members"),
    url(r'^modify_access$',
        'instructor.views.api.modify_access', name="modify_access"),
    url(r'^bulk_beta_modify_access$',
        'instructor.views.api.bulk_beta_modify_access', name="bulk_beta_modify_access"),
    url(r'^get_grading_config$',
        'instructor.views.api.get_grading_config', name="get_grading_config"),
    url(r'^get_students_features(?P<csv>/csv)?$',
        'instructor.views.api.get_students_features', name="get_students_features"),
    url(r'^get_user_invoice_preference$',
        'instructor.views.api.get_user_invoice_preference', name="get_user_invoice_preference"),
    url(r'^get_sale_records(?P<csv>/csv)?$',
        'instructor.views.api.get_sale_records', name="get_sale_records"),
    url(r'^get_sale_order_records$',
        'instructor.views.api.get_sale_order_records', name="get_sale_order_records"),
    url(r'^sale_validation_url$',
        'instructor.views.api.sale_validation', name="sale_validation"),
    url(r'^get_anon_ids$',
        'instructor.views.api.get_anon_ids', name="get_anon_ids"),
    url(r'^get_distribution$',
        'instructor.views.api.get_distribution', name="get_distribution"),
    url(r'^get_student_progress_url$',
        'instructor.views.api.get_student_progress_url', name="get_student_progress_url"),
    url(r'^reset_student_attempts$',
        'instructor.views.api.reset_student_attempts', name="reset_student_attempts"),
    url(  # pylint: disable=bad-continuation
        r'^rescore_problem$',
        'instructor.views.api.rescore_problem',
        name="rescore_problem"
    ), url(
        r'^reset_student_attempts_for_entrance_exam$',
        'instructor.views.api.reset_student_attempts_for_entrance_exam',
        name="reset_student_attempts_for_entrance_exam"
    ), url(
        r'^rescore_entrance_exam$',
        'instructor.views.api.rescore_entrance_exam',
        name="rescore_entrance_exam"
    ), url(
        r'^list_entrance_exam_instructor_tasks',
        'instructor.views.api.list_entrance_exam_instructor_tasks',
        name="list_entrance_exam_instructor_tasks"
    ), url(
        r'^mark_student_can_skip_entrance_exam',
        'instructor.views.api.mark_student_can_skip_entrance_exam',
        name="mark_student_can_skip_entrance_exam"
    ),
    url(r'^list_instructor_tasks$',
        'instructor.views.api.list_instructor_tasks', name="list_instructor_tasks"),
    url(r'^list_background_email_tasks$',
        'instructor.views.api.list_background_email_tasks', name="list_background_email_tasks"),
    url(r'^list_email_content$',
        'instructor.views.api.list_email_content', name="list_email_content"),
    url(r'^list_forum_members$',
        'instructor.views.api.list_forum_members', name="list_forum_members"),
    url(r'^update_forum_role_membership$',
        'instructor.views.api.update_forum_role_membership', name="update_forum_role_membership"),
    url(r'^proxy_legacy_analytics$',
        'instructor.views.api.proxy_legacy_analytics', name="proxy_legacy_analytics"),
    url(r'^send_email$',
        'instructor.views.api.send_email', name="send_email"),
    url(r'^change_due_date$', 'instructor.views.api.change_due_date',
        name='change_due_date'),
    url(r'^reset_due_date$', 'instructor.views.api.reset_due_date',
        name='reset_due_date'),
    url(r'^show_unit_extensions$', 'instructor.views.api.show_unit_extensions',
        name='show_unit_extensions'),
    url(r'^show_student_extensions$', 'instructor.views.api.show_student_extensions',
        name='show_student_extensions'),
    # Grade downloads...
    url(r'^list_report_downloads$',
        'instructor.views.api.list_report_downloads', name="list_report_downloads"),
    url(r'calculate_grades_csv$',
        'instructor.views.api.calculate_grades_csv', name="calculate_grades_csv"),
    # Registration Codes..
    url(r'get_registration_codes$',
        'instructor.views.api.get_registration_codes', name="get_registration_codes"),
    url(r'generate_registration_codes$',
        'instructor.views.api.generate_registration_codes', name="generate_registration_codes"),
    url(r'active_registration_codes$',
        'instructor.views.api.active_registration_codes', name="active_registration_codes"),
    url(r'spent_registration_codes$',
        'instructor.views.api.spent_registration_codes', name="spent_registration_codes"),
    # Coupon Codes..
    url(r'get_coupon_codes',
        'instructor.views.api.get_coupon_codes', name="get_coupon_codes"),
    # spoc gradebook
    url(r'^gradebook$',
        'instructor.views.api.spoc_gradebook', name='spoc_gradebook'),
    # Cohort management
    url(r'add_users_to_cohorts$',
        'instructor.views.api.add_users_to_cohorts', name="add_users_to_cohorts"),
    # Certificates
    url(r'^generate_example_certificates$',
        'instructor.views.api.generate_example_certificates',
        name='generate_example_certificates'),
    url(r'^enable_certificate_generation$',
        'instructor.views.api.enable_certificate_generation',
        name='enable_certificate_generation'),
)
| beni55/edx-platform | lms/djangoapps/instructor/views/api_urls.py | Python | agpl-3.0 | 5,610 |
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
import sys
import traceback
from StringIO import StringIO
import re
import datetime
from urllib import urlencode
from collections import defaultdict
from django import http
from django.contrib.auth.decorators import login_required
from django.contrib.auth.views import redirect_to_login
from django.db import transaction
from django.core.urlresolvers import reverse
from django.conf import settings
from django.shortcuts import redirect, get_object_or_404
from django.contrib import messages
from django.db.models import Q
from django.template import Context, loader
from django.core.mail import get_connection, EmailMessage
from django.core.validators import validate_email
from django.core.exceptions import ValidationError
from django.shortcuts import render
from django.views.decorators.http import require_POST
from django.contrib.sites.models import RequestSite
from django.core.cache import cache
from django.db.models import Min, Count
import vobject
from .models import Entry, Hours, BlacklistedUser, FollowingUser, UserKey
from pto.apps.users.models import UserProfile, User
from pto.apps.users.utils import ldap_lookup
from .utils import parse_datetime, DatetimeParseError
from .utils.countrytotals import UnrecognizedCountryError, get_country_totals
import utils
import forms
from .decorators import json_view
from .csv_export import UnicodeWriter as CSVUnicodeWriter
def valid_email(value):
    """Return True if `value` passes Django's email validation, else False."""
    try:
        validate_email(value)
    except ValidationError:
        return False
    return True
def handler500(request):
    """Server-error view; embeds the current traceback when configured."""
    context = {'_report_traceback': False}
    if settings.TRACEBACKS_ON_500:
        exc_type, exc_value, _ = sys.exc_info()
        buf = StringIO()
        traceback.print_exc(file=buf)
        context.update({
            'err_type': exc_type,
            'err_value': exc_value,
            'err_traceback': buf.getvalue(),
            '_report_traceback': True,
        })
    return render(request, '500.html', context, status=500)
def home(request): # aka dashboard
    """Dashboard view.

    Not decorated with @login_required because the mobile redirect below
    must happen before the authentication check.
    """
    data = {}
    data['mobile'] = request.MOBILE # thank you django-mobility (see settings)
    if data['mobile']:
        # unless an explicit cookie is set, redirect to /mobile/
        if not request.COOKIES.get('no-mobile', False):
            return redirect(reverse('mobile.home'))
    # now do what the login_required would usually do
    if not request.user.is_authenticated():
        path = request.get_full_path()
        return redirect_to_login(path)
    data['page_title'] = "Dashboard"
    profile = request.user.get_profile()
    # Week starts on Monday for these countries, Sunday elsewhere.
    if profile and profile.country in ('GB', 'FR', 'DE'):
        first_day = 1 # 1=Monday
    else:
        first_day = 0 # default to 0=Sunday
    data['first_day'] = first_day
    if 'all-rightnow' in request.GET:
        MAX_RIGHT_NOWS = 9999
    else:
        MAX_RIGHT_NOWS = 20
    ## Commented out whilst we decide whether to keep it at all
    #right_nows, right_now_users = get_right_nows()
    #data['right_nows'] = right_nows
    #data['right_now_users'] = right_now_users
    #if len(right_now_users) > MAX_RIGHT_NOWS:
    #    data['right_now_too_many'] = (len(data['right_now_users'])
    #                                  - MAX_RIGHT_NOWS)
    #    data['right_now_users'] = data['right_now_users'][:MAX_RIGHT_NOWS]
    #else:
    #    data['right_now_too_many'] = None
    data.update(get_taken_info(request.user))
    data['calendar_url'] = _get_user_calendar_url(request)
    # One-shot "recently created" flash set by the hours() view; consume it.
    cache_key = 'recently_created_%s' % request.user.pk
    recently_created = cache.get(cache_key)
    if recently_created:
        data['recently_created'] = recently_created
        cache.delete(cache_key)
    return render(request, 'dates/home.html', data)
def _get_user_calendar_url(request):
    """Absolute URL for the current user's private vCal feed."""
    user_key, __ = UserKey.objects.get_or_create(user=request.user)
    scheme = 'https' if request.is_secure() else 'http'
    domain = RequestSite(request).domain
    path = reverse('dates.calendar_vcal', args=(user_key.key,))
    return '%s://%s%s' % (scheme, domain, path)
def get_taken_info(user):
    """Template context about vacation taken this calendar year.

    Returns a dict with 'taken' (friendly-formatted hours total) and, when
    the user's profile has a country, 'country' plus either 'country_totals'
    or 'unrecognized_country'.
    """
    data = {}
    profile = user.get_profile()
    if profile.country:
        data['country'] = profile.country
        try:
            data['country_totals'] = get_country_totals(profile.country)
        except UnrecognizedCountryError:
            data['unrecognized_country'] = True
    # Sum all entry hours within the current calendar year.
    today = datetime.date.today()
    start_date = datetime.date(today.year, 1, 1)
    last_date = datetime.date(today.year + 1, 1, 1)
    from django.db.models import Sum
    qs = Entry.objects.filter(
        user=user,
        start__gte=start_date,
        end__lt=last_date
    )
    agg = qs.aggregate(Sum('total_hours'))
    total_hours = agg['total_hours__sum']
    # aggregate() yields None when there are no matching rows.
    if total_hours is None:
        total_hours = 0
    data['taken'] = _friendly_format_hours(total_hours)
    return data
def _friendly_format_hours(total_hours):
days = 1.0 * total_hours / settings.WORK_DAY
hours = total_hours % settings.WORK_DAY
if not total_hours:
return '0 days'
elif total_hours < settings.WORK_DAY:
return '%s hours' % total_hours
elif total_hours == settings.WORK_DAY:
return '1 day'
else:
if not hours:
return '%d days' % days
else:
return '%s days' % days
def get_right_nows():
    """Entries spanning today, grouped by user.

    Returns (right_nows, right_now_users): a dict mapping user to a list of
    (days_left, entry) tuples, and the users in name order.
    """
    right_now_users = []
    right_nows = defaultdict(list)
    _today = datetime.date.today()
    for entry in (Entry.objects
                  .filter(start__lte=_today,
                          end__gte=_today,
                          total_hours__gte=0)
                  .order_by('user__first_name',
                            'user__last_name',
                            'user__username')):
        if entry.user not in right_now_users:
            right_now_users.append(entry.user)
        # Number of days left of the absence, counting today.
        left = (entry.end - _today).days + 1
        right_nows[entry.user].append((left, entry))
    return right_nows, right_now_users
def get_upcomings(max_days=14):
    """Entries starting within the next `max_days` days, grouped by user.

    Returns (upcoming, users): a dict mapping user to (days_until, entry)
    tuples and the users in name order.
    """
    upcoming = defaultdict(list)
    users = []
    today = datetime.date.today()
    horizon = today + datetime.timedelta(days=max_days)
    queryset = (Entry.objects
                .filter(start__gt=today,
                        start__lt=horizon,
                        total_hours__gte=0)
                .order_by('user__first_name',
                          'user__last_name',
                          'user__username'))
    for entry in queryset:
        if entry.user not in users:
            users.append(entry.user)
        upcoming[entry.user].append(((entry.start - today).days + 1, entry))
    return upcoming, users
def make_entry_title(entry, this_user, include_details=True):
    """Build the one-line calendar title for `entry`.

    Prefixes the owner's name when the entry is not `this_user`'s own,
    summarizes the length (days / hours / birthday), and -- only when
    `include_details` is true -- appends the (possibly truncated) details.
    """
    if entry.user != this_user:
        if entry.user.first_name:
            title = '%s %s - ' % (entry.user.first_name,
                                  entry.user.last_name)
        else:
            title = '%s - ' % entry.user.username
    else:
        title = ''
    # Count full and half days from the per-date Hours rows.
    # NOTE(review): compares against literal 8/4 rather than
    # settings.WORK_DAY -- confirm these are meant to stay in sync.
    days = 0
    for hour in Hours.objects.filter(entry=entry):
        if hour.hours == 8:
            days += 1
        elif hour.hours == 4:
            days += 0.5
    if days > 1:
        if int(days) == days:
            title += '%d days' % days
        else:
            title += '%s days' % days
        if Hours.objects.filter(entry=entry, birthday=True).exists():
            title += ' (includes birthday)'
    elif (days == 1 and entry.total_hours == 0 and
          Hours.objects.filter(entry=entry, birthday=True)):
        title += 'Birthday!'
    elif days == 1 and entry.total_hours == 8:
        title += '1 day'
    else:
        title += '%s hours' % entry.total_hours
    if entry.details:
        if days == 1:
            max_length = 20
        else:
            max_length = 40
        if include_details:
            title += ', '
            # Bug fix: the details text itself is now guarded by
            # include_details as well. Previously only the ', ' separator
            # was, so callers passing include_details=False (to hide other
            # users' details, e.g. calendar_events) still leaked them.
            if len(entry.details) > max_length:
                title += entry.details[:max_length] + '...'
            else:
                title += entry.details
    return title
@json_view
def calendar_events(request):
    """JSON feed of calendar events for the current user and observed users.

    Expects 'start' and 'end' GET parameters (parseable datetimes) and
    returns {'events': [...], 'colors': [...]} for the calendar widget.
    """
    if not request.user.is_authenticated():
        return http.HttpResponseForbidden('Must be logged in')
    if not request.GET.get('start'):
        return http.HttpResponseBadRequest('Argument start missing')
    if not request.GET.get('end'):
        return http.HttpResponseBadRequest('Argument end missing')
    try:
        start = parse_datetime(request.GET['start'])
    except DatetimeParseError:
        return http.HttpResponseBadRequest('Invalid start')
    try:
        end = parse_datetime(request.GET['end'])
    except DatetimeParseError:
        return http.HttpResponseBadRequest('Invalid end')
    entries = []
    # Palette for observed users; the requesting user always gets #3366CC.
    # NOTE(review): COLORS[i] raises IndexError past 15 observed users --
    # confirm whether that bound is enforced elsewhere.
    COLORS = ("#EAA228", "#c5b47f", "#579575", "#839557", "#958c12",
              "#953579", "#4b5de4", "#d8b83f", "#ff5800", "#0085cc",
              "#c747a3", "#cddf54", "#FBD178", "#26B4E3", "#bd70c7")
    user_ids = [request.user.pk]
    colors = {}
    colors_fullnames = []
    colors[request.user.pk] = None
    colors_fullnames.append((request.user.pk, 'Me myself and I', '#3366CC'))
    for i, user_ in enumerate(get_observed_users(request.user, max_depth=2)):
        user_ids.append(user_.pk)
        colors[user_.pk] = COLORS[i]
        full_name = user_.get_full_name()
        if not full_name:
            full_name = user_.username
        colors_fullnames.append((
            user_.pk,
            full_name,
            colors[user_.pk]
        ))
    # Memoize manager lookups; details are visible to superusers, the entry
    # owner, and the owner's direct manager.
    _managers = {}
    def can_see_details(user):
        if request.user.is_superuser:
            return True
        if request.user.pk == user.pk:
            return True
        if user.pk not in _managers:
            _profile = user.get_profile()
            _manager = None
            if _profile and _profile.manager_user:
                _manager = _profile.manager_user.pk
            _managers[user.pk] = _manager
        return _managers[user.pk] == request.user.pk
    visible_user_ids = set()
    for entry in (Entry.objects
                  .filter(user__in=user_ids,
                          total_hours__gte=0,
                          total_hours__isnull=False)
                  .select_related('user')
                  .exclude(Q(end__lt=start) | Q(start__gt=end))):
        visible_user_ids.add(entry.user.pk)
        entries.append({
            'id': entry.pk,
            'title': make_entry_title(entry, request.user,
                                      include_details=can_see_details(entry.user)),
            'start': entry.start.strftime('%Y-%m-%d'),
            'end': entry.end.strftime('%Y-%m-%d'),
            'color': colors[entry.user.pk],
            'mine': entry.user.pk == request.user.pk,
        })
    # Only legend entries for users who actually have visible events.
    colors = [dict(name=x, color=y) for (pk, x, y) in colors_fullnames
              if pk in visible_user_ids]
    return {'events': entries, 'colors': colors}
def get_minions(user, depth=1, max_depth=2):
    """Users managed by `user`, recursing up to `max_depth` levels deep."""
    result = []
    reports = (UserProfile.objects.filter(manager_user=user)
               .select_related('manager_user')
               .order_by('manager_user'))
    for report in reports:
        result.append(report.user)
        if depth < max_depth:
            result.extend(get_minions(report.user,
                                      depth=depth + 1,
                                      max_depth=max_depth))
    return result
def get_siblings(user):
    """Other users who report to the same manager as `user`."""
    manager = user.get_profile().manager_user
    if not manager:
        return []
    siblings = (UserProfile.objects
                .filter(manager_user=manager)
                .exclude(pk=user.pk)
                .select_related('user'))
    return [sibling.user for sibling in siblings]
def get_followed_users(user):
    """Users that `user` explicitly follows."""
    return [following.following
            for following in (FollowingUser.objects
                              .filter(follower=user)
                              .select_related('following'))]
def get_observed_users(this_user, depth=1, max_depth=2):
    """Users whose entries `this_user` should see on the calendar.

    Combines (in order): minions up to `max_depth`, siblings, the user's
    manager -- all subject to the blacklist -- plus explicitly followed
    users, which bypass the blacklist. Order and duplicates-free-ness of
    the returned list are preserved.
    """
    observed = []

    def blacklisted(candidate):
        return (BlacklistedUser.objects
                .filter(observer=this_user, observable=candidate)
                .exists())

    for candidate in get_minions(this_user, depth=depth, max_depth=max_depth):
        if candidate not in observed and not blacklisted(candidate):
            observed.append(candidate)
    for candidate in get_siblings(this_user):
        if candidate not in observed and not blacklisted(candidate):
            observed.append(candidate)
    manager = this_user.get_profile().manager_user
    if manager and manager not in observed and not blacklisted(manager):
        observed.append(manager)
    for candidate in get_followed_users(this_user):
        if candidate not in observed:
            observed.append(candidate)
    return observed
@transaction.commit_on_success
@login_required
def notify(request):
    """First step of filing vacation: pick dates and who to notify.

    On a valid POST creates a (hours-less) Entry, stashes the extra notify
    addresses in the session and redirects to the hours() view. On GET,
    pre-fills start/end from the query string when parseable.
    """
    data = {}
    data['page_title'] = "Notify about new vacation"
    if request.method == 'POST':
        form = forms.AddForm(request.user, data=request.POST)
        if form.is_valid():
            start = form.cleaned_data['start']
            end = form.cleaned_data['end']
            details = form.cleaned_data['details'].strip()
            notify = form.cleaned_data['notify']
            entry = Entry.objects.create(
                user=request.user,
                start=start,
                end=end,
                details=details,
            )
            # Drop any previous half-finished entries for this user.
            clean_unfinished_entries(entry)
            messages.info(request, 'Entry added, now specify hours')
            url = reverse('dates.hours', args=[entry.pk])
            # hours() picks these extra recipients up from the session.
            request.session['notify_extra'] = notify
            return redirect(url)
    else:
        initial = {}
        # Invalid date strings in the query string are silently ignored.
        if request.GET.get('start'):
            try:
                initial['start'] = parse_datetime(request.GET['start'])
            except DatetimeParseError:
                pass
        if request.GET.get('end'):
            try:
                initial['end'] = parse_datetime(request.GET['end'])
            except DatetimeParseError:
                pass
        form = forms.AddForm(request.user, initial=initial)
    profile = request.user.get_profile()
    manager = None
    if profile and profile.manager:
        manager = ldap_lookup.fetch_user_details(profile.manager)
    data['hr_managers'] = [x.user for x in
                           (UserProfile.objects
                            .filter(hr_manager=True)
                            .select_related('user'))]
    data['manager'] = manager
    data['all_managers'] = [x for x in data['hr_managers'] if x]
    if manager:
        data['all_managers'].append(manager)
    data['form'] = form
    return render(request, 'dates/notify.html', data)
@transaction.commit_on_success
@login_required
def cancel_notify(request):
    """Abandon the notify flow: drop half-finished entries, go home."""
    (Entry.objects
     .filter(user=request.user, total_hours__isnull=True)
     .delete())
    return redirect(reverse('dates.home'))
def clean_unfinished_entries(good_entry):
    """Delete the user's other entries that never got hours filled in."""
    stale_entries = (Entry.objects
                     .filter(user=good_entry.user,
                             total_hours__isnull=True)
                     .exclude(pk=good_entry.pk))
    # Delete one by one (not .delete() on the queryset) so per-object
    # delete() behavior still runs, as in the original.
    for stale in stale_entries:
        stale.delete()
@transaction.commit_on_success
@login_required
def hours(request, pk):
    """Second step of filing vacation: specify hours per weekday.

    GET renders the per-day hours form (pre-filled from existing Hours rows,
    defaulting to settings.WORK_DAY). A valid POST saves the hours, sends
    the email notification and redirects to the emails_sent() page. Only
    the entry's owner or staff/superusers may access an entry.
    """
    data = {}
    entry = get_object_or_404(Entry, pk=pk)
    if entry.user != request.user:
        if not (request.user.is_staff or request.user.is_superuser):
            return http.HttpResponseForbidden('insufficient access')
    if request.method == 'POST':
        form = forms.HoursForm(entry, data=request.POST)
        if form.is_valid():
            total_hours, is_edit = save_entry_hours(entry, form)
            # Extra recipients were stashed in the session by notify(),
            # semicolon-separated.
            extra_users = request.session.get('notify_extra', '')
            extra_users = [x.strip() for x
                           in extra_users.split(';')
                           if x.strip()]
            success, email_addresses = send_email_notification(
                entry,
                extra_users,
                is_edit=is_edit,
            )
            assert success
            # Remember the entry title briefly so the dashboard can show a
            # "recently created" confirmation.
            recently_created = make_entry_title(entry, request.user)
            cache_key = 'recently_created_%s' % request.user.pk
            cache.set(cache_key, recently_created, 60)
            url = reverse('dates.emails_sent', args=[entry.pk])
            url += '?' + urlencode({'e': email_addresses}, True)
            return redirect(url)
    else:
        initial = {}
        for date in utils.get_weekday_dates(entry.start, entry.end):
            try:
                # Match on the user, not this entry, so a previous entry's
                # hours on the same date pre-fill the form.
                hours_ = Hours.objects.get(date=date, entry__user=entry.user)
                initial[date.strftime('d-%Y%m%d')] = hours_.hours
            except Hours.DoesNotExist:
                initial[date.strftime('d-%Y%m%d')] = settings.WORK_DAY
        form = forms.HoursForm(entry, initial=initial)
    data['form'] = form
    if entry.total_hours:
        data['total_hours'] = entry.total_hours
    else:
        # Hours not saved yet: estimate the day count for display.
        total_days = 0
        for date in utils.get_weekday_dates(entry.start, entry.end):
            try:
                hours_ = Hours.objects.get(entry=entry, date=date)
                # (removed a leftover `print hours_.hours` debug statement)
                if hours_.hours == settings.WORK_DAY:
                    total_days += 1
                elif hours_.hours:
                    total_days += .5
            except Hours.DoesNotExist:
                total_days += 1
        data['total_days'] = total_days
    notify = request.session.get('notify_extra', [])
    data['notify'] = notify
    return render(request, 'dates/hours.html', data)
def save_entry_hours(entry, form):
    """Persist the per-weekday hours of a valid HoursForm onto `entry`.

    For each weekday in the entry's range: if the user already had hours on
    that date (from an earlier entry), a negative "reverse" Entry+Hours pair
    is created to cancel them out, then a fresh Hours row is created for
    this entry. A form value of -1 means "birthday" (0 hours, flagged).

    Returns (total_hours, is_edit) where `is_edit` is True when the entry
    already had total_hours set (i.e. this was a re-save).
    """
    assert form.is_valid()
    total_hours = 0
    for date in utils.get_weekday_dates(entry.start, entry.end):
        hours = int(form.cleaned_data[date.strftime('d-%Y%m%d')])
        birthday = False
        if hours == -1:
            birthday = True
            hours = 0
        assert hours >= 0 and hours <= settings.WORK_DAY, hours
        try:
            hours_ = Hours.objects.get(entry__user=entry.user,
                                       date=date)
            if hours_.hours:
                # this nullifies the previous entry on this date
                reverse_entry = Entry.objects.create(
                    user=hours_.entry.user,
                    start=date,
                    end=date,
                    details=hours_.entry.details,
                    total_hours=hours_.hours * -1,
                )
                Hours.objects.create(
                    entry=reverse_entry,
                    hours=hours_.hours * -1,
                    date=date,
                )
                #hours_.hours = hours # nasty stuff!
                #hours_.birthday = birthday
                #hours_.save()
        except Hours.DoesNotExist:
            # nothing to credit
            pass
        Hours.objects.create(
            entry=entry,
            hours=hours,
            date=date,
            birthday=birthday,
        )
        total_hours += hours
    #raise NotImplementedError
    # total_hours being non-None is how we tell a finished entry apart from
    # a half-filed one, so its prior state doubles as the is_edit flag.
    is_edit = entry.total_hours is not None
    #if entry.total_hours is not None:
    entry.total_hours = total_hours
    entry.save()
    return total_hours, is_edit
def send_email_notification(entry, extra_users, is_edit=False):
    """Email the vacation notification for `entry`.

    Recipients: every HR manager with an email address, the owner's manager
    (looked up via LDAP) and any `extra_users` addresses, de-duplicated;
    falls back to settings.FALLBACK_TO_ADDRESS when the list ends up empty.
    The owner is cc'ed when they have an email address.

    Returns (success, email_addresses); `success` is the mail backend's
    send() result (truthy on success -- the caller asserts it).
    """
    email_addresses = []
    for profile in (UserProfile.objects
                    .filter(hr_manager=True,
                            user__email__isnull=False)):
        email_addresses.append(profile.user.email)
    profile = entry.user.get_profile()
    if profile and profile.manager:
        manager = ldap_lookup.fetch_user_details(profile.manager)
        if manager.get('mail'):
            email_addresses.append(manager['mail'])
    if extra_users:
        email_addresses.extend(extra_users)
    email_addresses = list(set(email_addresses)) # get rid of dupes
    if not email_addresses:
        email_addresses = [settings.FALLBACK_TO_ADDRESS]
    if is_edit:
        subject = settings.EMAIL_SUBJECT_EDIT
    else:
        subject = settings.EMAIL_SUBJECT
    subject = subject % dict(
        first_name=entry.user.first_name,
        last_name=entry.user.last_name,
        username=entry.user.username,
        email=entry.user.email,
    )
    # Fix: was `message = template = loader.get_template(...)`; the first
    # binding was dead code (message is rebuilt as an EmailMessage below).
    template = loader.get_template('dates/notification.txt')
    context = {
        'entry': entry,
        'user': entry.user,
        'is_edit': is_edit,
        'settings': settings,
        'start_date': entry.start.strftime(settings.DEFAULT_DATE_FORMAT),
    }
    body = template.render(Context(context)).strip()
    connection = get_connection()
    message = EmailMessage(
        subject=subject,
        body=body,
        from_email=entry.user.email,
        to=email_addresses,
        # cc the owner on their own notification (replaces the fragile
        # `x and [x] or None` idiom with an explicit conditional).
        cc=[entry.user.email] if entry.user.email else None,
        connection=connection
    )
    success = message.send()
    return success, email_addresses
@login_required
def emails_sent(request, pk):
    """Confirmation page after hours(): who was emailed about the entry.

    The 'e' query-string parameters carry the addresses; each is resolved
    against LDAP where possible so the template can show full user records.
    """
    data = {}
    entry = get_object_or_404(Entry, pk=pk)
    if entry.user != request.user:
        if not (request.user.is_staff or request.user.is_superuser):
            return http.HttpResponseForbidden('insufficient access')
    emails = request.REQUEST.getlist('e')
    if isinstance(emails, basestring):
        emails = [emails]
    data['emails'] = emails
    data['emailed_users'] = []
    for email in emails:
        # Prefer the full LDAP record; fall back to the raw address.
        record = ldap_lookup.fetch_user_details(email)
        if record:
            data['emailed_users'].append(record)
        else:
            data['emailed_users'].append(email)
    # The 'no_fw' cookie lets users opt out of the fireworks animation.
    show_fireworks = not request.COOKIES.get('no_fw', False)
    data['show_fireworks'] = show_fireworks
    return render(request, 'dates/emails_sent.html', data)
@login_required
def list_(request):
    """Filterable listing page; the table body is fetched via list_json()."""
    data = {}
    form = forms.ListFilterForm(date_format='%d %B %Y',
                                data=request.GET)
    if form.is_valid():
        data['filters'] = form.cleaned_data
    data['today'] = datetime.date.today()
    entries_base = Entry.objects.all()
    # Overall date bounds, used to initialize the filter widgets.
    try:
        data['first_date'] = entries_base.order_by('start')[0].start
        data['last_date'] = entries_base.order_by('-end')[0].end
        data['first_filed_date'] = (entries_base
                                    .order_by('add_date')[0]
                                    .add_date)
    except IndexError:
        # first run, not so important
        data['first_date'] = datetime.date(2000, 1, 1)
        data['last_date'] = datetime.date(2000, 1, 1)
        data['first_filed_date'] = datetime.date(2000, 1, 1)
    data['form'] = form
    data['query_string'] = request.META.get('QUERY_STRING')
    return render(request, 'dates/list.html', data)
@login_required
def list_csv(request):
    """CSV export of the entries matching the current list filters."""
    entries = get_entries_from_request(request.GET)
    response = http.HttpResponse(mimetype='text/csv')
    writer = CSVUnicodeWriter(response)
    writer.writerow((
        'ID',
        'EMAIL',
        'FIRST NAME',
        'LAST NAME',
        'ADDED',
        'START',
        'END',
        'DAYS',
        'DETAILS',
        'CITY',
        'COUNTRY',
        'START DATE',
    ))
    profiles = {} # basic memoization
    for entry in entries:
        if entry.user.pk not in profiles:
            profiles[entry.user.pk] = entry.user.get_profile()
        profile = profiles[entry.user.pk]
        writer.writerow((
            str(entry.pk),
            entry.user.email,
            entry.user.first_name,
            entry.user.last_name,
            entry.add_date.strftime('%Y-%m-%d'),
            entry.start.strftime('%Y-%m-%d'),
            entry.end.strftime('%Y-%m-%d'),
            str(entry.total_days),
            entry.details,
            profile.city,
            profile.country,
            (profile.start_date and
             profile.start_date.strftime('%Y-%m-%d') or ''),
        ))
    return response
@json_view
@login_required
def list_json(request):
    """dataTables-style JSON rows for the filtered entries list.

    Details are shown only to superusers, the entry owner and the owner's
    direct manager; negative-hours entries are the automatic "reverse"
    edits created in save_entry_hours().
    """
    entries = get_entries_from_request(request.GET)
    # Memoize manager lookups per user.
    # NOTE(review): duplicated from calendar_events() -- candidate for a
    # shared helper.
    _managers = {}
    def can_see_details(user):
        if request.user.is_superuser:
            return True
        if request.user.pk == user.pk:
            return True
        if user.pk not in _managers:
            _profile = user.get_profile()
            _manager = None
            if _profile and _profile.manager_user:
                _manager = _profile.manager_user.pk
            _managers[user.pk] = _manager
        return _managers[user.pk] == request.user.pk
    data = []
    profiles = {}
    for entry in entries:
        if entry.user.pk not in profiles:
            profiles[entry.user.pk] = entry.user.get_profile()
        profile = profiles[entry.user.pk]
        if entry.total_hours < 0:
            details = '*automatic edit*'
        elif can_see_details(entry.user):
            details = entry.details
        else:
            details = ''
        row = [entry.user.email,
               entry.user.first_name,
               entry.user.last_name,
               entry.add_date.strftime('%Y-%m-%d'),
               entry.total_days,
               entry.start.strftime('%Y-%m-%d'),
               entry.end.strftime('%Y-%m-%d'),
               profile.city,
               profile.country,
               details,
               #edit_link,
               #hours_link
               ]
        data.append(row)
    return {'aaData': data}
def get_entries_from_request(data):
    """Build an Entry queryset from querystring-style filter *data*.

    Returns an empty queryset when the filter form does not validate.
    Entries without a computed total (total_hours is NULL) are excluded.
    """
    form = forms.ListFilterForm(date_format='%d %B %Y', data=data)
    if not form.is_valid():
        return Entry.objects.none()
    fdata = form.cleaned_data
    entries = (Entry.objects.exclude(total_hours=None)
               .select_related('user'))
    if fdata.get('date_from'):
        entries = entries.filter(end__gte=fdata.get('date_from'))
    if fdata.get('date_to'):
        entries = entries.filter(start__lte=fdata.get('date_to'))
    if fdata.get('date_filed_from'):
        entries = entries.filter(
          add_date__gte=fdata.get('date_filed_from'))
    if fdata.get('date_filed_to'):
        # Add one day so the whole "filed to" day is included
        # (add_date is a datetime, the form field a date).
        entries = entries.filter(
          add_date__lt=fdata.get('date_filed_to') +
          datetime.timedelta(days=1))
    if fdata.get('name'):
        name = fdata['name'].strip()
        if valid_email(name):
            entries = entries.filter(user__email__iexact=name)
        elif name:
            # Bug fix: a whitespace-only name used to raise IndexError on
            # name.split()[0]; now an empty (stripped) name applies no filter.
            entries = entries.filter(
              Q(user__first_name__istartswith=name.split()[0]) |
              Q(user__last_name__iendswith=name.split()[-1])
            )
    if fdata.get('country'):
        country = fdata['country'].strip()
        _users = UserProfile.objects.filter(country=country).values('user_id')
        entries = entries.filter(user__id__in=_users)
    return entries
@login_required
def following(request):
    """List the users we observe (with a reason) and those we have
    explicitly stopped observing (blacklisted)."""
    data = {}
    observed = []
    _followed = get_followed_users(request.user)
    _minions_1 = get_minions(request.user, depth=1, max_depth=1)
    _minions_2 = get_minions(request.user, depth=1, max_depth=2)
    _manager = request.user.get_profile().manager_user
    # Python 2 cmp-style sorted(): alphabetical by first name,
    # case-insensitively.
    for user in sorted(get_observed_users(request.user, max_depth=2),
                       lambda x, y: cmp(x.first_name.lower(),
                                        y.first_name.lower())):
        # Order matters: the most specific relationship wins.
        if user in _minions_1:
            reason = 'direct manager of'
        elif user in _minions_2:
            reason = 'indirect manager of'
        elif user == _manager:
            reason = 'your manager'
        elif user in _followed:
            reason = 'curious'
        else:
            reason = 'teammate'
        observed.append((user, reason))
    not_observed = (BlacklistedUser.objects
                    .filter(observer=request.user)
                    .order_by('observable__first_name'))
    data['observed'] = observed
    data['not_observed'] = [x.observable for x in not_observed]
    return render(request, 'dates/following.html', data)
@json_view
@login_required
@transaction.commit_on_success
@require_POST
def save_following(request):
    """Start following a user resolved from a free-form ``search`` string.

    ``search`` may be "Real Name <email>", a numeric user id, or an LDAP
    search term; it must resolve to exactly one known local user.
    Returns the user's id, display name and the reason they are observed.
    """
    search = request.POST.get('search')
    if not search:
        return http.HttpResponseBadRequest('Missing search')
    if (-1 < search.rfind('<') < search.rfind('@') < search.rfind('>')):
        # Looks like "Real Name <email@example.com>" -- extract the email.
        try:
            email = re.findall('<([\w\.\-]+@[\w\.\-]+)>', search)[0]
            email = email.strip()
            validate_email(email)
        except (ValidationError, IndexError):
            email = None
    elif search.isdigit():
        try:
            email = User.objects.get(pk=search).email
        except User.DoesNotExist:
            email = None  # will deal with this later
    else:
        # Fall back to an LDAP autocomplete search, keeping only hits
        # that correspond to known local users.
        found = []
        result = ldap_lookup.search_users(search, 30, autocomplete=True)
        for each in result:
            try:
                found.append(User.objects.get(email__iexact=each['mail']))
            except User.DoesNotExist:
                pass
        if len(found) > 1:
            return http.HttpResponseBadRequest('More than one user found')
        elif not found:
            return http.HttpResponseBadRequest('No user found')
        else:
            email = found[0].email
    # if no email is found in the search, it's an error
    if not email:
        return http.HttpResponseBadRequest('No email found')
    try:
        user = User.objects.get(email__iexact=email)
    except User.DoesNotExist:
        return http.HttpResponseBadRequest('No user by that email found')
    FollowingUser.objects.get_or_create(
        follower=request.user,
        following=user,
    )
    # find a reason why we're following this user
    _minions_1 = get_minions(request.user, depth=1, max_depth=1)
    _minions_2 = get_minions(request.user, depth=1, max_depth=2)
    if user in _minions_1:
        reason = 'direct manager of'
    elif user in _minions_2:
        reason = 'indirect manager of'
    elif user == request.user.get_profile().manager_user:
        reason = 'your manager'
    elif (request.user.get_profile().manager_user
          and user in _minions_1):
        # NOTE(review): unreachable -- ``user in _minions_1`` is already
        # handled by the first condition above, so 'teammate' can never be
        # chosen here.  Intended condition unclear (cf. following());
        # confirm before changing.
        reason = 'teammate'
    else:
        reason = 'curious'
    name = ('%s %s' % (user.first_name,
                       user.last_name)).strip()
    if not name:
        name = user.username
    data = {
        'id': user.pk,
        'name': name,
        'reason': reason,
    }
    return data
@json_view
@login_required
@transaction.commit_on_success
@require_POST
def save_unfollowing(request):
    """Stop following a user; blacklist them if they would re-appear."""
    user_id = request.POST.get('remove')
    try:
        followed_user = User.objects.get(pk=user_id)
    except (ValueError, User.DoesNotExist):
        return http.HttpResponseBadRequest('Invalid user ID')
    existing = FollowingUser.objects.filter(follower=request.user,
                                            following=followed_user)
    for following_entry in existing:
        following_entry.delete()
    result = {}
    if followed_user in get_observed_users(request.user, max_depth=2):
        # Without a blacklist entry this user would automatically
        # re-appear in the observed list.
        BlacklistedUser.objects.get_or_create(
            observer=request.user,
            observable=followed_user
        )
        result['id'] = followed_user.pk
    full_name = ('%s %s' % (followed_user.first_name,
                            followed_user.last_name)).strip()
    result['name'] = full_name if full_name else followed_user.username
    return result
def calendar_vcal(request, key):
    """Serve an iCalendar feed of upcoming vacation for the user that owns
    *key* (a private UserKey) plus everyone that user observes.

    Not @login_required: the secret key in the URL is the authentication.
    """
    base_url = '%s://%s' % (request.is_secure() and 'https' or 'http',
                            RequestSite(request).domain)
    home_url = base_url + '/'
    cal = vobject.iCalendar()
    cal.add('x-wr-calname').value = 'Mozilla Vacation'
    try:
        user = UserKey.objects.get(key=key).user
    except UserKey.DoesNotExist:
        # instead of raising a HTTP error, respond a calendar
        # that urges the user to update the stale URL
        event = cal.add('vevent')
        event.add('summary').value = (
            "Calendar expired. Visit %s#calendarurl to get the "
            "new calendar URL" % home_url
        )
        today = datetime.date.today()
        event.add('dtstart').value = today
        event.add('dtend').value = today
        event.add('url').value = '%s#calendarurl' % (home_url,)
        event.add('description').value = ("The calendar you used has expired "
                                 "and is no longer associated with any user")
        return _render_vcalendar(cal, key)
    # always start on the first of this month
    today = datetime.date.today()
    #first = datetime.date(today.year, today.month, 1)
    user_ids = [user.pk]
    for user_ in get_observed_users(user, max_depth=2):
        user_ids.append(user_.pk)
    # Only current/future entries whose total has been computed and is
    # non-negative (negative totals are corrective edits).
    entries = (Entry.objects
               .filter(user__in=user_ids,
                       total_hours__gte=0,
                       total_hours__isnull=False,
                       end__gte=today)
               .select_related('user')
               )
    _list_base_url = base_url + reverse('dates.list')
    def make_list_url(entry):
        # Deep-link into the list view, pre-filtered to this entry.
        name = entry.user.get_full_name()
        if not name:
            name = entry.user.username
        data = {
            'date_from': entry.start.strftime('%d %B %Y'),
            'date_to': entry.end.strftime('%d %B %Y'),
            'name': name
        }
        return _list_base_url + '?' + urlencode(data, True)
    for entry in entries:
        event = cal.add('vevent')
        event.add('summary').value = '%s Vacation' % make_entry_title(entry, user,
                                                include_details=False)
        event.add('dtstart').value = entry.start
        event.add('dtend').value = entry.end
        #url = (home_url + '?cal_y=%d&cal_m=%d' %
        #       (slot.date.year, slot.date.month))
        event.add('url').value = make_list_url(entry)
        # Details are deliberately withheld from the (key-only) feed.
        #event.add('description').value = entry.details
        event.add('description').value = "Log in to see the details"
    return _render_vcalendar(cal, key)
def _render_vcalendar(cal, key):
    """Serialize *cal* into an .ics HTTP response named after *key*."""
    response = http.HttpResponse(cal.serialize(),
                                 mimetype='text/calendar;charset=utf-8'
                                 )
    response['Content-Disposition'] = (
        'inline; filename="%s.ics"' % (key,))
    return response
@login_required
def reset_calendar_url(request):
    """Invalidate all of the user's calendar keys, then return home."""
    stale_keys = UserKey.objects.filter(user=request.user)
    for user_key in stale_keys:
        # Delete one at a time so per-instance delete() logic runs.
        user_key.delete()
    return redirect(reverse('dates.home') + '#calendarurl')
@login_required
def about_calendar_url(request):
    """Show the page explaining the user's private calendar URL."""
    context = {'calendar_url': _get_user_calendar_url(request)}
    return render(request, 'dates/about-calendar-url.html', context)
@login_required
def duplicate_report(request):
    """Report vacation entries sharing a start date, flagging likely
    duplicate filings.  Admins may inspect any user; everyone else
    only sees their own entries.
    """
    data = {
        'filter_errors': None,
    }
    if request.method == 'POST':
        raise NotImplementedError
    form = forms.DuplicateReportFilterForm(date_format='%d %B %Y',
                                           data=request.GET)
    user = request.user
    filter_ = dict(user=user)
    if form.is_valid():
        if form.cleaned_data['user']:
            user = form.cleaned_data['user']
            # Only admins may look at someone else's entries.
            # (The original nested a redundant second
            # ``user != request.user`` check here.)
            if (user != request.user
                and not (request.user.is_superuser
                         or request.user.is_staff)):
                return http.HttpResponse(
                    "Only available for admins")
            filter_['user'] = user
        if form.cleaned_data['since']:
            filter_['start__gte'] = form.cleaned_data['since']
            data['since'] = form.cleaned_data['since']
    else:
        data['filter_errors'] = form.errors
    data['first_date'] = (Entry.objects
                          .filter(user=user)
                          .aggregate(Min('start'))
                          ['start__min'])
    start_dates = (Entry.objects
                   .filter(**filter_)
                   .values("start")
                   .annotate(Count("start"))
                   .order_by('-start__count'))
    groups = []
    for each in start_dates:
        # Ordered by count descending, so the first singleton ends the scan.
        if each['start__count'] <= 1:
            break
        entries = Entry.objects.filter(user=user, start=each['start'])
        details = [x.details for x in entries]
        # (Removed a dead initial ``note = "Probably not a mistake"`` --
        # both branches below always reassign it.)
        if len(set(details)) == 1:
            note = ("Probably a duplicate! "
                    "The details are the same for each entry")
        else:
            note = "Possibly not a duplicate since the details different"
        groups.append((entries, note))
    data['groups'] = groups
    if 'since' not in data:
        data['since'] = data['first_date']
    return render(request, 'dates/duplicate-report.html', data)
| mozilla/pto | pto/apps/dates/views.py | Python | mpl-2.0 | 37,380 |
import tempfile
import unittest
from brocclib.get_xml import (
get_taxid, get_lineage, NcbiEutils,
)
class NcbiEutilsTests(unittest.TestCase):
    """Integration tests for NcbiEutils.

    NOTE(review): these call the live NCBI E-utilities web service, so
    they need network access and depend on NCBI's current taxonomy data.
    """
    def test_get_taxon_id(self):
        db = NcbiEutils()
        self.assertEqual(db.get_taxon_id("HQ608011.1"), "531911")
        # The lookup is also cached on the instance.
        self.assertEqual(db.taxon_ids, {"HQ608011.1": "531911"})
    def test_get_lineage(self):
        db = NcbiEutils()
        observed_lineage = db.get_lineage("531911")
        # Full lineage for Pestalotiopsis maculiformans, as
        # (taxon name, rank) pairs from root to species.
        expected_lineage = [
            ('cellular organisms', 'no rank'),
            ('Eukaryota', 'superkingdom'),
            ('Opisthokonta', 'no rank'),
            ('Fungi', 'kingdom'),
            ('Dikarya', 'subkingdom'),
            ('Ascomycota', 'phylum'),
            ('saccharomyceta', 'no rank'),
            ('Pezizomycotina', 'subphylum'),
            ('leotiomyceta', 'no rank'),
            ('sordariomyceta', 'no rank'),
            ('Sordariomycetes', 'class'),
            ('Xylariomycetidae', 'subclass'),
            ('Xylariales', 'order'),
            ('Sporocadaceae', 'family'),
            ('Pestalotiopsis', 'genus'),
            ('Pestalotiopsis maculiformans', 'species')]
        self.assertEqual(observed_lineage, expected_lineage)
        # Lineage results are cached on the instance as well.
        self.assertEqual(db.lineages, {'531911': expected_lineage})
class FunctionTests(unittest.TestCase):
    """Tests for the module-level helpers (also hit the live NCBI service)."""
    def test_get_taxid(self):
        # GI number lookup.
        self.assertEqual(get_taxid("312434489"), "531911")
    def test_get_taxid_from_accession(self):
        # Accession.version lookup.
        self.assertEqual(get_taxid("HQ844023.1"), "1056490")
    def test_getLineage(self):
        # Should this return the HTTP 400 error?
        self.assertEqual(get_lineage("asdf"), None)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| kylebittinger/brocc | tests/test_get_xml.py | Python | gpl-3.0 | 1,775 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import shipping.models
class Migration(migrations.Migration):
    """Add a ``country`` foreign key to Order, defaulting to the shop's
    main country (resolved at runtime by Country.default_country_id)."""
    dependencies = [
        ('shipping', '0002_country_is_main'),
        ('orders', '0013_order_shipping_cost'),
    ]
    operations = [
        migrations.AddField(
            model_name='order',
            name='country',
            # default is a callable, evaluated per row at migration time.
            field=models.ForeignKey(verbose_name='Страна', default=shipping.models.Country.default_country_id, to='shipping.Country'),
        ),
    ]
| juntatalor/qexx | orders/migrations/0014_order_country.py | Python | mit | 559 |
# coding:utf-8
import urllib
import json
import base64
import time
from threading import Timer
from QUANTAXIS_Trade.util import base_trade
import pandas as pd
import requests
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes
class TdxTradeApiParams:
    """Query-category constants for the TDX trade API.

    See https://github.com/yutiansut/pytdx/blob/master/pytdx/trade/trade.py

    0   cash / balance
    1   stock holdings
    2   today's orders
    3   today's deals
    4   cancelable orders
    5   shareholder codes
    6   margin loan balance
    7   stock loan balance
    8   marginable securities
    12  subscribable new stocks
    13  new-stock subscription quota
    14  allotment numbers
    15  lottery (winning) results
    """
    QUERY_CATEGORY_CASH = 0
    QUERY_CATEGORY_STOCKS = 1
    QUERY_CATEGORY_ORDER_OF_TODAY = 2
    QUERY_CATEGORY_DEAL_OF_TODAY = 3
    QUERY_CATEGORY_CANCELABLE_ORDER = 4
    QUERY_CATEGORY_SHAREHOLDERS_CODE = 5
    QUERY_CATEGORY_BALANCE_OF_MARGIN_LOAN = 6
    QUERY_CATEGORY_BALANCE_OF_STOCK_LOAN = 7
    # (name kept as-is -- "SOTCK" typo is part of the public interface)
    QUERY_CATEGORY_OPERABLE_MARGIN_SOTCK = 8
    QUERY_CATEGORY_NEW_STOCKS = 12
    QUERY_CATEGORY_NEW_STOCKS_QUOTA = 13
    QUERY_CATEGORY_NEW_STOCK_NUMBER = 14
    # Bug fix: was 1, which duplicated QUERY_CATEGORY_STOCKS; the mapping
    # above assigns lottery-result queries to category 15.
    QUERY_CATEGORY_NEW_STOCK_HIT = 15
class QATrade_TdxTradeServer(base_trade.QA_Trade_Api):
    """Asynchronous client for a TdxTradeServer HTTP API endpoint.

    Requests are queued as ``(func_name, params)`` jobs, POSTed to the
    server by :meth:`spi_job`, and each JSON reply is dispatched to the
    matching ``on_*`` callback registered in ``self._event_dict``.
    Payloads are AES/CBC-encrypted when both key and IV are supplied.
    """

    def __init__(self, broker="http://127.0.0.1:19820/api", encoding="utf-8", enc_key=None, enc_iv=None):
        """
        :param broker: URL of the TdxTradeServer API endpoint.
        :param encoding: text encoding of request/response bodies.
        :param enc_key: AES key (bytes); transport encryption is enabled
                        only when both ``enc_key`` and ``enc_iv`` are given.
        :param enc_iv: AES CBC initialization vector (bytes).
        """
        super().__init__()
        self._endpoint = broker
        # Bug fix: the ``encoding`` argument was previously ignored and
        # the attribute hard-coded to "utf-8".
        self._encoding = encoding
        if enc_key is None or enc_iv is None:
            self._transport_enc = False
            self._transport_enc_key = None
            self._transport_enc_iv = None
            self._cipher = None
        else:
            self._transport_enc = True
            self._transport_enc_key = enc_key
            self._transport_enc_iv = enc_iv
            backend = default_backend()
            self._cipher = Cipher(algorithms.AES(
                enc_key), modes.CBC(enc_iv), backend=backend)
        self._session = requests.Session()
        # Server "func" name -> local callback that handles its reply.
        self._event_dict = {'logon': self.on_login, 'logoff': self.on_logout, 'ping': self.on_ping,
                            'query_data': self.on_query_data, 'send_order': self.on_insert_order,
                            'cancel_order': self.on_cancel_order_event, 'get_quote': self.on_get_quote}
        self.client_id = ''
        self.account_id = ''

    def spi_job(self, params=None):
        """Wait until a job is queued, send it to the server and dispatch
        the reply to the registered callback.

        Bug fix: the original recursed into itself while the queue was
        empty, which eventually overflowed the call stack; this version
        waits iteratively instead (still a busy wait, as before).
        """
        print(' ')
        # self._queue comes from the base class; only empty()/get() are
        # used, matching the original implementation.
        while self._queue.empty():
            pass
        job = self._queue.get()
        res = self.call(str(job[0]), job[1])
        self._event_dict[str(job[0])](res)

    def call(self, func, params=None):
        """POST a ``{"func": ..., "params": ...}`` request and return the
        decoded JSON reply (encrypting/decrypting when enabled)."""
        json_obj = {
            "func": func
        }
        if params is not None:
            json_obj["params"] = params
        if self._transport_enc:
            data_to_send = self.encrypt(json_obj)
            response = self._session.post(self._endpoint, data=data_to_send)
        else:
            response = self._session.post(self._endpoint, json=json_obj)
        response.encoding = self._encoding
        text = response.text
        if self._transport_enc:
            decoded_text = self.decrypt(text)
            return json.loads(decoded_text)
        else:
            return json.loads(text)

    def encrypt(self, source_obj):
        """Serialize *source_obj* to JSON, zero-pad it to the AES block
        size, encrypt, then base64-encode and URL-quote for transport."""
        encrypter = self._cipher.encryptor()
        source = json.dumps(source_obj)
        source = source.encode(self._encoding)
        # Zero-pad up to a 16-byte multiple; a full extra block is added
        # when already aligned (the receiver strips trailing NULs).
        need_to_padding = 16 - (len(source) % 16)
        if need_to_padding > 0:
            source = source + b'\x00' * need_to_padding
        enc_data = encrypter.update(source) + encrypter.finalize()
        b64_enc_data = base64.encodebytes(enc_data)
        return urllib.parse.quote(b64_enc_data)

    def decrypt(self, source):
        """Reverse :meth:`encrypt`: unquote, base64-decode, decrypt and
        strip the zero padding."""
        decrypter = self._cipher.decryptor()
        source = urllib.parse.unquote(source)
        source = base64.decodebytes(source.encode("utf-8"))
        data_bytes = decrypter.update(source) + decrypter.finalize()
        # NOTE: a payload legitimately ending in 0x00 would be truncated.
        return data_bytes.rstrip(b"\x00").decode(self._encoding)

    def data_to_df(self, result):
        """Return ``result['data']`` as a DataFrame, or None when absent."""
        if 'data' in result:
            data = result['data']
            return pd.DataFrame(data=data)

    def get_client_id(self):
        """Return the client id received at logon ('' before logon)."""
        return self.client_id

    def get_account_id(self):
        """Return the account id passed to :meth:`login` ('' before)."""
        return self.account_id

    def ping(self):
        """Queue a keep-alive request."""
        self._queue.put(["ping", {}])

    def login(self, ip, port, version, yyb_id, account_id, trade_account, jy_passwrod, tx_password):
        """Queue a logon request.

        ``jy_passwrod`` (sic) keeps its original name so existing keyword
        callers keep working.
        """
        self.account_id = account_id
        self._queue.put(["logon", {
            "ip": ip,
            "port": port,
            "version": version,
            "yyb_id": yyb_id,
            "account_no": account_id,
            "trade_account": trade_account,
            "jy_password": jy_passwrod,
            "tx_password": tx_password
        }])

    def logoff(self, client_id):
        """Queue a logoff request for *client_id*."""
        self._queue.put(["logoff", {
            "client_id": client_id
        }])

    def query_data(self, client_id, category):
        """Queue a data query (see TdxTradeApiParams for categories)."""
        self._queue.put(["query_data", {
            "client_id": client_id,
            "category": category
        }])

    def insert_order(self, client_id, category, price_type, gddm, zqdm, price, quantity):
        """Queue an order (gddm: shareholder code, zqdm: security code)."""
        self._queue.put(["send_order", {
            'client_id': client_id,
            'category': category,
            'price_type': price_type,
            'gddm': gddm,
            'zqdm': zqdm,
            'price': price,
            'quantity': quantity
        }])

    def cancel_order(self, client_id, exchange_id, hth):
        """Queue a cancel for order number *hth* on *exchange_id*."""
        self._queue.put(["cancel_order", {
            'client_id': client_id,
            'exchange_id': exchange_id,
            'hth': hth
        }])

    def get_quote(self, client_id, code):
        """Queue a quote request for security *code*."""
        self._queue.put(["get_quote", {
            'client_id': client_id,
            'code': code,
        }])

    def query_asset(self):
        """Queue a cash/balance query for the logged-on client."""
        self._queue.put(["query_data", {
            "client_id": self.client_id,
            "category": TdxTradeApiParams.QUERY_CATEGORY_CASH
        }])

    # --- reply callbacks (currently just log to stdout) -------------------

    def on_ping(self, data):
        print(data)

    def on_insert_order(self, data):
        print(data)

    def on_login(self, data):
        print(data)
        try:
            # Remember the session's client id for later queries.
            self.client_id = data['data']['client_id']
        except (KeyError, TypeError):
            # Bug fix: was a bare ``except:``; only missing/odd reply
            # shapes are expected here.
            pass

    def on_logout(self, data):
        print(data)

    def on_query_asset(self, data):
        print(data)

    def on_query_order(self, data):
        print(data)

    def on_query_data(self, data):
        print(data)

    def on_query_position(self, data):
        print(data)

    def on_cancel_order_event(self, data):
        print(data)

    def on_get_quote(self, data):
        print(data)
# Manual smoke test: needs a running TdxTradeServer with matching AES keys.
if __name__ == '__main__':
    api = QATrade_TdxTradeServer(broker="http://127.0.0.1:19820/api",
                                 enc_key=b"d29f1e0cd5a611e7", enc_iv=b"b1f4001a7dda7113")
    api.ping()
| EmmaIshta/QUANTAXIS | QUANTAXIS_Trade/QA_Tdxtradeserver/__init__.py | Python | mit | 6,984 |
# This is only a test file
import logging
from cloud.metrics.metric import Metric
from cloud.models.virtual_machine import VirtualMachine
# Configure logging for the module name
logger = logging.getLogger(__name__)
# Extends the Metric class to inherit basic functionalities
class VMGetState(Metric):
    """Metric that reports the current state of a named virtual machine."""

    # Implementation of the collection method
    def collect(self, vm_name=None):
        """Return the current state of the VM called *vm_name*.

        Raises MetricException if the VM does not exist or if querying
        its state fails.  (Also removes a trailing corrupted fragment
        that made the original file a syntax error.)
        """
        try:
            vm = VirtualMachine.objects.get(name=vm_name)
            return vm.current_state()
        except VirtualMachine.DoesNotExist:
            raise self.MetricException('No VM with name %s' % vm_name)
        except VirtualMachine.VirtualMachineException as e:
            raise self.MetricException('Error: %s' % str(e))
# NOTE (CCB): These functions are copied from oscar.apps.offer.custom due to a bug
# detailed at https://github.com/django-oscar/django-oscar/issues/2345. This file
# should be removed after the fix for the bug is released.
# TODO: Issue above is fixed; we need to upgrade to django-oscar==1.5 and this can be removed.
# (https://github.com/django-oscar/django-oscar/commit/38367f9ca854cd21eaf19a174f24b59a0e65cf79)
from oscar.core.loading import get_model
Condition = get_model('offer', 'Condition')
def class_path(klass):
    """Return the dotted import path of *klass* (module plus class name)."""
    return "{0}.{1}".format(klass.__module__, klass.__name__)
def create_condition(condition_class, **kwargs):
    """
    Create a custom condition instance
    """
    proxy = class_path(condition_class)
    return Condition.objects.create(proxy_class=proxy, **kwargs)
| edx/ecommerce | ecommerce/programs/custom.py | Python | agpl-3.0 | 787 |
# -*- coding: utf-8 -*-
#
# papyon - a python client library for Msn
#
# Copyright (C) 2005-2006 Ali Sabil <ali.sabil@gmail.com>
# Copyright (C) 2007-2008 Johann Prieur <johann.prieur@gmail.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""Profile of the User connecting to the service, as well as the profile of
contacts in his/her contact list.
@sort: Profile, Contact, Group, ClientCapabilities
@group Enums: Presence, Membership, Privacy, NetworkID
@sort: Presence, Membership, Privacy, NetworkID"""
from papyon.util.decorator import rw_property
import gobject
import logging
__all__ = ['Profile', 'Contact', 'Group', 'EndPoint',
'Presence', 'Membership', 'ContactType', 'Privacy', 'NetworkID', 'ClientCapabilities']
logger = logging.getLogger('papyon.profile')
class ClientCapabilities(gobject.GObject):
    """Capabilities of the client. This allow adverstising what the User Agent
    is capable of, for example being able to receive video stream, and being
    able to receive nudges...

        @ivar is_bot: is the client a bot
        @type is_bot: bool

        @ivar is_mobile_device: is the client running on a mobile device
        @type is_mobile_device: bool

        @ivar is_msn_mobile: is the client an MSN Mobile device
        @type is_msn_mobile: bool

        @ivar is_msn_direct_device: is the client an MSN Direct device
        @type is_msn_direct_device: bool

        @ivar is_media_center_user: is the client running on a Media Center
        @type is_media_center_user: bool

        @ivar is_msn8_user: is the client using WLM 8
        @type is_msn8_user: bool

        @ivar is_web_client: is the client web based
        @type is_web_client: bool

        @ivar is_tgw_client: is the client a gateway
        @type is_tgw_client: bool

        @ivar has_space: does the user has a space account
        @type has_space: bool

        @ivar has_webcam: does the user has a webcam plugged in
        @type has_webcam: bool

        @ivar has_onecare: does the user has the OneCare service
        @type has_onecare: bool

        @ivar renders_gif: can the client render gif (for ink)
        @type renders_gif: bool

        @ivar renders_isf: can the client render ISF (for ink)
        @type renders_isf: bool

        @ivar supports_chunking: does the client supports chunking messages
        @type supports_chunking: bool

        @ivar supports_direct_im: does the client supports direct IM
        @type supports_direct_im: bool

        @ivar supports_winks: does the client supports Winks
        @type supports_winks: bool

        @ivar supports_shared_search: does the client supports Shared Search
        @type supports_shared_search: bool

        @ivar supports_voice_im: does the client supports voice clips
        @type supports_voice_im: bool

        @ivar supports_secure_channel: does the client supports secure channels
        @type supports_secure_channel: bool

        @ivar supports_sip_invite: does the client supports SIP
        @type supports_sip_invite: bool

        @ivar supports_tunneled_sip: does the client supports tunneled SIP
        @type supports_tunneled_sip: bool

        @ivar supports_shared_drive: does the client supports File sharing
        @type supports_shared_drive: bool

        @ivar p2p_supports_turn: does the client supports TURN for p2p transfer
        @type p2p_supports_turn: bool

        @ivar p2p_bootstrap_via_uun: is the client able to use and understand UUN commands
        @type p2p_bootstrap_via_uun: bool

        @undocumented: __getattr__, __setattr__, __str__
        """

    # Emitted with (capability_name, new_value) whenever a flag changes.
    __gsignals__ = {
            "capability-changed": (gobject.SIGNAL_RUN_FIRST,
                gobject.TYPE_NONE,
                (object, object)),
            }

    # MSNC[n] is the protocol-version nibble for MSNCn, stored in the
    # top 4 bits of the capabilities field.
    MSNC = [0x0,        # MSNC0
            0x10000000, # MSNC1
            0x20000000, # MSNC2
            0x30000000, # MSNC3
            0x40000000, # MSNC4
            0x50000000, # MSNC5
            0x60000000, # MSNC6
            0x70000000, # MSNC7
            0x80000000, # MSNC8
            0x90000000, # MSNC9
            0xA0000000] # MSNC10

    # Bit masks applied to the primary ``capabilities`` field; attribute
    # access is translated into these masks by __getattr__/__setattr__.
    _CAPABILITIES = {
            'is_bot': 0x00020000,
            'is_mobile_device': 0x00000001,
            'is_msn_mobile': 0x00000040,
            'is_msn_direct_device': 0x00000080,

            'is_media_center_user': 0x00002000,
            'is_msn8_user': 0x00000002,

            'is_web_client': 0x00000200,
            'is_tgw_client': 0x00000800,

            'has_space': 0x00001000,
            'has_webcam': 0x00000010,
            'has_onecare': 0x01000000,

            'renders_gif': 0x00000004,
            'renders_isf': 0x00000008,

            'supports_chunking': 0x00000020,
            'supports_direct_im': 0x00004000,
            'supports_winks': 0x00008000,
            'supports_shared_search': 0x00010000,
            'supports_voice_im': 0x00040000,
            'supports_secure_channel': 0x00080000,
            'supports_sip_invite': 0x00100000,
            'supports_tunneled_sip': 0x00200000,
            'supports_shared_drive': 0x00400000,

            'p2p_aware': 0xF0000000,
            'p2p_supports_turn': 0x02000000,
            'p2p_bootstrap_via_uun': 0x04000000
            }

    # Bit masks applied to the secondary ``extra`` field (only serialized
    # for MSNC >= 9, see __str__).
    _EXTRA = {
            'supports_rtc_video': 0x00000010,
            'supports_p2pv2': 0x00000030
            }

    def __init__(self, msnc=0, client_id="0:0"):
        """Initializer

            @param msnc: The MSNC version
            @type msnc: integer < 11 and >= 0

            @param client_id: the full client ID"""
        gobject.GObject.__init__(self)
        # client_id has the wire form "capabilities[:extra]".
        caps = client_id.split(":")
        capabilities = int(caps[0])
        if len(caps) > 1:
            extra = int(caps[1])
        else:
            extra = 0
        # Use the base-class __setattr__ to bypass our own override below.
        gobject.GObject.__setattr__(self, 'capabilities', self.MSNC[msnc] | capabilities)
        gobject.GObject.__setattr__(self, 'extra', extra)

    def __getattr__(self, name):
        # Map a capability name onto a bit test against the right field.
        if name in self._CAPABILITIES:
            mask = self._CAPABILITIES[name]
            id = self.capabilities
        elif name in self._EXTRA:
            mask = self._EXTRA[name]
            id = self.extra
        else:
            raise AttributeError("object 'ClientCapabilities' has no attribute '%s'" % name)
        return (id & mask != 0)

    def __setattr__(self, name, value):
        # Set/clear the capability bit; emit capability-changed only when
        # the boolean value actually changed.
        if name in self._CAPABILITIES:
            mask = self._CAPABILITIES[name]
            old_value = bool(self.capabilities & mask)
            if value:
                gobject.GObject.__setattr__(self, 'capabilities', self.capabilities | mask)
            else:
                gobject.GObject.__setattr__(self, 'capabilities', self.capabilities & ~mask)
            if value != old_value:
                self.emit('capability-changed', name, value)
        elif name in self._EXTRA:
            mask = self._EXTRA[name]
            old_value = bool(self.extra & mask)
            if value:
                gobject.GObject.__setattr__(self, 'extra', self.extra | mask)
            else:
                gobject.GObject.__setattr__(self, 'extra', self.extra & ~mask)
            if value != old_value:
                self.emit('capability-changed', name, value)
        else:
            raise AttributeError("object 'ClientCapabilities' has no attribute '%s'" % name)

    def __str__(self):
        # Serialize back to the wire form; the extra field is only
        # included from MSNC9 onwards.
        msnc = self.MSNC.index(self.capabilities & 0xF0000000)
        if msnc >= 9:
            client_id = "%s:%s" % (self.capabilities, self.extra)
        else:
            client_id = str(self.capabilities)
        return client_id
class NetworkID(object):
    """Refers to the contact Network ID"""
    # Numeric network identifiers as used on the wire.
    MSN = 1
    """Microsoft Network"""
    LCS = 2
    """Microsoft Live Communication Server"""
    MOBILE = 4
    """Mobile phones"""
    EXTERNAL = 32
    """External IM network, currently Yahoo!"""
class Presence(object):
    """Presence states.

    The members of this class are used to identify the Presence that a user
    wants to advertise to the contacts on his/her contact list.

        @cvar ONLINE: online
        @cvar BUSY: busy
        @cvar IDLE: idle
        @cvar AWAY: away
        @cvar BE_RIGHT_BACK: be right back
        @cvar ON_THE_PHONE: on the phone
        @cvar OUT_TO_LUNCH: out to lunch
        @cvar INVISIBLE: status hidden from contacts
        @cvar OFFLINE: offline"""
    # Values are the raw three-letter status codes used on the wire.
    ONLINE = 'NLN'
    BUSY = 'BSY'
    IDLE = 'IDL'
    AWAY = 'AWY'
    BE_RIGHT_BACK = 'BRB'
    ON_THE_PHONE = 'PHN'
    OUT_TO_LUNCH = 'LUN'
    INVISIBLE = 'HDN'
    OFFLINE = 'FLN'
class Privacy(object):
    """User privacy, defines the default policy concerning contacts not
    belonging to the ALLOW list nor to the BLOCK list.

        @cvar ALLOW: allow by default
        @cvar BLOCK: block by default"""
    # Two-letter wire codes for the default permission policy.
    ALLOW = 'AL'
    BLOCK = 'BL'
class Membership(object):
    """Contact Membership"""
    # Values are powers of two so several memberships can be OR-ed together.
    NONE = 0
    """Contact doesn't belong to the contact list, but belongs to the address book"""
    FORWARD = 1
    """Contact belongs to our contact list"""
    ALLOW = 2
    """Contact is explicitely allowed to see our presence regardless of the
    currently set L{Privacy<papyon.profile.Privacy>}"""
    BLOCK = 4
    """Contact is explicitely forbidden from seeing our presence regardless of
    the currently set L{Privacy<papyon.profile.Privacy>}"""
    REVERSE = 8
    """We belong to the FORWARD list of the contact"""
    PENDING = 16
    """Contact pending"""
class ContactType(object):
    """Automatic update status flag"""
    # The "Live*" values track the lifecycle of an automatic-update
    # relationship between the user and the contact.
    ME = "Me"
    """Contact is the user so there's no automatic update relationship"""
    EXTERNAL = "Messenger2"
    """Contact is part of an external messenger service so there's no automatic
    update relationship with the user"""
    REGULAR = "Regular"
    """Contact has no automatic update relationship with the user"""
    LIVE = "Live"
    """Contact has an automatic update relationship with the user and an
    automatic update already occured"""
    LIVE_PENDING = "LivePending"
    """Contact was requested automatic update from the user and didn't
    give its authorization yet"""
    LIVE_REJECTED = "LiveRejected"
    """Contact was requested automatic update from the user and rejected
    the request"""
    LIVE_DROPPED = "LiveDropped"
    """Contact had an automatic update relationship with the user but
    the contact dropped it"""
class ContactFlag(object):
    """Internal contact flag"""
    # Bit flags kept in BaseContact._flags (see has_flag/_add_flag).
    EXTENDED_PRESENCE_KNOWN = 1
    """Set once we receive the extended presence (UBX) for a buddy"""
class BaseContact(gobject.GObject):
__gsignals__ = {
"end-point-added": (gobject.SIGNAL_RUN_FIRST,
gobject.TYPE_NONE,
(object,)),
"end-point-removed": (gobject.SIGNAL_RUN_FIRST,
gobject.TYPE_NONE,
(object,)),
}
__gproperties__ = {
"client-capabilities": (gobject.TYPE_STRING,
"Client capabilities",
"The client capabilities of the contact 's client",
"",
gobject.PARAM_READABLE),
"current-media": (gobject.TYPE_PYOBJECT,
"Current media",
"The current media that the user wants to display",
gobject.PARAM_READABLE),
"display-name": (gobject.TYPE_STRING,
"Friendly name",
"A nickname that the user chooses to display to others",
"",
gobject.PARAM_READABLE),
"end-points": (gobject.TYPE_PYOBJECT,
"End points",
"List of locations where the user is connected",
gobject.PARAM_READABLE),
"flags": (gobject.TYPE_UINT,
"Flags",
"Contact flags.",
0, 1, 0, gobject.PARAM_READABLE),
"msn-object": (gobject.TYPE_STRING,
"MSN Object",
"MSN Object attached to the user, this generally represent "
"its display picture",
"",
gobject.PARAM_READABLE),
"personal-message": (gobject.TYPE_STRING,
"Personal message",
"The personal message that the user wants to display",
"",
gobject.PARAM_READABLE),
"presence": (gobject.TYPE_STRING,
"Presence",
"The presence to show to others",
Presence.OFFLINE,
gobject.PARAM_READABLE),
"signature-sound": (gobject.TYPE_PYOBJECT,
"Signature sound",
"The sound played by others' client when the user connects",
gobject.PARAM_READABLE),
}
def __init__(self):
gobject.GObject.__init__(self)
self._client_capabilities = ClientCapabilities()
self._current_media = None
self._display_name = ""
self._end_points = {}
self._flags = 0
self._personal_message = ""
self._presence = Presence.OFFLINE
self._msn_object = None
self._signature_sound = None
@property
def account(self):
"""Contact account
@rtype: utf-8 encoded string"""
return self._account
@property
def client_id(self):
"""The user capabilities
@rtype: ClientCapabilities"""
return self._client_capabilities
@property
def client_capabilities(self):
"""The user capabilities
@rtype: ClientCapabilities"""
return self._client_capabilities
@property
def current_media(self):
"""Contact current media
@rtype: (artist: string, track: string)"""
return self._current_media
@property
def display_name(self):
"""Contact display name
@rtype: utf-8 encoded string"""
return self._display_name
@property
def end_points(self):
"""List of contact's locations
@rtype: list of string"""
return self._end_points
@property
def flags(self):
"""Internal contact flags
@rtype: bitmask of L{Membership<papyon.profile.ContactFlag}s"""
return self._flags
@property
def id(self):
"""Contact identifier in a GUID form
@rtype: GUID string"""
return self._id
@property
def msn_object(self):
"""Contact MSN Object
@type: L{MSNObject<papyon.p2p.MSNObject>}"""
return self._msn_object
@property
def network_id(self):
"""Contact network ID
@rtype: L{NetworkID<papyon.profile.NetworkID>}"""
return self._network_id
@property
def personal_message(self):
"""Contact personal message
@rtype: utf-8 encoded string"""
return self._personal_message
@property
def presence(self):
"""Contact presence
@rtype: L{Presence<papyon.profile.Presence>}"""
return self._presence
@property
def signature_sound():
"""Contact signature sound
@type: string"""
return self._signature_sound
### flags management
def has_flag(self, flags):
return (self.flags & flags) == flags
def _set_flags(self, flags):
logger.info("Set contact %s flags to %i" % (self._account, flags))
self._flags = flags
self.notify("flags")
def _add_flag(self, flag):
self._set_flags(self._flags | flag)
def _remove_flag(self, flag):
self._set_flags(self._flags & ~flag)
def _server_property_changed(self, name, value):
if name == "client-capabilities":
value = ClientCapabilities(client_id=value)
attr_name = "_" + name.lower().replace("-", "_")
old_value = getattr(self, attr_name)
if value != old_value:
setattr(self, attr_name, value)
self.notify(name)
if name == "end-points":
self._diff_end_points(old_value, value)
def _diff_end_points(self, old_eps, new_eps):
added_eps = set(new_eps.keys()) - set(old_eps.keys())
removed_eps = set(old_eps.keys()) - set(new_eps.keys())
for ep in added_eps:
self.emit("end-point-added", new_eps[ep])
for ep in removed_eps:
self.emit("end-point-removed", old_eps[ep])
def do_get_property(self, pspec):
name = pspec.name.lower().replace("-", "_")
return getattr(self, name)
# Register with the GObject type system so signals/properties work.
gobject.type_register(BaseContact)
class Profile(BaseContact):
    """Profile of the User connecting to the service."""

    __gproperties__ = {
        "profile": (gobject.TYPE_PYOBJECT,
            "Profile",
            "the text/x-msmsgsprofile sent by the server",
            gobject.PARAM_READABLE),
        "privacy": (gobject.TYPE_STRING,
            "Privacy",
            "The privacy policy to use",
            Privacy.BLOCK,
            gobject.PARAM_READABLE),
        }

    def __init__(self, account, ns_client):
        """Initializer.

        @param account: (account, password) pair
        @param ns_client: the notification server client used to push
            changes to the server"""
        BaseContact.__init__(self)
        self._ns_client = ns_client
        self._account = account[0]
        self._password = account[1]
        self._id = "00000000-0000-0000-0000-000000000000"
        self._profile = ""
        self._network_id = NetworkID.MSN
        self._display_name = self._account.split("@", 1)[0]
        self._privacy = Privacy.BLOCK
        self._end_point_name = ""

        self._client_capabilities = ClientCapabilities(10)
        self._client_capabilities.supports_sip_invite = True
        self._client_capabilities.supports_tunneled_sip = True
        self._client_capabilities.supports_p2pv2 = True
        self._client_capabilities.p2p_bootstrap_via_uun = True
        self._client_capabilities.connect("capability-changed",
                self._client_capability_changed)

        # Cached argument lists replayed to the server whenever one element
        # of the group changes: [presence, capabilities, msn_object] and
        # [personal_message, current_media, signature_sound].
        self.__pending_set_presence = [self._presence,
                                       self._client_capabilities,
                                       self._msn_object]
        # BUG FIX: this list previously held only two entries, so the
        # signature_sound setter below crashed with an IndexError when it
        # assigned index [2].
        self.__pending_set_personal_message = [self._personal_message,
                                               self._current_media,
                                               self._signature_sound]

    @property
    def password(self):
        """The user password
        @rtype: utf-8 encoded string"""
        return self._password

    @property
    def profile(self):
        """The user profile retrieved from the MSN servers
        @rtype: dict of fields"""
        return self._profile

    @rw_property
    def display_name():
        """The display name shown to you contacts
        @type: utf-8 encoded string"""
        def fset(self, display_name):
            # Empty display names are rejected by the server; ignore them.
            if not display_name:
                return
            self._ns_client.set_display_name(display_name)
        def fget(self):
            return self._display_name
        return locals()

    @rw_property
    def presence():
        """The presence displayed to you contacts
        @type: L{Presence<papyon.profile.Presence>}"""
        def fset(self, presence):
            if presence == self._presence:
                return
            self.__pending_set_presence[0] = presence
            self._ns_client.set_presence(*self.__pending_set_presence)
        def fget(self):
            return self._presence
        return locals()

    @rw_property
    def privacy():
        """The default privacy, can be either Privacy.ALLOW or Privacy.BLOCK
        @type: L{Privacy<papyon.profile.Privacy>}"""
        def fset(self, privacy):
            self._ns_client.set_privacy(privacy)
        def fget(self):
            return self._privacy
        return locals()

    @rw_property
    def personal_message():
        """The personal message displayed to you contacts
        @type: utf-8 encoded string"""
        def fset(self, personal_message):
            if personal_message == self._personal_message:
                return
            self.__pending_set_personal_message[0] = personal_message
            self._ns_client.set_personal_message(*self.__pending_set_personal_message)
        def fget(self):
            return self._personal_message
        return locals()

    @rw_property
    def current_media():
        """The current media displayed to you contacts
        @type: (artist: string, track: string)"""
        def fset(self, current_media):
            if current_media == self._current_media:
                return
            self.__pending_set_personal_message[1] = current_media
            self._ns_client.set_personal_message(*self.__pending_set_personal_message)
        def fget(self):
            return self._current_media
        return locals()

    @rw_property
    def signature_sound():
        """The sound played when you are connecting
        @type: string"""
        def fset(self, signature_sound):
            if signature_sound == self._signature_sound:
                return
            # Index 2 of the pending list (see __init__).
            self.__pending_set_personal_message[2] = signature_sound
            self._ns_client.set_personal_message(*self.__pending_set_personal_message)
        def fget(self):
            return self._signature_sound
        return locals()

    @rw_property
    def end_point_name():
        """The name advertised for this connected location."""
        def fset(self, name):
            if name == self._end_point_name:
                return
            self._ns_client.set_end_point_name(name)
        def fget(self):
            return self._end_point_name
        return locals()

    @rw_property
    def msn_object():
        """The MSNObject attached to your contact, this MSNObject represents the
        display picture to be shown to your peers
        @type: L{MSNObject<papyon.p2p.MSNObject>}"""
        def fset(self, msn_object):
            if msn_object == self._msn_object:
                return
            self.__pending_set_presence[2] = msn_object
            self._ns_client.set_presence(*self.__pending_set_presence)
        def fget(self):
            return self._msn_object
        return locals()

    @rw_property
    def presence_msn_object():
        """Set presence and MSN object atomically (one server round-trip)."""
        def fset(self, args):
            presence, msn_object = args
            if presence == self._presence and msn_object == self._msn_object:
                return
            self.__pending_set_presence[0] = presence
            self.__pending_set_presence[2] = msn_object
            self._ns_client.set_presence(*self.__pending_set_presence)
        def fget(self):
            return self._presence, self._msn_object
        return locals()

    @rw_property
    def personal_message_current_media():
        """Set personal message and current media atomically."""
        def fset(self, args):
            personal_message, current_media = args
            if personal_message == self._personal_message and \
                    current_media == self._current_media:
                return
            self.__pending_set_personal_message[0] = personal_message
            self.__pending_set_personal_message[1] = current_media
            self._ns_client.set_personal_message(*self.__pending_set_personal_message)
        def fget(self):
            return self._personal_message, self._current_media
        return locals()

    def request_profile_url(self, callback):
        """Ask the server for the URL of the user's profile page."""
        self._ns_client.send_url_request(('PROFILE', '0x0409'), callback)

    def _client_capability_changed(self, client, name, value):
        # Re-send the presence whenever one of our capabilities changes.
        self.__pending_set_presence[1] = self._client_capabilities
        self._ns_client.set_presence(*self.__pending_set_presence)

    def _server_property_changed(self, name, value):
        # Keep the cached presence arguments in sync with the server-pushed
        # MSN object before delegating to the base implementation.
        if name == "msn-object" and value is not None:
            self.__pending_set_presence[2] = value
        BaseContact._server_property_changed(self, name, value)
# Register with the GObject type system so signals/properties work.
gobject.type_register(Profile)
class Contact(BaseContact):
    """Contact related information."""

    __gsignals__ = {
        "infos-changed": (gobject.SIGNAL_RUN_FIRST,
            gobject.TYPE_NONE,
            (object,)),
        }

    __gproperties__ = {
        "memberships": (gobject.TYPE_UINT,
            "Memberships",
            "Membership relation with the contact.",
            0, 31, 0, gobject.PARAM_READABLE),
        "groups": (gobject.TYPE_PYOBJECT,
            "Groups",
            "The groups the contact belongs to",
            gobject.PARAM_READABLE),
        "infos": (gobject.TYPE_PYOBJECT,
            "Informations",
            "The contact informations",
            gobject.PARAM_READABLE),
        "contact-type": (gobject.TYPE_PYOBJECT,
            "Contact type",
            "The contact automatic update status flag",
            gobject.PARAM_READABLE),
        }

    def __init__(self, id, network_id, account, display_name, cid=None,
            memberships=Membership.NONE, contact_type=ContactType.REGULAR):
        """Initializer"""
        BaseContact.__init__(self)
        # Fall back to the null GUID when the address book gave us none.
        self._id = id or "00000000-0000-0000-0000-000000000000"
        self._cid = cid or "00000000-0000-0000-0000-000000000000"
        self._network_id = network_id
        self._account = account
        self._display_name = display_name
        self._attributes = {'icon_url' : None}
        self._groups = set()
        self._infos = {}
        self._memberships = memberships
        self._contact_type = contact_type

    def __repr__(self):
        def memberships_str():
            # Human-readable rendering of the membership bitmask.
            m = []
            memberships = self._memberships
            if memberships & Membership.FORWARD:
                m.append('FORWARD')
            if memberships & Membership.ALLOW:
                m.append('ALLOW')
            if memberships & Membership.BLOCK:
                m.append('BLOCK')
            if memberships & Membership.REVERSE:
                m.append('REVERSE')
            if memberships & Membership.PENDING:
                m.append('PENDING')
            return " | ".join(m)
        template = "<papyon.Contact id='%s' network='%u' account='%s' memberships='%s'>"
        return template % (self._id, self._network_id, self._account, memberships_str())

    @property
    def attributes(self):
        """Contact attributes
        @rtype: {key: string => value: string}"""
        return self._attributes.copy()

    @property
    def cid(self):
        """Contact ID
        @rtype: GUID string"""
        return self._cid

    @property
    def groups(self):
        """Contact list of groups
        @rtype: set(L{Group<papyon.profile.Group>}...)"""
        return self._groups

    @property
    def infos(self):
        """Contact informations
        @rtype: {key: string => value: string}"""
        return self._infos

    @property
    def memberships(self):
        """Contact membership value
        @rtype: bitmask of L{Membership<papyon.profile.Membership>}s"""
        return self._memberships

    @property
    def contact_type(self):
        """Contact automatic update status flag
        @rtype: L{ContactType<papyon.profile.ContactType>}"""
        return self._contact_type

    @property
    def domain(self):
        """Contact domain, which is basically the part after @ in the account
        @rtype: utf-8 encoded string"""
        result = self._account.split('@', 1)
        if len(result) > 1:
            return result[1]
        else:
            return ""

    @property
    def profile_url(self):
        """Contact profile url
        @rtype: string"""
        account = self._account
        return "http://members.msn.com/default.msnw?mem=%s&pgmarket=" % account

    ### membership management
    def is_member(self, memberships):
        """Determines if this contact belongs to the specified memberships
        @type memberships: bitmask of L{Membership<papyon.profile.Membership>}s"""
        return (self.memberships & memberships) == memberships

    def is_mail_contact(self):
        """Determines if this contact is a mail contact"""
        blank_id = "00000000-0000-0000-0000-000000000000"
        return (not self.is_member(Membership.FORWARD) and self.id != blank_id)

    def _set_memberships(self, memberships):
        self._memberships = memberships
        self.notify("memberships")

    def _add_membership(self, membership):
        self._memberships |= membership
        self.notify("memberships")

    def _remove_membership(self, membership):
        # BUG FIX: this used "^=" (XOR), which would *add* the membership
        # bit when it was not already set; mask the bit out instead, which
        # also matches BaseContact._remove_flag.
        self._memberships &= ~membership
        self.notify("memberships")

    def _server_attribute_changed(self, name, value):
        self._attributes[name] = value

    def _server_infos_changed(self, updated_infos):
        self._infos.update(updated_infos)
        self.emit("infos-changed", updated_infos)
        self.notify("infos")

    def _reset(self):
        # Bring the contact back to a pristine, offline state.
        self._id = "00000000-0000-0000-0000-000000000000"
        self._cid = "00000000-0000-0000-0000-000000000000"
        self._groups = set()
        self._flags = 0
        self._server_property_changed("presence", Presence.OFFLINE)
        self._server_property_changed("display-name", self._account)
        self._server_property_changed("personal-message", "")
        self._server_property_changed("current-media", None)
        self._server_property_changed("msn-object", None)
        self._server_property_changed("client-capabilities", "0:0")
        self._server_property_changed("end-points", {})
        self._server_infos_changed({})

    ### group management
    def _add_group_ownership(self, group):
        self._groups.add(group)

    def _delete_group_ownership(self, group):
        self._groups.discard(group)
# Register with the GObject type system so signals/properties work.
gobject.type_register(Contact)
class Group(gobject.GObject):
    """A contact-list group.
    @undocumented: __gsignals__, __gproperties__, do_get_property"""

    __gproperties__ = {
        "name": (gobject.TYPE_STRING,
            "Group name",
            "Name that the user chooses for the group",
            "",
            gobject.PARAM_READABLE)
        }

    def __init__(self, id, name):
        """Initializer"""
        gobject.GObject.__init__(self)
        self._id = id
        self._name = name

    @property
    def id(self):
        """Group identifier in a GUID form
        @rtype: GUID string"""
        return self._id

    @property
    def name(self):
        """Group name
        @rtype: utf-8 encoded string"""
        return self._name

    def _server_property_changed(self, name, value):
        # Mirror the server-pushed value locally and notify only on change.
        attribute = "_" + name.lower().replace("-", "_")
        if getattr(self, attribute) != value:
            setattr(self, attribute, value)
            self.notify(name)

    def do_get_property(self, pspec):
        return getattr(self, pspec.name.lower().replace("-", "_"))
# Register with the GObject type system so signals/properties work.
gobject.type_register(Group)
class EndPoint(object):
    """One connected location (client instance) of a contact."""

    def __init__(self, id, caps):
        self.id = id
        self.capabilities = ClientCapabilities(client_id=caps)
        self.name = ""
        self.idle = False
        self.state = ""
        self.client_type = 0

    def __eq__(self, endpoint):
        return (self.id == endpoint.id and
                self.capabilities == endpoint.capabilities and
                self.name == endpoint.name and
                self.idle == endpoint.idle and
                self.state == endpoint.state and
                self.client_type == endpoint.client_type)

    def __ne__(self, endpoint):
        # BUG FIX: Python 2 does not derive "!=" from __eq__; without this
        # method, "!=" compared identities and could disagree with "==".
        return not self.__eq__(endpoint)
| billiob/papyon | papyon/profile.py | Python | gpl-2.0 | 31,976 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
# Copyright (c) 2012 VMware, Inc.
# Copyright (c) 2011 Citrix Systems, Inc.
# Copyright 2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
A fake VMware VI API implementation.
"""
import collections
import pprint
from nova import exception
from nova.openstack.common.gettextutils import _
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova.openstack.common import uuidutils
from nova.virt.vmwareapi import error_util
# Names of the managed-object "tables" this fake API emulates.
_CLASSES = ['Datacenter', 'Datastore', 'ResourcePool', 'VirtualMachine',
            'Network', 'HostSystem', 'HostNetworkSystem', 'Task', 'session',
            'files', 'ClusterComputeResource', 'HostStorageSystem']

# Size (in bytes) reported for fake files.
_FAKE_FILE_SIZE = 1024

# In-memory fake database: table name -> {object ref: object}
# (the 'files' table is a flat list of path names instead).
_db_content = {}

LOG = logging.getLogger(__name__)
def log_db_contents(msg=None):
    """Log DB Contents."""
    # msg is an optional prefix label for the dump.
    LOG.debug(_("%(text)s: _db_content => %(content)s"),
              {'text': msg or "", 'content': pprint.pformat(_db_content)})
def reset(vc=False):
    """Resets the db contents.

    With vc=True a second host, datastore and datacenter plus two clusters
    are created, emulating a vCenter-managed setup instead of a single
    ESX host.
    """
    cleanup()
    create_network()
    create_host_network_system()
    create_host_storage_system()
    create_host()
    ds_ref1 = create_datastore('ds1', 1024, 500)
    if vc:
        create_host()
        ds_ref2 = create_datastore('ds2', 1024, 500)
    create_datacenter('dc1', ds_ref1)
    if vc:
        create_datacenter('dc2', ds_ref2)
    create_res_pool()
    if vc:
        # Each cluster gets both hosts but its own datastore.
        create_cluster('test_cluster', ds_ref1)
        create_cluster('test_cluster2', ds_ref2)
def cleanup():
    """Reset every fake table to an empty container."""
    for klass in _CLASSES:
        # The fake datastore keeps file references as a flat list of path
        # names; every other table maps object references to objects.
        _db_content[klass] = [] if klass == 'files' else {}
def _create_object(table, table_obj):
    """Create an object in the db."""
    # Objects are keyed by their managed object reference.
    _db_content[table][table_obj.obj] = table_obj
def _get_object(obj_ref):
    """Get object for the give reference."""
    # The reference's .type names the table it lives in.
    return _db_content[obj_ref.type][obj_ref]
def _get_objects(obj_type):
    """Return a FakeRetrieveResult holding every object of the given type."""
    result = FakeRetrieveResult()
    for stored in _db_content[obj_type].values():
        result.add_object(stored)
    return result
def _convert_to_array_of_mor(mors):
    """Wrap the given managed object reference(s) into a DataObject."""
    wrapper = DataObject()
    wrapper.ManagedObjectReference = mors
    return wrapper
class FakeRetrieveResult(object):
    """Object to retrieve a ObjectContent list."""

    def __init__(self):
        self.objects = []

    def add_object(self, obj):
        """Append one ObjectContent to the result list."""
        # Parameter renamed from "object": it shadowed the builtin.
        self.objects.append(obj)
class MissingProperty(object):
    """Missing object in ObjectContent's missing set."""

    def __init__(self, path='fake-path', message='fake_message',
                 method_fault=None):
        # Mirrors the structure the real API returns: the property path plus
        # a fault carrying a localized message and the underlying method
        # fault (which may be None).
        self.path = path
        self.fault = DataObject()
        self.fault.localizedMessage = message
        self.fault.fault = method_fault
def _get_object_refs(obj_type):
    """Return the managed object references of every object of the type."""
    return [ref for ref in _db_content[obj_type]]
def _update_object(table, table_obj):
    """Update objects of the type."""
    # Same operation as _create_object: insert-or-replace by reference.
    _db_content[table][table_obj.obj] = table_obj
class Prop(object):
    """Property Object base class."""

    def __init__(self, name=None, val=None):
        # A simple (name, value) pair mimicking a vim DynamicProperty.
        self.name = name
        self.val = val
class ManagedObjectReference(object):
    """A managed object reference is a remote identifier."""

    def __init__(self, name="ManagedObject", value=None):
        # BUG FIX: the original statement was a bare
        # "super(ManagedObjectReference, self)" -- a no-op that never
        # invoked the parent initializer.
        super(ManagedObjectReference, self).__init__()
        # Managed Object Reference value attributes
        # typically have values like vm-123 or
        # host-232 and not UUID.
        self.value = value
        # Managed Object Reference type
        # attributes hold the name of the type
        # of the vCenter object the value
        # attribute is the identifier for
        self.type = name
        self._type = name
class ObjectContent(object):
    """ObjectContent array holds dynamic properties."""

    # This class is a *fake* of a class sent back to us by
    # SOAP. It has its own names. These names are decided
    # for us by the API we are *faking* here.

    def __init__(self, obj_ref, prop_list=None, missing_list=None):
        self.obj = obj_ref

        # Non-iterable (including None) inputs are normalized to [].
        # NOTE(review): collections.Iterable only exists up to Python 3.9
        # (moved to collections.abc); fine for the Python 2 era this targets.
        if not isinstance(prop_list, collections.Iterable):
            prop_list = []

        if not isinstance(missing_list, collections.Iterable):
            missing_list = []

        # propSet is the name your Python code will need to
        # use since this is the name that the API will use
        self.propSet = prop_list

        # missingSet is the name your python code will
        # need to use since this is the name that the
        # API we are talking to will use.
        self.missingSet = missing_list
class ManagedObject(object):
    """Managed Object base class."""

    _counter = 0  # class-wide counter used to mint unique mo_ids

    def __init__(self, mo_id_prefix="obj"):
        """Sets the obj property which acts as a reference to the object."""
        # object.__setattr__ is used directly so these bookkeeping
        # attributes bypass the overridden __setattr__ below (which would
        # record them as entries in propSet).
        object.__setattr__(self, 'mo_id', self._generate_moid(mo_id_prefix))
        object.__setattr__(self, 'propSet', [])
        object.__setattr__(self, 'obj',
                           ManagedObjectReference(self.__class__.__name__,
                                                  self.mo_id))

    def set(self, attr, val):
        """
        Sets an attribute value. Not using the __setattr__ directly for we
        want to set attributes of the type 'a.b.c' and using this function
        class we set the same.
        """
        self.__setattr__(attr, val)

    def get(self, attr):
        """
        Gets an attribute. Used as an intermediary to get nested
        property like 'a.b.c' value.
        """
        return self.__getattr__(attr)

    def __setattr__(self, attr, val):
        # TODO(hartsocks): this is adds unnecessary complexity to the class
        # Update an existing Prop entry if one exists for this name...
        for prop in self.propSet:
            if prop.name == attr:
                prop.val = val
                return
        # ...otherwise record a brand new property.
        elem = Prop()
        elem.name = attr
        elem.val = val
        self.propSet.append(elem)

    def __getattr__(self, attr):
        # TODO(hartsocks): remove this
        # in a real ManagedObject you have to iterate the propSet
        # in a real ManagedObject, the propSet is a *set* not a list
        for elem in self.propSet:
            if elem.name == attr:
                return elem.val
        msg = _("Property %(attr)s not set for the managed object %(name)s")
        raise exception.NovaException(msg % {'attr': attr,
                                             'name': self.__class__.__name__})

    def _generate_moid(self, prefix):
        """Generates a new Managed Object ID."""
        self.__class__._counter += 1
        return prefix + "-" + str(self.__class__._counter)

    def __repr__(self):
        # Serialize the recorded properties for debugging.
        return jsonutils.dumps(dict([(elem.name, elem.val)
                                     for elem in self.propSet]))
class DataObject(object):
    """Data object base class."""

    def __init__(self, obj_name=None):
        # Further attributes are attached dynamically by callers.
        self.obj_name = obj_name

    def __repr__(self):
        # Render every dynamically attached attribute.
        return str(self.__dict__)
class HostInternetScsiHba(DataObject):
    """
    iSCSI Host Bus Adapter
    """

    def __init__(self):
        super(HostInternetScsiHba, self).__init__()
        # Fixed fake adapter identifiers reused across the fake host setup.
        self.device = 'vmhba33'
        self.key = 'key-vmhba33'
class VirtualDisk(DataObject):
    """
    Virtual Disk class.
    """

    def __init__(self):
        super(VirtualDisk, self).__init__()
        # Callers overwrite these; 0/0 are just neutral defaults.
        self.key = 0
        self.unitNumber = 0
class VirtualDiskFlatVer2BackingInfo(DataObject):
    """VirtualDiskFlatVer2BackingInfo class."""

    def __init__(self):
        super(VirtualDiskFlatVer2BackingInfo, self).__init__()
        # Default to a thick, non-eagerly-zeroed disk backing.
        self.thinProvisioned = False
        self.eagerlyScrub = False
class VirtualDiskRawDiskMappingVer1BackingInfo(DataObject):
    """VirtualDiskRawDiskMappingVer1BackingInfo class."""

    def __init__(self):
        super(VirtualDiskRawDiskMappingVer1BackingInfo, self).__init__()
        # LUN uuid of the mapped raw device; filled in by callers.
        self.lunUuid = ""
class VirtualLsiLogicController(DataObject):
    """VirtualLsiLogicController class."""
    # Marker type only; attributes are attached dynamically by callers.
    pass
class VirtualLsiLogicSASController(DataObject):
    """VirtualLsiLogicSASController class."""
    # Marker type only; attributes are attached dynamically by callers.
    pass
class VirtualPCNet32(DataObject):
    """VirtualPCNet32 class."""

    def __init__(self):
        super(VirtualPCNet32, self).__init__()
        # NOTE(review): 4000 appears to match the device key vSphere
        # assigns to the first virtual NIC -- confirm against the real API.
        self.key = 4000
class VirtualMachine(ManagedObject):
    """Virtual Machine class."""

    def __init__(self, **kwargs):
        # All properties are overridable through keyword arguments; each
        # falls back to a sensible "running test VM" default.
        super(VirtualMachine, self).__init__("vm")
        self.set("name", kwargs.get("name", 'test-vm'))
        self.set("runtime.connectionState",
                 kwargs.get("conn_state", "connected"))
        self.set("summary.config.guestId", kwargs.get("guest", "otherGuest"))
        ds_do = kwargs.get("ds", None)
        self.set("datastore", _convert_to_array_of_mor(ds_do))
        self.set("summary.guest.toolsStatus", kwargs.get("toolsstatus",
                                                         "toolsOk"))
        self.set("summary.guest.toolsRunningStatus", kwargs.get(
            "toolsrunningstate", "guestToolsRunning"))
        self.set("runtime.powerState", kwargs.get("powerstate", "poweredOn"))
        self.set("config.files.vmPathName", kwargs.get("vmPathName"))
        self.set("summary.config.numCpu", kwargs.get("numCpu", 1))
        self.set("summary.config.memorySizeMB", kwargs.get("mem", 1))
        self.set("config.hardware.device", kwargs.get("virtual_device", None))
        self.set("config.extraConfig", kwargs.get("extra_config", None))
        self.set('runtime.host', kwargs.get("runtime_host", None))
        # Raw device list kept around for reconfig() below.
        self.device = kwargs.get("virtual_device")

    def reconfig(self, factory, val):
        """
        Called to reconfigure the VM. Actually customizes the property
        setting of the Virtual Machine object.
        """
        try:
            # Specs without at least two device changes are treated as a
            # no-op for the disk-attach path handled below.
            if not hasattr(val, 'deviceChange'):
                return

            if len(val.deviceChange) < 2:
                return

            # Case of Reconfig of VM to attach disk
            controller_key = val.deviceChange[1].device.controllerKey
            filename = val.deviceChange[1].device.backing.fileName

            disk = VirtualDisk()
            disk.controllerKey = controller_key

            disk_backing = VirtualDiskFlatVer2BackingInfo()
            disk_backing.fileName = filename
            disk_backing.key = -101
            disk.backing = disk_backing

            controller = VirtualLsiLogicController()
            controller.key = controller_key

            self.set("config.hardware.device", [disk, controller,
                                                self.device[0]])
        except AttributeError:
            # Case of Reconfig of VM to set extra params
            # (the spec carried extraConfig instead of device changes).
            self.set("config.extraConfig", val.extraConfig)
class Network(ManagedObject):
    """Network class."""

    def __init__(self):
        super(Network, self).__init__("network")
        # Every fake network is named after the default VM NIC network.
        self.set("summary.name", "vmnet0")
class ResourcePool(ManagedObject):
    """Resource Pool class."""

    def __init__(self, name="test_ResPool", value="resgroup-test"):
        super(ResourcePool, self).__init__("rp")
        self.set("name", name)
        summary = DataObject()
        runtime = DataObject()
        config = DataObject()
        memory = DataObject()
        cpu = DataObject()

        memoryAllocation = DataObject()
        cpuAllocation = DataObject()

        # Runtime usage: 1 GiB memory cap with 500 MiB used; CPU in MHz.
        memory.maxUsage = 1000 * 1024 * 1024
        memory.overallUsage = 500 * 1024 * 1024
        cpu.maxUsage = 10000
        cpu.overallUsage = 1000
        runtime.cpu = cpu
        runtime.memory = memory
        summary.runtime = runtime
        # Configured allocation limits (CPU MHz, memory MiB).
        cpuAllocation.limit = 10000
        memoryAllocation.limit = 1024
        memoryAllocation.reservation = 1024
        config.memoryAllocation = memoryAllocation
        config.cpuAllocation = cpuAllocation
        self.set("summary", summary)
        self.set("summary.runtime.memory", memory)
        self.set("config", config)
        parent = ManagedObjectReference(value=value,
                                        name=name)
        owner = ManagedObjectReference(value=value,
                                       name=name)
        self.set("parent", parent)
        self.set("owner", owner)
class DatastoreHostMount(DataObject):
    def __init__(self, value='host-100'):
        super(DatastoreHostMount, self).__init__()
        # Use the first registered HostSystem as the mounting host.
        # NOTE(review): indexes dict.keys(), i.e. relies on Python 2
        # returning a list; assumes at least one host exists.
        host_ref = (_db_content["HostSystem"]
                    [_db_content["HostSystem"].keys()[0]].obj)
        host_system = DataObject()
        host_system.ManagedObjectReference = [host_ref]
        host_system.value = value
        self.key = host_system
class ClusterComputeResource(ManagedObject):
    """Cluster class."""

    def __init__(self, name="test_cluster"):
        super(ClusterComputeResource, self).__init__("domain")
        self.set("name", name)
        self.set("host", None)
        self.set("datastore", None)
        self.set("resourcePool", None)

        # Aggregate stats start at zero and are recomputed as hosts
        # are added (see _update_summary).
        summary = DataObject()
        summary.numHosts = 0
        summary.numCpuCores = 0
        summary.numCpuThreads = 0
        summary.numEffectiveHosts = 0
        summary.totalMemory = 0
        summary.effectiveMemory = 0
        summary.effectiveCpu = 10000
        self.set("summary", summary)

    def _add_root_resource_pool(self, r_pool):
        # Attach the cluster's root resource pool reference.
        if r_pool:
            self.set("resourcePool", r_pool)

    def _add_host(self, host_sys):
        # Append a host reference and refresh the aggregate summary.
        if host_sys:
            hosts = self.get("host")
            if hosts is None:
                hosts = DataObject()
                hosts.ManagedObjectReference = []
                self.set("host", hosts)
            hosts.ManagedObjectReference.append(host_sys)
            # Update summary every time a new host is added
            self._update_summary()

    def _add_datastore(self, datastore):
        # Append a datastore reference to the cluster.
        if datastore:
            datastores = self.get("datastore")
            if datastores is None:
                datastores = DataObject()
                datastores.ManagedObjectReference = []
                self.set("datastore", datastores)
            datastores.ManagedObjectReference.append(datastore)

    # Method to update summary of a cluster upon host addition
    def _update_summary(self):
        summary = self.get("summary")
        summary.numHosts = 0
        summary.numCpuCores = 0
        summary.numCpuThreads = 0
        summary.numEffectiveHosts = 0
        summary.totalMemory = 0
        summary.effectiveMemory = 0

        hosts = self.get("host")
        # Compute the aggregate stats
        summary.numHosts = len(hosts.ManagedObjectReference)
        for host_ref in hosts.ManagedObjectReference:
            host_sys = _get_object(host_ref)
            connected = host_sys.get("connected")
            host_summary = host_sys.get("summary")
            summary.numCpuCores += host_summary.hardware.numCpuCores
            summary.numCpuThreads += host_summary.hardware.numCpuThreads
            summary.totalMemory += host_summary.hardware.memorySize
            # Free memory is computed in MiB; only connected hosts count
            # towards the "effective" totals.
            free_memory = (host_summary.hardware.memorySize / (1024 * 1024)
                           - host_summary.quickStats.overallMemoryUsage)
            summary.effectiveMemory += free_memory if connected else 0
            summary.numEffectiveHosts += 1 if connected else 0
        self.set("summary", summary)
class Datastore(ManagedObject):
    """Datastore class."""

    def __init__(self, name="fake-ds", capacity=1024, free=500):
        """Create a fake VMFS datastore.

        capacity and free are expressed in GiB.  BUG FIX: both parameters
        were previously ignored -- the datastore always reported the
        hard-coded equivalents of the defaults (1024 GiB total, 500 GiB
        free), so the defaults keep existing behaviour unchanged.
        """
        super(Datastore, self).__init__("ds")
        self.set("summary.type", "VMFS")
        self.set("summary.name", name)
        self.set("summary.capacity", capacity * 1024 * 1024 * 1024)
        self.set("summary.freeSpace", free * 1024 * 1024 * 1024)
        self.set("summary.accessible", True)
class HostNetworkSystem(ManagedObject):
    """HostNetworkSystem class."""

    def __init__(self, name="networkSystem"):
        super(HostNetworkSystem, self).__init__("ns")
        self.set("name", name)

        # Advertise a single physical NIC, vmnic0.
        pnic_do = DataObject()
        pnic_do.device = "vmnic0"

        net_info_pnic = DataObject()
        net_info_pnic.PhysicalNic = [pnic_do]

        self.set("networkInfo.pnic", net_info_pnic)
class HostStorageSystem(ManagedObject):
    """HostStorageSystem class."""

    def __init__(self):
        # Storage device info is attached later by HostSystem.__init__.
        super(HostStorageSystem, self).__init__("storageSystem")
class HostSystem(ManagedObject):
    """Host System class."""

    def __init__(self, name="ha-host", connected=True):
        super(HostSystem, self).__init__("host")
        self.set("name", name)
        # Make sure the singleton network/storage system fakes exist
        # before wiring them into this host.
        if _db_content.get("HostNetworkSystem", None) is None:
            create_host_network_system()
        if not _get_object_refs('HostStorageSystem'):
            create_host_storage_system()
        host_net_key = _db_content["HostNetworkSystem"].keys()[0]
        host_net_sys = _db_content["HostNetworkSystem"][host_net_key].obj
        self.set("configManager.networkSystem", host_net_sys)
        host_storage_sys_key = _get_object_refs('HostStorageSystem')[0]
        self.set("configManager.storageSystem", host_storage_sys_key)

        # Hardware summary: 2 sockets / 8 cores / 16 threads, 1 GiB RAM.
        summary = DataObject()
        hardware = DataObject()
        hardware.numCpuCores = 8
        hardware.numCpuPkgs = 2
        hardware.numCpuThreads = 16
        hardware.vendor = "Intel"
        hardware.cpuModel = "Intel(R) Xeon(R)"
        hardware.uuid = "host-uuid"
        hardware.memorySize = 1024 * 1024 * 1024
        summary.hardware = hardware

        # 500 MiB of the 1 GiB reported as in use.
        quickstats = DataObject()
        quickstats.overallMemoryUsage = 500
        summary.quickStats = quickstats

        product = DataObject()
        product.name = "VMware ESXi"
        product.version = "5.0.0"
        config = DataObject()
        config.product = product
        summary.config = config

        pnic_do = DataObject()
        pnic_do.device = "vmnic0"
        net_info_pnic = DataObject()
        net_info_pnic.PhysicalNic = [pnic_do]

        self.set("summary", summary)
        self.set("capability.maxHostSupportedVcpus", 600)
        self.set("summary.runtime.inMaintenanceMode", False)
        self.set("runtime.connectionState", "connected")
        self.set("summary.hardware", hardware)

        self.set("config.network.pnic", net_info_pnic)
        self.set("connected", connected)

        if _db_content.get("Network", None) is None:
            create_network()
        net_ref = _db_content["Network"][_db_content["Network"].keys()[0]].obj
        network_do = DataObject()
        network_do.ManagedObjectReference = [net_ref]
        self.set("network", network_do)

        # Default virtual networking: one vSwitch with one port group.
        vswitch_do = DataObject()
        vswitch_do.pnic = ["vmnic0"]
        vswitch_do.name = "vSwitch0"
        vswitch_do.portgroup = ["PortGroup-vmnet0"]

        net_swicth = DataObject()
        net_swicth.HostVirtualSwitch = [vswitch_do]
        self.set("config.network.vswitch", net_swicth)

        host_pg_do = DataObject()
        host_pg_do.key = "PortGroup-vmnet0"

        pg_spec = DataObject()
        pg_spec.vlanId = 0
        pg_spec.name = "vmnet0"

        host_pg_do.spec = pg_spec

        host_pg = DataObject()
        host_pg.HostPortGroup = [host_pg_do]
        self.set("config.network.portgroup", host_pg)

        # Storage: a single fake iSCSI host bus adapter.
        config = DataObject()
        storageDevice = DataObject()

        iscsi_hba = HostInternetScsiHba()
        iscsi_hba.iScsiName = "iscsi-name"
        host_bus_adapter_array = DataObject()
        host_bus_adapter_array.HostHostBusAdapter = [iscsi_hba]
        storageDevice.hostBusAdapter = host_bus_adapter_array
        config.storageDevice = storageDevice
        self.set("config.storageDevice.hostBusAdapter", host_bus_adapter_array)

        # Set the same on the storage system managed object
        host_storage_sys = _get_object(host_storage_sys_key)
        host_storage_sys.set('storageDeviceInfo.hostBusAdapter',
                             host_bus_adapter_array)

    def _add_iscsi_target(self, data):
        # data carries 'target_portal' and 'target_iqn' for the new target;
        # a single default LUN is attached to it.
        default_lun = DataObject()
        default_lun.scsiLun = 'key-vim.host.ScsiDisk-010'
        default_lun.key = 'key-vim.host.ScsiDisk-010'
        default_lun.deviceName = 'fake-device'
        default_lun.uuid = 'fake-uuid'
        scsi_lun_array = DataObject()
        scsi_lun_array.ScsiLun = [default_lun]
        self.set("config.storageDevice.scsiLun", scsi_lun_array)

        transport = DataObject()
        transport.address = [data['target_portal']]
        transport.iScsiName = data['target_iqn']
        default_target = DataObject()
        default_target.lun = [default_lun]
        default_target.transport = transport

        iscsi_adapter = DataObject()
        iscsi_adapter.adapter = 'key-vmhba33'
        iscsi_adapter.transport = transport
        iscsi_adapter.target = [default_target]
        iscsi_topology = DataObject()
        iscsi_topology.adapter = [iscsi_adapter]
        self.set("config.storageDevice.scsiTopology", iscsi_topology)

    def _add_port_group(self, spec):
        """Adds a port group to the host system object in the db."""
        pg_name = spec.name
        vswitch_name = spec.vswitchName
        vlanid = spec.vlanId

        # Each new port group gets its own vSwitch entry backed by vmnic0.
        vswitch_do = DataObject()
        vswitch_do.pnic = ["vmnic0"]
        vswitch_do.name = vswitch_name
        vswitch_do.portgroup = ["PortGroup-%s" % pg_name]

        vswitches = self.get("config.network.vswitch").HostVirtualSwitch
        vswitches.append(vswitch_do)

        host_pg_do = DataObject()
        host_pg_do.key = "PortGroup-%s" % pg_name

        pg_spec = DataObject()
        pg_spec.vlanId = vlanid
        pg_spec.name = pg_name

        host_pg_do.spec = pg_spec
        host_pgrps = self.get("config.network.portgroup").HostPortGroup
        host_pgrps.append(host_pg_do)
class Datacenter(ManagedObject):
    """Datacenter class."""

    def __init__(self, name="ha-datacenter", ds_ref=None):
        super(Datacenter, self).__init__("dc")
        self.set("name", name)
        self.set("vmFolder", "vm_folder_ref")
        # Lazily create the shared fake network and wire it in.
        if _db_content.get("Network", None) is None:
            create_network()
        net_ref = _db_content["Network"][_db_content["Network"].keys()[0]].obj
        network_do = DataObject()
        network_do.ManagedObjectReference = [net_ref]
        self.set("network", network_do)
        # Optionally attach a single datastore reference.
        if ds_ref:
            datastore = DataObject()
            datastore.ManagedObjectReference = [ds_ref]
        else:
            datastore = None
        self.set("datastore", datastore)
class Task(ManagedObject):
    """Fake of a vCenter Task managed object."""

    def __init__(self, task_name, state="running", result=None):
        super(Task, self).__init__("Task")
        # A task exposes its name, state and eventual result via "info".
        task_info = DataObject()
        task_info.name = task_name
        task_info.state = state
        task_info.result = result
        self.set("info", task_info)
def create_host_network_system():
    """Insert a fresh HostNetworkSystem into the fake db."""
    _create_object("HostNetworkSystem", HostNetworkSystem())
def create_host_storage_system():
    """Insert a fresh HostStorageSystem into the fake db."""
    _create_object("HostStorageSystem", HostStorageSystem())
def create_host():
    """Insert a fresh HostSystem into the fake db."""
    _create_object('HostSystem', HostSystem())
def create_datacenter(name, ds_ref=None):
    """Insert a Datacenter, optionally wired to a datastore reference."""
    _create_object('Datacenter', Datacenter(name, ds_ref))
def create_datastore(name, capacity, free):
    """Registers a fake Datastore and returns its managed object ref."""
    store = Datastore(name, capacity, free)
    _create_object('Datastore', store)
    return store.obj
def create_res_pool():
    """Registers a fake ResourcePool object in the db."""
    _create_object('ResourcePool', ResourcePool())
def create_network():
    """Registers a fake Network object in the db."""
    _create_object('Network', Network())
def create_cluster(name, ds_ref):
    """Registers a fake cluster wired to the first two hosts, the given
    datastore and the first resource pool in the db."""
    cluster = ClusterComputeResource(name=name)
    for host_index in (0, 1):
        cluster._add_host(_get_object_refs("HostSystem")[host_index])
    cluster._add_datastore(ds_ref)
    cluster._add_root_resource_pool(_get_object_refs("ResourcePool")[0])
    _create_object('ClusterComputeResource', cluster)
def create_task(task_name, state="running", result=None):
    """Registers a fake Task in the db and returns the Task object."""
    new_task = Task(task_name, state, result)
    _create_object("Task", new_task)
    return new_task
def _add_file(file_path):
    """Adds a file reference to the db (the flat list under "files")."""
    _db_content["files"].append(file_path)
def _remove_file(file_path):
    """Removes a file reference from the db.

    A path containing ``.vmdk`` is treated as a single file (raising
    FileNotFound if absent); any other path is treated as a folder and
    every entry containing that path is removed.

    Raises NoFilesFound when no files have been registered at all.
    """
    if _db_content.get("files") is None:
        raise exception.NoFilesFound()
    # Check if the remove is for a single file object or for a folder.
    if file_path.find(".vmdk") != -1:
        if file_path not in _db_content.get("files"):
            raise exception.FileNotFound(file_path=file_path)
        _db_content.get("files").remove(file_path)
    else:
        # Removes the files in the folder and the folder too from the db.
        # Iterate over a snapshot: the previous code removed entries from
        # the same list it was iterating, which silently skips elements.
        lst_files = _db_content.get("files")
        for file in list(lst_files):
            if file.find(file_path) != -1:
                lst_files.remove(file)
def fake_plug_vifs(*args, **kwargs):
    """No-op stand-in for plugging vifs; accepts and ignores everything."""
    return None
def fake_get_network(*args, **kwargs):
    """Fake get network: always reports a single 'fake' network type."""
    return dict(type='fake')
def get_file(file_path):
    """Returns whether *file_path* is registered in the fake db."""
    registered = _db_content.get("files")
    if registered is None:
        raise exception.NoFilesFound()
    return file_path in registered
def fake_fetch_image(context, image, instance, **kwargs):
    """Fakes an image fetch by recording a "[<datastore>] <path>" entry."""
    ds_file_path = "[%s] %s" % (kwargs.get("datastore_name"),
                                kwargs.get("file_path"))
    _add_file(ds_file_path)
def fake_upload_image(context, image, instance, **kwargs):
    """Fakes the upload of an image; there is nothing to record."""
    return None
def fake_get_vmdk_size_and_properties(context, image_id, instance):
    """Fakes the file size and properties fetch for the image file."""
    properties = {"vmware_ostype": "otherGuest",
                  "vmware_adaptertype": "lsiLogic"}
    return _FAKE_FILE_SIZE, properties
def _get_vm_mdo(vm_ref):
    """Returns the VirtualMachine managed data object for *vm_ref*,
    raising NotFound when no VMs exist or the ref is unknown."""
    vms = _db_content.get("VirtualMachine", None)
    if vms is None:
        raise exception.NotFound(_("There is no VM registered"))
    if vm_ref not in vms:
        raise exception.NotFound(_("Virtual Machine with ref %s is not "
                                   "there") % vm_ref)
    return vms[vm_ref]
class FakeFactory(object):
    """Fake factory standing in for the suds client's type factory."""

    def create(self, obj_name):
        """Returns a fresh namespace object for *obj_name*."""
        namespace_obj = DataObject(obj_name)
        return namespace_obj
class FakeVim(object):
    """Fake VIM class.

    Emulates the suds-based VIM client: attribute access on the instance
    (``vim.CreateVM_Task`` etc.) is dispatched through ``__getattr__`` to
    the private ``_*`` helpers, which operate on the module-level
    ``_db_content`` fake database and return fake Task objects.
    """

    def __init__(self, protocol="https", host="localhost", trace=None):
        """
        Initializes the suds client object, sets the service content
        contents and the cookies for the session.
        """
        self._session = None
        self.client = DataObject()
        self.client.factory = FakeFactory()

        # Fake transport/options so code poking at client.options works.
        transport = DataObject()
        transport.cookiejar = "Fake-CookieJar"
        options = DataObject()
        options.transport = transport

        self.client.options = options

        # Service content with string placeholders for the manager refs.
        service_content = self.client.factory.create('ns0:ServiceContent')
        service_content.propertyCollector = "PropCollector"
        service_content.virtualDiskManager = "VirtualDiskManager"
        service_content.fileManager = "FileManager"
        service_content.rootFolder = "RootFolder"
        service_content.sessionManager = "SessionManager"

        about_info = DataObject()
        about_info.name = "VMware vCenter Server"
        about_info.version = "5.1.0"
        service_content.about = about_info
        self._service_content = service_content

    def get_service_content(self):
        # Accessor for the fake ServiceContent built in __init__.
        return self._service_content

    def __repr__(self):
        return "Fake VIM Object"

    def __str__(self):
        return "Fake VIM Object"

    def _login(self):
        """Logs in and sets the session object in the db."""
        self._session = uuidutils.generate_uuid()
        session = DataObject()
        session.key = self._session
        session.userName = 'sessionUserName'
        _db_content['session'][self._session] = session
        return session

    def _logout(self):
        """Logs out and remove the session object ref from the db."""
        s = self._session
        self._session = None
        if s not in _db_content['session']:
            raise exception.NovaException(
                _("Logging out a session that is invalid or already logged "
                  "out: %s") % s)
        del _db_content['session'][s]

    def _terminate_session(self, *args, **kwargs):
        """Terminates a session."""
        s = kwargs.get("sessionId")[0]
        if s not in _db_content['session']:
            return
        del _db_content['session'][s]

    def _check_session(self):
        """Checks if the session is active; raises a VIM fault if not."""
        if (self._session is None or self._session not in
                _db_content['session']):
            LOG.debug(_("Session is faulty"))
            raise error_util.VimFaultException(
                [error_util.FAULT_NOT_AUTHENTICATED],
                _("Session Invalid"))

    def _session_is_active(self, *args, **kwargs):
        # True when _check_session passes, False on any failure.
        try:
            self._check_session()
            return True
        except Exception:
            return False

    def _create_vm(self, method, *args, **kwargs):
        """Creates and registers a VM object with the Host System."""
        config_spec = kwargs.get("config")
        # Uses the first datastore/host in the db as the VM's placement.
        ds = _db_content["Datastore"].keys()[0]
        host = _db_content["HostSystem"].keys()[0]
        vm_dict = {"name": config_spec.name,
                   "ds": [ds],
                   "runtime_host": host,
                   "powerstate": "poweredOff",
                   "vmPathName": config_spec.files.vmPathName,
                   "numCpu": config_spec.numCPUs,
                   "mem": config_spec.memoryMB,
                   "extra_config": config_spec.extraConfig,
                   "virtual_device": config_spec.deviceChange}
        virtual_machine = VirtualMachine(**vm_dict)
        _create_object("VirtualMachine", virtual_machine)
        task_mdo = create_task(method, "success")
        return task_mdo.obj

    def _reconfig_vm(self, method, *args, **kwargs):
        """Reconfigures a VM and sets the properties supplied."""
        vm_ref = args[0]
        vm_mdo = _get_vm_mdo(vm_ref)
        vm_mdo.reconfig(self.client.factory, kwargs.get("spec"))
        task_mdo = create_task(method, "success")
        return task_mdo.obj

    def _create_copy_disk(self, method, vmdk_file_path):
        """Creates/copies a vmdk file object in the datastore."""
        # We need to add/create both .vmdk and -flat.vmdk files.
        flat_vmdk_file_path = vmdk_file_path.replace(".vmdk", "-flat.vmdk")
        _add_file(vmdk_file_path)
        _add_file(flat_vmdk_file_path)
        task_mdo = create_task(method, "success")
        return task_mdo.obj

    def _extend_disk(self, method, size):
        """Extend disk size when create a instance (no-op; just a task)."""
        task_mdo = create_task(method, "success")
        return task_mdo.obj

    def _snapshot_vm(self, method):
        """Snapshots a VM. Here we do nothing for faking sake."""
        task_mdo = create_task(method, "success")
        return task_mdo.obj

    def _delete_disk(self, method, *args, **kwargs):
        """Deletes .vmdk and -flat.vmdk files corresponding to the VM."""
        vmdk_file_path = kwargs.get("name")
        flat_vmdk_file_path = vmdk_file_path.replace(".vmdk", "-flat.vmdk")
        _remove_file(vmdk_file_path)
        _remove_file(flat_vmdk_file_path)
        task_mdo = create_task(method, "success")
        return task_mdo.obj

    def _delete_file(self, method, *args, **kwargs):
        """Deletes a file from the datastore."""
        _remove_file(kwargs.get("name"))
        task_mdo = create_task(method, "success")
        return task_mdo.obj

    def _just_return(self):
        """Fakes a return."""
        return

    def _just_return_task(self, method):
        """Fakes a task return."""
        task_mdo = create_task(method, "success")
        return task_mdo.obj

    def _clone_vm(self, method, *args, **kwargs):
        """Fakes a VM clone."""
        return self._just_return_task(method)

    def _unregister_vm(self, method, *args, **kwargs):
        """Unregisters a VM from the Host System."""
        vm_ref = args[0]
        # _get_vm_mdo raises NotFound for unknown refs before deletion.
        _get_vm_mdo(vm_ref)
        del _db_content["VirtualMachine"][vm_ref]

    def _search_ds(self, method, *args, **kwargs):
        """Searches the datastore for a file; success task if found,
        error task otherwise."""
        ds_path = kwargs.get("datastorePath")
        if _db_content.get("files", None) is None:
            raise exception.NoFilesFound()
        for file in _db_content.get("files"):
            if file.find(ds_path) != -1:
                result = DataObject()
                result.path = ds_path
                task_mdo = create_task(method, state="success",
                                       result=result)
                return task_mdo.obj
        task_mdo = create_task(method, "error")
        return task_mdo.obj

    def _make_dir(self, method, *args, **kwargs):
        """Creates a directory in the datastore."""
        ds_path = kwargs.get("name")
        if _db_content.get("files", None) is None:
            raise exception.NoFilesFound()
        _db_content["files"].append(ds_path)

    def _set_power_state(self, method, vm_ref, pwr_state="poweredOn"):
        """Sets power state for the VM."""
        if _db_content.get("VirtualMachine", None) is None:
            raise exception.NotFound(_("No Virtual Machine has been "
                                       "registered yet"))
        if vm_ref not in _db_content.get("VirtualMachine"):
            raise exception.NotFound(_("Virtual Machine with ref %s is not "
                                       "there") % vm_ref)
        vm_mdo = _db_content.get("VirtualMachine").get(vm_ref)
        vm_mdo.set("runtime.powerState", pwr_state)
        task_mdo = create_task(method, "success")
        return task_mdo.obj

    def _retrieve_properties_continue(self, method, *args, **kwargs):
        """Continues the retrieve (always an empty result set)."""
        return FakeRetrieveResult()

    def _retrieve_properties_cancel(self, method, *args, **kwargs):
        """Cancels the retrieve."""
        return None

    def _retrieve_properties(self, method, *args, **kwargs):
        """Retrieves properties based on the type."""
        spec_set = kwargs.get("specSet")[0]
        type = spec_set.propSet[0].type
        properties = spec_set.propSet[0].pathSet
        if not isinstance(properties, list):
            properties = properties.split()
        objs = spec_set.objectSet
        lst_ret_objs = FakeRetrieveResult()
        for obj in objs:
            try:
                obj_ref = obj.obj
                # This means that we are doing a search for the managed
                # data objects of the type in the inventory
                if obj_ref == "RootFolder":
                    mdo_refs = _db_content[type]
                else:
                    mdo_refs = [obj_ref]
                for mdo_ref in mdo_refs:
                    mdo = _db_content[type][mdo_ref]
                    prop_list = []
                    for prop_name in properties:
                        prop = Prop(prop_name, mdo.get(prop_name))
                        prop_list.append(prop)
                    obj_content = ObjectContent(mdo.obj, prop_list)
                    lst_ret_objs.add_object(obj_content)
            except Exception as exc:
                # Skip objects that cannot be resolved; log and continue.
                LOG.exception(exc)
                continue
        return lst_ret_objs

    def _add_port_group(self, method, *args, **kwargs):
        """Adds a port group to the host system."""
        _host_sk = _db_content["HostSystem"].keys()[0]
        host_mdo = _db_content["HostSystem"][_host_sk]
        host_mdo._add_port_group(kwargs.get("portgrp"))

    def __getattr__(self, attr_name):
        # Every API call except Login requires a valid session first.
        if attr_name != "Login":
            self._check_session()
        if attr_name == "Login":
            return lambda *args, **kwargs: self._login()
        elif attr_name == "Logout":
            # NOTE(review): unlike the other branches, this performs the
            # logout on *attribute access* and returns None rather than a
            # callable -- confirm callers rely on this before changing it.
            self._logout()
        elif attr_name == "SessionIsActive":
            return lambda *args, **kwargs: self._session_is_active(
                *args, **kwargs)
        elif attr_name == "TerminateSession":
            return lambda *args, **kwargs: self._terminate_session(
                *args, **kwargs)
        elif attr_name == "CreateVM_Task":
            return lambda *args, **kwargs: self._create_vm(attr_name,
                                                           *args, **kwargs)
        elif attr_name == "ReconfigVM_Task":
            return lambda *args, **kwargs: self._reconfig_vm(attr_name,
                                                             *args, **kwargs)
        elif attr_name == "CreateVirtualDisk_Task":
            return lambda *args, **kwargs: self._create_copy_disk(attr_name,
                                                                  kwargs.get("name"))
        elif attr_name == "DeleteDatastoreFile_Task":
            return lambda *args, **kwargs: self._delete_file(attr_name,
                                                             *args, **kwargs)
        elif attr_name == "PowerOnVM_Task":
            return lambda *args, **kwargs: self._set_power_state(attr_name,
                                                                 args[0], "poweredOn")
        elif attr_name == "PowerOffVM_Task":
            return lambda *args, **kwargs: self._set_power_state(attr_name,
                                                                 args[0], "poweredOff")
        elif attr_name == "RebootGuest":
            return lambda *args, **kwargs: self._just_return()
        elif attr_name == "ResetVM_Task":
            return lambda *args, **kwargs: self._set_power_state(attr_name,
                                                                 args[0], "poweredOn")
        elif attr_name == "SuspendVM_Task":
            return lambda *args, **kwargs: self._set_power_state(attr_name,
                                                                 args[0], "suspended")
        elif attr_name == "CreateSnapshot_Task":
            return lambda *args, **kwargs: self._snapshot_vm(attr_name)
        elif attr_name == "CopyVirtualDisk_Task":
            return lambda *args, **kwargs: self._create_copy_disk(attr_name,
                                                                  kwargs.get("destName"))
        elif attr_name == "ExtendVirtualDisk_Task":
            return lambda *args, **kwargs: self._extend_disk(attr_name,
                                                             kwargs.get("size"))
        elif attr_name == "Destroy_Task":
            return lambda *args, **kwargs: self._unregister_vm(attr_name,
                                                               *args, **kwargs)
        elif attr_name == "UnregisterVM":
            return lambda *args, **kwargs: self._unregister_vm(attr_name,
                                                               *args, **kwargs)
        elif attr_name == "CloneVM_Task":
            return lambda *args, **kwargs: self._clone_vm(attr_name,
                                                          *args, **kwargs)
        elif attr_name == "Rename_Task":
            return lambda *args, **kwargs: self._just_return_task(attr_name)
        elif attr_name == "SearchDatastore_Task":
            return lambda *args, **kwargs: self._search_ds(attr_name,
                                                           *args, **kwargs)
        elif attr_name == "MakeDirectory":
            return lambda *args, **kwargs: self._make_dir(attr_name,
                                                          *args, **kwargs)
        elif attr_name == "RetrievePropertiesEx":
            return lambda *args, **kwargs: self._retrieve_properties(
                attr_name, *args, **kwargs)
        elif attr_name == "ContinueRetrievePropertiesEx":
            return lambda *args, **kwargs: self._retrieve_properties_continue(
                attr_name, *args, **kwargs)
        elif attr_name == "CancelRetrievePropertiesEx":
            return lambda *args, **kwargs: self._retrieve_properties_cancel(
                attr_name, *args, **kwargs)
        elif attr_name == "AcquireCloneTicket":
            return lambda *args, **kwargs: self._just_return()
        elif attr_name == "AddPortGroup":
            return lambda *args, **kwargs: self._add_port_group(attr_name,
                                                                *args, **kwargs)
        elif attr_name == "RebootHost_Task":
            return lambda *args, **kwargs: self._just_return_task(attr_name)
        elif attr_name == "ShutdownHost_Task":
            return lambda *args, **kwargs: self._just_return_task(attr_name)
        elif attr_name == "PowerDownHostToStandBy_Task":
            return lambda *args, **kwargs: self._just_return_task(attr_name)
        elif attr_name == "PowerUpHostFromStandBy_Task":
            return lambda *args, **kwargs: self._just_return_task(attr_name)
        elif attr_name == "EnterMaintenanceMode_Task":
            return lambda *args, **kwargs: self._just_return_task(attr_name)
        elif attr_name == "ExitMaintenanceMode_Task":
            return lambda *args, **kwargs: self._just_return_task(attr_name)
        # NOTE: any other attribute name falls through and yields None.
# This file is part of Viper - https://github.com/viper-framework/viper
# See the file 'LICENSE' for copying permission.
import os
import json
import tempfile
from viper.common.abstracts import Module
from viper.common.utils import get_type
from viper.core.session import __sessions__
from pdftools.pdfid import PDFiD, PDFiD2JSON
from peepdf.PDFCore import PDFParser
class PDF(Module):
    """Viper module: parse and analyze the PDF document in the current
    session via ``pdf id`` (PDFiD summary) and ``pdf streams``
    (stream extraction through peepdf)."""

    cmd = 'pdf'
    description = 'Parse and analyze PDF documents'
    authors = ['Kevin Breen', 'nex']

    def __init__(self):
        super(PDF, self).__init__()
        # Two subcommands: "id" and "streams" (with dump/open options).
        subparsers = self.parser.add_subparsers(dest='subname')
        subparsers.add_parser('id', help='Show general information on the PDF')
        parser_streams = subparsers.add_parser('streams', help='Extract stream objects from PDF')
        parser_streams.add_argument('-d', '--dump', help='Destination directory to store resource files in')
        parser_streams.add_argument('-o', '--open', help='Open a session on the specified resource')

    def pdf_id(self):
        """Runs PDFiD over the current file and prints general info,
        dates and keyword/stream counts as tables."""
        # Run the parser - Returns an XML DOM Instance.
        pdf_data = PDFiD(__sessions__.current.file.path, False, True)
        # This converts to string.
        # pdf_string = PDFiD2String(pdf_data, True)
        # This converts to JSON.
        pdf_json = PDFiD2JSON(pdf_data, True)
        # Convert from string.
        pdf = json.loads(pdf_json)[0]

        # Get general info and format.
        info = [
            ['PDF Header', pdf['pdfid']['header']],
            ['Total Entropy', pdf['pdfid']['totalEntropy']],
            ['Entropy In Streams', pdf['pdfid']['streamEntropy']],
            ['Entropy Out Streams', pdf['pdfid']['nonStreamEntropy']],
            ['Count %% EOF', pdf['pdfid']['countEof']],
            ['Data After EOF', pdf['pdfid']['countChatAfterLastEof']]
        ]

        # If there are date sections lets get them as well.
        dates = pdf['pdfid']['dates']['date']
        for date in dates:
            info.append([date['name'], date['value']])

        # Get streams, counts and format.
        streams = []
        for stream in pdf['pdfid']['keywords']['keyword']:
            streams.append([stream['name'], stream['count']])

        self.log('info', "General Info:")
        self.log('table', dict(header=['Desc', 'Value'], rows=info))
        self.log('info', "Streams & Count:")
        self.log('table', dict(header=['Name', 'Count'], rows=streams))

    def streams(self):
        """Lists every stream object in the PDF; optionally dumps each
        stream to disk (-d) and/or opens one as a new session (-o)."""

        def get_streams():
            # This function is brutally ripped from Brandon Dixon's swf_mastah.py.

            # Initialize peepdf parser.
            parser = PDFParser()
            # Parse currently opened PDF document.
            ret, pdf = parser.parse(__sessions__.current.file.path, True, False)
            # Generate statistics.

            results = []
            objects = []
            count = 0
            object_counter = 1

            for i in range(len(pdf.body)):
                body = pdf.body[count]
                objects = body.objects

                for index in objects:
                    oid = objects[index].id
                    offset = objects[index].offset
                    size = objects[index].size
                    details = objects[index].object

                    if details.type == 'stream':
                        decoded_stream = details.decodedStream

                        result = [
                            object_counter,
                            oid,
                            offset,
                            size,
                            get_type(decoded_stream)[:100]
                        ]

                        # If the stream needs to be dumped or opened, we do it
                        # and expand the results with the path to the stream dump.
                        if arg_open or arg_dump:
                            # If was instructed to dump, we already have a base folder.
                            if arg_dump:
                                folder = arg_dump
                            # Otherwise we juts generate a temporary one.
                            else:
                                folder = tempfile.gettempdir()

                            # Confirm the dump path
                            if not os.path.exists(folder):
                                try:
                                    os.makedirs(folder)
                                except Exception as e:
                                    self.log('error', "Unable to create directory at {0}: {1}".format(folder, e))
                                    return results
                            else:
                                if not os.path.isdir(folder):
                                    self.log('error', "You need to specify a folder not a file")
                                    return results

                            # Dump stream to this path.
                            # TODO: sometimes there appear to be multiple streams
                            # with the same object ID. Is that even possible?
                            # It will cause conflicts.
                            dump_path = '{0}/{1}_{2}_pdf_stream.bin'.format(folder, __sessions__.current.file.md5, object_counter)

                            with open(dump_path, 'wb') as handle:
                                handle.write(decoded_stream.strip())

                            # Add dump path to the stream attributes.
                            result.append(dump_path)

                        # Update list of streams.
                        results.append(result)

                        object_counter += 1

                count += 1

            return results

        arg_open = self.args.open
        arg_dump = self.args.dump

        # Retrieve list of streams.
        streams = get_streams()

        # Show list of streams.
        header = ['#', 'ID', 'Offset', 'Size', 'Type']
        if arg_dump or arg_open:
            header.append('Dumped To')
        self.log('table', dict(header=header, rows=streams))

        # If the user requested to open a specific stream, we open a new
        # session on it.
        if arg_open:
            for stream in streams:
                if int(arg_open) == int(stream[0]):
                    # Index 5 (the dump path) exists because arg_open forces
                    # dumping in get_streams() above.
                    __sessions__.new(stream[5])
                    return

    def run(self):
        """Entry point: validates the session/file type and dispatches to
        the selected subcommand."""
        super(PDF, self).run()
        if self.args is None:
            return

        if not __sessions__.is_set():
            self.log('error', "No session opened")
            return False

        if 'PDF' not in __sessions__.current.file.type:
            self.log('error', "The opened file doesn't appear to be a PDF document")
            return

        if self.args.subname == 'id':
            self.pdf_id()
        elif self.args.subname == 'streams':
            self.streams()
        else:
            self.log('error', 'At least one of the parameters is required')
            self.usage()
| postfix/viper-1 | modules/pdf.py | Python | bsd-3-clause | 7,025 |
# -*- coding: utf-8 -*-
from positioning.app import App
def run(config, engine_id):
    """Builds the positioning App for *engine_id*, starts its engine and
    enters the main loop."""
    application = App(config, engine_id)
    application.start_engine()
    application.run()
| maveron58/indiana | positioning/runner.py | Python | mit | 157 |
##########################################
# File: util.py
# Author: Wang Zixu
# Co-Author: CHEN Zhihan
# Last modified: Jan 17, 2017
##########################################
import copy
from PIL import Image, ImageDraw
# Debug echo flag.
DEBUG = False
# True represents light pixels and False represents dark pixels in PIL.
LIGHT = True
DARK = False
def genImage(bitmap, width, filename):
    """Render *bitmap* as a black-and-white square image of side *width*
    pixels and save it to *filename*.

    Each bitmap cell is scaled to a square of ``width // len(bitmap)``
    pixels; any leftover border beyond the scaled grid stays white.
    """
    img = Image.new('1', (width, width), 'white')
    pen = ImageDraw.Draw(img)
    side = len(bitmap)
    pwidth = width // side
    for y in range(width):
        row = y // pwidth
        if row >= side:
            continue
        for x in range(width):
            col = x // pwidth
            if col < side:
                pen.point((x, y), fill=bitmap[row][col])
    img.save(filename)
def transpose(mat):
    """Return the transpose of the 2-D list *mat* as a new matrix."""
    rows = len(mat)
    cols = len(mat[0])
    return [[mat[r][c] for r in range(rows)] for c in range(cols)]
def copyFrom(src, dst, top, left):
    """Return a copy of *dst* with *src* pasted so that ``src[0][0]``
    lands at row *top*, column *left*.  Neither input is modified."""
    res = copy.deepcopy(dst)
    for row_offset, row in enumerate(src):
        for col_offset, value in enumerate(row):
            res[top + row_offset][left + col_offset] = value
    return res
def getPart(matrix, top, left, width=2, height=4):
    """Return the *height* x *width* sub-matrix of *matrix* whose
    top-left corner is at row *top*, column *left*."""
    return [[matrix[top + r][left + c] for c in range(width)]
            for r in range(height)]
def logicAnd(mat1, mat2):
    """Element-wise AND in QR pixel terms (DARK=False, LIGHT=True):
    a cell is dark only where both inputs are dark, i.e. boolean OR."""
    return [[mat1[r][c] or mat2[r][c] for c in range(len(mat1[0]))]
            for r in range(len(mat1))]
def logicOr(mat1, mat2):
    """Element-wise OR in QR pixel terms (DARK=False, LIGHT=True):
    a cell is dark where either input is dark, i.e. boolean AND."""
    return [[mat1[r][c] and mat2[r][c] for c in range(len(mat1[0]))]
            for r in range(len(mat1))]
def logicNot(mat1):
    """Element-wise negation: dark becomes light and vice versa."""
    return [[not mat1[r][c] for c in range(len(mat1[0]))]
            for r in range(len(mat1))]
def logicXor(mat1, mat2):
    """Element-wise XOR in QR pixel terms (DARK=False, LIGHT=True):
    a cell is dark where the inputs differ, i.e. boolean equality."""
    return [[mat1[r][c] == mat2[r][c] for c in range(len(mat1[0]))]
            for r in range(len(mat1))]
def _timSeq(len, vertical=False):
'''
Generate a horizontal, unless specified vertical
timing sequence with alternating dark and light
pixels with length len.
'''
res = [[i % 2 for i in range(len)]]
if vertical:
res = transpose(res)
return res
# Initialize pre-defined tool matrices.
# Finder pattern: 7x7 concentric squares (dark ring, light ring, dark 3x3 core).
_finder = copyFrom(
    copyFrom(
        [[DARK for i in range(3)] for j in range(3)],
        [[LIGHT for i in range(5)] for j in range(5)],
        1, 1
    ),
    [[DARK for i in range(7)] for j in range(7)], 1, 1
)

# Alignment pattern (5x5 concentric squares). Not used in version 1.
_align = copyFrom(
    copyFrom(
        [[DARK]],
        [[LIGHT for i in range(3)] for j in range(3)], 1, 1
    ),
    [[DARK for i in range(5)] for j in range(5)], 1, 1
)

# Version 1 QR code template (21x21) with three finder patterns, the
# horizontal/vertical timing sequences and the fixed dark module.
ver1Temp = [[LIGHT for i in range(21)] for j in range(21)]
ver1Temp = copyFrom(_finder, ver1Temp, 0, 0)
ver1Temp = copyFrom(_finder, ver1Temp, 14, 0)
ver1Temp = copyFrom(_finder, ver1Temp, 0, 14)
ver1Temp = copyFrom(_timSeq(5), ver1Temp, 6, 8)
ver1Temp = copyFrom(_timSeq(5, vertical=True), ver1Temp, 8, 6)
ver1Temp = copyFrom([[DARK]], ver1Temp, 13, 8)

# Data area mask (DARK = usable for data) to avoid applying masks to the
# functional areas: finder corners, format info strips and timing lines.
_dataAreaMask = [[DARK for i in range(21)] for j in range(21)]
_dataAreaMask = copyFrom([[LIGHT for i in range(9)] for j in range(9)],
                         _dataAreaMask, 0, 0)
_dataAreaMask = copyFrom([[LIGHT for i in range(9)] for j in range(8)],
                         _dataAreaMask, 13, 0)
_dataAreaMask = copyFrom([[LIGHT for i in range(8)] for j in range(9)],
                         _dataAreaMask, 0, 13)
_dataAreaMask = copyFrom([[LIGHT for i in range(4)]], _dataAreaMask, 6, 9)
_dataAreaMask = copyFrom([[LIGHT] for i in range(4)], _dataAreaMask, 9, 6)
# Data masks defined in QR standard.
def _maskIsDark(index, i, j):
if index == 0:
policy = (i+j) % 2
elif index == 1:
policy = j % 2
elif index == 2:
policy = i % 3
elif index == 3:
policy = (i+j) % 3
elif index == 4:
policy = (j//2 + i//3) % 2
elif index == 5:
policy = (i*j) % 2+(i*j) % 3
elif index == 6:
policy = ((i*j) % 2+(i*j) % 3) % 2
elif index == 7:
policy = ((i+j) % 2+(i*j) % 3) % 2
return policy == 0
# All eight 21x21 mask matrices, built from the formulas above.
_maskList = [
    [
        [
            DARK if _maskIsDark(c, i, j)
            else LIGHT for i in range(21)
        ] for j in range(21)
    ] for c in range(8)
]
# Restrict every mask to the data area so functional patterns stay intact.
dataMasks = [logicAnd(_dataAreaMask, mask) for mask in _maskList]
# Generate images for predefined patterns for debug use.
# Only runs when the module-level DEBUG flag is enabled.
if DEBUG:
    genImage(_finder, 70, 'finder.jpg')
    genImage(_align, 50, 'alignment.jpg')
    genImage(ver1Temp, 210, 'version1.jpg')
    genImage(_dataAreaMask, 210, 'dataAreaMask.jpg')
    for i in range(8):
        genImage(dataMasks[i], 210, 'mask'+str(i)+'.jpg')
| LaytonW/qrcode | lib/util.py | Python | mit | 6,276 |
import sys
class ExceptionHook:
    """sys.excepthook replacement that drops into IPython's formatted
    traceback (and pdb) on any uncaught exception."""

    # Lazily-built ultratb.FormattedTB instance, shared by all calls.
    instance = None

    def __call__(self, *args, **kwargs):
        if self.instance is None:
            # Import deferred so IPython is only required when a crash occurs.
            from IPython.core import ultratb
            self.instance = ultratb.FormattedTB(mode='Plain',
                                                color_scheme='Linux', call_pdb=1)
        return self.instance(*args, **kwargs)

# Install the hook for the whole interpreter.
sys.excepthook = ExceptionHook()
| jasonleaster/LeetCode | crash_python.py | Python | gpl-2.0 | 367 |
from glob import glob
import h5py as hdf
from numpy import where
# Python 2 script: scans every *Oii.hdf5 file and writes each dataset's
# RA/DEC bounding box to buzzard_truth.txt.
files = glob('*Oii.hdf5')
outFile = open('buzzard_truth.txt', 'w')
for f in files:
    print f
    # NOTE: "f" is rebound from the filename to the open HDF5 file here.
    with hdf.File(f, 'r') as f:
        dset = f[f.keys()[0]]
        ra = dset['RA']
        dec = dset['DEC']
        # Fields straddling RA = 0 wrap around 360; split them so the
        # reported max/min come from the two sides of the wrap.
        if ra.max() > 300. and ra.min() < 100:
            x = where(ra < 200)
            y = where(ra > 200)
            outFile.writelines('%s %s %s %s %s\n' % (f.keys()[0], ra[x].max(),
                               ra[y].min(), dec.max(), dec.min()))
        else:
            outFile.writelines('%s %s %s %s %s\n' % (f.keys()[0], ra.max(),
                               ra.min(), dec.max(), dec.min()))
        # NOTE(review): redundant -- the "with" block already closes the file.
        f.close()
outFile.close()
| boada/desCluster | data/buzzard_v1.0/allbands/truth/find_RADEX.py | Python | mit | 692 |
import sys
from services.spawn import MobileTemplate
from services.spawn import WeaponTemplate
from resources.datatables import WeaponType
from resources.datatables import Difficulty
from resources.datatables import Options
from java.util import Vector
def addTemplate(core):
    """Registers the 'bone_gnasher' mobile template with the spawn service.

    Jython script: builds a MobileTemplate (stats, appearance, weapon and
    attacks) and hands it to core.spawnService.
    """
    mobileTemplate = MobileTemplate()
    # NOTE(review): creature name 'arachne_pygmy' and social group
    # 'krayt cult' do not match the registered id 'bone_gnasher' --
    # looks like a copy-paste from another template; confirm intended.
    mobileTemplate.setCreatureName('arachne_pygmy')
    mobileTemplate.setLevel(4)
    mobileTemplate.setDifficulty(Difficulty.NORMAL)
    mobileTemplate.setMinSpawnDistance(4)
    mobileTemplate.setMaxSpawnDistance(8)
    mobileTemplate.setDeathblow(False)
    mobileTemplate.setScale(0.5)
    mobileTemplate.setSocialGroup("krayt cult")
    mobileTemplate.setAssistRange(6)
    mobileTemplate.setStalker(False)
    mobileTemplate.setOptionsBitmask(Options.ATTACKABLE)

    # Appearance template(s).
    templates = Vector()
    templates.add('object/mobile/shared_angler.iff')
    mobileTemplate.setTemplates(templates)

    # Default unarmed melee weapon dealing kinetic damage.
    weaponTemplates = Vector()
    weapontemplate = WeaponTemplate('object/weapon/melee/unarmed/shared_unarmed_default.iff', WeaponType.UNARMED, 1.0, 6, 'kinetic')
    weaponTemplates.add(weapontemplate)
    mobileTemplate.setWeaponTemplateVector(weaponTemplates)

    # Special attacks available to this creature.
    attacks = Vector()
    attacks.add('bm_damage_poison_4')
    attacks.add('bm_defensive_4')
    attacks.add('bm_puncture_2')
    mobileTemplate.setDefaultAttack('creatureMeleeAttack')
    mobileTemplate.setAttacks(attacks)

    core.spawnService.addMobileTemplate('bone_gnasher', mobileTemplate)
    return
#!/d/Bin/Python/python.exe
# -*- coding: utf-8 -*-
#
#
# $Date: 2005/04/02 07:29:46 $, by $Author: ivan $, $Revision: 1.1 $
#
from testSPARQL import ns_rdf
from testSPARQL import ns_rdfs
from testSPARQL import ns_dc
from testSPARQL import ns_foaf
from testSPARQL import ns_ns
from testSPARQL import ns_book
from rdflib.Literal import Literal
from rdflib.sparql.sparqlOperators import lt, ge
import datetime
from rdflib.sparql.graphPattern import GraphPattern
# Test fixture for an rdflib SPARQL OPTIONAL-with-constraint query.
# NOTE(review): Python 2 only -- the "01" int literals below are invalid
# syntax on Python 3.
thresholdDate = datetime.date(2005,01,01)

# Two books, only one priced under the constraint threshold.
rdfData ="""<?xml version="1.0" encoding="UTF-8"?>
<rdf:RDF
xmlns:rdfs="http://www.w3.org/2000/01/rdf-schema#"
xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
xmlns:dc="http://purl.org/dc/elements/1.1/"
xmlns:foaf="http://xmlns.com/foaf/0.1/"
xmlns:ns = "http://example.org/ns#"
xmlns:book = "http://example.org/book"
>
<rdf:Description rdf:ID="book1">
<dc:title>SPARQL Tutorial</dc:title>
<ns:price rdf:datatype="http://www.w3.org/2001/XMLSchema#integer">42</ns:price>
</rdf:Description>
<rdf:Description rdf:ID="book2">
<dc:title>The Semantic Web</dc:title>
<ns:price rdf:datatype="http://www.w3.org/2001/XMLSchema#integer">23</ns:price>
</rdf:Description>
</rdf:RDF>
"""

# SELECT ?title ?price with the price OPTIONAL and constrained to < 30.
select = ["?title", "?price"]
pattern = GraphPattern([("?x", ns_dc["title"],"?title")])
optional = GraphPattern([("?x",ns_ns["price"],"?price")])
optional.addConstraint(lt("?price",30))
tripleStore = None

# Expected result: book1's price (42) fails the constraint, so ?price is
# unbound (None) for it.
expected = '''
?title: SPARQL Tutorial
?price: None
?title: The Semantic Web
?price: 23
'''
| MjAbuz/watchdog | vendor/rdflib-2.4.0/test/sparql/QueryTests/Test5_2.py | Python | agpl-3.0 | 1,627 |
# -*- coding: utf-8 -*-
"""
proxy.py
~~~~~~~~
⚡⚡⚡ Fast, Lightweight, Pluggable, TLS interception capable proxy server focused on
Network monitoring, controls & Application development, testing, debugging.
:copyright: (c) 2013-present by Abhinav Singh and contributors.
:license: BSD, see LICENSE for more details.
"""
from ..common.types import Readables, Writables, Descriptors
# Since 3.4.0
class DescriptorsHandlerMixin:
    """DescriptorsHandlerMixin provides abstraction used by several core HTTP modules
    include web and proxy plugins. By using DescriptorsHandlerMixin, class
    becomes complaint with core event loop."""

    # @abstractmethod
    async def get_descriptors(self) -> Descriptors:
        """Implementations must return a list of descriptions that they wish to
        read from and write into."""
        # Default: nothing to read, nothing to write.
        return [], []  # pragma: no cover

    # @abstractmethod
    async def write_to_descriptors(self, w: Writables) -> bool:
        """Implementations must now write/flush data over the socket.

        Note that buffer management is in-build into the connection classes.
        Hence implementations MUST call
        :meth:`~proxy.core.connection.connection.TcpConnection.flush`
        here, to send any buffered data over the socket.
        """
        # Default: report that nothing was written.
        return False  # pragma: no cover

    # @abstractmethod
    async def read_from_descriptors(self, r: Readables) -> bool:
        """Implementations must now read data over the socket."""
        # Default: report that nothing was read.
        return False  # pragma: no cover
| abhinavsingh/proxy.py | proxy/http/descriptors.py | Python | bsd-3-clause | 1,550 |
# GENERATED FILE, do not edit by hand
# Source: test/jinja2.test_pytorch.py
from __future__ import print_function, division
import PyTorch
import numpy
import inspect
from test.test_helpers import myeval, myexec
def test_pytorchLong():
    # Generated smoke test for LongTensor: fills, indexing, random fills,
    # resizing and arithmetic operators.  Output is mostly printed, with a
    # few seeded-RNG reproducibility expectations implied by the prints.
    PyTorch.manualSeed(123)
    numpy.random.seed(123)
    LongTensor = PyTorch.LongTensor

    D = PyTorch.LongTensor(5, 3).fill(1)
    print('D', D)

    D[2][2] = 4
    print('D', D)

    D[3].fill(9)
    print('D', D)

    D.narrow(1, 2, 1).fill(0)
    print('D', D)

    print(PyTorch.LongTensor(3, 4).bernoulli())
    print(PyTorch.LongTensor(3, 4).geometric())
    print(PyTorch.LongTensor(3, 4).geometric())
    # Re-seeding must reproduce the same geometric draw.
    PyTorch.manualSeed(3)
    print(PyTorch.LongTensor(3, 4).geometric())
    PyTorch.manualSeed(3)
    print(PyTorch.LongTensor(3, 4).geometric())

    print(type(PyTorch.LongTensor(2, 3)))

    size = PyTorch.LongStorage(2)
    size[0] = 4
    size[1] = 3
    D.resize(size)
    print('D after resize:\n', D)

    print('resize1d', PyTorch.LongTensor().resize1d(3).fill(1))
    print('resize2d', PyTorch.LongTensor().resize2d(2, 3).fill(1))
    print('resize', PyTorch.LongTensor().resize(size).fill(1))

    D = PyTorch.LongTensor(size).geometric()

    # def myeval(expr):
    #     print(expr, ':', eval(expr))

    # def myexec(expr):
    #     print(expr)
    #     exec(expr)

    myeval('LongTensor(3,2).nElement()')
    myeval('LongTensor().nElement()')
    myeval('LongTensor(1).nElement()')

    A = LongTensor(3, 4).geometric(0.9)
    myeval('A')
    myexec('A += 3')
    myeval('A')
    myexec('A *= 3')
    myeval('A')
    myexec('A -= 3')
    myeval('A')
    print('A //= 3')
    A //= 3
    myeval('A')

    myeval('A + 5')
    myeval('A - 5')
    myeval('A * 5')
    # NOTE(review): result of this floor division is discarded -- it only
    # checks the expression does not raise.
    print('A // 2')
    A // 2

    B = LongTensor().resizeAs(A).geometric(0.9)
    myeval('B')
    myeval('A + B')
    myeval('A - B')
    myexec('A += B')
    myeval('A')
    myexec('A -= B')
    myeval('A')
def test_pytorch_Long_constructors():
    """Tensors built with N size arguments must report N dimensions."""
    LongTensor = PyTorch.LongTensor
    for ndim in (3, 4):
        sizes = (3, 2, 5, 6)[:ndim]
        tensor = LongTensor(*sizes)
        assert len(tensor.size()) == ndim
def test_Pytorch_Long_operator_plus():
    """a + b equals the elementwise sum of the two underlying storages."""
    LongTensor = PyTorch.LongTensor
    a = LongTensor(3, 2, 5)
    b = LongTensor(3, 2, 5)
    a.geometric(0.9)
    b.geometric(0.9)
    res = a + b
    for i in range(3 * 2 * 5):
        assert(abs(res.storage()[i] - (a.storage()[i] + b.storage()[i])) < 0.000001)
def test_Pytorch_Long_operator_plusequals():
    """In-place += on a clone matches the elementwise sum; a is untouched."""
    LongTensor = PyTorch.LongTensor
    a = LongTensor(3, 2, 5)
    b = LongTensor(3, 2, 5)
    a.geometric(0.9)
    b.geometric(0.9)
    res = a.clone()
    res += b
    for i in range(3 * 2 * 5):
        assert(abs(res.storage()[i] - (a.storage()[i] + b.storage()[i])) < 0.000001)
def test_Pytorch_Long_operator_minus():
    """a - b equals the elementwise difference of the two storages."""
    LongTensor = PyTorch.LongTensor
    a = LongTensor(3, 2, 5)
    b = LongTensor(3, 2, 5)
    a.geometric(0.9)
    b.geometric(0.9)
    res = a - b
    for i in range(3 * 2 * 5):
        assert(abs(res.storage()[i] - (a.storage()[i] - b.storage()[i])) < 0.000001)
def test_Pytorch_Long_operator_minusequals():
    """In-place -= on a clone matches the elementwise difference."""
    LongTensor = PyTorch.LongTensor
    a = LongTensor(3, 2, 5)
    b = LongTensor(3, 2, 5)
    a.geometric(0.9)
    b.geometric(0.9)
    res = a.clone()
    res -= b
    for i in range(3 * 2 * 5):
        assert(abs(res.storage()[i] - (a.storage()[i] - b.storage()[i])) < 0.000001)
def test_Pytorch_Long_cmul():
    """cmul multiplies elementwise in place on the receiver."""
    LongTensor = PyTorch.LongTensor
    a = LongTensor(3, 2, 5)
    b = LongTensor(3, 2, 5)
    a.geometric(0.9)
    b.geometric(0.9)
    res = a.clone()  # .cmul(b)
    res.cmul(b)
    for i in range(3 * 2 * 5):
        assert(abs(res.storage()[i] - (a.storage()[i] * b.storage()[i])) < 0.000001)
def test_Pytorch_Long_operator_div():
    """Integer tensors support only floor division (//), not true division."""
    LongTensor = PyTorch.LongTensor
    a = LongTensor(3, 2, 5)
    b = LongTensor(3, 2, 5)
    a.geometric(0.9)
    b.geometric(0.9)
    # res = a / b  # whilst this should proably be allowed/implemented, it's not yet...
    # for i in range(3*2*5):
    #     assert(abs(res.storage()[i] - (a.storage()[i] / b.storage()[i])) < 0.00001)
    res = a // b
    for i in range(3 * 2 * 5):
        assert(abs(res.storage()[i] - (a.storage()[i] // b.storage()[i])) < 0.00001)
def test_Pytorch_Long_operator_divequals():
    """In-place //= on a clone matches elementwise floor division."""
    LongTensor = PyTorch.LongTensor
    a = LongTensor(3, 2, 5)
    b = LongTensor(3, 2, 5)
    a.geometric(0.9)
    b.geometric(0.9)
    res = a.clone()
    res //= b
    for i in range(3 * 2 * 5):
        assert(abs(res.storage()[i] - (a.storage()[i] // b.storage()[i])) < 0.00001)
def test_pytorchFloat():
    """Smoke-test FloatTensor: numpy interop (asFloatTensor shares memory
    with the numpy array -- see the tensorA/A prints), matrix multiply,
    fill/indexing, RNG reproducibility, resize and arithmetic.
    Output is checked by eye / golden log, so do not reorder the calls.
    """
    PyTorch.manualSeed(123)
    numpy.random.seed(123)
    FloatTensor = PyTorch.FloatTensor
    A = numpy.random.rand(6).reshape(3, 2).astype(numpy.float32)
    B = numpy.random.rand(8).reshape(2, 4).astype(numpy.float32)
    C = A.dot(B)
    print('C', C)
    print('calling .asTensor...')
    tensorA = PyTorch.asFloatTensor(A)
    tensorB = PyTorch.asFloatTensor(B)
    print(' ... asTensor called')
    print('tensorA', tensorA)
    tensorA.set2d(1, 1, 56.4)
    tensorA.set2d(2, 0, 76.5)
    print('tensorA', tensorA)
    print('A', A)
    print('add 5 to tensorA')
    tensorA += 5
    print('tensorA', tensorA)
    print('A', A)
    print('add 7 to tensorA')
    tensorA2 = tensorA + 7
    print('tensorA2', tensorA2)
    print('tensorA', tensorA)
    # '*' here is matrix multiplication (3x2 times 2x4), compared against numpy dot
    tensorAB = tensorA * tensorB
    print('tensorAB', tensorAB)
    print('A.dot(B)', A.dot(B))
    print('tensorA[2]', tensorA[2])
    D = PyTorch.FloatTensor(5, 3).fill(1)
    print('D', D)
    D[2][2] = 4
    print('D', D)
    D[3].fill(9)
    print('D', D)
    D.narrow(1, 2, 1).fill(0)
    print('D', D)
    print(PyTorch.FloatTensor(3, 4).uniform())
    print(PyTorch.FloatTensor(3, 4).normal())
    print(PyTorch.FloatTensor(3, 4).cauchy())
    print(PyTorch.FloatTensor(3, 4).exponential())
    print(PyTorch.FloatTensor(3, 4).logNormal())
    print(PyTorch.FloatTensor(3, 4).bernoulli())
    print(PyTorch.FloatTensor(3, 4).geometric())
    print(PyTorch.FloatTensor(3, 4).geometric())
    # reseeding with the same seed must reproduce the same draw
    PyTorch.manualSeed(3)
    print(PyTorch.FloatTensor(3, 4).geometric())
    PyTorch.manualSeed(3)
    print(PyTorch.FloatTensor(3, 4).geometric())
    print(type(PyTorch.FloatTensor(2, 3)))
    size = PyTorch.LongStorage(2)
    size[0] = 4
    size[1] = 3
    D.resize(size)
    print('D after resize:\n', D)
    print('resize1d', PyTorch.FloatTensor().resize1d(3).fill(1))
    print('resize2d', PyTorch.FloatTensor().resize2d(2, 3).fill(1))
    print('resize', PyTorch.FloatTensor().resize(size).fill(1))
    D = PyTorch.FloatTensor(size).geometric()
    # def myeval(expr):
    #     print(expr, ':', eval(expr))
    # def myexec(expr):
    #     print(expr)
    #     exec(expr)
    myeval('FloatTensor(3,2).nElement()')
    myeval('FloatTensor().nElement()')
    myeval('FloatTensor(1).nElement()')
    A = FloatTensor(3, 4).geometric(0.9)
    myeval('A')
    myexec('A += 3')
    myeval('A')
    myexec('A *= 3')
    myeval('A')
    myexec('A -= 3')
    myeval('A')
    print('A /= 3')
    A /= 3
    myeval('A')
    myeval('A + 5')
    myeval('A - 5')
    myeval('A * 5')
    print('A / 2')
    A / 2
    B = FloatTensor().resizeAs(A).geometric(0.9)
    myeval('B')
    myeval('A + B')
    myeval('A - B')
    myexec('A += B')
    myeval('A')
    myexec('A -= B')
    myeval('A')
def test_pytorch_Float_constructors():
    """3-D and 4-D FloatTensor constructors yield tensors of matching rank."""
    FloatTensor = PyTorch.FloatTensor
    a = FloatTensor(3, 2, 5)
    assert(len(a.size()) == 3)
    a = FloatTensor(3, 2, 5, 6)
    assert(len(a.size()) == 4)
def test_Pytorch_Float_operator_plus():
    """a + b equals the elementwise sum of the two underlying storages."""
    FloatTensor = PyTorch.FloatTensor
    a = FloatTensor(3, 2, 5)
    b = FloatTensor(3, 2, 5)
    a.uniform()
    b.uniform()
    res = a + b
    for i in range(3 * 2 * 5):
        assert(abs(res.storage()[i] - (a.storage()[i] + b.storage()[i])) < 0.000001)
def test_Pytorch_Float_operator_plusequals():
    """In-place += on a clone matches the elementwise sum; a is untouched."""
    FloatTensor = PyTorch.FloatTensor
    a = FloatTensor(3, 2, 5)
    b = FloatTensor(3, 2, 5)
    a.uniform()
    b.uniform()
    res = a.clone()
    res += b
    for i in range(3 * 2 * 5):
        assert(abs(res.storage()[i] - (a.storage()[i] + b.storage()[i])) < 0.000001)
def test_Pytorch_Float_operator_minus():
    """a - b equals the elementwise difference of the two storages."""
    FloatTensor = PyTorch.FloatTensor
    a = FloatTensor(3, 2, 5)
    b = FloatTensor(3, 2, 5)
    a.uniform()
    b.uniform()
    res = a - b
    for i in range(3 * 2 * 5):
        assert(abs(res.storage()[i] - (a.storage()[i] - b.storage()[i])) < 0.000001)
def test_Pytorch_Float_operator_minusequals():
    """In-place -= on a clone matches the elementwise difference."""
    FloatTensor = PyTorch.FloatTensor
    a = FloatTensor(3, 2, 5)
    b = FloatTensor(3, 2, 5)
    a.uniform()
    b.uniform()
    res = a.clone()
    res -= b
    for i in range(3 * 2 * 5):
        assert(abs(res.storage()[i] - (a.storage()[i] - b.storage()[i])) < 0.000001)
def test_Pytorch_Float_cmul():
    """cmul multiplies elementwise in place on the receiver."""
    FloatTensor = PyTorch.FloatTensor
    a = FloatTensor(3, 2, 5)
    b = FloatTensor(3, 2, 5)
    a.uniform()
    b.uniform()
    res = a.clone()  # .cmul(b)
    res.cmul(b)
    for i in range(3 * 2 * 5):
        assert(abs(res.storage()[i] - (a.storage()[i] * b.storage()[i])) < 0.000001)
# def test_Pytorch_Float_abs():
# FloatTensor = PyTorch.FloatTensor
# a = FloatTensor(3,2,5)
# # a.uniform()
# # res = a.clone()
# res.abs()
# for i in range(3*2*5):
# # assert(abs(res.storage()[i] - (torch.abs(a.storage()[i]))) < 0.000001)
# # def test_Pytorch_Float_tanh():
# FloatTensor = PyTorch.FloatTensor
# a = FloatTensor(3,2,5)
# # a.uniform()
# # res = a.clone()
# res.tanh()
# for i in range(3*2*5):
# # assert(abs(res.storage()[i] - (torch.tanh(a.storage()[i]))) < 0.000001)
# # def test_Pytorch_Float_sigmoid():
# FloatTensor = PyTorch.FloatTensor
# a = FloatTensor(3,2,5)
# # a.uniform()
# # res = a.clone()
# res.sigmoid()
# for i in range(3*2*5):
# # assert(abs(res.storage()[i] - (torch.sigmoid(a.storage()[i]))) < 0.000001)
# # def test_Pytorch_Float_neg():
# FloatTensor = PyTorch.FloatTensor
# a = FloatTensor(3,2,5)
# # a.uniform()
# # res = a.clone()
# res.neg()
# for i in range(3*2*5):
# # assert(abs(res.storage()[i] - (torch.neg(a.storage()[i]))) < 0.000001)
# # def test_Pytorch_Float_cinv():
# FloatTensor = PyTorch.FloatTensor
# a = FloatTensor(3,2,5)
# # a.uniform()
# # res = a.clone()
# res.cinv()
# for i in range(3*2*5):
# # assert(abs(res.storage()[i] - (torch.cinv(a.storage()[i]))) < 0.000001)
#
def test_Pytorch_Float_operator_div():
    """a / b equals elementwise true division of the two storages."""
    FloatTensor = PyTorch.FloatTensor
    a = FloatTensor(3, 2, 5)
    b = FloatTensor(3, 2, 5)
    a.uniform()
    b.uniform()
    res = a / b
    for i in range(3 * 2 * 5):
        assert(abs(res.storage()[i] - (a.storage()[i] / b.storage()[i])) < 0.00001)
def test_Pytorch_Float_operator_divequals():
    """In-place /= on a clone matches elementwise true division."""
    FloatTensor = PyTorch.FloatTensor
    a = FloatTensor(3, 2, 5)
    b = FloatTensor(3, 2, 5)
    a.uniform()
    b.uniform()
    res = a.clone()
    res /= b
    for i in range(3 * 2 * 5):
        assert(abs(res.storage()[i] - (a.storage()[i] / b.storage()[i])) < 0.00001)
def test_pytorchDouble():
    """Smoke-test DoubleTensor: fill/indexing, RNG reproducibility, resize
    and arithmetic. Output is checked by eye / golden log, so do not
    reorder the calls.
    """
    PyTorch.manualSeed(123)
    numpy.random.seed(123)
    DoubleTensor = PyTorch.DoubleTensor
    D = PyTorch.DoubleTensor(5, 3).fill(1)
    print('D', D)
    D[2][2] = 4
    print('D', D)
    D[3].fill(9)
    print('D', D)
    D.narrow(1, 2, 1).fill(0)
    print('D', D)
    print(PyTorch.DoubleTensor(3, 4).uniform())
    print(PyTorch.DoubleTensor(3, 4).normal())
    print(PyTorch.DoubleTensor(3, 4).cauchy())
    print(PyTorch.DoubleTensor(3, 4).exponential())
    print(PyTorch.DoubleTensor(3, 4).logNormal())
    print(PyTorch.DoubleTensor(3, 4).bernoulli())
    print(PyTorch.DoubleTensor(3, 4).geometric())
    print(PyTorch.DoubleTensor(3, 4).geometric())
    # reseeding with the same seed must reproduce the same draw
    PyTorch.manualSeed(3)
    print(PyTorch.DoubleTensor(3, 4).geometric())
    PyTorch.manualSeed(3)
    print(PyTorch.DoubleTensor(3, 4).geometric())
    print(type(PyTorch.DoubleTensor(2, 3)))
    size = PyTorch.LongStorage(2)
    size[0] = 4
    size[1] = 3
    D.resize(size)
    print('D after resize:\n', D)
    print('resize1d', PyTorch.DoubleTensor().resize1d(3).fill(1))
    print('resize2d', PyTorch.DoubleTensor().resize2d(2, 3).fill(1))
    print('resize', PyTorch.DoubleTensor().resize(size).fill(1))
    D = PyTorch.DoubleTensor(size).geometric()
    # def myeval(expr):
    #     print(expr, ':', eval(expr))
    # def myexec(expr):
    #     print(expr)
    #     exec(expr)
    myeval('DoubleTensor(3,2).nElement()')
    myeval('DoubleTensor().nElement()')
    myeval('DoubleTensor(1).nElement()')
    A = DoubleTensor(3, 4).geometric(0.9)
    myeval('A')
    myexec('A += 3')
    myeval('A')
    myexec('A *= 3')
    myeval('A')
    myexec('A -= 3')
    myeval('A')
    print('A /= 3')
    A /= 3
    myeval('A')
    myeval('A + 5')
    myeval('A - 5')
    myeval('A * 5')
    print('A / 2')
    A / 2
    B = DoubleTensor().resizeAs(A).geometric(0.9)
    myeval('B')
    myeval('A + B')
    myeval('A - B')
    myexec('A += B')
    myeval('A')
    myexec('A -= B')
    myeval('A')
def test_pytorch_Double_constructors():
    """3-D and 4-D DoubleTensor constructors yield tensors of matching rank."""
    DoubleTensor = PyTorch.DoubleTensor
    a = DoubleTensor(3, 2, 5)
    assert(len(a.size()) == 3)
    a = DoubleTensor(3, 2, 5, 6)
    assert(len(a.size()) == 4)
def test_Pytorch_Double_operator_plus():
    """a + b equals the elementwise sum of the two underlying storages."""
    DoubleTensor = PyTorch.DoubleTensor
    a = DoubleTensor(3, 2, 5)
    b = DoubleTensor(3, 2, 5)
    a.uniform()
    b.uniform()
    res = a + b
    for i in range(3 * 2 * 5):
        assert(abs(res.storage()[i] - (a.storage()[i] + b.storage()[i])) < 0.000001)
def test_Pytorch_Double_operator_plusequals():
    """In-place += on a clone matches the elementwise sum; a is untouched."""
    DoubleTensor = PyTorch.DoubleTensor
    a = DoubleTensor(3, 2, 5)
    b = DoubleTensor(3, 2, 5)
    a.uniform()
    b.uniform()
    res = a.clone()
    res += b
    for i in range(3 * 2 * 5):
        assert(abs(res.storage()[i] - (a.storage()[i] + b.storage()[i])) < 0.000001)
def test_Pytorch_Double_operator_minus():
    """a - b equals the elementwise difference of the two storages."""
    DoubleTensor = PyTorch.DoubleTensor
    a = DoubleTensor(3, 2, 5)
    b = DoubleTensor(3, 2, 5)
    a.uniform()
    b.uniform()
    res = a - b
    for i in range(3 * 2 * 5):
        assert(abs(res.storage()[i] - (a.storage()[i] - b.storage()[i])) < 0.000001)
def test_Pytorch_Double_operator_minusequals():
    """In-place -= on a clone matches the elementwise difference."""
    DoubleTensor = PyTorch.DoubleTensor
    a = DoubleTensor(3, 2, 5)
    b = DoubleTensor(3, 2, 5)
    a.uniform()
    b.uniform()
    res = a.clone()
    res -= b
    for i in range(3 * 2 * 5):
        assert(abs(res.storage()[i] - (a.storage()[i] - b.storage()[i])) < 0.000001)
def test_Pytorch_Double_cmul():
    """cmul multiplies elementwise in place on the receiver."""
    DoubleTensor = PyTorch.DoubleTensor
    a = DoubleTensor(3, 2, 5)
    b = DoubleTensor(3, 2, 5)
    a.uniform()
    b.uniform()
    res = a.clone()  # .cmul(b)
    res.cmul(b)
    for i in range(3 * 2 * 5):
        assert(abs(res.storage()[i] - (a.storage()[i] * b.storage()[i])) < 0.000001)
# def test_Pytorch_Double_abs():
# DoubleTensor = PyTorch.DoubleTensor
# a = DoubleTensor(3,2,5)
# # a.uniform()
# # res = a.clone()
# res.abs()
# for i in range(3*2*5):
# # assert(abs(res.storage()[i] - (torch.abs(a.storage()[i]))) < 0.000001)
# # def test_Pytorch_Double_tanh():
# DoubleTensor = PyTorch.DoubleTensor
# a = DoubleTensor(3,2,5)
# # a.uniform()
# # res = a.clone()
# res.tanh()
# for i in range(3*2*5):
# # assert(abs(res.storage()[i] - (torch.tanh(a.storage()[i]))) < 0.000001)
# # def test_Pytorch_Double_sigmoid():
# DoubleTensor = PyTorch.DoubleTensor
# a = DoubleTensor(3,2,5)
# # a.uniform()
# # res = a.clone()
# res.sigmoid()
# for i in range(3*2*5):
# # assert(abs(res.storage()[i] - (torch.sigmoid(a.storage()[i]))) < 0.000001)
# # def test_Pytorch_Double_neg():
# DoubleTensor = PyTorch.DoubleTensor
# a = DoubleTensor(3,2,5)
# # a.uniform()
# # res = a.clone()
# res.neg()
# for i in range(3*2*5):
# # assert(abs(res.storage()[i] - (torch.neg(a.storage()[i]))) < 0.000001)
# # def test_Pytorch_Double_cinv():
# DoubleTensor = PyTorch.DoubleTensor
# a = DoubleTensor(3,2,5)
# # a.uniform()
# # res = a.clone()
# res.cinv()
# for i in range(3*2*5):
# # assert(abs(res.storage()[i] - (torch.cinv(a.storage()[i]))) < 0.000001)
#
def test_Pytorch_Double_operator_div():
    """a / b equals elementwise true division of the two storages."""
    DoubleTensor = PyTorch.DoubleTensor
    a = DoubleTensor(3, 2, 5)
    b = DoubleTensor(3, 2, 5)
    a.uniform()
    b.uniform()
    res = a / b
    for i in range(3 * 2 * 5):
        assert(abs(res.storage()[i] - (a.storage()[i] / b.storage()[i])) < 0.00001)
def test_Pytorch_Double_operator_divequals():
    """In-place /= on a clone matches elementwise true division."""
    DoubleTensor = PyTorch.DoubleTensor
    a = DoubleTensor(3, 2, 5)
    b = DoubleTensor(3, 2, 5)
    a.uniform()
    b.uniform()
    res = a.clone()
    res /= b
    for i in range(3 * 2 * 5):
        assert(abs(res.storage()[i] - (a.storage()[i] / b.storage()[i])) < 0.00001)
def test_pytorchByte():
    """Smoke-test ByteTensor (unsigned 8-bit): fill/indexing, RNG
    reproducibility, resize and arithmetic. Output is checked by eye /
    golden log, so do not reorder the calls.
    """
    PyTorch.manualSeed(123)
    numpy.random.seed(123)
    ByteTensor = PyTorch.ByteTensor
    D = PyTorch.ByteTensor(5, 3).fill(1)
    print('D', D)
    D[2][2] = 4
    print('D', D)
    D[3].fill(9)
    print('D', D)
    D.narrow(1, 2, 1).fill(0)
    print('D', D)
    print(PyTorch.ByteTensor(3, 4).bernoulli())
    print(PyTorch.ByteTensor(3, 4).geometric())
    print(PyTorch.ByteTensor(3, 4).geometric())
    # reseeding with the same seed must reproduce the same draw
    PyTorch.manualSeed(3)
    print(PyTorch.ByteTensor(3, 4).geometric())
    PyTorch.manualSeed(3)
    print(PyTorch.ByteTensor(3, 4).geometric())
    print(type(PyTorch.ByteTensor(2, 3)))
    size = PyTorch.LongStorage(2)
    size[0] = 4
    size[1] = 3
    D.resize(size)
    print('D after resize:\n', D)
    print('resize1d', PyTorch.ByteTensor().resize1d(3).fill(1))
    print('resize2d', PyTorch.ByteTensor().resize2d(2, 3).fill(1))
    print('resize', PyTorch.ByteTensor().resize(size).fill(1))
    D = PyTorch.ByteTensor(size).geometric()
    # def myeval(expr):
    #     print(expr, ':', eval(expr))
    # def myexec(expr):
    #     print(expr)
    #     exec(expr)
    myeval('ByteTensor(3,2).nElement()')
    myeval('ByteTensor().nElement()')
    myeval('ByteTensor(1).nElement()')
    A = ByteTensor(3, 4).geometric(0.9)
    myeval('A')
    myexec('A += 3')
    myeval('A')
    myexec('A *= 3')
    myeval('A')
    # NOTE(review): 'A' is evaluated twice in a row here; the generator
    # appears to have dropped the "A -= 3" step (byte is unsigned) --
    # confirm against test/jinja2.test_pytorch.py.
    myeval('A')
    print('A //= 3')
    A //= 3
    myeval('A')
    myeval('A + 5')
    myeval('A * 5')
    print('A // 2')
    A // 2
    B = ByteTensor().resizeAs(A).geometric(0.9)
    myeval('B')
    myeval('A + B')
    myexec('A += B')
    myeval('A')
def test_pytorch_Byte_constructors():
    """3-D and 4-D ByteTensor constructors yield tensors of matching rank."""
    ByteTensor = PyTorch.ByteTensor
    a = ByteTensor(3, 2, 5)
    assert(len(a.size()) == 3)
    a = ByteTensor(3, 2, 5, 6)
    assert(len(a.size()) == 4)
def test_Pytorch_Byte_operator_plus():
    """a + b equals the elementwise sum of the two underlying storages."""
    ByteTensor = PyTorch.ByteTensor
    a = ByteTensor(3, 2, 5)
    b = ByteTensor(3, 2, 5)
    a.geometric(0.9)
    b.geometric(0.9)
    res = a + b
    for i in range(3 * 2 * 5):
        assert(abs(res.storage()[i] - (a.storage()[i] + b.storage()[i])) < 0.000001)
def test_Pytorch_Byte_operator_plusequals():
    """In-place += on a clone matches the elementwise sum; a is untouched."""
    ByteTensor = PyTorch.ByteTensor
    a = ByteTensor(3, 2, 5)
    b = ByteTensor(3, 2, 5)
    a.geometric(0.9)
    b.geometric(0.9)
    res = a.clone()
    res += b
    for i in range(3 * 2 * 5):
        assert(abs(res.storage()[i] - (a.storage()[i] + b.storage()[i])) < 0.000001)
def test_Pytorch_Byte_cmul():
    """cmul multiplies elementwise in place; byte arithmetic wraps mod 256."""
    ByteTensor = PyTorch.ByteTensor
    a = ByteTensor(3, 2, 5)
    b = ByteTensor(3, 2, 5)
    a.geometric(0.9)
    b.geometric(0.9)
    res = a.clone()  # .cmul(b)
    res.cmul(b)
    for i in range(3 * 2 * 5):
        # the % 256 models the unsigned-byte overflow of the C storage
        assert(abs(res.storage()[i] - ((a.storage()[i] * b.storage()[i])) % 256) < 0.000001)
def test_Pytorch_Byte_operator_div():
    """Integer tensors support only floor division (//), not true division."""
    ByteTensor = PyTorch.ByteTensor
    a = ByteTensor(3, 2, 5)
    b = ByteTensor(3, 2, 5)
    a.geometric(0.9)
    b.geometric(0.9)
    # res = a / b  # whilst this should proably be allowed/implemented, it's not yet...
    # for i in range(3*2*5):
    #     assert(abs(res.storage()[i] - (a.storage()[i] / b.storage()[i])) < 0.00001)
    res = a // b
    for i in range(3 * 2 * 5):
        assert(abs(res.storage()[i] - (a.storage()[i] // b.storage()[i])) < 0.00001)
def test_Pytorch_Byte_operator_divequals():
    """In-place //= on a clone matches elementwise floor division."""
    ByteTensor = PyTorch.ByteTensor
    a = ByteTensor(3, 2, 5)
    b = ByteTensor(3, 2, 5)
    a.geometric(0.9)
    b.geometric(0.9)
    res = a.clone()
    res //= b
    for i in range(3 * 2 * 5):
        assert(abs(res.storage()[i] - (a.storage()[i] // b.storage()[i])) < 0.00001)
if __name__ == '__main__':
    # Allow running the smoke tests directly, without a test runner.
    test_pytorchLong()
    test_pytorchFloat()
    test_pytorchDouble()
    test_pytorchByte()
| hughperkins/pytorch | test/test_pytorch.py | Python | bsd-2-clause | 21,095 |
import RPi.GPIO as GPIO
ReedPin = 11
LedPin = 12
def setup():
    """Configure pins: LED as an output (driven low = off), reed switch as a
    pulled-up input, using physical (BOARD) pin numbering."""
    GPIO.setmode(GPIO.BOARD)       # Numbers GPIOs by physical location
    GPIO.setup(LedPin, GPIO.OUT)   # Set LedPin's mode is output
    GPIO.setup(ReedPin, GPIO.IN, pull_up_down=GPIO.PUD_UP)
    GPIO.output(LedPin, GPIO.LOW)  # drive low -> LED off initially
def switchLed(channel):
if GPIO.input(11):
print "Magnet detected - LED on!"
GPIO.output(LedPin,GPIO.HIGH)
else:
print "No magnet detected - LED off!"
GPIO.output(LedPin,GPIO.LOW)
def loop():
    """Register edge detection (both edges, 20 ms debounce) on the reed pin,
    then idle forever; all work happens in the switchLed callback."""
    GPIO.add_event_detect(ReedPin,GPIO.BOTH, callback=switchLed, bouncetime=20)
    while True:
        pass  # NOTE(review): busy-wait burns CPU; time.sleep(1) would suffice
def destroy():
    """Turn the LED off and release all GPIO resources."""
    GPIO.output(LedPin, GPIO.LOW)     # led off
    GPIO.cleanup()                    # Release resource
if __name__ == '__main__':     # Program start from here
    setup()
    try:
        loop()
    except KeyboardInterrupt:  # On Ctrl+C, clean up the GPIO state before exiting.
        destroy()
| bicard/raspberrypi | quad-store-sensors/37in1/reed-and-mini-reed-switch-rgb-led-smd.py | Python | gpl-3.0 | 943 |
# Copyright (c) 2010 by Cisco Systems, Inc.
"""
Show concurrent processes for a single instmake log.
"""
# The Python libraries that we need
from instmakelib import instmake_log as LOG
import sys
import getopt
from instmakelib import concurrency
description = "Show concurrent-process stats."
def usage():
    """Print the command-line help for the 'conprocs' report (Python 2)."""
    print "conprocs:", description
    print "    [--non-make|--only-make|--all]: DEFAULT=--non-make"
    print "    [--timeline] show process timeline (always shows ALL processes)"
    print "    [--tools] summarize tools in use during each -j chunk."
    print "    [--procs] show processes during each -j chunk."
    print "    [--procs-j=N] show processes during -jN chunk."
def report_the_procs(conprocs, jobslot):
print "=" * 80
pids = conprocs.PIDsForJ(jobslot)
for pid in pids:
conprocs.Rec(pid).Print()
def report_the_tools(conprocs, jobslot):
tools = conprocs.ToolsForJ(jobslot)
# print "NUMTOOLS", len(tools)
stats = {}
for tool in tools:
if stats.has_key(tool):
stats[tool] += 1
else:
stats[tool] = 1
sorted_tools = stats.keys()
sorted_tools.sort()
i = 1
for tool in sorted_tools:
num = stats[tool]
ext = "s"
if num == 1:
ext = ""
print "%d. %-30s %6d time%s" % (i, tool, num, ext)
i += 1
print
def report(log_file_names, args):
    """Entry point for the 'conprocs' report (Python 2).

    log_file_names -- list with exactly one instmake log path.
    args           -- remaining command-line arguments (long options only).

    Parses the options, runs the concurrency analysis, and prints a table
    of time spent at each -j concurrency level, optionally followed by
    per-slot tool summaries, process dumps and a timeline.
    """
    mode = concurrency.NON_MAKE
    show_timeline = 0
    show_tools = 0
    show_procs = 0
    show_procs_j = []
    # We only accept one log file
    if len(log_file_names) != 1:
        sys.exit("'conprocs' report uses one log file.")
    else:
        log_file_name = log_file_names[0]
    optstring = ""
    longopts = ["non-make", "only-make", "all", "timeline", "tools", "procs",
        "procs-j="]
    try:
        opts, args = getopt.getopt(args, optstring, longopts)
    except getopt.GetoptError:
        usage()
        sys.exit(1)
    for opt, arg in opts:
        if opt == "--non-make":
            mode = concurrency.NON_MAKE
        elif opt == "--only-make":
            mode = concurrency.ONLY_MAKE
        elif opt == "--all":
            mode = concurrency.ALL
        elif opt == "--timeline":
            show_timeline = 1
        elif opt == "--tools":
            show_tools = 1
        elif opt == "--procs":
            show_procs = 1
        elif opt == "--procs-j":
            try:
                show_procs_j.append(int(arg))
            except ValueError:
                sys.exit("--procs-j accepts an integer value")
        else:
            assert 0, "Unexpected option %s" % (opt,)
    # Records only need to be kept in memory if we are going to dump them.
    keep_recs_flag = show_procs or show_procs_j
    conprocs = concurrency.Concurrency(log_file_name, mode,
        show_timeline, show_tools, keep_recs=keep_recs_flag)
    # Add an extra title line.
    if mode == concurrency.NON_MAKE:
        title = "Concurrent Non-Make Processes During Build"
    elif mode == concurrency.ONLY_MAKE:
        title = "Concurrent Make Processes During Build"
    elif mode == concurrency.ALL:
        title = "Concurrent Processes (Make and Non-Make) During Build"
    else:
        assert 0, "Mode %s not expected" % (mode,)
    print title
    top_rec = conprocs.TopRecord()
    ovtime = top_rec.diff_times[top_rec.REAL_TIME]
    # Find the total number of processes.
    unique_ids = {}
    for jobslot in range(conprocs.NumJobSlots()):
        for ID in conprocs.IDsForJ(jobslot):
            unique_ids[ID] = None
    tot_procs = len(unique_ids.keys())
    del unique_ids
    print
    print "Note: SUM(NUM PROCS) > Total Processes Considered, and"
    print "      SUM(%PROCS) > 100% because the same process"
    print "      can be running when NUMJOBS = N, NUMJOBS = N-1, etc."
    print
    print "Note: SUM(%TIME) == 100% and SUM(REAL TIME) = Overall Real Time"
    print
    print "Note: Processes with 0.000 real time not considered."
    print
    print "Overall Time:"
    print "\treal ", LOG.hms(top_rec.diff_times[top_rec.REAL_TIME])
    print "\tuser ", LOG.hms(top_rec.diff_times[top_rec.USER_TIME])
    print "\tsys  ", LOG.hms(top_rec.diff_times[top_rec.SYS_TIME])
    print
    print "Total Processes Considered:", tot_procs
    print
    print "-j SLOT    NUM PROCS  %PROCS        REAL TIME     %TIME"
    for (jobslot, pct_duration) in conprocs.Results():
        jobslot_duration = LOG.hms(ovtime * pct_duration / 100.0)
        num_procs = len(conprocs.PIDsForJ(jobslot))
        pct_procs = 100.0 * float(num_procs) / float(tot_procs)
        print "  %2d     %6d      %6.2f%%  %15s   %6.2f%%" % \
                (jobslot, num_procs, pct_procs, jobslot_duration, pct_duration)
        if show_tools:
            report_the_tools(conprocs, jobslot)
        if show_procs:
            report_the_procs(conprocs, jobslot)
        elif jobslot in show_procs_j:
            report_the_procs(conprocs, jobslot)
    if show_timeline:
        print
        print "Timeline"
        print
        conprocs.PrintMap(sys.stdout)
| gilramir/instmake | instmakeplugins/report_conprocs.py | Python | bsd-3-clause | 5,024 |
from django import template
register = template.Library()
@register.filter
def lookup(mapping, key):
    """Template filter: return ``mapping[key]``.

    Usage: ``{{ mydict|lookup:mykey }}``. Raises KeyError (rendered as a
    template error) when the key is absent -- use ``.get`` semantics only
    if silent misses are desired. Renamed the first parameter from
    ``dict`` to avoid shadowing the builtin; filters are invoked
    positionally, so callers are unaffected.
    """
    return mapping[key]
| mtarsel/Django-MOOC | instructor_portal/app_tags/app_tags.py | Python | gpl-2.0 | 121 |
"""Test cases for the limits extension."""
from django.core.urlresolvers import reverse
from modoboa.core.factories import UserFactory
from modoboa.core.models import User
from modoboa.lib import parameters
from modoboa.lib.tests import ModoTestCase
from modoboa_admin.factories import populate_database
from modoboa_admin.models import Alias, Domain
from .models import LimitTemplates
class PermissionsTestCase(ModoTestCase):
    """Role/permission checks involving accounts created by a superadmin."""
    def setUp(self):
        # Start each test from the standard populated fixture set.
        super(PermissionsTestCase, self).setUp()
        populate_database()
    def test_domainadmin_deletes_reseller(self):
        """Check if a domain admin can delete a reseller.
        Expected result: no.
        """
        # Created while logged in as the default superadmin.
        values = dict(
            username="reseller@test.com", first_name="Reseller", last_name="",
            password1="toto", password2="toto", role="Resellers",
            is_active=True, email="reseller@test.com", stepid='step2'
        )
        self.ajax_post(reverse("modoboa_admin:account_add"), values)
        account = User.objects.get(username="reseller@test.com")
        # Re-login as a plain domain admin and try the deletion: must be 403.
        self.clt.logout()
        self.clt.login(username="admin@test.com", password="toto")
        resp = self.ajax_post(
            reverse("modoboa_admin:account_delete", args=[account.id]),
            {}, status=403
        )
        self.assertEqual(resp, "Permission denied")
class ResourceTestCase(ModoTestCase):
    """Shared base: sets every default limit to 2 and provides helpers to
    create accounts, aliases and domains, and to assert limit counters."""
    def setUp(self):
        """Custom setUp method.
        The 'limits' is manually loaded to ensure extra parameters
        provided by 'postfix_relay_domains' are properly received.
        """
        super(ResourceTestCase, self).setUp()
        #exts_pool.load_extension("modoboa_admin_limits")
        # Every limit template defaults to a maximum of 2 for these tests.
        for tpl in LimitTemplates().templates:
            parameters.save_admin(
                "DEFLT_{0}".format(tpl[0].upper()), 2,
                app="modoboa_admin_limits"
            )
        populate_database()
    def _create_account(self, username, role='SimpleUsers', status=200):
        # POST the two-step account-creation wizard; returns the JSON response.
        values = dict(
            username=username, first_name="Tester", last_name="Toto",
            password1="toto", password2="toto", role=role,
            quota_act=True,
            is_active=True, email=username, stepid='step2',
        )
        return self.ajax_post(
            reverse("modoboa_admin:account_add"), values, status
        )
    def _create_alias(self, email, rcpt='user@test.com', status=200):
        # Create a mailbox alias pointing at rcpt; expects HTTP `status`.
        values = dict(
            email=email, recipients=rcpt, enabled=True
        )
        return self.ajax_post(
            reverse("modoboa_admin:alias_add"), values, status
        )
    def _create_domain(self, name, status=200, withtpl=False):
        # Create a domain; with withtpl=True also request the default
        # domain-admin account and standard aliases from the template.
        values = {
            "name": name, "quota": 100, "create_dom_admin": "no",
            "create_aliases": "no", "stepid": 'step2'
        }
        if withtpl:
            values['create_dom_admin'] = 'yes'
            values['dom_admin_username'] = 'admin'
            values['create_aliases'] = 'yes'
        return self.ajax_post(
            reverse("modoboa_admin:domain_add"), values, status
        )
    def _domain_alias_operation(self, optype, domain, name, status=200):
        # Add ('add') or remove (anything else) a domain alias by re-posting
        # the whole domain edit form with the updated alias list.
        dom = Domain.objects.get(name=domain)
        values = {
            'name': dom.name, 'quota': dom.quota, 'enabled': dom.enabled,
        }
        aliases = [alias.name for alias in dom.domainalias_set.all()]
        if optype == 'add':
            aliases.append(name)
        else:
            aliases.remove(name)
        # The form expects 'aliases', 'aliases_1', 'aliases_2', ...
        for cpt, alias in enumerate(aliases):
            fname = 'aliases' if not cpt else 'aliases_%d' % cpt
            values[fname] = alias
        self.ajax_post(
            reverse("modoboa_admin:domain_change", args=[dom.id]),
            values, status
        )
    def _check_limit(self, name, curvalue, maxvalue):
        # Assert current and maximum values of self.user's '<name>_limit'.
        l = self.user.limitspool.get_limit('%s_limit' % name)
        self.assertEqual(l.curvalue, curvalue)
        self.assertEqual(l.maxvalue, maxvalue)
class DomainAdminTestCase(ResourceTestCase):
    """Limit enforcement as seen by a domain admin (max 2 mailboxes and
    2 mailbox aliases), exercised through the admin HTTP endpoints."""
    def setUp(self):
        super(DomainAdminTestCase, self).setUp()
        self.user = User.objects.get(username='admin@test.com')
        self.user.limitspool.set_maxvalue('mailboxes_limit', 2)
        self.user.limitspool.set_maxvalue('mailbox_aliases_limit', 2)
        # Run the tests as the domain admin, not the superadmin.
        self.clt.logout()
        self.clt.login(username='admin@test.com', password='toto')
    def test_mailboxes_limit(self):
        # Creating up to the limit works; the third attempt is refused (403)
        # and deleting a mailbox gives the slot back.
        self._create_account('tester1@test.com')
        self._check_limit('mailboxes', 1, 2)
        self._create_account('tester2@test.com')
        self._check_limit('mailboxes', 2, 2)
        resp = self._create_account('tester3@test.com', status=403)
        self._check_limit('mailboxes', 2, 2)
        self.ajax_post(
            reverse('modoboa_admin:account_delete',
                    args=[User.objects.get(username='tester2@test.com').id]),
            {}
        )
        self._check_limit('mailboxes', 1, 2)
    def test_aliases_limit(self):
        # Same pattern as mailboxes, for mailbox aliases.
        self._create_alias('alias1@test.com')
        self._check_limit('mailbox_aliases', 1, 2)
        self._create_alias('alias2@test.com')
        self._check_limit('mailbox_aliases', 2, 2)
        resp = self._create_alias('alias3@test.com', status=403)
        self._check_limit('mailbox_aliases', 2, 2)
        self.ajax_post(
            reverse('modoboa_admin:alias_delete') + '?selection=%d'
            % Alias.objects.get(address='alias2', domain__name='test.com').id,
            {}
        )
        self._check_limit('mailbox_aliases', 1, 2)
    def test_aliases_limit_through_account_form(self):
        # Aliases declared inline on the account edit form count against
        # the same mailbox_aliases limit.
        user = User.objects.get(username='user@test.com')
        values = dict(
            username=user.username, role=user.group,
            is_active=user.is_active, email=user.email, quota_act=True,
            aliases="alias1@test.com", aliases_1="alias2@test.com"
        )
        self.ajax_post(
            reverse("modoboa_admin:account_change", args=[user.id]),
            values
        )
        Alias.objects.get(address='alias1', domain__name='test.com')
        self._check_limit('mailbox_aliases', 2, 2)
class ResellerTestCase(ResourceTestCase):
def setUp(self):
super(ResellerTestCase, self).setUp()
self.user = UserFactory.create(
username='reseller', groups=('Resellers',)
)
self.clt.logout()
self.clt.login(username='reseller', password='toto')
def test_domains_limit(self):
self._create_domain('domain1.tld')
self._check_limit('domains', 1, 2)
self._create_domain('domain2.tld')
self._check_limit('domains', 2, 2)
resp = self._create_domain('domain3.tld', 403)
self._check_limit('domains', 2, 2)
self.ajax_post(
reverse('modoboa_admin:domain_delete',
args=[Domain.objects.get(name='domain2.tld').id]),
{}
)
self._check_limit('domains', 1, 2)
def test_domain_aliases_limit(self):
self._create_domain('pouet.com')
self._domain_alias_operation('add', 'pouet.com', 'domain-alias1.tld')
self._check_limit('domain_aliases', 1, 2)
self._domain_alias_operation('add', 'pouet.com', 'domain-alias2.tld')
self._check_limit('domain_aliases', 2, 2)
resp = self._domain_alias_operation(
'add', 'pouet.com', 'domain-alias3.tld', 403
)
self._check_limit('domain_aliases', 2, 2)
self._domain_alias_operation('delete', 'pouet.com', 'domain-alias2.tld')
self._check_limit('domain_aliases', 1, 2)
def test_domain_admins_limit(self):
self._create_domain('domain.tld')
self._create_account('admin1@domain.tld', role='DomainAdmins')
self._check_limit('domain_admins', 1, 2)
self._create_account('admin2@domain.tld', role='DomainAdmins')
self._check_limit('domain_admins', 2, 2)
resp = self._create_account(
'admin3@domain.tld',
role='DomainAdmins',
status=400)
self.assertEqual(
resp['form_errors']['role'][0],
'Select a valid choice. DomainAdmins is not one of the available '
'choices.'
)
self._check_limit('domain_admins', 2, 2)
self.user.limitspool.set_maxvalue('mailboxes_limit', 3)
self._create_account('user1@domain.tld')
user = User.objects.get(username='user1@domain.tld')
values = {
'username': user.username, 'role': 'DomainAdmins',
'quota_act': True, 'is_active': user.is_active,
'email': user.email
}
resp = self.ajax_post(
reverse("modoboa_admin:account_change", args=[user.id]),
values, status=400
)
self.assertEqual(
resp['form_errors']['role'][0],
'Select a valid choice. DomainAdmins is not one of the available '
'choices.'
)
self._check_limit('domain_admins', 2, 2)
def test_domain_admin_resource_are_empty(self):
self._create_domain('domain.tld')
self._create_account('admin1@domain.tld', role='DomainAdmins')
domadmin = User.objects.get(username='admin1@domain.tld')
for l in ['mailboxes', 'mailbox_aliases']:
self.assertEqual(
domadmin.limitspool.get_limit('%s_limit' % l).maxvalue, 0
)
def test_domain_admins_limit_from_domain_tpl(self):
self.user.limitspool.set_maxvalue('domains_limit', 3)
self._create_domain('domain1.tld', withtpl=True)
self._create_domain('domain2.tld', withtpl=True)
self._check_limit('domain_admins', 2, 2)
self._check_limit('domains', 2, 3)
resp = self._create_domain('domain3.tld', status=200, withtpl=True)
self._check_limit('domain_admins', 2, 2)
self._check_limit('domains', 3, 3)
def test_reseller_deletes_domain(self):
"""Check if all resources are restored after the deletion.
"""
self._create_domain('domain.tld', withtpl=True)
dom = Domain.objects.get(name="domain.tld")
self.ajax_post(
reverse("modoboa_admin:domain_delete", args=[dom.id]),
{}
)
self._check_limit('domains', 0, 2)
self._check_limit('domain_admins', 1, 2)
self._check_limit('mailboxes', 0, 2)
self._check_limit('mailbox_aliases', 0, 2)
def test_sadmin_removes_ownership(self):
self._create_domain('domain.tld', withtpl=True)
dom = Domain.objects.get(name="domain.tld")
self.clt.logout()
self.clt.login(username='admin', password='password')
self.ajax_get(
"{0}?domid={1}&daid={2}".format(
reverse('modoboa_admin:permission_remove'), dom.id, self.user.id
), {}
)
self._check_limit('domains', 0, 2)
self._check_limit('domain_admins', 0, 2)
self._check_limit('mailboxes', 0, 2)
self._check_limit('mailbox_aliases', 0, 2)
def test_allocate_from_pool(self):
self._create_domain('domain.tld')
self._create_account('admin1@domain.tld', role='DomainAdmins')
user = User.objects.get(username='admin1@domain.tld')
# Give 1 mailbox and 2 aliases to the admin -> should work
values = {
'username': user.username, 'role': user.group, 'quota_act': True,
'is_active': user.is_active, 'email': user.email,
'mailboxes_limit': 1, 'mailbox_aliases_limit': 2
}
self.ajax_post(
reverse("modoboa_admin:account_change", args=[user.id]),
values
)
self._check_limit('mailboxes', 1, 1)
self._check_limit('mailbox_aliases', 0, 0)
# Delete the admin -> resources should go back to the
# reseller's pool
self.ajax_post(
reverse("modoboa_admin:account_delete", args=[user.id]),
{}
)
self._check_limit('mailboxes', 0, 2)
self._check_limit('mailbox_aliases', 0, 2)
def test_restore_resources(self):
self._create_domain('domain.tld')
dom = Domain.objects.get(name='domain.tld')
self._create_account('admin1@domain.tld', role='DomainAdmins')
user = User.objects.get(username='admin1@domain.tld')
values = {
'username': user.username, 'role': user.group, 'quota_act': True,
'is_active': user.is_active, 'email': user.email,
'mailboxes_limit': 1, 'mailbox_aliases_limit': 2
}
self.ajax_post(
reverse("modoboa_admin:account_change", args=[user.id]),
values
)
dom.add_admin(user)
self.clt.logout()
self.clt.login(username='admin1@domain.tld', password='toto')
self._create_account('user1@domain.tld')
self._create_alias('alias1@domain.tld', 'user1@domain.tld')
self._create_alias('alias2@domain.tld', 'user1@domain.tld')
self.clt.logout()
self.clt.login(username='reseller', password='toto')
# Delete the admin -> resources should go back to the
# reseller's pool
self.ajax_post(
reverse("modoboa_admin:account_delete", args=[user.id]),
{}
)
self._check_limit('mailboxes', 1, 2)
self._check_limit('mailbox_aliases', 2, 2)
    def test_change_role(self):
        """Demoting a domain admin returns its resources to the reseller.

        Allocates resources to a domain admin, then changes the account's
        role to SimpleUsers and verifies the reseller's counters recover.
        """
        self._create_domain('domain.tld')
        self._create_account('admin1@domain.tld', role='DomainAdmins')
        user = User.objects.get(username='admin1@domain.tld')
        # Give 1 mailbox and 2 aliases to the admin -> should work
        values = {
            'username': user.username, 'role': user.group, 'quota_act': True,
            'is_active': user.is_active, 'email': user.email,
            'mailboxes_limit': 1, 'mailbox_aliases_limit': 2
        }
        self.ajax_post(
            reverse("modoboa_admin:account_change", args=[user.id]),
            values
        )
        self._check_limit('mailboxes', 1, 1)
        self._check_limit('mailbox_aliases', 0, 0)
        # Change admin role to SimpleUser -> resources should go back
        # to the reseller.
        values = {
            'username': user.username, 'role': 'SimpleUsers', 'quota_act': True,
            'is_active': user.is_active, 'email': user.email,
        }
        self.ajax_post(
            reverse("modoboa_admin:account_change", args=[user.id]),
            values
        )
        self._check_limit('mailboxes', 1, 2)
        self._check_limit('mailbox_aliases', 0, 2)
    def test_allocate_too_much(self):
        """Allocating more resources than the reseller owns must fail.

        The server is expected to answer with HTTP 424 and the message
        'Not enough resources', leaving the reseller's counters unchanged.
        """
        self._create_domain('domain.tld')
        self._create_account('admin1@domain.tld', role='DomainAdmins')
        user = User.objects.get(username='admin1@domain.tld')
        # Give 2 mailboxes and 3 aliases to the admin -> should fail.
        values = {
            'username': user.username, 'role': user.group, 'quota_act': True,
            'is_active': user.is_active, 'email': user.email,
            'mailboxes_limit': 2, 'mailbox_aliases_limit': 3
        }
        resp = self.ajax_post(
            reverse("modoboa_admin:account_change", args=[user.id]),
            values, 424
        )
        self.assertEqual(resp, 'Not enough resources')
        self._check_limit('mailboxes', 1, 2)
        self._check_limit('mailbox_aliases', 0, 2)
| disko/modoboa-admin-limits | modoboa_admin_limits/tests.py | Python | mit | 15,385 |
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Checks for SDK updates."""
import datetime
import logging
import os
import socket
import ssl
import sys
import time
import urllib.request, urllib.error, urllib.parse
import google
import yaml
from google.appengine.api import validation
from google.appengine.api import yaml_object
# Path of the SDK VERSION file, relative to the google.appengine package.
VERSION_FILE = '../../VERSION'
# Timeout (seconds) for the /api/updatecheck request so startup isn't blocked.
UPDATE_CHECK_TIMEOUT = 3
# Name of the per-user nag-preferences file, stored in the home directory.
NAG_FILE = '.appcfg_nag'
class NagFile(validation.Validated):
  """A validated YAML class to represent the user's nag preferences.
  Attributes:
    timestamp: The timestamp of the last nag.
    opt_in: True if the user wants to check for updates on dev_appserver
      start. False if not. May be None if we have not asked the user yet.
  """
  # Schema consumed by the validation framework: 'timestamp' is required,
  # 'opt_in' is optional (None until the user has been asked).
  ATTRIBUTES = {
      'timestamp': validation.TYPE_FLOAT,
      'opt_in': validation.Optional(validation.TYPE_BOOL),
  }
  @staticmethod
  def Load(nag_file):
    """Load a single NagFile object where one and only one is expected.
    Args:
      nag_file: A file-like object or string containing the yaml data to parse.
    Returns:
      A NagFile instance.
    """
    return yaml_object.BuildSingleObject(NagFile, nag_file)
def GetVersionObject():
  """Read and parse the SDK's VERSION file.

  Returns:
    The parsed YAML object, or None when the VERSION file cannot be opened.
  """
  version_path = os.path.join(os.path.dirname(google.appengine.__file__),
                              VERSION_FILE)
  try:
    stream = open(version_path)
  except IOError:
    logging.error('Could not find version file at %s', version_path)
    return None
  try:
    return yaml.safe_load(stream)
  finally:
    stream.close()
def _VersionList(release):
"""Parse a version string into a list of ints.
Args:
release: The 'release' version, e.g. '1.2.4'.
(Due to YAML parsing this may also be an int or float.)
Returns:
A list of ints corresponding to the parts of the version string
between periods. Example:
'1.2.4' -> [1, 2, 4]
'1.2.3.4' -> [1, 2, 3, 4]
Raises:
ValueError if not all the parts are valid integers.
"""
return [int(part) for part in str(release).split('.')]
class SDKUpdateChecker(object):
  """Determines if the local SDK is the latest version.

  Nags the user when there are updates to the SDK. As the SDK becomes
  more out of date, the language in the nagging gets stronger. We
  store a little yaml file in the user's home directory so that we nag
  the user only once a week.

  The yaml file has the following field:
    'timestamp': Last time we nagged the user in seconds since the epoch.

  Attributes:
    rpcserver: An AbstractRpcServer instance used to check for the latest SDK.
    config: The app's AppInfoExternal. Needed to determine which api_version
      the app is using.
  """
  def __init__(self,
               rpcserver,
               configs):
    """Create a new SDKUpdateChecker.

    Args:
      rpcserver: The AbstractRpcServer to use.
      configs: A list of yaml objects or a single yaml object that specify the
        configuration of this application.
    """
    if not isinstance(configs, list):
      configs = [configs]
    self.rpcserver = rpcserver
    # All runtimes used by this application, and a map from each runtime
    # to the set of api_versions its configs request.
    self.runtimes = set(config.runtime for config in configs)
    self.runtime_to_api_version = {}
    for config in configs:
      self.runtime_to_api_version.setdefault(
          config.runtime, set()).add(config.api_version)
  @staticmethod
  def MakeNagFilename():
    """Returns the filename for the nag file for this user."""
    user_homedir = os.path.expanduser('~/')
    if not os.path.isdir(user_homedir):
      # On Windows '~' may not resolve; derive HOMEDRIVE from the stdlib
      # location so expanduser below produces a usable path.
      drive, unused_tail = os.path.splitdrive(os.__file__)
      if drive:
        os.environ['HOMEDRIVE'] = drive
    return os.path.expanduser('~/' + NAG_FILE)
  def _ParseVersionFile(self):
    """Parse the local VERSION file.

    Returns:
      A Yaml object or None if the file does not exist.
    """
    return GetVersionObject()
  def CheckSupportedVersion(self):
    """Determines if the app's api_version is supported by the SDK.

    Uses the api_version field from the AppInfoExternal to determine if
    the SDK supports that api_version.

    Raises:
      sys.exit if the api_version is not supported.
    """
    version = self._ParseVersionFile()
    if version is None:
      logging.error('Could not determine if the SDK supports the api_version '
                    'requested in app.yaml.')
      return
    unsupported_api_versions_found = False
    for runtime, api_versions in list(self.runtime_to_api_version.items()):
      supported_api_versions = _GetSupportedApiVersions(version, runtime)
      unsupported_api_versions = sorted(api_versions -
                                        set(supported_api_versions))
      if unsupported_api_versions:
        unsupported_api_versions_found = True
        if len(unsupported_api_versions) == 1:
          logging.critical('The requested api_version (%s) is not supported by '
                           'the %s runtime in this release of the SDK. The '
                           'supported api_versions are %s.',
                           unsupported_api_versions[0], runtime,
                           supported_api_versions)
        else:
          logging.critical('The requested api_versions (%s) are not supported '
                           'by the %s runtime in this release of the SDK. The '
                           'supported api_versions are %s.',
                           unsupported_api_versions, runtime,
                           supported_api_versions)
    if unsupported_api_versions_found:
      sys.exit(1)
  def CheckForUpdates(self):
    """Queries the server for updates and nags the user if appropriate.

    Queries the server for the latest SDK version at the same time reporting
    the local SDK version. The server will respond with a yaml document
    containing the fields:
      'release': The name of the release (e.g. 1.2).
      'timestamp': The time the release was created (YYYY-MM-DD HH:MM AM/PM TZ).
      'api_versions': A list of api_version strings (e.g. ['1', 'beta']).

    We will nag the user with increasing severity if:
    - There is a new release.
    - There is a new release with a new api_version.
    - There is a new release that does not support an api_version named in
      a configuration in self.configs.
    """
    version = self._ParseVersionFile()
    if version is None:
      logging.info('Skipping update check')
      return
    logging.info('Checking for updates to the SDK.')
    responses = {}
    try:
      for runtime in self.runtimes:
        responses[runtime] = yaml.safe_load(self.rpcserver.Send(
            '/api/updatecheck',
            timeout=UPDATE_CHECK_TIMEOUT,
            release=version['release'],
            timestamp=version['timestamp'],
            api_versions=version['api_versions'],
            runtime=runtime))
    except (urllib.error.URLError, socket.error, ssl.SSLError) as e:
      # Update checks are best-effort; never break the caller on a
      # network failure.
      logging.info('Update check failed: %s', e)
      return
    try:
      latest = sorted(list(responses.values()), reverse=True,
                      key=lambda release: _VersionList(release['release']))[0]
    except ValueError:
      # Bug fix: previously this fell through and dereferenced the unbound
      # name 'latest' below, raising NameError. Bail out instead.
      logging.warn('Could not parse this release version')
      return
    if version['release'] == latest['release']:
      logging.info('The SDK is up to date.')
      return
    try:
      this_release = _VersionList(version['release'])
    except ValueError:
      logging.warn('Could not parse this release version (%r)',
                   version['release'])
    else:
      try:
        advertised_release = _VersionList(latest['release'])
      except ValueError:
        logging.warn('Could not parse advertised release version (%r)',
                     latest['release'])
      else:
        if this_release > advertised_release:
          logging.info('This SDK release is newer than the advertised release.')
          return
    for runtime, response in list(responses.items()):
      api_versions = _GetSupportedApiVersions(response, runtime)
      # Versions the app uses that the new SDK no longer supports at all.
      obsolete_versions = sorted(
          self.runtime_to_api_version[runtime] - set(api_versions))
      if len(obsolete_versions) == 1:
        self._Nag(
            'The api version you are using (%s) is obsolete! You should\n'
            'upgrade your SDK and test that your code works with the new\n'
            'api version.' % obsolete_versions[0],
            response, version, force=True)
      elif obsolete_versions:
        self._Nag(
            'The api versions you are using (%s) are obsolete! You should\n'
            'upgrade your SDK and test that your code works with the new\n'
            'api version.' % obsolete_versions,
            response, version, force=True)
      # Versions still supported but no longer the latest (all but the
      # last element of api_versions).
      deprecated_versions = sorted(
          self.runtime_to_api_version[runtime].intersection(api_versions[:-1]))
      if len(deprecated_versions) == 1:
        self._Nag(
            'The api version you are using (%s) is deprecated. You should\n'
            'upgrade your SDK to try the new functionality.' %
            deprecated_versions[0], response, version)
      elif deprecated_versions:
        self._Nag(
            'The api versions you are using (%s) are deprecated. You should\n'
            'upgrade your SDK to try the new functionality.' %
            deprecated_versions, response, version)
    self._Nag('There is a new release of the SDK available.',
              latest, version)
  def _ParseNagFile(self):
    """Parses the nag file.

    Returns:
      A NagFile if the file was present else None.
    """
    nag_filename = SDKUpdateChecker.MakeNagFilename()
    try:
      fh = open(nag_filename)
    except IOError:
      return None
    try:
      nag = NagFile.Load(fh)
    finally:
      fh.close()
    return nag
  def _WriteNagFile(self, nag):
    """Writes the NagFile to the user's nag file.

    If the destination path does not exist, this method will log an error
    and fail silently.

    Args:
      nag: The NagFile to write.
    """
    nagfilename = SDKUpdateChecker.MakeNagFilename()
    try:
      fh = open(nagfilename, 'w')
      try:
        fh.write(nag.ToYAML())
      finally:
        fh.close()
    except (OSError, IOError) as e:
      logging.error('Could not write nag file to %s. Error: %s', nagfilename, e)
  def _Nag(self, msg, latest, version, force=False):
    """Prints a nag message and updates the nag file's timestamp.

    Because we don't want to nag the user everytime, we store a simple
    yaml document in the user's home directory. If the timestamp in this
    doc is over a week old, we'll nag the user. And when we nag the user,
    we update the timestamp in this doc.

    Args:
      msg: The formatted message to print to the user.
      latest: The yaml document received from the server.
      version: The local yaml version document.
      force: If True, always nag the user, ignoring the nag file.
    """
    nag = self._ParseNagFile()
    if nag and not force:
      last_nag = datetime.datetime.fromtimestamp(nag.timestamp)
      if datetime.datetime.now() - last_nag < datetime.timedelta(weeks=1):
        logging.debug('Skipping nag message')
        return
    if nag is None:
      nag = NagFile()
    nag.timestamp = time.time()
    self._WriteNagFile(nag)
    print('****************************************************************')
    print(msg)
    print('-----------')
    print('Latest SDK:')
    print(yaml.dump(latest))
    print('-----------')
    print('Your SDK:')
    print(yaml.dump(version))
    print('-----------')
    print('Please visit https://developers.google.com/appengine/downloads')
    print('for the latest SDK')
    print('****************************************************************')
  def AllowedToCheckForUpdates(self, input_fn=input):
    """Determines if the user wants to check for updates.

    On startup, the dev_appserver wants to check for updates to the SDK.
    Because this action reports usage to Google when the user is not
    otherwise communicating with Google (e.g. pushing a new app version),
    the user must opt in.

    If the user does not have a nag file, we will query the user and
    save the response in the nag file. Subsequent calls to this function
    will re-use that response.

    Args:
      input_fn: used to collect user input. This is for testing only.

    Returns:
      True if the user wants to check for updates. False otherwise.
    """
    nag = self._ParseNagFile()
    if nag is None:
      nag = NagFile()
      nag.timestamp = 0.0
    if nag.opt_in is None:
      answer = input_fn('Allow dev_appserver to check for updates on startup? '
                        '(Y/n): ')
      answer = answer.strip().lower()
      if answer == 'n' or answer == 'no':
        print(('dev_appserver will not check for updates on startup. To '
               'change this setting, edit %s' %
               SDKUpdateChecker.MakeNagFilename()))
        nag.opt_in = False
      else:
        print(('dev_appserver will check for updates on startup. To change '
               'this setting, edit %s' % SDKUpdateChecker.MakeNagFilename()))
        nag.opt_in = True
      self._WriteNagFile(nag)
    return nag.opt_in
def _GetSupportedApiVersions(versions, runtime):
"""Returns the runtime-specific or general list of supported runtimes.
The provided 'versions' dict contains a field called 'api_versions'
which is the list of default versions supported. This dict may also
contain a 'supported_api_versions' dict which lists api_versions by
runtime. This function will prefer to return the runtime-specific
api_versions list, but will default to the general list.
Args:
versions: dict of versions from app.yaml or /api/updatecheck server.
runtime: string of current runtime (e.g. 'go').
Returns:
List of supported api_versions (e.g. ['go1']).
"""
if 'supported_api_versions' in versions:
return versions['supported_api_versions'].get(
runtime, versions)['api_versions']
return versions['api_versions']
| Suwmlee/XX-Net | gae_proxy/server/lib/google/appengine/tools/sdk_update_checker.py | Python | bsd-2-clause | 14,750 |
#!/usr/bin/env python
from setuptools import setup, find_packages
# Use the project README as the long description displayed on PyPI.
with open('README.md') as f:
    long_description = f.read()
# Packaging metadata for the Harvest (getharvest.com) API client.
setup(
    name='harvest_api_client',
    version='1.1.3',
    description='A client for the Harvest API (getharvest.com)',
    license='MIT',
    author='Alex Maslakov',
    author_email='Alex Maslakov<me@gildedhonour.com>, Alex Maslakov<gilded.honour@gmail.com>',
    url='https://github.com/GildedHonour/harvest-api-client',
    packages=['harvest_api_client'],
    long_description=long_description,
    keywords = ['harvest-api', 'api', 'harvest.com', 'getharvest.com', 'harvest', 'api client'],
    classifiers = [
        'Development Status :: 5 - Production/Stable',
        'Intended Audience :: Developers',
        'Topic :: Software Development :: Libraries',
        'Topic :: Internet :: WWW/HTTP :: Site Management',
        'Topic :: Utilities',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 3.4',
    ],
) | GildedHonour/harvest-api-client | setup.py | Python | mit | 1,002 |
#!/usr/bin/env python
# encoding: utf-8
#
# Description: Plugin for processing Chuck Norris requests
# Author: Pablo Iranzo Gomez (Pablo.Iranzo@gmail.com)
import json
import logging
import requests
import stampy.plugin.config
import stampy.stampy
from stampy.i18n import _
from stampy.i18n import _L
import random
def init():
    """
    Initializes module
    :return: List of triggers for plugin
    """
    return ["^/cn"]
def run(message): # do not edit this line
    """
    Executes plugin
    :param message: message to run against
    :return:
    """
    text = stampy.stampy.getmsgdetail(message)["text"]
    if not text:
        return
    first_word = text.split()[0].lower()
    if first_word == "/cn":
        cn(message=message)
    return
def help(message): # do not edit this line
    """
    Returns help for plugin
    :param message: message to process
    :return: help text
    """
    return _("Use `/cn <word>` to get a random Chuck Norris quote based on word\n\n")
def cn(message):
    """
    Processes cn commands: fetches a quote from api.chucknorris.io and
    replies to the chat the command came from.
    :param message: Message with the command
    :return:
    """
    logger = logging.getLogger(__name__)
    msgdetail = stampy.stampy.getmsgdetail(message)
    texto = msgdetail["text"]
    chat_id = msgdetail["chat_id"]
    message_id = msgdetail["message_id"]
    who_un = msgdetail["who_un"]
    logger.debug(msg=_L("Command: %s by %s" % (texto, who_un)))
    # We might have been given no search word, in which case we ask the
    # API for a fully random quote instead of a search.
    try:
        command = texto.split(' ')[1]
    except:
        command = False
    if not command:
        url = "https://api.chucknorris.io/jokes/random"
    else:
        url = "https://api.chucknorris.io/jokes/search?query=%s" % command
    text = "``` "
    # The search endpoint may return several matches; pick one at random.
    # Best effort: any network/JSON error leaves result as None.
    try:
        result = json.loads(requests.get(url).content)
    except:
        result = None
    if result:
        if 'result' in result:
            # Search response: {'total': N, 'result': [...]}.
            if result['total'] != 0:
                try:
                    totalelem = len(result['result'])
                except:
                    totalelem = 0
                if totalelem > 1:
                    elem = random.randint(0, totalelem - 1)
                else:
                    elem = 0
                text += result['result'][elem]['value']
            else:
                text += "Chuck Norris didn't said a word about it."
        else:
            # Random-quote response: a single joke object.
            text += result['value']
    text += " ```"
    # Reply as preformatted Markdown so quotes render verbatim.
    stampy.stampy.sendmessage(chat_id=chat_id, text=text,
                              reply_to_message_id=message_id,
                              disable_web_page_preview=True,
                              parse_mode="Markdown")
| iranzo/stampython | stampy/plugin/chuck.py | Python | gpl-3.0 | 2,702 |
'''Tools for working with files in the samtools pileup -c format.'''
import collections
import pysam
# Record for a substitution (SNP) line of ``samtools pileup -c`` output;
# field order matches the 10 columns of the text format.
PileupSubstitution = collections.namedtuple("PileupSubstitution",
                                            " ".join((
                                                "chromosome",
                                                "pos",
                                                "reference_base",
                                                "genotype",
                                                "consensus_quality",
                                                "snp_quality",
                                                "mapping_quality",
                                                "coverage",
                                                "read_bases",
                                                "base_qualities")))
# Record for an indel line (the reference-base column is ``*``); field
# order matches the 13 columns of the text format.
PileupIndel = collections.namedtuple("PileupIndel",
                                     " ".join((
                                         "chromosome",
                                         "pos",
                                         "reference_base",
                                         "genotype",
                                         "consensus_quality",
                                         "snp_quality",
                                         "mapping_quality",
                                         "coverage",
                                         "first_allele",
                                         "second_allele",
                                         "reads_first",
                                         "reads_second",
                                         "reads_diff")))
def iterate(infile):
    '''iterate over ``samtools pileup -c`` formatted file.

    *infile* can be any iterator over a lines.

    The function yields named tuples of the type
    :class:`pysam.Pileup.PileupSubstitution` or
    :class:`pysam.Pileup.PileupIndel`.

    .. note::
        The parser converts to 0-based coordinates
    '''
    # One converter per column; positions are shifted to 0-based.
    conv_subst = (str, lambda x: int(x) - 1, str,
                  str, int, int, int, int, str, str)
    conv_indel = (str, lambda x: int(x) - 1, str, str, int,
                  int, int, int, str, str, int, int, int)
    for line in infile:
        fields = line[:-1].split()
        # A '*' in the reference-base column marks an indel record.
        if fields[2] == "*":
            converters, factory = conv_indel, PileupIndel
        else:
            converters, factory = conv_subst, PileupSubstitution
        try:
            yield factory(*[conv(value)
                            for conv, value in zip(converters, fields)])
        except TypeError:
            raise pysam.SamtoolsError("parsing error in line: `%s`" % line)
# Two-letter genotype -> IUPAC-style one-letter code. Heterozygous pairs
# map to a letter whose case encodes the allele order (see encodeGenotype).
ENCODE_GENOTYPE = {
    'A': 'A', 'C': 'C', 'G': 'G', 'T': 'T',
    'AA': 'A', 'CC': 'C', 'GG': 'G', 'TT': 'T', 'UU': 'U',
    'AG': 'r', 'GA': 'R',
    'CT': 'y', 'TC': 'Y',
    'AC': 'm', 'CA': 'M',
    'GT': 'k', 'TG': 'K',
    'CG': 's', 'GC': 'S',
    'AT': 'w', 'TA': 'W',
}
# One-letter code -> two-letter genotype; inverse of ENCODE_GENOTYPE
# (allele order is not preserved: both cases map to the same pair).
DECODE_GENOTYPE = {
    'A': 'AA',
    'C': 'CC',
    'G': 'GG',
    'T': 'TT',
    'r': 'AG', 'R': 'AG',
    'y': 'CT', 'Y': 'CT',
    'm': 'AC', 'M': 'AC',
    'k': 'GT', 'K': 'GT',
    's': 'CG', 'S': 'CG',
    'w': 'AT', 'W': 'AT',
}
# ------------------------------------------------------------
def encodeGenotype(code):
    '''encode genotypes like GG, GA into a one-letter code.

    The returned code is lower case if code[0] < code[1], otherwise
    it is uppercase.
    '''
    normalized = code.upper()
    return ENCODE_GENOTYPE[normalized]
def decodeGenotype(code):
    '''decode single letter genotypes like m, M into two letters.

    This is the reverse operation to :meth:`encodeGenotype`.
    '''
    expanded = DECODE_GENOTYPE[code]
    return expanded
def translateIndelGenotypeFromVCF(vcf_genotypes, ref):
    '''translate indel from vcf to pileup format.

    Returns a tuple (genotypes, offset) where genotypes is a
    "/"-joined string of pileup-style indel codes (e.g. "+AC", "-T",
    "*") and offset shifts the position to pileup conventions.
    Raises ValueError when the alleles cannot be expressed as a
    simple insertion/deletion relative to *ref*.
    '''
    # indels
    def getPrefix(s1, s2):
        '''get common prefix of strings s1 and s2.'''
        n = min(len(s1), len(s2))
        for x in range(n):
            if s1[x] != s2[x]:
                return s1[:x]
        return s1[:n]
    def getSuffix(s1, s2):
        '''get common sufix of strings s1 and s2.'''
        n = min(len(s1), len(s2))
        if s1[-1] != s2[-1]:
            return ""
        for x in range(-2, -n - 1, -1):
            if s1[x] != s2[x]:
                return s1[x + 1:]
        return s1[-n:]
    def getGenotype(variant, ref):
        # Classify the allele as deletion ("-..."), insertion ("+...")
        # or no-change ("*") and compute the position offset.
        if variant == ref:
            return "*", 0
        if len(ref) > len(variant):
            # is a deletion
            if ref.startswith(variant):
                return "-%s" % ref[len(variant):], len(variant) - 1
            elif ref.endswith(variant):
                return "-%s" % ref[:-len(variant)], -1
            else:
                # deletion in the middle: strip shared prefix/suffix
                prefix = getPrefix(ref, variant)
                suffix = getSuffix(ref, variant)
                shared = len(prefix) + len(suffix) - len(variant)
                # print "-", prefix, suffix, ref, variant, shared, len(prefix), len(suffix), len(ref)
                if shared < 0:
                    raise ValueError()
                return "-%s" % ref[len(prefix):-(len(suffix) - shared)], len(prefix) - 1
        elif len(ref) < len(variant):
            # is an insertion
            if variant.startswith(ref):
                return "+%s" % variant[len(ref):], len(ref) - 1
            elif variant.endswith(ref):
                return "+%s" % variant[:len(ref)], 0
            else:
                # insertion in the middle: strip shared prefix/suffix
                prefix = getPrefix(ref, variant)
                suffix = getSuffix(ref, variant)
                shared = len(prefix) + len(suffix) - len(ref)
                if shared < 0:
                    raise ValueError()
                return "+%s" % variant[len(prefix):-(len(suffix) - shared)], len(prefix)
        else:
            assert 0, "snp?"
    # in pileup, the position refers to the base
    # after the coordinate, hence subtract 1
    # pos -= 1
    genotypes, offsets = [], []
    is_error = True
    # NOTE(review): the 'else' below is read as a for/else — is_error is
    # cleared only when no allele raised ValueError (no break) — TODO confirm
    # against upstream source.
    for variant in vcf_genotypes:
        try:
            g, offset = getGenotype(variant, ref)
        except ValueError:
            break
        genotypes.append(g)
        if g != "*":
            offsets.append(offset)
    else:
        is_error = False
    if is_error:
        raise ValueError()
    assert len(set(offsets)) == 1, "multiple offsets for indel"
    offset = offsets[0]
    genotypes = "/".join(genotypes)
    return genotypes, offset
def vcf2pileup(vcf, sample):
    '''convert vcf record to pileup record.

    Returns a PileupIndel or PileupSubstitution for *sample*, or None
    when the sample's genotype is missing ("."). Raises ValueError if
    the record carries more than one genotype entry for the sample.
    '''
    chromosome = vcf.contig
    pos = vcf.pos
    reference = vcf.ref
    # Allele index 0 is the reference; 1..n are the alternates.
    allelles = [reference] + vcf.alt
    data = vcf[sample]
    # get genotype
    genotypes = data["GT"]
    if len(genotypes) > 1:
        raise ValueError("only single genotype per position, %s" % (str(vcf)))
    genotypes = genotypes[0]
    # not a variant
    if genotypes[0] == ".":
        return None
    # Translate allele indices (skipping the "/" separator) to sequences.
    genotypes = [allelles[int(x)] for x in genotypes if x != "/"]
    # snp_quality is "genotype quality"
    snp_quality = consensus_quality = data.get("GQ", [0])[0]
    mapping_quality = vcf.info.get("MQ", [0])[0]
    coverage = data.get("DP", 0)
    if len(reference) > 1 or max([len(x) for x in vcf.alt]) > 1:
        # indel
        genotype, offset = translateIndelGenotypeFromVCF(genotypes, reference)
        # read_bases/base_qualities have no equivalent in VCF; the second
        # allele column is filled with placeholder '<' characters.
        return PileupIndel(chromosome,
                           pos + offset,
                           "*",
                           genotype,
                           consensus_quality,
                           snp_quality,
                           mapping_quality,
                           coverage,
                           genotype,
                           "<" * len(genotype),
                           0,
                           0,
                           0)
    else:
        genotype = encodeGenotype("".join(genotypes))
        read_bases = ""
        base_qualities = ""
        return PileupSubstitution(chromosome, pos, reference,
                                  genotype, consensus_quality,
                                  snp_quality, mapping_quality,
                                  coverage, read_bases,
                                  base_qualities)
def iterate_from_vcf(infile, sample):
    '''iterate over a vcf-formatted file.

    *infile* can be any iterator over a lines.

    The function yields named tuples of the type
    :class:`pysam.Pileup.PileupSubstitution` or
    :class:`pysam.Pileup.PileupIndel`.

    Positions without a snp will be skipped.

    This method is wasteful and written to support same legacy code
    that expects samtools pileup output.

    Better use the vcf parser directly.

    :param infile: source of vcf-formatted lines.
    :param sample: name of the sample column to extract.
    :raises KeyError: if *sample* is not present in the vcf header.
    '''
    vcf = pysam.VCF()
    vcf.connect(infile)
    if sample not in vcf.getsamples():
        # Bug fix: the sample name was never interpolated into the message
        # (the literal "%s" was raised verbatim).
        raise KeyError("sample %s not vcf file" % sample)
    for row in vcf.fetch():
        result = vcf2pileup(row, sample)
        # vcf2pileup returns None for non-variant positions; skip them.
        if result:
            yield result
| kyleabeauchamp/pysam | pysam/Pileup.py | Python | mit | 8,975 |
#
# Copyright (c) 2014 Oracle and/or its affiliates. All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
"""This module provides the necessary interfaces to perform re-sharding in
fabric. The module takes care of the shard move, shard split and the shard
prune operations.
"""
import logging
import time
from mysql.connector.errorcode import (
ER_NO_SUCH_TABLE,
)
from mysql.fabric import (
errors as _errors,
events as _events,
group_replication as _group_replication,
replication as _replication,
backup as _backup,
utils as _utils,
)
from mysql.fabric.server import (
Group,
MySQLServer,
)
from mysql.fabric.sharding import (
ShardMapping,
RangeShardingSpecification,
HashShardingSpecification,
Shards,
SHARDING_DATATYPE_HANDLER,
SHARDING_SPECIFICATION_HANDLER,
SHARD_METADATA,
SHARD_METADATA_VERIFIER,
)
from mysql.fabric.command import (
ProcedureShard,
)
from mysql.fabric.services import (
sharding as _services_sharding,
utils as _services_utils,
)
_LOGGER = logging.getLogger(__name__)
# Event fired to run the actual prune job asynchronously (see
# the @_events.on_event handler further down in this module).
PRUNE_SHARD_TABLES = _events.Event("PRUNE_SHARD_TABLES")
class PruneShardTables(ProcedureShard):
    """Given the table name prune the tables according to the defined
    sharding specification for the table.
    """
    # Command registration: exposed as "sharding prune_shard".
    group_name = "sharding"
    command_name = "prune_shard"
    def execute(self, table_name, synchronous=True):
        """Given the table name prune the tables according to the defined
        sharding specification for the table. The command prunes all the
        tables that are part of this shard. There might be multiple tables that
        are part of the same shard, these tables will be related together by
        the same sharding key.

        :param table_name: The table that needs to be sharded.
        :param synchronous: Whether one should wait until the execution finishes
                            or not.
        """
        # Batch size for the DELETE statements, taken from the
        # [sharding] prune_limit configuration option.
        prune_limit = _services_utils.read_config_value(
            self.config,
            'sharding',
            'prune_limit'
        )
        procedures = _events.trigger(
            PRUNE_SHARD_TABLES,
            self.get_lockable_objects(),
            table_name,
            prune_limit
        )
        return self.wait_for_procedures(procedures, synchronous)
# Events that make up the re-sharding (move/split) workflow; each step is
# chained to the next by the corresponding @_events.on_event handlers.
CHECK_SHARD_INFORMATION = _events.Event("CHECK_SHARD_INFORMATION")
BACKUP_SOURCE_SHARD = _events.Event("BACKUP_SOURCE_SHARD")
RESTORE_SHARD_BACKUP = _events.Event("RESTORE_SHARD_BACKUP")
SETUP_REPLICATION = _events.Event("SETUP_REPLICATION")
SETUP_SYNC = _events.Event("SETUP_SYNC")
SETUP_RESHARDING_SWITCH = _events.Event("SETUP_RESHARDING_SWITCH")
PRUNE_SHARDS = _events.Event("PRUNE_SHARDS")
class MoveShardServer(ProcedureShard):
    """Move the shard represented by the shard_id to the destination group.

    By default this operation takes a backup, restores it on the destination
    group and guarantees that source and destination groups are synchronized
    before pointing the shard to the new group. If users just want to update
    the state store and skip these provisioning steps, the update_only
    parameter must be set to true.
    """
    # Command registration: exposed as "sharding move_shard".
    group_name = "sharding"
    command_name = "move_shard"
    def execute(self, shard_id, group_id, update_only=False,
                synchronous=True):
        """Move the shard represented by the shard_id to the destination group.

        :param shard_id: The ID of the shard that needs to be moved.
        :param group_id: The ID of the group to which the shard needs to
                         be moved.
        :update_only: Only update the state store and skip provisioning.
        :param synchronous: Whether one should wait until the execution finishes
                            or not.
        """
        # Paths of the external binaries used for backup/restore.
        mysqldump_binary = _services_utils.read_config_value(
            self.config,
            'sharding',
            'mysqldump_program'
        )
        mysqlclient_binary = _services_utils.read_config_value(
            self.config,
            'sharding',
            'mysqlclient_program'
        )
        config_file = self.config.config_file if self.config.config_file else ""
        # A move has no split value and no prune batch size ("").
        procedures = _events.trigger(
            CHECK_SHARD_INFORMATION, self.get_lockable_objects(), shard_id,
            group_id, mysqldump_binary, mysqlclient_binary, None, config_file,
            "", "MOVE", update_only
        )
        return self.wait_for_procedures(procedures, synchronous)
class SplitShardServer(ProcedureShard):
    """Split the shard represented by the shard_id into the destination group.

    By default this operation takes a backup, restores it on the destination
    group and guarantees that source and destination groups are synchronized
    before pointing the shard to the new group. If users just want to update
    the state store and skip these provisioning steps, the update_only
    parameter must be set to true.
    """
    # Command registration: exposed as "sharding split_shard".
    group_name = "sharding"
    command_name = "split_shard"
    def execute(self, shard_id, group_id, split_value=None,
                update_only=False, synchronous=True):
        """Split the shard represented by the shard_id into the destination
        group.

        :param shard_id: The shard_id of the shard that needs to be split.
        :param group_id: The ID of the group into which the split data needs
                         to be moved.
        :param split_value: The value at which the range needs to be split.
        :update_only: Only update the state store and skip provisioning.
        :param synchronous: Whether one should wait until the execution
                            finishes
        """
        # Paths of the external binaries used for backup/restore.
        mysqldump_binary = _services_utils.read_config_value(
            self.config,
            'sharding',
            'mysqldump_program'
        )
        mysqlclient_binary = _services_utils.read_config_value(
            self.config,
            'sharding',
            'mysqlclient_program'
        )
        # Batch size for the post-split prune of rows that no longer
        # belong to each half.
        prune_limit = _services_utils.read_config_value(
            self.config,
            'sharding',
            'prune_limit'
        )
        config_file = self.config.config_file if self.config.config_file else ""
        procedures = _events.trigger(
            CHECK_SHARD_INFORMATION, self.get_lockable_objects(),
            shard_id, group_id, mysqldump_binary, mysqlclient_binary,
            split_value, config_file, prune_limit, "SPLIT", update_only)
        return self.wait_for_procedures(procedures, synchronous)
@_events.on_event(PRUNE_SHARD_TABLES)
def _prune_shard_tables(table_name, prune_limit):
    """Delete the data from the copied data directories based on the
    sharding configuration uploaded in the sharding tables of the state
    store. The basic logic consists of
    a) Querying the sharding scheme name corresponding to the sharding table
    b) Querying the sharding key range using the sharding scheme name.
    c) Deleting the sharding keys that fall outside the range for a given
       server.

    :param table_name: The table_name who's shards need to be pruned.
    :param prune_limit: The number of DELETEs that should be
                        done in one batch.
    """
    shard_mapping = ShardMapping.fetch(table_name)
    # Dispatch to the RANGE/HASH-specific handler for this mapping type.
    try:
        SHARDING_SPECIFICATION_HANDLER[shard_mapping.type_name].\
            delete_from_shard_db(table_name, shard_mapping.type_name, prune_limit)
    except _errors.DatabaseError as error:
        if error.errno == ER_NO_SUCH_TABLE:
            #Error happens because the actual tables are not present in the
            #server. We will ignore this.
            pass
        else:
            raise error
@_events.on_event(CHECK_SHARD_INFORMATION)
def _check_shard_information(shard_id, destn_group_id, mysqldump_binary,
                             mysqlclient_binary, split_value, config_file,
                             prune_limit, cmd, update_only):
    """Verify the sharding information before starting a re-sharding operation.

    Checks the helper binaries, validates (or, for HASH sharding, computes)
    the split point for SPLIT operations, verifies that the source shard and
    the destination group exist, and then triggers the next step of the
    workflow (backup, or the metadata switch when update_only is set).

    :param shard_id: The ID of the shard being moved or split.
    :param destn_group_id: The destination group ID.
    :param mysqldump_binary: The path to the mysqldump binary.
    :param mysqlclient_binary: The path to the mysqlclient binary.
    :param split_value: The point at which the sharding definition
                        should be split.
    :param config_file: The complete path to the fabric configuration
                        file.
    :param prune_limit: The number of DELETEs that should be
                        done in one batch.
    :param cmd: Indicates if it is a split or a move being executed.
    :param update_only: If the operation is an update only operation.

    :raises ShardingError: if a binary is missing, the split value is
                           invalid, the destination group already hosts a
                           shard, or a shard/group cannot be found.
    """
    # Fail fast if either helper binary used for backup/restore is missing,
    # before any state has been touched.
    if not _services_utils.is_valid_binary(mysqldump_binary):
        raise _errors.ShardingError(
            _services_sharding.MYSQLDUMP_NOT_FOUND % mysqldump_binary)
    if not _services_utils.is_valid_binary(mysqlclient_binary):
        raise _errors.ShardingError(
            _services_sharding.MYSQLCLIENT_NOT_FOUND % mysqlclient_binary)
    if cmd == "SPLIT":
        range_sharding_spec, _, shard_mappings, _ = \
            _services_sharding.verify_and_fetch_shard(shard_id)
        upper_bound = \
            SHARDING_SPECIFICATION_HANDLER[shard_mappings[0].type_name].\
            get_upper_bound(
                range_sharding_spec.lower_bound,
                range_sharding_spec.shard_mapping_id,
                shard_mappings[0].type_name
            )
        # When a shard is split, all the tables that are part of the shard
        # have the same sharding scheme: all the shard mappings associated
        # with this shard_id are of the same sharding type. Hence it is safe
        # to use the first shard mapping below.
        if shard_mappings[0].type_name == "HASH":
            # For HASH sharding the split point is always computed, never
            # supplied by the caller.
            if split_value is not None:
                raise _errors.ShardingError(
                    _services_sharding.NO_LOWER_BOUND_FOR_HASH_SHARDING
                )
            if upper_bound is None:
                # While splitting a range, retrieve the next upper bound and
                # find the mid-point; in the case where the next upper_bound
                # is unavailable pick the maximum value in the set of values
                # in the shard.
                upper_bound = HashShardingSpecification.fetch_max_key(shard_id)
            # Calculate the split value as the mid-point of the range.
            split_value = \
                SHARDING_DATATYPE_HANDLER[shard_mappings[0].type_name].\
                split_value(
                    range_sharding_spec.lower_bound,
                    upper_bound
                )
        elif split_value is not None:
            # RANGE sharding with an explicit split point: it must lie
            # within the shard's current bounds.
            if not (SHARDING_DATATYPE_HANDLER[shard_mappings[0].type_name].\
                    is_valid_split_value(
                        split_value, range_sharding_spec.lower_bound,
                        upper_bound
                    )
                ):
                raise _errors.ShardingError(
                    _services_sharding.INVALID_LOWER_BOUND_VALUE %
                    (split_value, )
                )
        elif split_value is None:
            # RANGE sharding requires the caller to supply a split point.
            raise _errors.ShardingError(
                _services_sharding.SPLIT_VALUE_NOT_DEFINED
            )
    # Ensure that the destination group does not already contain a shard.
    if Shards.lookup_shard_id(destn_group_id) is not None:
        raise _errors.ShardingError(
            _services_sharding.SHARD_MOVE_DESTINATION_NOT_EMPTY %
            (destn_group_id, )
        )
    # Fetch the group information for the source shard that
    # needs to be moved.
    source_shard = Shards.fetch(shard_id)
    if source_shard is None:
        raise _errors.ShardingError(
            _services_sharding.SHARD_NOT_FOUND % (shard_id, ))
    # Fetch the group_id and the group that hosts the source shard.
    source_group_id = source_shard.group_id
    destn_group = Group.fetch(destn_group_id)
    if destn_group is None:
        raise _errors.ShardingError(
            _services_sharding.SHARD_GROUP_NOT_FOUND %
            (destn_group_id, ))
    if not update_only:
        # Normal path: start the workflow with a backup of the group hosting
        # the source shard.
        _events.trigger_within_procedure(
            BACKUP_SOURCE_SHARD, shard_id, source_group_id, destn_group_id,
            mysqldump_binary, mysqlclient_binary, split_value, config_file,
            prune_limit, cmd, update_only
        )
    else:
        # Update-only path: skip provisioning and go straight to switching
        # the metadata in the state store.
        _events.trigger_within_procedure(
            SETUP_RESHARDING_SWITCH, shard_id, source_group_id, destn_group_id,
            split_value, prune_limit, cmd, update_only
        )
@_events.on_event(BACKUP_SOURCE_SHARD)
def _backup_source_shard(shard_id, source_group_id, destn_group_id,
                         mysqldump_binary, mysqlclient_binary, split_value,
                         config_file, prune_limit, cmd, update_only):
    """Take a mysqldump backup of the group hosting the source shard and
    hand the resulting image over to the restore step of the workflow.

    :param shard_id: The shard ID of the shard that needs to be moved.
    :param source_group_id: The group_id of the source shard.
    :param destn_group_id: The ID of the group to which the shard needs to
                           be moved.
    :param mysqldump_binary: The fully qualified mysqldump binary.
    :param mysqlclient_binary: The fully qualified mysql client binary.
    :param split_value: Indicates the value at which the range for the
                        particular shard will be split. Will be set only
                        for shard split operations.
    :param config_file: The complete path to the fabric configuration file.
    :param prune_limit: The number of DELETEs that should be
                        done in one batch.
    :param cmd: Indicates the type of re-sharding operation (move, split)
    :param update_only: Only update the state store and skip provisioning.
    """
    # Pick a server out of the source group that is suitable for backups.
    shard_group = Group.fetch(source_group_id)
    backup_server = _services_utils.fetch_backup_server(shard_group)
    # Dump the data of the group currently hosting the shard.
    image = _backup.MySQLDump.backup(backup_server, config_file,
                                     mysqldump_binary)
    # Continue the workflow: restore the image on the destination group.
    _events.trigger_within_procedure(
        RESTORE_SHARD_BACKUP,
        shard_id,
        source_group_id,
        destn_group_id,
        mysqlclient_binary,
        image.path,
        split_value,
        config_file,
        prune_limit,
        cmd
    )
@_events.on_event(RESTORE_SHARD_BACKUP)
def _restore_shard_backup(shard_id, source_group_id, destn_group_id,
                          mysqlclient_binary, backup_image,
                          split_value, config_file, prune_limit, cmd):
    """Restore the backup on every server of the destination group, then
    trigger the replication-setup step of the workflow.

    :param shard_id: The shard ID of the shard that needs to be moved.
    :param source_group_id: The group_id of the source shard.
    :param destn_group_id: The ID of the group to which the shard needs to
                           be moved.
    :param mysqlclient_binary: The fully qualified mysqlclient binary.
    :param backup_image: The destination file that contains the backup
                         of the source shard.
    :param split_value: Indicates the value at which the range for the
                        particular shard will be split. Will be set only
                        for shard split operations.
    :param config_file: The complete path to the fabric configuration file.
    :param prune_limit: The number of DELETEs that should be
                        done in one batch.
    :param cmd: Indicates the type of re-sharding operation

    :raises ShardingError: if the destination group does not exist.
    """
    target_group = Group.fetch(destn_group_id)
    if target_group is None:
        raise _errors.ShardingError(_services_sharding.SHARD_GROUP_NOT_FOUND %
                                    (destn_group_id, ))
    # Wrap the raw path into a backup image usable by the restore API.
    restore_image = _backup.BackupImage(backup_image)
    # Restore onto every server in the destination group so the whole
    # group starts out with a consistent copy of the data.
    for target_server in target_group.servers():
        target_server.connect()
        _backup.MySQLDump.restore_fabric_server(
            target_server, restore_image, config_file, mysqlclient_binary)
    # Continue the workflow: sync the source and destination groups.
    _events.trigger_within_procedure(
        SETUP_REPLICATION,
        shard_id,
        source_group_id,
        destn_group_id,
        split_value,
        prune_limit,
        cmd
    )
@_events.on_event(SETUP_REPLICATION)
def _setup_replication(shard_id, source_group_id, destn_group_id, split_value,
                       prune_limit, cmd):
    """Setup replication between the source and the destination groups and
    ensure that they are in sync.

    Replication runs from the source group's master (as master) to the
    destination group's master (as slave).

    :param shard_id: The shard ID of the shard that needs to be moved.
    :param source_group_id: The group_id of the source shard.
    :param destn_group_id: The ID of the group to which the shard needs to
                           be moved.
    :param split_value: Indicates the value at which the range for the
                        particular shard will be split. Will be set only
                        for shard split operations.
    :param prune_limit: The number of DELETEs that should be
                        done in one batch.
    :param cmd: Indicates the type of re-sharding operation

    :raises ShardingError: if a group or a group master cannot be found.
    """
    source_group = Group.fetch(source_group_id)
    if source_group is None:
        raise _errors.ShardingError(_services_sharding.SHARD_GROUP_NOT_FOUND %
                                    (source_group_id, ))
    destination_group = Group.fetch(destn_group_id)
    if destination_group is None:
        raise _errors.ShardingError(_services_sharding.SHARD_GROUP_NOT_FOUND %
                                    (destn_group_id, ))
    master = MySQLServer.fetch(source_group.master)
    if master is None:
        raise _errors.ShardingError(
            _services_sharding.SHARD_GROUP_MASTER_NOT_FOUND)
    master.connect()
    slave = MySQLServer.fetch(destination_group.master)
    if slave is None:
        raise _errors.ShardingError(
            _services_sharding.SHARD_GROUP_MASTER_NOT_FOUND)
    slave.connect()
    # Stop and reset any slave that might be running on the slave server,
    # so the new replication stream starts from a clean state.
    _replication.stop_slave(slave, wait=True)
    _replication.reset_slave(slave, clean=True)
    # Change the master to the shard group master.
    _replication.switch_master(slave, master, master.user, master.passwd)
    # Start the slave so that syncing of the data begins.
    _replication.start_slave(slave, wait=True)
    # Continue the workflow: wait until the two groups are in sync.
    _events.trigger_within_procedure(
        SETUP_SYNC,
        shard_id,
        source_group_id,
        destn_group_id,
        split_value,
        prune_limit,
        cmd
    )
@_events.on_event(SETUP_SYNC)
def _setup_sync(shard_id, source_group_id, destn_group_id, split_value,
                prune_limit, cmd):
    """Sync the source and the destination groups.

    Waits for the destination group's master (the slave) to catch up with
    the source group's master, then tears the temporary replication stream
    down again and triggers the metadata switch.

    :param shard_id: The shard ID of the shard that needs to be moved.
    :param source_group_id: The group_id of the source shard.
    :param destn_group_id: The ID of the group to which the shard needs to
                           be moved.
    :param split_value: Indicates the value at which the range for the
                        particular shard will be split. Will be set only
                        for shard split operations.
    :param prune_limit: The number of DELETEs that should be
                        done in one batch.
    :param cmd: Indicates the type of re-sharding operation

    :raises ShardingError: if a group or a group master cannot be found.
    """
    source_group = Group.fetch(source_group_id)
    if source_group is None:
        raise _errors.ShardingError(_services_sharding.SHARD_GROUP_NOT_FOUND %
                                    (source_group_id, ))
    destination_group = Group.fetch(destn_group_id)
    if destination_group is None:
        raise _errors.ShardingError(_services_sharding.SHARD_GROUP_NOT_FOUND %
                                    (destn_group_id, ))
    master = MySQLServer.fetch(source_group.master)
    if master is None:
        raise _errors.ShardingError(
            _services_sharding.SHARD_GROUP_MASTER_NOT_FOUND)
    master.connect()
    slave = MySQLServer.fetch(destination_group.master)
    if slave is None:
        raise _errors.ShardingError(
            _services_sharding.SHARD_GROUP_MASTER_NOT_FOUND)
    slave.connect()
    # Synchronize until the slave catches up with the master. Per the helper
    # name (and the read-only reset later in the workflow), the master is
    # put in read-only mode while the slave drains the remaining events.
    _replication.synchronize_with_read_only(slave, master)
    # Reset replication once the syncing is done: the destination group no
    # longer follows the source group.
    _replication.stop_slave(slave, wait=True)
    _replication.reset_slave(slave, clean=True)
    # Trigger changing the mappings for the shard that was copied.
    _events.trigger_within_procedure(
        SETUP_RESHARDING_SWITCH,
        shard_id,
        source_group_id,
        destn_group_id,
        split_value,
        prune_limit,
        cmd
    )
@_events.on_event(SETUP_RESHARDING_SWITCH)
def _setup_resharding_switch(shard_id, source_group_id, destination_group_id,
                             split_value, prune_limit, cmd, update_only=False):
    """Dispatch to the shard move or shard split switch-over depending on
    the command argument.

    :param shard_id: The ID of the shard that needs to be re-sharded.
    :param source_group_id: The ID of the source group.
    :param destination_group_id: The ID of the destination group.
    :param split_value: The value at which the shard needs to be split
                        (in the case of a shard split operation).
    :param prune_limit: The number of DELETEs that should be
                        done in one batch.
    :param cmd: whether the operation that needs to be performed is a
                MOVE or a SPLIT operation.
    :param update_only: Only update the state store and skip provisioning.
    """
    if cmd == "SPLIT":
        _setup_shard_switch_split(
            shard_id, source_group_id, destination_group_id,
            split_value, prune_limit, cmd, update_only
        )
    elif cmd == "MOVE":
        # A move does not need a split point or pruning.
        _setup_shard_switch_move(
            shard_id, source_group_id, destination_group_id, update_only
        )
def _setup_shard_switch_split(shard_id, source_group_id, destination_group_id,
                              split_value, prune_limit, cmd, update_only):
    """Setup the split shards to map to their new groups.

    Replaces the shard being split with two new shards — one staying on the
    source group and one placed on the destination group — updates the
    sharding specifications accordingly, and finally triggers pruning of the
    rows that fell outside each new shard's range.

    :param shard_id: The shard ID of the shard that needs to be split.
    :param source_group_id: The group_id of the source shard.
    :param destination_group_id: The ID of the group to which the new shard
                                 needs to be placed.
    :param split_value: Indicates the value at which the range for the
                        particular shard will be split. Will be set only
                        for shard split operations.
    :param prune_limit: The number of DELETEs that should be
                        done in one batch.
    :param cmd: Indicates the type of re-sharding operation.
    :param update_only: Only update the state store and skip provisioning.

    :raises ShardingError: if a group or a group master cannot be found.
    """
    # Fetch the Range sharding specification.
    range_sharding_spec, source_shard, shard_mappings, shard_mapping_defn = \
        _services_sharding.verify_and_fetch_shard(shard_id)
    if not update_only:
        # Fetch the metadata (the old bounds) from the source shard; they
        # are reused below when inserting metadata for the two new shards.
        shard_meta_data = SHARD_METADATA.fetch_shard_meta_data(
            shard_id, source_group_id)
        lower_bound = shard_meta_data["lower_bound"]
        upper_bound = shard_meta_data["upper_bound"]
    # Disable the old shard.
    source_shard.disable()
    # Remove the old shard.
    range_sharding_spec.remove()
    source_shard.remove()
    destination_group = Group.fetch(destination_group_id)
    if destination_group is None:
        raise _errors.ShardingError(_services_sharding.SHARD_GROUP_NOT_FOUND %
                                    (destination_group_id, ))
    destn_group_master = MySQLServer.fetch(destination_group.master)
    if destn_group_master is None:
        raise _errors.ShardingError(
            _services_sharding.SHARD_GROUP_MASTER_NOT_FOUND)
    destn_group_master.connect()
    # Make the destination group read only to disable updates until the
    # connectors update their caches, thus avoiding inconsistency.
    destn_group_master.read_only = True
    # Add the new shards. Generate new shard IDs for the shard being
    # split and also for the shard that is created as a result of the split.
    new_shard_1 = Shards.add(source_shard.group_id, "DISABLED")
    new_shard_2 = Shards.add(destination_group_id, "DISABLED")
    if not update_only:
        # The backup has been restored on both the new shards. Hence both
        # of them will have the old trigger ranges defined, which need to be
        # changed.
        SHARD_METADATA.delete_shard_meta_data(source_shard.group_id, shard_id)
        SHARD_METADATA.delete_shard_meta_data(destination_group_id, shard_id)
    # Both of the shard mappings associated with this shard_id should
    # be of the same sharding type. Hence it is safe to use one of the
    # shard mappings.
    if shard_mappings[0].type_name == "HASH":
        # In the case of a split involving a HASH sharding scheme,
        # the shard that is split gets a new shard_id, while the split
        # gets the new computed lower_bound and also a new shard id.
        # NOTE: the shard that is split retains its lower_bound.
        HashShardingSpecification.add_hash_split(
            range_sharding_spec.shard_mapping_id,
            new_shard_1.shard_id,
            range_sharding_spec.lower_bound
        )
        HashShardingSpecification.add_hash_split(
            range_sharding_spec.shard_mapping_id,
            new_shard_2.shard_id,
            split_value
        )
    else:
        # Add the new ranges. Note that the shard being split retains
        # its lower_bound, while the new shard gets the computed
        # lower_bound.
        RangeShardingSpecification.add(
            range_sharding_spec.shard_mapping_id,
            range_sharding_spec.lower_bound,
            new_shard_1.shard_id
        )
        RangeShardingSpecification.add(
            range_sharding_spec.shard_mapping_id,
            split_value,
            new_shard_2.shard_id
        )
    # The sleep ensures that the connectors have refreshed their caches with
    # the new shards that have been added as a result of the split.
    time.sleep(_utils.TTL)
    # The source shard group master would have been marked as read only
    # during the sync. Remove the read_only flag.
    source_group = Group.fetch(source_group_id)
    if source_group is None:
        raise _errors.ShardingError(_services_sharding.SHARD_GROUP_NOT_FOUND %
                                    (source_group_id, ))
    source_group_master = MySQLServer.fetch(source_group.master)
    if source_group_master is None:
        raise _errors.ShardingError(
            _services_sharding.SHARD_GROUP_MASTER_NOT_FOUND)
    source_group_master.connect()
    # Kill all the existing connections on the servers.
    source_group.kill_connections_on_servers()
    # Allow connections on the source group master.
    source_group_master.read_only = False
    # Allow connections on the destination group master.
    destn_group_master.read_only = False
    # Setup replication for the new group from the global server.
    # NOTE(review): shard_mapping_defn[2] is presumably the global group of
    # the shard mapping definition -- confirm against verify_and_fetch_shard.
    _group_replication.setup_group_replication \
        (shard_mapping_defn[2], destination_group_id)
    if not update_only:
        # Update the sharding metadata for both the shards, splitting the
        # old [lower_bound, upper_bound] range at split_value.
        SHARD_METADATA.insert_shard_meta_data(
            new_shard_1.shard_id,
            lower_bound,
            split_value,
            new_shard_1.group_id
        )
        SHARD_METADATA.insert_shard_meta_data(
            new_shard_2.shard_id,
            split_value,
            upper_bound,
            new_shard_2.group_id
        )
    # Enable the split shards.
    new_shard_1.enable()
    new_shard_2.enable()
    # Trigger pruning the rows that now fall outside each new shard's range.
    if not update_only:
        _events.trigger_within_procedure(
            PRUNE_SHARDS, new_shard_1.shard_id, new_shard_2.shard_id,
            prune_limit
        )
@_events.on_event(PRUNE_SHARDS)
def _prune_shard_tables_after_split(shard_id_1, shard_id_2, prune_limit):
    """Prune the two shards generated after a split.

    For each of the two new shards, delete the rows that now fall outside
    that shard's range. A missing shard table on a server is tolerated.

    :param shard_id_1: The first shard id after the split.
    :param shard_id_2: The second shard id after the split.
    :param prune_limit: The number of DELETEs that should be
                        done in one batch.
    """
    # Fetch the shard mappings once, via the first shard. When we start
    # implementing heterogenous sharding schemes, the sharding type will
    # need to be looked up per shard; today all the shard mappings
    # associated with a shard are of the same type, so it is safe to use
    # the first mapping for both prunes.
    _, _, shard_mappings, _ = _services_sharding.\
        verify_and_fetch_shard(shard_id_1)
    type_name = shard_mappings[0].type_name
    handler = SHARDING_SPECIFICATION_HANDLER[type_name]
    # The same prune-and-tolerate logic applies to both new shards, so run
    # it in a loop instead of duplicating the try/except block.
    for shard_id in (shard_id_1, shard_id_2):
        try:
            handler.prune_shard_id(shard_id, type_name, prune_limit)
        except _errors.DatabaseError as error:
            if error.errno == ER_NO_SUCH_TABLE:
                # The shard tables are not present on the server; there is
                # nothing to prune, so the error can be safely ignored.
                pass
            else:
                # Bare raise preserves the original traceback (unlike
                # "raise error", which restarts it on this line in Py2).
                raise
def _setup_shard_switch_move(shard_id, source_group_id, destination_group_id,
                             update_only):
    """Setup the moved shard to map to the new group.

    Points the shard at the destination group, re-wires replication from the
    global group, and re-opens both group masters for writes once the
    connectors have had time to refresh their caches.

    :param shard_id: The shard ID of the shard that needs to be moved.
    :param source_group_id: The group_id of the source shard.
    :param destination_group_id: The ID of the group to which the shard
                                 needs to be moved.
    :param update_only: Only update the state store and skip provisioning.

    :raises ShardingError: if a group or a group master cannot be found.
    """
    # Fetch the Range sharding specification. When we start implementing
    # heterogenous sharding schemes, we need to find out the type of
    # sharding scheme and we should use that to find out the sharding
    # implementation.
    _, source_shard, _, shard_mapping_defn = \
        _services_sharding.verify_and_fetch_shard(shard_id)
    destination_group = Group.fetch(destination_group_id)
    if destination_group is None:
        raise _errors.ShardingError(_services_sharding.SHARD_GROUP_NOT_FOUND %
                                    (destination_group_id, ))
    destn_group_master = MySQLServer.fetch(destination_group.master)
    if destn_group_master is None:
        raise _errors.ShardingError(
            _services_sharding.SHARD_GROUP_MASTER_NOT_FOUND)
    destn_group_master.connect()
    # Set the destination group master to read_only to disable updates until
    # the connectors have refreshed their caches.
    destn_group_master.read_only = True
    # Setup replication between the shard group and the global group.
    # NOTE(review): shard_mapping_defn[2] is presumably the global group of
    # the shard mapping definition -- confirm against verify_and_fetch_shard.
    _group_replication.setup_group_replication \
        (shard_mapping_defn[2], destination_group_id)
    # Set the shard to point to the new group.
    source_shard.group_id = destination_group_id
    # Stop the replication between the global server and the original
    # group associated with the shard.
    _group_replication.stop_group_slave\
        (shard_mapping_defn[2], source_group_id, True)
    # The sleep ensures that the connectors have refreshed their caches with
    # the new shard placement.
    time.sleep(_utils.TTL)
    # Reset the read only flag on the source server.
    source_group = Group.fetch(source_group_id)
    if source_group is None:
        raise _errors.ShardingError(_services_sharding.SHARD_GROUP_NOT_FOUND %
                                    (source_group_id, ))
    master = MySQLServer.fetch(source_group.master)
    if master is None:
        raise _errors.ShardingError(
            _services_sharding.SHARD_GROUP_MASTER_NOT_FOUND)
    if not update_only:
        master.connect()
        master.read_only = False
    # Kill all the existing connections on the servers.
    source_group.kill_connections_on_servers()
    # Allow updates in the destination group master.
    destn_group_master.read_only = False
| ioggstream/mysql-utilities | mysql/fabric/services/resharding.py | Python | gpl-2.0 | 35,111 |
# Given a char array representing tasks a CPU needs to do. It contains capital letters A to Z where different letters represent different tasks. Tasks can be done in any order. Each task takes one interval. In each interval, the CPU either finishes one task or stays idle.
# However, there is a non-negative cooling interval n, meaning that between two executions of the same task there must be at least n intervals during which the CPU is doing different tasks or is idle.
# You need to return the least number of intervals the CPU will take to finish all the given tasks.
# Example 1:
# Input: tasks = ["A","A","A","B","B","B"], n = 2
# Output: 8
# Explanation: A -> B -> idle -> A -> B -> idle -> A -> B.
# Note:
# The number of tasks is in the range [1, 10000].
# The integer n is in the range [0, 100].
# looks like a greedy approach would work
import collections
class Solution(object):
    def leastInterval(self, tasks, n):
        """Return the least number of intervals needed to finish all tasks.

        Between two executions of the same task at least ``n`` other
        intervals (different tasks or idles) must pass.

        Bug fix: the previous greedy picked the *first* task whose cooldown
        had expired (in dict-insertion order), which is suboptimal — e.g.
        ["A", "B", "B"] with n=2 produced 5 instead of the optimal 4
        (B, A, idle, B). The most frequent task must be scheduled first.
        The closed-form below is the standard optimal answer and runs in
        O(len(tasks)).

        :param tasks: iterable of hashable task labels.
        :param n: non-negative cooling interval between identical tasks.
        :return: least number of CPU intervals, idle slots included.
        """
        if not tasks:
            return 0
        counts = collections.Counter(tasks)
        max_count = max(counts.values())
        # Number of distinct tasks tied at the maximum frequency; each one
        # occupies a slot in the final frame of the schedule.
        max_ties = sum(1 for c in counts.values() if c == max_count)
        # Scheduling the most frequent task first forces (max_count - 1)
        # frames of length (n + 1), plus one closing slot per task tied at
        # the maximum.  All remaining tasks fit into the idle slots of
        # those frames, so the answer is that frame total unless there are
        # simply more tasks than slots (then no idling is needed at all).
        frames = (max_count - 1) * (n + 1) + max_ties
        return max(len(tasks), frames)
if __name__ == "__main__":
    # Demo only runs when the file is executed directly, so importing this
    # module no longer prints as a side effect.  Expected output: 5
    # (e.g. A -> B -> C -> A -> B).
    s = Solution()
    print(s.leastInterval(["A", "B", "C", "A", "B"], 2))
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import random_seed
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops.linalg import linalg as linalg_lib
from tensorflow.python.ops.linalg import linear_operator_test_util
from tensorflow.python.platform import test
# Fix both the TensorFlow graph-level seed and the NumPy RNG so the
# randomized inputs used by the tests below are reproducible across runs.
random_seed.set_random_seed(23)
rng = np.random.RandomState(2016)
class LinearOperatorIdentityTest(
    linear_operator_test_util.SquareLinearOperatorDerivedClassTest):
  """Most tests done in the base class LinearOperatorDerivedClassTest."""

  @property
  def _dtypes_to_test(self):
    # TODO(langmore) Test tf.float16 once tf.matrix_solve works in
    # 16bit.
    return [dtypes.float32, dtypes.float64, dtypes.complex64, dtypes.complex128]

  def _operator_and_matrix(self, build_info, dtype, use_placeholder):
    # Build the operator under test together with the dense matrix it is
    # supposed to behave like; the base class compares the two.
    shape = list(build_info.shape)
    assert shape[-1] == shape[-2]

    batch_shape = shape[:-2]
    num_rows = shape[-1]

    operator = linalg_lib.LinearOperatorIdentity(
        num_rows, batch_shape=batch_shape, dtype=dtype)
    mat = linalg_ops.eye(num_rows, batch_shape=batch_shape, dtype=dtype)

    return operator, mat

  def test_assert_positive_definite(self):
    # The identity is positive definite, so the assert op must succeed.
    with self.test_session():
      operator = linalg_lib.LinearOperatorIdentity(num_rows=2)
      operator.assert_positive_definite().run()  # Should not fail

  def test_assert_non_singular(self):
    with self.test_session():
      operator = linalg_lib.LinearOperatorIdentity(num_rows=2)
      operator.assert_non_singular().run()  # Should not fail

  def test_assert_self_adjoint(self):
    with self.test_session():
      operator = linalg_lib.LinearOperatorIdentity(num_rows=2)
      operator.assert_self_adjoint().run()  # Should not fail

  def test_float16_matmul(self):
    # float16 cannot be tested by base test class because tf.matrix_solve does
    # not work with float16.
    with self.test_session():
      operator = linalg_lib.LinearOperatorIdentity(
          num_rows=2, dtype=dtypes.float16)
      x = rng.randn(2, 3).astype(np.float16)
      y = operator.matmul(x)
      self.assertAllClose(x, y.eval())

  # The "static" tests below exercise shape validation that happens at
  # graph-construction time, so the errors are ordinary Python exceptions.
  def test_non_scalar_num_rows_raises_static(self):
    with self.assertRaisesRegexp(ValueError, "must be a 0-D Tensor"):
      linalg_lib.LinearOperatorIdentity(num_rows=[2])

  def test_non_integer_num_rows_raises_static(self):
    with self.assertRaisesRegexp(TypeError, "must be integer"):
      linalg_lib.LinearOperatorIdentity(num_rows=2.)

  def test_negative_num_rows_raises_static(self):
    with self.assertRaisesRegexp(ValueError, "must be non-negative"):
      linalg_lib.LinearOperatorIdentity(num_rows=-2)

  def test_non_1d_batch_shape_raises_static(self):
    with self.assertRaisesRegexp(ValueError, "must be a 1-D"):
      linalg_lib.LinearOperatorIdentity(num_rows=2, batch_shape=2)

  def test_non_integer_batch_shape_raises_static(self):
    with self.assertRaisesRegexp(TypeError, "must be integer"):
      linalg_lib.LinearOperatorIdentity(num_rows=2, batch_shape=[2.])

  def test_negative_batch_shape_raises_static(self):
    with self.assertRaisesRegexp(ValueError, "must be non-negative"):
      linalg_lib.LinearOperatorIdentity(num_rows=2, batch_shape=[-2])

  # The "dynamic" tests feed shapes through placeholders, so validation can
  # only happen at run time (requires assert_proper_shapes=True) and the
  # errors surface as op errors.
  def test_non_scalar_num_rows_raises_dynamic(self):
    with self.test_session():
      num_rows = array_ops.placeholder(dtypes.int32)
      operator = linalg_lib.LinearOperatorIdentity(
          num_rows, assert_proper_shapes=True)
      with self.assertRaisesOpError("must be a 0-D Tensor"):
        operator.to_dense().eval(feed_dict={num_rows: [2]})

  def test_negative_num_rows_raises_dynamic(self):
    with self.test_session():
      num_rows = array_ops.placeholder(dtypes.int32)
      operator = linalg_lib.LinearOperatorIdentity(
          num_rows, assert_proper_shapes=True)
      with self.assertRaisesOpError("must be non-negative"):
        operator.to_dense().eval(feed_dict={num_rows: -2})

  def test_non_1d_batch_shape_raises_dynamic(self):
    with self.test_session():
      batch_shape = array_ops.placeholder(dtypes.int32)
      operator = linalg_lib.LinearOperatorIdentity(
          num_rows=2, batch_shape=batch_shape, assert_proper_shapes=True)
      with self.assertRaisesOpError("must be a 1-D"):
        operator.to_dense().eval(feed_dict={batch_shape: 2})

  def test_negative_batch_shape_raises_dynamic(self):
    with self.test_session():
      batch_shape = array_ops.placeholder(dtypes.int32)
      operator = linalg_lib.LinearOperatorIdentity(
          num_rows=2, batch_shape=batch_shape, assert_proper_shapes=True)
      with self.assertRaisesOpError("must be non-negative"):
        operator.to_dense().eval(feed_dict={batch_shape: [-2]})

  def test_wrong_matrix_dimensions_raises_static(self):
    # 3x3 argument is incompatible with a 2x2 identity operator.
    operator = linalg_lib.LinearOperatorIdentity(num_rows=2)
    x = rng.randn(3, 3).astype(np.float32)
    with self.assertRaisesRegexp(ValueError, "Dimensions.*not compatible"):
      operator.matmul(x)

  def test_wrong_matrix_dimensions_raises_dynamic(self):
    num_rows = array_ops.placeholder(dtypes.int32)
    x = array_ops.placeholder(dtypes.float32)

    with self.test_session():
      operator = linalg_lib.LinearOperatorIdentity(
          num_rows, assert_proper_shapes=True)
      y = operator.matmul(x)
      with self.assertRaisesOpError("Incompatible.*dimensions"):
        y.eval(feed_dict={num_rows: 2, x: rng.rand(3, 3)})

  def test_default_batch_shape_broadcasts_with_everything_static(self):
    # These cannot be done in the automated (base test class) tests since they
    # test shapes that tf.batch_matmul cannot handle.
    # In particular, tf.batch_matmul does not broadcast.
    with self.test_session() as sess:
      x = random_ops.random_normal(shape=(1, 2, 3, 4))
      operator = linalg_lib.LinearOperatorIdentity(num_rows=3, dtype=x.dtype)

      operator_matmul = operator.matmul(x)
      expected = x

      self.assertAllEqual(operator_matmul.get_shape(), expected.get_shape())
      self.assertAllClose(*sess.run([operator_matmul, expected]))

  def test_default_batch_shape_broadcasts_with_everything_dynamic(self):
    # These cannot be done in the automated (base test class) tests since they
    # test shapes that tf.batch_matmul cannot handle.
    # In particular, tf.batch_matmul does not broadcast.
    with self.test_session() as sess:
      x = array_ops.placeholder(dtypes.float32)
      operator = linalg_lib.LinearOperatorIdentity(num_rows=3, dtype=x.dtype)

      operator_matmul = operator.matmul(x)
      expected = x

      feed_dict = {x: rng.randn(1, 2, 3, 4)}

      self.assertAllClose(
          *sess.run([operator_matmul, expected], feed_dict=feed_dict))

  def test_broadcast_matmul_static_shapes(self):
    # These cannot be done in the automated (base test class) tests since they
    # test shapes that tf.batch_matmul cannot handle.
    # In particular, tf.batch_matmul does not broadcast.
    with self.test_session() as sess:
      # Given this x and LinearOperatorIdentity shape of (2, 1, 3, 3), the
      # broadcast shape of operator and 'x' is (2, 2, 3, 4)
      x = random_ops.random_normal(shape=(1, 2, 3, 4))
      operator = linalg_lib.LinearOperatorIdentity(
          num_rows=3, batch_shape=(2, 1), dtype=x.dtype)

      # Batch matrix of zeros with the broadcast shape of x and operator.
      zeros = array_ops.zeros(shape=(2, 2, 3, 4), dtype=x.dtype)

      # Expected result of matmul and solve.
      expected = x + zeros

      operator_matmul = operator.matmul(x)
      self.assertAllEqual(operator_matmul.get_shape(), expected.get_shape())
      self.assertAllClose(*sess.run([operator_matmul, expected]))

  def test_broadcast_matmul_dynamic_shapes(self):
    # These cannot be done in the automated (base test class) tests since they
    # test shapes that tf.batch_matmul cannot handle.
    # In particular, tf.batch_matmul does not broadcast.
    with self.test_session() as sess:
      # Given this x and LinearOperatorIdentity shape of (2, 1, 3, 3), the
      # broadcast shape of operator and 'x' is (2, 2, 3, 4)
      x = array_ops.placeholder(dtypes.float32)
      num_rows = array_ops.placeholder(dtypes.int32)
      batch_shape = array_ops.placeholder(dtypes.int32)

      operator = linalg_lib.LinearOperatorIdentity(
          num_rows, batch_shape=batch_shape)
      feed_dict = {x: rng.rand(1, 2, 3, 4), num_rows: 3, batch_shape: (2, 1)}

      # Batch matrix of zeros with the broadcast shape of x and operator.
      zeros = array_ops.zeros(shape=(2, 2, 3, 4), dtype=x.dtype)

      # Expected result of matmul and solve.
      expected = x + zeros

      operator_matmul = operator.matmul(x)
      self.assertAllClose(
          *sess.run([operator_matmul, expected], feed_dict=feed_dict))

  def test_is_x_flags(self):
    # The is_x flags are by default all True.
    operator = linalg_lib.LinearOperatorIdentity(num_rows=2)
    self.assertTrue(operator.is_positive_definite)
    self.assertTrue(operator.is_non_singular)
    self.assertTrue(operator.is_self_adjoint)

    # Any of them False raises because the identity is always self-adjoint etc..
    with self.assertRaisesRegexp(ValueError, "is always non-singular"):
      operator = linalg_lib.LinearOperatorIdentity(
          num_rows=2,
          is_non_singular=None,
      )
class LinearOperatorScaledIdentityTest(
linear_operator_test_util.SquareLinearOperatorDerivedClassTest):
"""Most tests done in the base class LinearOperatorDerivedClassTest."""
@property
def _dtypes_to_test(self):
# TODO(langmore) Test tf.float16 once tf.matrix_solve works in
# 16bit.
return [dtypes.float32, dtypes.float64, dtypes.complex64, dtypes.complex128]
def _operator_and_matrix(self, build_info, dtype, use_placeholder):
shape = list(build_info.shape)
assert shape[-1] == shape[-2]
batch_shape = shape[:-2]
num_rows = shape[-1]
# Uniform values that are at least length 1 from the origin. Allows the
# operator to be well conditioned.
# Shape batch_shape
multiplier = linear_operator_test_util.random_sign_uniform(
shape=batch_shape, minval=1., maxval=2., dtype=dtype)
# Nothing to feed since LinearOperatorScaledIdentity takes no Tensor args.
lin_op_multiplier = multiplier
if use_placeholder:
lin_op_multiplier = array_ops.placeholder_with_default(
multiplier, shape=None)
operator = linalg_lib.LinearOperatorScaledIdentity(
num_rows, lin_op_multiplier)
multiplier_matrix = array_ops.expand_dims(
array_ops.expand_dims(multiplier, -1), -1)
matrix = multiplier_matrix * linalg_ops.eye(
num_rows, batch_shape=batch_shape, dtype=dtype)
return operator, matrix
def test_assert_positive_definite_does_not_raise_when_positive(self):
with self.test_session():
operator = linalg_lib.LinearOperatorScaledIdentity(
num_rows=2, multiplier=1.)
operator.assert_positive_definite().run() # Should not fail
def test_assert_positive_definite_raises_when_negative(self):
with self.test_session():
operator = linalg_lib.LinearOperatorScaledIdentity(
num_rows=2, multiplier=-1.)
with self.assertRaisesOpError("not positive definite"):
operator.assert_positive_definite().run()
def test_assert_non_singular_does_not_raise_when_non_singular(self):
with self.test_session():
operator = linalg_lib.LinearOperatorScaledIdentity(
num_rows=2, multiplier=[1., 2., 3.])
operator.assert_non_singular().run() # Should not fail
def test_assert_non_singular_raises_when_singular(self):
with self.test_session():
operator = linalg_lib.LinearOperatorScaledIdentity(
num_rows=2, multiplier=[1., 2., 0.])
with self.assertRaisesOpError("was singular"):
operator.assert_non_singular().run()
  def test_assert_self_adjoint_does_not_raise_when_self_adjoint(self):
    """A multiplier with zero imaginary part is self-adjoint."""
    with self.test_session():
      operator = linalg_lib.LinearOperatorScaledIdentity(
          num_rows=2, multiplier=[1. + 0J])
      operator.assert_self_adjoint().run()  # Should not fail
  def test_assert_self_adjoint_raises_when_not_self_adjoint(self):
    """A multiplier with nonzero imaginary part is not self-adjoint."""
    with self.test_session():
      operator = linalg_lib.LinearOperatorScaledIdentity(
          num_rows=2, multiplier=[1. + 1J])
      with self.assertRaisesOpError("not self-adjoint"):
        operator.assert_self_adjoint().run()
  def test_float16_matmul(self):
    """matmul works in float16 (checked against broadcast multiplication)."""
    # float16 cannot be tested by base test class because tf.matrix_solve does
    # not work with float16.
    with self.test_session():
      multiplier = rng.rand(3).astype(np.float16)
      operator = linalg_lib.LinearOperatorScaledIdentity(
          num_rows=2, multiplier=multiplier)
      x = rng.randn(2, 3).astype(np.float16)
      y = operator.matmul(x)
      # Scaled-identity matmul is just scalar multiplication broadcast over x.
      self.assertAllClose(multiplier[..., None, None] * x, y.eval())
  def test_non_scalar_num_rows_raises_static(self):
    """num_rows must be a scalar; a list is rejected at graph-build time."""
    # Many "test_...num_rows" tests are performed in LinearOperatorIdentity.
    with self.assertRaisesRegexp(ValueError, "must be a 0-D Tensor"):
      linalg_lib.LinearOperatorScaledIdentity(
          num_rows=[2], multiplier=123.)
  def test_wrong_matrix_dimensions_raises_static(self):
    """A statically-known shape mismatch (2x2 op vs 3x3 rhs) raises early."""
    operator = linalg_lib.LinearOperatorScaledIdentity(
        num_rows=2, multiplier=2.2)
    x = rng.randn(3, 3).astype(np.float32)
    with self.assertRaisesRegexp(ValueError, "Dimensions.*not compatible"):
      operator.matmul(x)
  def test_wrong_matrix_dimensions_raises_dynamic(self):
    """With placeholder shapes the mismatch is only caught at run time."""
    num_rows = array_ops.placeholder(dtypes.int32)
    x = array_ops.placeholder(dtypes.float32)
    with self.test_session():
      operator = linalg_lib.LinearOperatorScaledIdentity(
          num_rows, multiplier=[1., 2], assert_proper_shapes=True)
      y = operator.matmul(x)
      with self.assertRaisesOpError("Incompatible.*dimensions"):
        y.eval(feed_dict={num_rows: 2, x: rng.rand(3, 3)})
  def test_broadcast_matmul_and_solve(self):
    """matmul/solve broadcast a batched operator against a batched rhs."""
    # These cannot be done in the automated (base test class) tests since they
    # test shapes that tf.batch_matmul cannot handle.
    # In particular, tf.batch_matmul does not broadcast.
    with self.test_session() as sess:
      # Given this x and LinearOperatorScaledIdentity shape of (2, 1, 3, 3), the
      # broadcast shape of operator and 'x' is (2, 2, 3, 4)
      x = random_ops.random_normal(shape=(1, 2, 3, 4))
      # operator is 2.2 * identity (with a batch shape).
      operator = linalg_lib.LinearOperatorScaledIdentity(
          num_rows=3, multiplier=2.2 * array_ops.ones((2, 1)))
      # Batch matrix of zeros with the broadcast shape of x and operator.
      zeros = array_ops.zeros(shape=(2, 2, 3, 4), dtype=x.dtype)
      # Test matmul
      expected = x * 2.2 + zeros
      operator_matmul = operator.matmul(x)
      self.assertAllEqual(operator_matmul.get_shape(), expected.get_shape())
      self.assertAllClose(*sess.run([operator_matmul, expected]))
      # Test solve
      expected = x / 2.2 + zeros
      operator_solve = operator.solve(x)
      self.assertAllEqual(operator_solve.get_shape(), expected.get_shape())
      self.assertAllClose(*sess.run([operator_solve, expected]))
  def test_broadcast_matmul_and_solve_scalar_scale_multiplier(self):
    """A scalar (batch-shape-free) multiplier broadcasts against batched x."""
    # These cannot be done in the automated (base test class) tests since they
    # test shapes that tf.batch_matmul cannot handle.
    # In particular, tf.batch_matmul does not broadcast.
    with self.test_session() as sess:
      # Given this x and LinearOperatorScaledIdentity shape of (3, 3), the
      # broadcast shape of operator and 'x' is (1, 2, 3, 4), which is the same
      # shape as x.
      x = random_ops.random_normal(shape=(1, 2, 3, 4))
      # operator is 2.2 * identity (scalar multiplier, no batch shape).
      operator = linalg_lib.LinearOperatorScaledIdentity(
          num_rows=3, multiplier=2.2)
      # Test matmul
      expected = x * 2.2
      operator_matmul = operator.matmul(x)
      self.assertAllEqual(operator_matmul.get_shape(), expected.get_shape())
      self.assertAllClose(*sess.run([operator_matmul, expected]))
      # Test solve
      expected = x / 2.2
      operator_solve = operator.solve(x)
      self.assertAllEqual(operator_solve.get_shape(), expected.get_shape())
      self.assertAllClose(*sess.run([operator_solve, expected]))
  def test_is_x_flags(self):
    """User-supplied hint flags are stored; unspecified ones default to None."""
    operator = linalg_lib.LinearOperatorScaledIdentity(
        num_rows=2, multiplier=1.,
        is_positive_definite=False, is_non_singular=True)
    self.assertFalse(operator.is_positive_definite)
    self.assertTrue(operator.is_non_singular)
    self.assertTrue(operator.is_self_adjoint is None)
if __name__ == "__main__":
  # Run the test suite when this file is executed directly.
  test.main()
| jart/tensorflow | tensorflow/python/kernel_tests/linalg/linear_operator_identity_test.py | Python | apache-2.0 | 17,629 |
"""
Description:
* An interface is defined for creating an object.
* Comparing to simple factory, subclasses decide which class is instantiated.
@author: Paul Bodean
@date: 12/08/2017
"""
from abc import ABCMeta, abstractmethod
from typing import Union
from selenium.webdriver import Chrome, Firefox
from src.factory.pages.menu import Menu
from src.factory.pages.search import Search
class Component(object, metaclass=ABCMeta):
    """
    Abstract interface for a tested UI component.

    Declared with ``ABCMeta`` so that the ``@abstractmethod`` markers are
    actually enforced: without an ABC metaclass, Python would happily
    instantiate this class (or a subclass missing the abstract methods).
    The file already uses ``ABCMeta`` for ``TemplateTest``; this makes the
    two abstract bases consistent.
    """

    @abstractmethod
    def set_name(self):
        """Return a human-readable name for the component."""

    @abstractmethod
    def get_actions(self, *args):
        """Execute the action named by ``args[0]`` (remaining items are its arguments)."""
class SearchComponent(Component, Search):
    """
    Search-box component: maps abstract action names onto ``Search`` page calls.
    """

    def set_name(self):
        """Return the display name of this component."""
        return 'Youtube search component'

    def get_actions(self, *args: list):
        """Dispatch a search action.

        :param args: action name followed by its arguments,
            e.g. ``('set_query', 'cats')``
        :raises NotImplementedError: for unknown action names
        """
        if args[0] == 'click_search':
            self.search()
        elif args[0] == 'set_query':
            self.set_query(args[1])
        else:
            # Bug fix: ``raise NotImplemented`` raised a TypeError at runtime
            # (NotImplemented is a constant, not an exception class).
            raise NotImplementedError(args[0])
class MenuComponent(Component, Menu):
    """
    Menu component: maps abstract action names onto ``Menu`` page calls.
    """

    def set_name(self):
        """Return the display name of this component."""
        return 'Youtube menu component'

    def get_actions(self, *args: list):
        """Dispatch a menu action.

        :param args: action name, e.g. ``('trend',)``
        :raises NotImplementedError: for unknown action names
        """
        if args[0] == 'click_menu':
            self.menu_button()
        elif args[0] == 'trend':
            self.filter_by_trend()
        elif args[0] == 'history':
            self.filter_by_history()
        elif args[0] == 'browse':
            self.browse()
        else:
            # Bug fix: ``raise NotImplemented`` raised a TypeError at runtime
            # (NotImplemented is a constant, not an exception class).
            raise NotImplementedError(args[0])
class TemplateTest(metaclass=ABCMeta):
    """Abstract test case built around the factory method ``create_test``.

    Concrete subclasses implement :meth:`create_test`, which is invoked from
    the constructor and is expected to register the page sections under test
    via :meth:`add_sections`.
    """

    def __init__(self):
        self.sections = dict()
        self.create_test()

    @abstractmethod
    def create_test(self):
        """Factory method: register the sections this test case exercises."""

    def get_sections(self) -> dict:
        """Return the mapping of section key -> section object under test.

        :rtype: dict
        """
        return self.sections

    def add_sections(self, section_key: str, section: object):
        """Register *section* under *section_key* in the sections mapping.

        :param section_key: section key name
        :param section: the section object to be tested
        """
        self.sections[section_key] = section
class MenuTest(TemplateTest):
    """Concrete test case that exercises only the menu component."""

    def __init__(self, driver: Union[Chrome, Firefox]):
        """
        :param driver: selenium browser driver used by the components
        """
        self.__driver = driver
        super().__init__()

    def create_test(self):
        """Register the menu section with the test case."""
        self.add_sections('menu', MenuComponent(self.__driver))
class MenuAndSearchTest(TemplateTest):
    """Concrete test case that exercises both the menu and search components."""

    def __init__(self, driver: Union[Chrome, Firefox]):
        """
        :param driver: selenium browser driver used by the components
        """
        self.__driver = driver
        super().__init__()

    def create_test(self):
        """Register the menu and the search sections with the test case."""
        self.add_sections('menu', MenuComponent(self.__driver))
        self.add_sections('search', SearchComponent(self.__driver))
| paulbodean88/automation-design-patterns | src/factory/factory_method.py | Python | mit | 3,564 |
"""Tests for the system_log component."""
| fbradyirl/home-assistant | tests/components/system_log/__init__.py | Python | apache-2.0 | 42 |
# Enable pytest's "pytester" plugin, which provides the ``testdir`` fixture
# used below to run generated test files in a sub-pytest process.
pytest_plugins = "pytester"
def test_exceptions_dont_cause_leaking_between_tests(testdir, capsys):
    """An exception raised after ``expect(...)`` must not leak the unmet
    expectation into the next test: exactly one failure, one pass."""
    testdir.makepyfile("""
        from doubles.targets.expectation_target import expect
        from doubles.testing import User
        def test_that_sets_expectation_then_raises():
            expect(User).class_method.with_args(1).once()
            raise Exception('Bob')
        def test_that_should_pass():
            assert True
    """)
    run = testdir.runpytest()
    counts = run.parseoutcomes()
    assert counts['failed'] == 1
    assert counts['passed'] == 1
def test_failed_expections_do_not_leak_between_tests(testdir, capsys):
    """Each unsatisfied expectation fails only its own test, with its own args.

    NOTE(review): "expections" in this name (and in the generated second test)
    looks like a typo for "expectations"; left untouched because pytest
    discovers tests by name and the generated file content is asserted below.
    """
    testdir.makepyfile("""
        from doubles.targets.expectation_target import expect
        from doubles.testing import User
        def test_that_fails_for_not_satisfying_expectation():
            expect(User).class_method.with_args('test_one').once()
        def test_that_should_fail_for_not_satisfying_expection():
            expect(User).class_method.with_args('test_two').once()
    """)
    result = testdir.runpytest()
    outcomes = result.parseoutcomes()
    # Both inner tests fail -- each for its *own* unmet expectation.
    assert outcomes['failed'] == 2
    expected_error = (
        "*Expected 'class_method' to be called 1 time instead of 0 times on"
        " <class 'doubles.testing.User'> with ('{arg_value}')*"
    )
    # The failure messages must carry the per-test argument, proving the
    # expectations were not mixed up between the two tests.
    result.stdout.fnmatch_lines([expected_error.format(arg_value="test_one")])
    result.stdout.fnmatch_lines([expected_error.format(arg_value="test_two")])
| uber/doubles | test/pytest_test.py | Python | mit | 1,505 |
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South migration: make ``Role.description`` nullable.

    NOTE: auto-generated code. The ``models`` dict below is South's frozen
    ORM snapshot at the time of generation and must not be edited by hand.
    """

    def forwards(self, orm):
        # Changing field 'Role.description'
        # Allow NULL so roles without a description remain valid.
        db.alter_column(u'units_role', 'description', self.gf('django.db.models.fields.TextField')(null=True))

    def backwards(self, orm):
        # User chose to not deal with backwards NULL issues for 'Role.description'
        raise RuntimeError("Cannot reverse this migration. 'Role.description' and its values cannot be restored.")

        # The following code is provided here to aid in writing a correct migration
        # Changing field 'Role.description'
        db.alter_column(u'units_role', 'description', self.gf('django.db.models.fields.TextField')())

    models = {
        u'auth.group': {
            'Meta': {'object_name': 'Group'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        u'auth.permission': {
            'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        u'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        u'units.role': {
            'Meta': {'object_name': 'Role'},
            'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'id_epfl': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'ordre': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
        },
        u'units.rolelogging': {
            'Meta': {'object_name': 'RoleLogging'},
            'extra_data': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'object': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'logs'", 'to': u"orm['units.Role']"}),
            'what': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
            'when': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'who': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['users.TruffeUser']"})
        },
        u'units.unit': {
            'Meta': {'object_name': 'Unit'},
            'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'id_epfl': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
            'is_commission': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_equipe': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'parent_herachique': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['units.Unit']", 'null': 'True', 'blank': 'True'}),
            'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
        },
        u'units.unitlogging': {
            'Meta': {'object_name': 'UnitLogging'},
            'extra_data': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'object': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'logs'", 'to': u"orm['units.Unit']"}),
            'what': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
            'when': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'who': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['users.TruffeUser']"})
        },
        u'users.truffeuser': {
            'Meta': {'object_name': 'TruffeUser'},
            'adresse': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'body': ('django.db.models.fields.CharField', [], {'default': "'.'", 'max_length': '1'}),
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '255', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'iban_ou_ccp': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
            'mobile': ('django.db.models.fields.CharField', [], {'max_length': '25', 'blank': 'True'}),
            'nom_banque': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'})
        }
    }

    complete_apps = ['units']
# Copyright 2019 Objectif Libre
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import flask
import voluptuous
from werkzeug import exceptions as http_exceptions
from cloudkitty.api.v2 import base
from cloudkitty.api.v2 import utils as api_utils
from cloudkitty.common import policy
from cloudkitty import messaging
from cloudkitty import storage_state
from cloudkitty import tzutils
from cloudkitty import validation_utils as vutils
class ScopeState(base.BaseResource):
    """API v2 resource exposing and resetting the per-scope collector state."""

    @classmethod
    def reload(cls):
        # Re-create the messaging client and state manager along with the
        # parent resource's state when the API worker (re)initializes.
        super(ScopeState, cls).reload()
        cls._client = messaging.get_client()
        cls._storage_state = storage_state.StateManager()

    @api_utils.paginated
    @api_utils.add_input_schema('query', {
        voluptuous.Optional('scope_id', default=[]):
            api_utils.MultiQueryParam(str),
        voluptuous.Optional('scope_key', default=[]):
            api_utils.MultiQueryParam(str),
        voluptuous.Optional('fetcher', default=[]):
            api_utils.MultiQueryParam(str),
        voluptuous.Optional('collector', default=[]):
            api_utils.MultiQueryParam(str),
    })
    @api_utils.add_output_schema({'results': [{
        voluptuous.Required('scope_id'): vutils.get_string_type(),
        voluptuous.Required('scope_key'): vutils.get_string_type(),
        voluptuous.Required('fetcher'): vutils.get_string_type(),
        voluptuous.Required('collector'): vutils.get_string_type(),
        voluptuous.Required('state'): vutils.get_string_type(),
    }]})
    def get(self,
            offset=0,
            limit=100,
            scope_id=None,
            scope_key=None,
            fetcher=None,
            collector=None):
        """Return the stored state of every scope matching the filters.

        :raises werkzeug.exceptions.NotFound: if no scope matches
        """
        policy.authorize(
            flask.request.context,
            'scope:get_state',
            {'tenant_id': scope_id or flask.request.context.project_id}
        )
        results = self._storage_state.get_all(
            identifier=scope_id,
            scope_key=scope_key,
            fetcher=fetcher,
            collector=collector,
            offset=offset,
            limit=limit,
        )
        if len(results) < 1:
            raise http_exceptions.NotFound(
                "No resource found for provided filters.")
        return {
            'results': [{
                'scope_id': r.identifier,
                'scope_key': r.scope_key,
                'fetcher': r.fetcher,
                'collector': r.collector,
                'state': r.state.isoformat(),
            } for r in results]
        }

    @api_utils.add_input_schema('body', {
        voluptuous.Exclusive('all_scopes', 'scope_selector'):
            voluptuous.Boolean(),
        voluptuous.Exclusive('scope_id', 'scope_selector'):
            api_utils.MultiQueryParam(str),
        voluptuous.Optional('scope_key', default=[]):
            api_utils.MultiQueryParam(str),
        voluptuous.Optional('fetcher', default=[]):
            api_utils.MultiQueryParam(str),
        voluptuous.Optional('collector', default=[]):
            api_utils.MultiQueryParam(str),
        voluptuous.Required('state'):
            voluptuous.Coerce(tzutils.dt_from_iso),
    })
    def put(self,
            all_scopes=False,
            scope_id=None,
            scope_key=None,
            fetcher=None,
            collector=None,
            state=None):
        """Reset the state of matching scopes (delegated asynchronously via RPC).

        Returns an empty body with HTTP 202 once the reset has been cast.

        :raises werkzeug.exceptions.BadRequest: if neither ``all_scopes`` nor
            ``scope_id`` is provided
        :raises werkzeug.exceptions.NotFound: if no scope matches
        """
        policy.authorize(
            flask.request.context,
            'scope:reset_state',
            {'tenant_id': scope_id or flask.request.context.project_id}
        )
        if not all_scopes and scope_id is None:
            raise http_exceptions.BadRequest(
                "Either all_scopes or a scope_id should be specified.")
        results = self._storage_state.get_all(
            identifier=scope_id,
            scope_key=scope_key,
            fetcher=fetcher,
            collector=collector,
        )
        if len(results) < 1:
            raise http_exceptions.NotFound(
                "No resource found for provided filters.")
        serialized_results = [{
            'scope_id': r.identifier,
            'scope_key': r.scope_key,
            'fetcher': r.fetcher,
            'collector': r.collector,
        } for r in results]
        # Fire-and-forget RPC: the orchestrator performs the actual reset.
        self._client.cast({}, 'reset_state', res_data={
            'scopes': serialized_results, 'state': state.isoformat(),
        })
        return {}, 202
| stackforge/cloudkitty | cloudkitty/api/v2/scope/state.py | Python | apache-2.0 | 4,899 |
"""
mbed CMSIS-DAP debugger
Copyright (c) 2006-2018 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from ..core.coresight_target import CoreSightTarget
from .family import (target_kinetis, flash_cortex_m)
from . import target_MIMXRT1021xxxxx
from . import target_MIMXRT1052xxxxB
from . import target_MKE15Z256xxx7
from . import target_MKE18F256xxx16
from . import target_MKL02Z32xxx4
from . import target_MKL05Z32xxx4
from . import target_MKL25Z128xxx4
from . import target_MKL26Z256xxx4
from . import target_MKL27Z256xxx4
from . import target_MKL28Z512xxx7
from . import target_MKL43Z256xxx4
from . import target_MKL46Z256xxx4
from . import target_MKL82Z128xxx7
from . import target_MKV10Z128xxx7
from . import target_MKV11Z128xxx7
from . import target_MKW01Z128xxx4
from . import target_MKW24D512xxx5
from . import target_MKW36Z512xxx4
from . import target_MKW40Z160xxx4
from . import target_MKW41Z512xxx4
from . import target_MK22FN1M0Axxx12
from . import target_MK22FN512xxx12
from . import target_MK28FN2M0xxx15
from . import target_MK64FN1M0xxx12
from . import target_MK66FN2M0xxx18
from . import target_MK82FN256xxx15
from . import target_MK20DX128xxx5
from . import target_K32W042S1M2xxx
from . import target_lpc800
from . import target_LPC11U24FBD64_401
from . import target_LPC1768
from . import target_LPC4330
from . import target_nRF51822_xxAA
from . import target_nRF52832_xxAA
from . import target_nRF52840_xxAA
from . import target_STM32F103RC
from . import target_STM32F051T8
from . import target_STM32F412xx
from . import target_STM32F439xx
from . import target_STM32L475xx
from . import target_MAX32600
from . import target_w7500
from . import target_LPC1114FN28_102
from . import target_LPC824M201JHI33
from . import target_LPC54114J256BD64
from . import target_LPC54608J512ET180
from . import target_ncs36510
from . import target_LPC4088FBD144
from . import target_lpc4088qsb
from . import target_lpc4088dm
from . import target_RTL8195AM
from . import target_CC3220SF
from . import target_STM32F031xx
from . import target_STM32F051xx
from . import target_STM32F071xx
from . import target_STM32F301xx
from . import target_STM32F405xx
from . import target_STM32F415xx
from . import target_STM32L486xx
from . import target_LPC11U35
# Registry mapping a target type name (lower-case part number, as used on the
# command line and in board descriptions) to the class implementing it.
TARGET = {
    'cortex_m': CoreSightTarget,
    'kinetis': target_kinetis.Kinetis,
    'ke15z7': target_MKE15Z256xxx7.KE15Z7,
    'ke18f16': target_MKE18F256xxx16.KE18F16,
    'kl02z': target_MKL02Z32xxx4.KL02Z,
    'kl05z': target_MKL05Z32xxx4.KL05Z,
    'kl25z': target_MKL25Z128xxx4.KL25Z,
    'kl26z': target_MKL26Z256xxx4.KL26Z,
    'kl27z4': target_MKL27Z256xxx4.KL27Z4,
    'kl28z': target_MKL28Z512xxx7.KL28x,
    'kl43z4': target_MKL43Z256xxx4.KL43Z4,
    'kl46z': target_MKL46Z256xxx4.KL46Z,
    'kl82z7': target_MKL82Z128xxx7.KL82Z7,
    'kv10z7': target_MKV10Z128xxx7.KV10Z7,
    'kv11z7': target_MKV11Z128xxx7.KV11Z7,
    'kw01z4': target_MKW01Z128xxx4.KW01Z4,
    'kw24d5': target_MKW24D512xxx5.KW24D5,
    'kw36z4': target_MKW36Z512xxx4.KW36Z4,
    'kw40z4': target_MKW40Z160xxx4.KW40Z4,
    'kw41z4': target_MKW41Z512xxx4.KW41Z4,
    'k20d50m': target_MK20DX128xxx5.K20D50M,
    'k22fa12': target_MK22FN1M0Axxx12.K22FA12,
    'k22f': target_MK22FN512xxx12.K22F,
    'k28f15': target_MK28FN2M0xxx15.K28F15,
    'k64f': target_MK64FN1M0xxx12.K64F,
    'k66f18': target_MK66FN2M0xxx18.K66F18,
    'k82f25615': target_MK82FN256xxx15.K82F25615,
    'k32w042s': target_K32W042S1M2xxx.K32W042S,
    'lpc800': target_lpc800.LPC800,
    'lpc11u24': target_LPC11U24FBD64_401.LPC11U24,
    'lpc1768': target_LPC1768.LPC1768,
    'lpc4330': target_LPC4330.LPC4330,
    'mimxrt1020': target_MIMXRT1021xxxxx.MIMXRT1021xxxxx,
    'mimxrt1050_quadspi': target_MIMXRT1052xxxxB.MIMXRT1052xxxxB_quadspi,
    'mimxrt1050_hyperflash': target_MIMXRT1052xxxxB.MIMXRT1052xxxxB_hyperflash,
    'mimxrt1050': target_MIMXRT1052xxxxB.MIMXRT1052xxxxB_hyperflash,  # Alias for default external flash.
    'nrf51': target_nRF51822_xxAA.NRF51,
    'nrf52' : target_nRF52832_xxAA.NRF52,
    'nrf52840' : target_nRF52840_xxAA.NRF52840,
    'stm32f103rc': target_STM32F103RC.STM32F103RC,
    #'stm32f051': target_STM32F051T8.STM32F051,
    'stm32f412xe' : target_STM32F412xx.STM32F412xE,
    'stm32f412xg' : target_STM32F412xx.STM32F412xG,
    'stm32f439xg' : target_STM32F439xx.STM32F439xG,
    'stm32f439xi' : target_STM32F439xx.STM32F439xI,
    'stm32l475xc' : target_STM32L475xx.STM32L475xC,
    'stm32l475xe' : target_STM32L475xx.STM32L475xE,
    'stm32l475xg' : target_STM32L475xx.STM32L475xG,
    'max32600': target_MAX32600.MAX32600,
    'w7500': target_w7500.W7500,
    'lpc11xx_32': target_LPC1114FN28_102.LPC11XX_32,
    'lpc824': target_LPC824M201JHI33.LPC824,
    'lpc54114': target_LPC54114J256BD64.LPC54114,
    'lpc54608': target_LPC54608J512ET180.LPC54608,
    'lpc4088': target_LPC4088FBD144.LPC4088,
    'ncs36510': target_ncs36510.NCS36510,
    'lpc4088qsb': target_lpc4088qsb.LPC4088qsb,
    'lpc4088dm': target_lpc4088dm.LPC4088dm,
    'rtl8195am': target_RTL8195AM.RTL8195AM,
    'cc3220sf': target_CC3220SF.CC3220SF,
    'stm32f031e6': target_STM32F031xx.STM32F031E6,
    'stm32f051t8': target_STM32F051xx.STM32F051T8,
    'stm32f071cb': target_STM32F071xx.STM32F071CB,
    'stm32f301k8': target_STM32F301xx.STM32F301K8,
    'stm32f405og': target_STM32F405xx.STM32F405OG,
    'stm32f415og': target_STM32F415xx.STM32F415OG,
    'stm32l486jg': target_STM32L486xx.STM32L486JG,
    'lpc11u35' : target_LPC11U35.LPC11U35,
}
| mesheven/pyOCD | pyocd/target/__init__.py | Python | apache-2.0 | 6,399 |
# -*- coding: utf-8 -*-
from gitlint.tests.base import BaseTestCase
from gitlint.rules import AuthorValidEmail, RuleViolation
class MetaRuleTests(BaseTestCase):
    """Tests for the meta rules (rules that validate commit metadata)."""

    def test_author_valid_email_rule(self):
        """Default regex: accept well-formed addresses, reject malformed ones."""
        rule = AuthorValidEmail()

        # valid email addresses
        valid_email_addresses = ["föo@bar.com", "Jöhn.Doe@bar.com", "jöhn+doe@bar.com", "jöhn/doe@bar.com",
                                 "jöhn.doe@subdomain.bar.com"]
        for email in valid_email_addresses:
            commit = self.gitcommit("", author_email=email)
            violations = rule.validate(commit)
            self.assertIsNone(violations)

        # No email address (=allowed for now, as gitlint also lints messages passed via stdin that don't have an
        # email address)
        commit = self.gitcommit("")
        violations = rule.validate(commit)
        self.assertIsNone(violations)

        # Invalid email addresses: no TLD, no domain, no @, space anywhere (=valid but not allowed by gitlint)
        invalid_email_addresses = ["föo@bar", "JöhnDoe", "Jöhn Doe", "Jöhn Doe@foo.com", " JöhnDoe@foo.com",
                                   "JöhnDoe@ foo.com", "JöhnDoe@foo. com", "JöhnDoe@foo. com", "@bår.com",
                                   "föo@.com"]
        for email in invalid_email_addresses:
            commit = self.gitcommit("", author_email=email)
            violations = rule.validate(commit)
            self.assertListEqual(violations,
                                 [RuleViolation("M1", "Author email for commit is invalid", email)])

    def test_author_valid_email_rule_custom_regex(self):
        """A user-supplied regex replaces (or, when None, disables) the check."""
        # regex=None -> the rule isn't applied
        rule = AuthorValidEmail()
        rule.options['regex'].set(None)
        emailadresses = ["föo", None, "hür dür"]
        for email in emailadresses:
            commit = self.gitcommit("", author_email=email)
            violations = rule.validate(commit)
            self.assertIsNone(violations)

        # Custom domain
        rule = AuthorValidEmail({'regex': "[^@]+@bår.com"})
        valid_email_addresses = [
            "föo@bår.com", "Jöhn.Doe@bår.com", "jöhn+doe@bår.com", "jöhn/doe@bår.com"]
        for email in valid_email_addresses:
            commit = self.gitcommit("", author_email=email)
            violations = rule.validate(commit)
            self.assertIsNone(violations)

        # Invalid email addresses
        invalid_email_addresses = ["föo@hur.com"]
        for email in invalid_email_addresses:
            commit = self.gitcommit("", author_email=email)
            violations = rule.validate(commit)
            self.assertListEqual(violations,
                                 [RuleViolation("M1", "Author email for commit is invalid", email)])
| jorisroovers/gitlint | gitlint-core/gitlint/tests/rules/test_meta_rules.py | Python | mit | 2,792 |
#!/usr/bin/env python
"""This is a script that removes all staffline, staffspace and staff
symbols, and all relationships that lead to them."""
from __future__ import print_function, unicode_literals
import argparse
import copy
import logging
import os
import time
from muscima.io import parse_cropobject_list, export_cropobject_list
STAFF_CLSNAMES = ['staff', 'staff_line', 'staff_space']
__version__ = "0.0.1"
__author__ = "Jan Hajic jr."
def build_argument_parser():
    """Construct the command-line interface of this script.

    :returns: an ``argparse.ArgumentParser`` exposing ``--annot`` (required),
        ``--export``, ``--verbose`` and ``--debug``.
    """
    p = argparse.ArgumentParser(description=__doc__, add_help=True,
                                formatter_class=argparse.RawDescriptionHelpFormatter)
    p.add_argument('-a', '--annot', action='store', required=True,
                   help='The annotation file for which the staffline and staff'
                        ' CropObject relationships should be added.')
    p.add_argument('-e', '--export', action='store',
                   help='A filename to which the output CropObjectList'
                        ' should be saved. If not given, will print to'
                        ' stdout.')
    p.add_argument('-v', '--verbose', action='store_true',
                   help='Turn on INFO messages.')
    p.add_argument('--debug', action='store_true',
                   help='Turn on DEBUG messages.')
    return p
def main(args):
    """Strip all staff/staff_line/staff_space symbols -- and every link that
    points at one of them -- from the CropObject list in ``args.annot``, then
    export the result to ``args.export`` (or stdout).

    :param args: parsed command-line namespace; see ``build_argument_parser()``.
    :raises ValueError: if ``args.annot`` does not name an existing file.
    """
    logging.info('Starting main...')
    # Bug fix: time.clock() was removed in Python 3.8; use perf_counter()
    # when available and fall back to clock() on legacy interpreters.
    _timer = getattr(time, 'perf_counter', None) or time.clock
    _start_time = _timer()

    ##########################################################################
    logging.info('Import the CropObject list')
    if not os.path.isfile(args.annot):
        raise ValueError('Annotation file {0} not found!'
                         ''.format(args.annot))
    cropobjects = parse_cropobject_list(args.annot)

    ##########################################################################
    # Objids of every staff-related symbol that must be removed.
    staff_cropobjects_dict = {c.objid: c for c in cropobjects
                              if c.clsname in STAFF_CLSNAMES}

    output_cropobjects = []
    for c in cropobjects:
        if c.objid in staff_cropobjects_dict:
            continue
        new_c = copy.deepcopy(c)
        # Drop links that point at the removed staff symbols.
        new_c.inlinks = [i for i in c.inlinks
                         if i not in staff_cropobjects_dict]
        new_c.outlinks = [o for o in c.outlinks
                          if o not in staff_cropobjects_dict]
        output_cropobjects.append(new_c)

    ##########################################################################
    logging.info('Export the stripped list.')
    cropobject_string = export_cropobject_list(output_cropobjects)
    if args.export is not None:
        with open(args.export, 'w') as hdl:
            hdl.write(cropobject_string)
    else:
        print(cropobject_string)

    _end_time = _timer()
    logging.info('[XXXX] done in {0:.3f} s'.format(_end_time - _start_time))
if __name__ == '__main__':
    parser = build_argument_parser()
    args = parser.parse_args()

    # Bug fix: logging.basicConfig() is a no-op after the first call, so the
    # original verbose-then-debug sequence could never enable DEBUG when
    # --verbose was also given.  Configure once, with --debug taking priority.
    if args.debug:
        logging.basicConfig(format='%(levelname)s: %(message)s',
                            level=logging.DEBUG)
    elif args.verbose:
        logging.basicConfig(format='%(levelname)s: %(message)s',
                            level=logging.INFO)

    main(args)
| hajicj/muscima | scripts/strip_staffline_symbols.py | Python | mit | 3,277 |
# dialogs.folder
"""A collection of dialogs to do things to all fonts in a given folder."""
# import
from actions import actionsFolderDialog
from ufo2otf import UFOsToOTFsDialog
from otf2ufo import OTFsToUFOsDialog
from woff2ufo import WOFFsToUFOsDialog
# export
# Public API of the folder-dialogs subpackage (used by ``from ... import *``).
__all__ = [
    'actionsFolderDialog',
    'OTFsToUFOsDialog',
    'UFOsToOTFsDialog',
    'WOFFsToUFOsDialog',
]
| gferreira/hTools2_extension | hTools2.roboFontExt/lib/hTools2/dialogs/folder/__init__.py | Python | bsd-3-clause | 382 |
# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
# Copyright (C) 2011,2017 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""DNS Wire Data Helper"""
import dns.exception
from ._compat import binary_type, string_types, PY2
# Figure out what constant python passes for an unspecified slice bound.
# It's supposed to be sys.maxint, yet on 64-bit windows sys.maxint is 2^31 - 1
# but Python uses 2^63 - 1 as the constant. Rather than making pointless
# extra comparisons, duplicating code, or weakening WireData, we just figure
# out what constant Python will use.
class _SliceUnspecifiedBound(binary_type):
    # Probe type: slicing an instance returns the slice's *stop* value, which
    # exposes the constant Python substitutes for an omitted upper bound
    # (see the module comment above for why this cannot be hard-coded).

    def __getitem__(self, key):
        return key.stop

    if PY2:
        def __getslice__(self, i, j):  # pylint: disable=getslice-method
            # Python 2 routes simple slices through __getslice__; forward to
            # __getitem__ so both paths share the same probe logic.
            return self.__getitem__(slice(i, j))
_unspecified_bound = _SliceUnspecifiedBound()[1:]
class WireData(binary_type):
    """Binary string with strict bounds checking for DNS wire parsing.

    Unlike plain bytes, out-of-range indexing or slicing raises
    dns.exception.FormError instead of silently clamping, so truncated or
    malformed messages are detected instead of mis-parsed.
    """

    def __getitem__(self, key):
        try:
            if isinstance(key, slice):
                # make sure we are not going outside of valid ranges,
                # do stricter control of boundaries than python does
                # by default
                start = key.start
                stop = key.stop

                if PY2:
                    if stop == _unspecified_bound:
                        # handle the case where the right bound is unspecified
                        stop = len(self)

                    if start < 0 or stop < 0:
                        raise dns.exception.FormError
                    # If it's not an empty slice, access left and right bounds
                    # to make sure they're valid
                    if start != stop:
                        super(WireData, self).__getitem__(start)
                        super(WireData, self).__getitem__(stop - 1)
                else:
                    for index in (start, stop):
                        if index is None:
                            continue
                        elif abs(index) > len(self):
                            raise dns.exception.FormError

                return WireData(super(WireData, self).__getitem__(
                    slice(start, stop)))
            # Integer indexing: go through bytearray so Python 2 and 3 both
            # return an int, and out-of-range access raises IndexError below.
            return bytearray(self.unwrap())[key]
        except IndexError:
            raise dns.exception.FormError

    if PY2:
        def __getslice__(self, i, j):  # pylint: disable=getslice-method
            return self.__getitem__(slice(i, j))

    def __iter__(self):
        i = 0
        while 1:
            try:
                yield self[i]
                i += 1
            except dns.exception.FormError:
                # BUG FIX (PEP 479): raising StopIteration inside a generator
                # is converted to RuntimeError on Python 3.7+; terminate the
                # generator with a plain return instead.
                return

    def unwrap(self):
        """Return the data as a plain binary string (bytes on py3, str on py2)."""
        return binary_type(self)
def maybe_wrap(wire):
    """Coerce *wire* into a WireData instance.

    Already-wrapped values pass through untouched (checked first, since
    WireData is itself a binary type); raw binary data is wrapped directly,
    and text is encoded before wrapping.  Anything else is rejected.
    """
    if isinstance(wire, WireData):
        result = wire
    elif isinstance(wire, binary_type):
        result = WireData(wire)
    elif isinstance(wire, string_types):
        result = WireData(wire.encode())
    else:
        raise ValueError("unhandled type %s" % type(wire))
    return result
| waynechu/PythonProject | dns/wiredata.py | Python | mit | 3,751 |
from flask_bcrypt import Bcrypt
from flask_caching import Cache
from flask_debugtoolbar import DebugToolbarExtension
from flask_login import LoginManager
from flask_migrate import Migrate
from flask_sqlalchemy import SQLAlchemy
import logging
# Shared Flask extension instances, created unbound so an application
# factory can attach them later via each extension's init_app().
bcrypt = Bcrypt()
login_manager = LoginManager()
db = SQLAlchemy()
migrate = Migrate()
cache = Cache()
debug_toolbar = DebugToolbarExtension()
# Gunicorn's error logger, so the app can reuse its handlers when deployed.
gunicorn_error_logger = logging.getLogger('gunicorn.error')
r"""
Fourier transform
=================
The graph Fourier transform :meth:`pygsp.graphs.Graph.gft` transforms a
signal from the vertex domain to the spectral domain. The smoother the signal
(see :meth:`pygsp.graphs.Graph.dirichlet_energy`), the lower in the frequencies
its energy is concentrated.
"""
import numpy as np
from matplotlib import pyplot as plt
import pygsp as pg
G = pg.graphs.Sensor(seed=42)
G.compute_fourier_basis()
scales = [10, 3, 0]
limit = 0.44
fig, axes = plt.subplots(2, len(scales), figsize=(12, 4))
fig.subplots_adjust(hspace=0.5)
x0 = np.random.default_rng(1).normal(size=G.N)
for i, scale in enumerate(scales):
g = pg.filters.Heat(G, scale)
x = g.filter(x0).squeeze()
x /= np.linalg.norm(x)
x_hat = G.gft(x).squeeze()
assert np.all((-limit < x) & (x < limit))
G.plot(x, limits=[-limit, limit], ax=axes[0, i])
axes[0, i].set_axis_off()
axes[0, i].set_title('$x^T L x = {:.2f}$'.format(G.dirichlet_energy(x)))
axes[1, i].plot(G.e, np.abs(x_hat), '.-')
axes[1, i].set_xticks(range(0, 16, 4))
axes[1, i].set_xlabel(r'graph frequency $\lambda$')
axes[1, i].set_ylim(-0.05, 0.95)
axes[1, 0].set_ylabel(r'frequency content $\hat{x}(\lambda)$')
# axes[0, 0].set_title(r'$x$: signal in the vertex domain')
# axes[1, 0].set_title(r'$\hat{x}$: signal in the spectral domain')
fig.tight_layout()
| epfl-lts2/pygsp | examples/fourier_transform.py | Python | bsd-3-clause | 1,371 |
import apsw
import datetime
from playhouse.apsw_ext import *
from playhouse.tests.base import ModelTestCase
db = APSWDatabase(':memory:')
class BaseModel(Model):
    # Common base binding all test models to the shared in-memory APSW db.
    class Meta:
        database = db
class User(BaseModel):
    # Minimal user model: a single username column.
    username = CharField()
class Message(BaseModel):
    # Message authored by a User; exercises FK, text, datetime and bool fields.
    user = ForeignKeyField(User)
    message = TextField()
    pub_date = DateTimeField()
    published = BooleanField()
class APSWTestCase(ModelTestCase):
    """Exercise the APSW playhouse extension against an in-memory database:
    registered SQL functions, pragmas, CRUD and transaction semantics."""

    requires = [Message, User]

    def test_db_register_functions(self):
        # APSWDatabase registers date_part()/date_trunc() as SQL functions;
        # verify both are callable from raw SQL.
        result = db.execute_sql('SELECT date_part(?, ?)', (
            'day', '2015-01-02 03:04:05')).fetchone()[0]
        self.assertEqual(result, 2)

        result = db.execute_sql('SELECT date_trunc(?, ?)', (
            'day', '2015-01-02 03:04:05')).fetchone()[0]
        self.assertEqual(result, '2015-01-02')

    def test_db_pragmas(self):
        # Pragmas passed to the constructor are applied on connect.
        test_db = APSWDatabase(':memory:', pragmas=(
            ('cache_size', '1337'),
        ))
        test_db.connect()
        cs = test_db.execute_sql('PRAGMA cache_size;').fetchone()[0]
        self.assertEqual(cs, 1337)

    def test_select_insert(self):
        users = ('u1', 'u2', 'u3')
        for user in users:
            User.create(username=user)

        # Basic selects and filter() variants.
        self.assertEqual([x.username for x in User.select()], ['u1', 'u2', 'u3'])
        self.assertEqual([x.username for x in User.select().filter(username='x')], [])
        self.assertEqual([x.username for x in User.select().filter(username__in=['u1', 'u3'])], ['u1', 'u3'])

        # Datetime and boolean round-trips through the FK relation.
        dt = datetime.datetime(2012, 1, 1, 11, 11, 11)
        Message.create(user=User.get(username='u1'), message='herps', pub_date=dt, published=True)
        Message.create(user=User.get(username='u2'), message='derps', pub_date=dt, published=False)

        m1 = Message.get(message='herps')
        self.assertEqual(m1.user.username, 'u1')
        self.assertEqual(m1.pub_date, dt)
        self.assertEqual(m1.published, True)

        m2 = Message.get(message='derps')
        self.assertEqual(m2.user.username, 'u2')
        self.assertEqual(m2.pub_date, dt)
        self.assertEqual(m2.published, False)

    def test_update_delete(self):
        u1 = User.create(username='u1')
        u2 = User.create(username='u2')

        # save() on a dirty instance updates rather than inserts.
        u1.username = 'u1-modified'
        u1.save()

        self.assertEqual(User.select().count(), 2)
        self.assertEqual(User.get(username='u1-modified').id, u1.id)

        u1.delete_instance()
        self.assertEqual(User.select().count(), 1)

    def test_transaction_handling(self):
        dt = datetime.datetime(2012, 1, 1, 11, 11, 11)

        # An exception inside the transaction() context must roll back.
        def do_ctx_mgr_error():
            with db.transaction():
                User.create(username='u1')
                raise ValueError

        self.assertRaises(ValueError, do_ctx_mgr_error)
        self.assertEqual(User.select().count(), 0)

        # A clean exit must commit.
        def do_ctx_mgr_success():
            with db.transaction():
                u = User.create(username='test')
                Message.create(message='testing', user=u, pub_date=dt, published=1)

        do_ctx_mgr_success()
        self.assertEqual(User.select().count(), 1)
        self.assertEqual(Message.select().count(), 1)

        # Same two behaviors via the commit_on_success decorator.
        @db.commit_on_success
        def create_error():
            u = User.create(username='test')
            Message.create(message='testing', user=u, pub_date=dt, published=1)
            raise ValueError

        self.assertRaises(ValueError, create_error)
        self.assertEqual(User.select().count(), 1)

        @db.commit_on_success
        def create_success():
            u = User.create(username='test')
            Message.create(message='testing', user=u, pub_date=dt, published=1)

        create_success()
        self.assertEqual(User.select().count(), 2)
        self.assertEqual(Message.select().count(), 2)

    def test_exists_regression(self):
        # Regression check: .exists() must work through the APSW driver.
        User.create(username='u1')
        self.assertTrue(User.select().where(User.username == 'u1').exists())
        self.assertFalse(User.select().where(User.username == 'ux').exists())
| funkypawz/MakerRobot | peewee-master/playhouse/tests/test_apsw.py | Python | gpl-3.0 | 4,048 |
#!/usr/bin/python
#This is not my code, but a really nice wrapper ( taken from https://realpython.com/blog/python/primer-on-python-decorators/ )
import functools
import time
def timing_function(some_function):
    """
    Decorator that reports how long the wrapped function takes to execute.

    The wrapper forwards any positional and keyword arguments to
    *some_function*, so the decorator now works for functions of any
    signature (the original only supported zero-argument functions), and
    functools.wraps preserves the wrapped function's name and docstring.
    The wrapper returns the timing message string, as before.
    """
    @functools.wraps(some_function)
    def wrapper(*args, **kwargs):
        t1 = time.time()
        some_function(*args, **kwargs)
        t2 = time.time()
        return "Time it took to run the function: " + str((t2 - t1)) + "\n"
    return wrapper
@timing_function
def my_function():
    """Sum the integers 0..9999 and print the result."""
    total = sum(range(0, 10000))
    print("\nSum of all the numbers: " + str(total))
print(my_function())
| shravanshandilya/catching-up-with-python | Decorators/timing_decorator.py | Python | mit | 663 |
import csv
import osgeo.ogr
from osgeo import ogr, osr
EPSG_LAT_LON = 4326
def read_tazs_from_csv(csv_zone_locs_fname):
    """Read TAZ tuples from a CSV file.

    The file's first row is treated as a header and skipped; every other
    row contributes a ``(id, lat, lon)`` tuple of its first three columns
    (all returned as strings).
    """
    taz_tuples = []
    # BUG FIX: use a context manager so the file handle is closed even on
    # error (the original leaked it), and open in text mode with newline=''
    # as the csv module requires on Python 3 ('rb' breaks csv.reader there).
    with open(csv_zone_locs_fname, 'r', newline='') as tfile:
        treader = csv.reader(tfile, delimiter=',', quotechar="'")
        for ii, row in enumerate(treader):
            if ii == 0:
                continue  # skip the header row
            taz_tuples.append((row[0], row[1], row[2]))
    return taz_tuples
def read_tazs_from_shp(shp_zone_locs_fname):
    """Read TAZ (id, lat, lon) tuples from a point shapefile.

    Each feature's "N" field supplies the zone id; geometries are
    reprojected from the layer's native CRS to EPSG:4326 (lat/lon)
    before their coordinates are read.
    """
    taz_tuples = []
    tazs_shp = osgeo.ogr.Open(shp_zone_locs_fname)
    tazs_layer = tazs_shp.GetLayer(0)
    src_srs = tazs_layer.GetSpatialRef()
    target_srs = osr.SpatialReference()
    target_srs.ImportFromEPSG(EPSG_LAT_LON)
    transform_to_lat_lon = osr.CoordinateTransformation(src_srs,
        target_srs)
    for taz_feat in tazs_layer:
        taz_id = taz_feat.GetField("N")
        taz_geom = taz_feat.GetGeometryRef()
        # Reproject the geometry in place to lat/lon.
        taz_geom.Transform(transform_to_lat_lon)
        # NOTE(review): GetX()/GetY() are assigned to lat/lon respectively,
        # which assumes (lat, lon) axis order after the transform — verify,
        # since OGR conventionally returns X=lon, Y=lat.
        taz_lat = taz_geom.GetX()
        taz_lon = taz_geom.GetY()
        taz_tuples.append((taz_id, taz_lat, taz_lon))
        # Explicitly release the GDAL feature wrapper before moving on.
        taz_feat.Destroy()
    tazs_shp.Destroy()
    return taz_tuples
| PatSunter/pyOTPA | TAZs-OD-Matrix/taz_files.py | Python | bsd-3-clause | 1,176 |
# Copyright 2010-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
"""Contains private support functions for the Display class
in output.py
"""
from __future__ import unicode_literals
__all__ = (
)
import io
import re
import sys
from portage import os
from portage import _encodings, _unicode_encode
from portage._sets.base import InternalPackageSet
from portage.output import (blue, bold, colorize, create_color_func,
green, red, teal, turquoise, yellow)
bad = create_color_func("BAD")
from portage.util import shlex_split, writemsg
from portage.util.SlotObject import SlotObject
from portage.versions import catpkgsplit
from _emerge.Blocker import Blocker
from _emerge.Package import Package
# Python 3 has no ``basestring``; alias it to ``str`` so the isinstance
# checks below work on both major versions.
if sys.hexversion >= 0x3000000:
	basestring = str
class _RepoDisplay(object):
	"""Map repository paths to short numeric display indices.

	PORTDIR is registered first in __init__ so it always receives index 0;
	overlays get indices in the order they are first shown.  Paths that
	cannot be resolved display as "?".
	"""

	def __init__(self, roots):
		self._shown_repos = {}
		self._unknown_repo = False
		repo_paths = set()
		for root_config in roots.values():
			portdir = root_config.settings.get("PORTDIR")
			if portdir:
				repo_paths.add(portdir)
			overlays = root_config.settings.get("PORTDIR_OVERLAY")
			if overlays:
				repo_paths.update(shlex_split(overlays))
		repo_paths = list(repo_paths)
		self._repo_paths = repo_paths
		# Symlink-resolved counterparts, index-aligned with _repo_paths;
		# repoStr() looks paths up against this list.
		self._repo_paths_real = [ os.path.realpath(repo_path) \
			for repo_path in repo_paths ]

		# pre-allocate index for PORTDIR so that it always has index 0.
		for root_config in roots.values():
			portdb = root_config.trees["porttree"].dbapi
			portdir = portdb.porttree_root
			if portdir:
				self.repoStr(portdir)

	def repoStr(self, repo_path_real):
		"""Return the display index (as a string) for a resolved repo path."""
		real_index = -1
		if repo_path_real:
			real_index = self._repo_paths_real.index(repo_path_real)
		if real_index == -1:
			s = "?"
			self._unknown_repo = True
		else:
			shown_repos = self._shown_repos
			repo_paths = self._repo_paths
			repo_path = repo_paths[real_index]
			index = shown_repos.get(repo_path)
			if index is None:
				# First time this repo is shown: assign the next free index.
				index = len(shown_repos)
				shown_repos[repo_path] = index
			s = str(index)
		return s

	def __str__(self):
		"""Render the legend mapping display indices back to repo paths."""
		output = []
		shown_repos = self._shown_repos
		unknown_repo = self._unknown_repo
		if shown_repos or self._unknown_repo:
			output.append("Portage tree and overlays:\n")
		show_repo_paths = list(shown_repos)
		for repo_path, repo_index in shown_repos.items():
			show_repo_paths[repo_index] = repo_path
		if show_repo_paths:
			for index, repo_path in enumerate(show_repo_paths):
				output.append(" "+teal("["+str(index)+"]")+" %s\n" % repo_path)
		if unknown_repo:
			output.append(" "+teal("[?]") + \
				" indicates that the source repository could not be determined\n")
		return "".join(output)

	if sys.hexversion < 0x3000000:
		# On Python 2 __str__ must return encoded bytes, so keep the text
		# renderer as __unicode__ and encode in the overriding __str__.
		__unicode__ = __str__

		def __str__(self):
			return _unicode_encode(self.__unicode__(),
				encoding=_encodings['content'])
class _PackageCounters(object):
	"""Tallies of merge-list categories, rendered as the summary line
	(e.g. "Total: N packages (x upgrades, ...), Size of downloads: ...")."""

	def __init__(self):
		# All counters start at zero and are incremented by the caller
		# while it walks the merge list.
		self.upgrades = 0
		self.downgrades = 0
		self.new = 0
		self.newslot = 0
		self.reinst = 0
		self.uninst = 0
		self.blocks = 0
		self.blocks_satisfied = 0
		self.totalsize = 0
		self.restrict_fetch = 0
		self.restrict_fetch_satisfied = 0
		self.interactive = 0
		self.binary = 0

	def __str__(self):
		"""Build the human-readable summary; plural suffixes are appended
		per category and the parenthesized detail list is comma-joined."""
		total_installs = self.upgrades + self.downgrades + self.newslot + self.new + self.reinst
		myoutput = []
		details = []
		myoutput.append("Total: %s package" % total_installs)
		if total_installs != 1:
			myoutput.append("s")
		if total_installs != 0:
			myoutput.append(" (")
		if self.upgrades > 0:
			details.append("%s upgrade" % self.upgrades)
			if self.upgrades > 1:
				details[-1] += "s"
		if self.downgrades > 0:
			details.append("%s downgrade" % self.downgrades)
			if self.downgrades > 1:
				details[-1] += "s"
		if self.new > 0:
			details.append("%s new" % self.new)
		if self.newslot > 0:
			details.append("%s in new slot" % self.newslot)
			if self.newslot > 1:
				details[-1] += "s"
		if self.reinst > 0:
			details.append("%s reinstall" % self.reinst)
			if self.reinst > 1:
				details[-1] += "s"
		if self.binary > 0:
			details.append("%s binary" % self.binary)
			if self.binary > 1:
				# "binary" -> "binaries"
				details[-1] = details[-1][:-1] + "ies"
		if self.uninst > 0:
			details.append("%s uninstall" % self.uninst)
			if self.uninst > 1:
				details[-1] += "s"
		if self.interactive > 0:
			details.append("%s %s" % (self.interactive,
				colorize("WARN", "interactive")))
		myoutput.append(", ".join(details))
		if total_installs != 0:
			myoutput.append(")")
		myoutput.append(", Size of downloads: %s" % _format_size(self.totalsize))
		if self.restrict_fetch:
			myoutput.append("\nFetch Restriction: %s package" % \
				self.restrict_fetch)
			if self.restrict_fetch > 1:
				myoutput.append("s")
		if self.restrict_fetch_satisfied < self.restrict_fetch:
			myoutput.append(bad(" (%s unsatisfied)") % \
				(self.restrict_fetch - self.restrict_fetch_satisfied))
		if self.blocks > 0:
			myoutput.append("\nConflict: %s block" % \
				self.blocks)
			if self.blocks > 1:
				myoutput.append("s")
			if self.blocks_satisfied < self.blocks:
				myoutput.append(bad(" (%s unsatisfied)") % \
					(self.blocks - self.blocks_satisfied))
		return "".join(myoutput)
class _DisplayConfig(object):
	"""Snapshot of all depgraph/option state the display code needs.

	Flattens the relevant parts of the depgraph's frozen and dynamic
	config (plus command-line options) into plain attributes, so the
	display routines do not reach into depgraph internals themselves.
	"""

	def __init__(self, depgraph, mylist, favorites, verbosity):
		frozen_config = depgraph._frozen_config
		dynamic_config = depgraph._dynamic_config
		self.mylist = mylist
		self.favorites = InternalPackageSet(favorites, allow_repo=True)
		self.verbosity = verbosity

		if self.verbosity is None:
			# Derive verbosity from options: --quiet => 1, --verbose => 3,
			# otherwise the default of 2.
			self.verbosity = ("--quiet" in frozen_config.myopts and 1 or \
				"--verbose" in frozen_config.myopts and 3 or 2)

		self.oneshot = "--oneshot" in frozen_config.myopts or \
			"--onlydeps" in frozen_config.myopts
		self.columns = "--columns" in frozen_config.myopts
		self.tree_display = "--tree" in frozen_config.myopts
		self.alphabetical = "--alphabetical" in frozen_config.myopts
		self.quiet = "--quiet" in frozen_config.myopts
		self.all_flags = self.verbosity == 3 or self.quiet
		self.print_use_string = self.verbosity != 1 or "--verbose" in frozen_config.myopts
		self.changelog = "--changelog" in frozen_config.myopts
		self.edebug = frozen_config.edebug
		self.unordered_display = "--unordered-display" in frozen_config.myopts

		# Output width: COLUMNWIDTH overrides the default of 130 when it
		# parses as an integer; otherwise warn and keep the default.
		mywidth = 130
		if "COLUMNWIDTH" in frozen_config.settings:
			try:
				mywidth = int(frozen_config.settings["COLUMNWIDTH"])
			except ValueError as e:
				writemsg("!!! %s\n" % str(e), noiselevel=-1)
				writemsg("!!! Unable to parse COLUMNWIDTH='%s'\n" % \
					frozen_config.settings["COLUMNWIDTH"], noiselevel=-1)
				del e
		self.columnwidth = mywidth

		# repo_display only exists when --quiet-repo-display is in effect.
		if "--quiet-repo-display" in frozen_config.myopts:
			self.repo_display = _RepoDisplay(frozen_config.roots)
		self.trees = frozen_config.trees
		self.pkgsettings = frozen_config.pkgsettings
		self.target_root = frozen_config.target_root
		self.running_root = frozen_config._running_root
		self.roots = frozen_config.roots

		# References into the dynamic (per-resolution) depgraph state.
		self.blocker_parents = dynamic_config._blocker_parents
		self.reinstall_nodes = dynamic_config._reinstall_nodes
		self.digraph = dynamic_config.digraph
		self.blocker_uninstalls = dynamic_config._blocker_uninstalls
		self.slot_pkg_map = dynamic_config._slot_pkg_map
		self.set_nodes = dynamic_config._set_nodes

		self.pkg_use_enabled = depgraph._pkg_use_enabled
		self.pkg = depgraph._pkg
def _format_size(mysize):
	"""Return *mysize* (a byte count) as a comma-grouped kB string.

	String inputs are passed through unchanged.  Byte counts are rounded
	up to the next whole kB so a small pending download never displays
	as "0 kB".
	"""
	if isinstance(mysize, basestring):
		return mysize
	remainder = mysize % 1024
	if remainder != 0:
		# Round up to the next kB boundary.
		mysize += 1024 - remainder
	digits = str(mysize // 1024)
	# Insert thousands separators from the right, three digits at a time.
	pos = len(digits)
	while pos > 3:
		pos -= 3
		digits = digits[:pos] + "," + digits[pos:]
	return digits + " kB"
def _create_use_string(conf, name, cur_iuse, iuse_forced, cur_use,
	old_iuse, old_use,
	is_new, feature_flags, reinst_flags):
	"""Build the colorized USE-flag string for one package.

	Notation produced per flag: leading "-" = disabled, trailing "*" =
	value changed vs. the installed package, trailing "%" = flag added/
	removed from IUSE, "(...)" = forced or removed flag, "{...}" =
	feature flag.  Returns e.g. 'USE="flag1 -flag2" ' or "" when USE
	output is suppressed.
	"""
	if not conf.print_use_string:
		return ""
	enabled = []
	if conf.alphabetical:
		# In alphabetical mode all categories share one list, so flags
		# come out in plain sorted order rather than grouped.
		disabled = enabled
		removed = enabled
	else:
		disabled = []
		removed = []
	cur_iuse = set(cur_iuse)
	enabled_flags = cur_iuse.intersection(cur_use)
	removed_iuse = set(old_iuse).difference(cur_iuse)
	any_iuse = cur_iuse.union(old_iuse)
	any_iuse = list(any_iuse)
	any_iuse.sort()

	for flag in any_iuse:
		flag_str = None
		isEnabled = False
		reinst_flag = reinst_flags and flag in reinst_flags
		if flag in enabled_flags:
			isEnabled = True
			if is_new or flag in old_use and \
				(conf.all_flags or reinst_flag):
				flag_str = red(flag)
			elif flag not in old_iuse:
				flag_str = yellow(flag) + "%*"
			elif flag not in old_use:
				flag_str = green(flag) + "*"
		elif flag in removed_iuse:
			if conf.all_flags or reinst_flag:
				flag_str = yellow("-" + flag) + "%"
				if flag in old_use:
					flag_str += "*"
				flag_str = "(" + flag_str + ")"
				removed.append(flag_str)
			continue
		else:
			if is_new or flag in old_iuse and \
				flag not in old_use and \
				(conf.all_flags or reinst_flag):
				flag_str = blue("-" + flag)
			elif flag not in old_iuse:
				flag_str = yellow("-" + flag)
				if flag not in iuse_forced:
					flag_str += "%"
			elif flag in old_use:
				flag_str = green("-" + flag) + "*"
		if flag_str:
			# Wrap feature flags in {} and forced flags in ().
			if flag in feature_flags:
				flag_str = "{" + flag_str + "}"
			elif flag in iuse_forced:
				flag_str = "(" + flag_str + ")"
			if isEnabled:
				enabled.append(flag_str)
			else:
				disabled.append(flag_str)

	if conf.alphabetical:
		ret = " ".join(enabled)
	else:
		ret = " ".join(enabled + disabled + removed)
	if ret:
		ret = '%s="%s" ' % (name, ret)
	return ret
def _tree_display(conf, mylist):
	"""Prepare the (node, depth, ordered) list for --tree output.

	Works on a copy of the dependency digraph, rewiring Uninstall tasks
	and their blockers into it so the tree shows why each uninstall
	happens, then delegates to the ordered or unordered flattener and
	prunes redundant rows.
	"""

	# If there are any Uninstall instances, add the
	# corresponding blockers to the digraph.
	mygraph = conf.digraph.copy()

	executed_uninstalls = set(node for node in mylist \
		if isinstance(node, Package) and node.operation == "unmerge")

	for uninstall in conf.blocker_uninstalls.leaf_nodes():
		uninstall_parents = \
			conf.blocker_uninstalls.parent_nodes(uninstall)
		if not uninstall_parents:
			continue

		# Remove the corresponding "nomerge" node and substitute
		# the Uninstall node.
		inst_pkg = conf.pkg(uninstall.cpv, "installed",
			uninstall.root_config, installed=True)

		try:
			mygraph.remove(inst_pkg)
		except KeyError:
			pass

		try:
			inst_pkg_blockers = conf.blocker_parents.child_nodes(inst_pkg)
		except KeyError:
			inst_pkg_blockers = []

		# Break the Package -> Uninstall edges.
		mygraph.remove(uninstall)

		# Resolution of a package's blockers
		# depend on it's own uninstallation.
		for blocker in inst_pkg_blockers:
			mygraph.add(uninstall, blocker)

		# Expand Package -> Uninstall edges into
		# Package -> Blocker -> Uninstall edges.
		for blocker in uninstall_parents:
			mygraph.add(uninstall, blocker)
			for parent in conf.blocker_parents.parent_nodes(blocker):
				if parent != inst_pkg:
					mygraph.add(blocker, parent)

		# If the uninstall task did not need to be executed because
		# of an upgrade, display Blocker -> Upgrade edges since the
		# corresponding Blocker -> Uninstall edges will not be shown.
		upgrade_node = \
			conf.slot_pkg_map[uninstall.root].get(uninstall.slot_atom)
		if upgrade_node is not None and \
			uninstall not in executed_uninstalls:
			for blocker in uninstall_parents:
				mygraph.add(upgrade_node, blocker)

	if conf.unordered_display:
		display_list = _unordered_tree_display(mygraph, mylist)
	else:
		display_list = _ordered_tree_display(conf, mygraph, mylist)

	_prune_tree_display(display_list)
	return display_list
def _unordered_tree_display(mygraph, mylist):
	"""Flatten *mygraph* depth-first into (node, depth, ordered) tuples.

	Each node is visited at most once.  Only Blocker/Package nodes are
	emitted; any other node is skipped, and its children restart at
	depth 0 (the skipped node contributes depth -1, so child depth is
	-1 + 1).  The *mylist* argument is unused but kept for signature
	parity with _ordered_tree_display.
	"""
	display_list = []
	visited = set()

	def visit(node, depth):
		if node in visited:
			return
		visited.add(node)
		if isinstance(node, (Blocker, Package)):
			display_list.append((node, depth, True))
		else:
			# Non-displayable container node: children show at depth 0.
			depth = -1
		for child in mygraph.child_nodes(node):
			visit(child, depth + 1)

	for root in mygraph.root_nodes():
		visit(root, 0)

	return display_list
def _ordered_tree_display(conf, mygraph, mylist):
	"""Flatten the graph into (node, depth, ordered) rows while keeping
	the merge-order of *mylist*.

	Each list element is attached beneath the deepest node in the current
	tree path that is its parent; when no such parent exists, the node's
	ancestry is reconstructed upward via add_parents(), marking those
	synthesized rows as unordered (ordered=False).
	"""
	depth = 0
	shown_edges = set()
	tree_nodes = []
	display_list = []

	for x in mylist:
		depth = len(tree_nodes)
		# Pop back up the current path until we find a parent of x.
		while depth and x not in \
			mygraph.child_nodes(tree_nodes[depth-1]):
			depth -= 1
		if depth:
			tree_nodes = tree_nodes[:depth]
			tree_nodes.append(x)
			display_list.append((x, depth, True))
			shown_edges.add((x, tree_nodes[depth-1]))
		else:
			traversed_nodes = set() # prevent endless circles
			traversed_nodes.add(x)

			def add_parents(current_node, ordered):
				parent_nodes = None
				# Do not traverse to parents if this node is an
				# an argument or a direct member of a set that has
				# been specified as an argument (system or world).
				if current_node not in conf.set_nodes:
					parent_nodes = mygraph.parent_nodes(current_node)
				if parent_nodes:
					child_nodes = set(mygraph.child_nodes(current_node))
					selected_parent = None
					# First, try to avoid a direct cycle.
					for node in parent_nodes:
						if not isinstance(node, (Blocker, Package)):
							continue
						if node not in traversed_nodes and \
							node not in child_nodes:
							edge = (current_node, node)
							if edge in shown_edges:
								continue
							selected_parent = node
							break
					if not selected_parent:
						# A direct cycle is unavoidable.
						for node in parent_nodes:
							if not isinstance(node, (Blocker, Package)):
								continue
							if node not in traversed_nodes:
								edge = (current_node, node)
								if edge in shown_edges:
									continue
								selected_parent = node
								break
					if selected_parent:
						# Recurse upward first so ancestors print before
						# descendants; their rows are unordered.
						shown_edges.add((current_node, selected_parent))
						traversed_nodes.add(selected_parent)
						add_parents(selected_parent, False)
				display_list.append((current_node,
					len(tree_nodes), ordered))
				tree_nodes.append(current_node)

			tree_nodes = []
			add_parents(x, True)

	return display_list
def _prune_tree_display(display_list):
	"""Remove redundant rows from a (node, depth, ordered) list, in place.

	Walks backwards (so deletions don't shift unvisited indices), dropping
	consecutive duplicates introduced while filling in ancestry, and
	dropping unordered ancestor rows that are not needed to show the path
	to a merge/uninstall row below them.
	"""
	last_merge_depth = 0
	for i in range(len(display_list) - 1, -1, -1):
		node, depth, ordered = display_list[i]
		if not ordered and depth == 0 and i > 0 \
			and node == display_list[i-1][0] and \
			display_list[i-1][1] == 0:
			# An ordered node got a consecutive duplicate
			# when the tree was being filled in.
			del display_list[i]
			continue
		if ordered and isinstance(node, Package) \
			and node.operation in ('merge', 'uninstall'):
			# Remember the depth of the nearest real action below.
			last_merge_depth = depth
			continue
		if depth >= last_merge_depth or \
			i < len(display_list) - 1 and \
			depth >= display_list[i+1][1]:
			# Row is not an ancestor of any remaining action: drop it.
			del display_list[i]
def _calc_changelog(ebuildpath, current, next):
	"""Return ChangeLog output lines for versions after *current* up to *next*.

	*ebuildpath* locates the package directory whose ChangeLog files are
	scanned: the plain ChangeLog plus year-split ChangeLog-YYYY files,
	newest first.  Returns a list of formatted lines, or [] when the
	ChangeLog (or the *current* version's entry) cannot be found.

	Changes vs. the previous revision: use ``is None`` instead of
	``== None``, and drop an unused local and commented-out debug prints.
	Behavior is unchanged.
	"""
	if ebuildpath is None or not os.path.exists(ebuildpath):
		return []
	# Reduce both versions to "version-revision" form, without "-r0".
	current = '-'.join(catpkgsplit(current)[1:])
	if current.endswith('-r0'):
		current = current[:-3]
	next = '-'.join(catpkgsplit(next)[1:])
	if next.endswith('-r0'):
		next = next[:-3]
	changelogdir = os.path.dirname(ebuildpath)
	changelogs = ['ChangeLog']
	# ChangeLog-YYYY (see bug #389611)
	changelogs.extend(sorted((fn for fn in os.listdir(changelogdir)
		if fn.startswith('ChangeLog-')), reverse=True))

	# Accumulate (version, lines) sections until we hit *current*.
	divisions = []
	found_current = False
	for fn in changelogs:
		changelogpath = os.path.join(changelogdir, fn)
		try:
			with io.open(_unicode_encode(changelogpath,
				encoding=_encodings['fs'], errors='strict'),
				mode='r', encoding=_encodings['repo.content'],
				errors='replace') as f:
				changelog = f.read()
		except EnvironmentError:
			return []
		for node in _find_changelog_tags(changelog):
			if node[0] == current:
				found_current = True
				break
			else:
				divisions.append(node)
		if found_current:
			break

	if not found_current:
		return []

	# Skip entries for all revisions above the one we are about to emerge.
	later_rev_index = None
	for i, node in enumerate(divisions):
		if node[0] == next:
			if later_rev_index is not None:
				first_node = divisions[later_rev_index]
				# Discard the later revision and the first ChangeLog entry
				# that follows it. We want to display all the entries after
				# that first entry, as discussed in bug #373009.
				iterator = iter(first_node[1])
				for l in iterator:
					if not l:
						# end of the first entry that's discarded
						break
				first_node = (None, list(iterator))
				divisions = [first_node] + divisions[later_rev_index+1:]
			break
		if node[0] is not None:
			later_rev_index = i

	# Render the remaining sections, separating entries with blank lines
	# and bolding each revision header.
	output = []
	prev_blank = False
	prev_rev = False
	for rev, lines in divisions:
		if rev is not None:
			if not (prev_blank or prev_rev):
				output.append("\n")
			output.append(bold('*' + rev) + '\n')
			prev_rev = True
			prev_blank = False
		if lines:
			prev_rev = False
			if not prev_blank:
				output.append("\n")
			for l in lines:
				output.append(l + "\n")
			output.append("\n")
			prev_blank = True
	return output
def _strip_header_comments(lines):
# strip leading and trailing blank or header/comment lines
i = 0
while i < len(lines) and (not lines[i] or lines[i][:1] == "#"):
i += 1
if i:
lines = lines[i:]
while lines and (not lines[-1] or lines[-1][:1] == "#"):
lines.pop()
return lines
def _find_changelog_tags(changelog):
	"""Split ChangeLog text into (version, lines) sections.

	Each '* <version>' header line starts a new section; any text before
	the first header is attributed to version None.  '.ebuild' suffixes
	and '-r0' revision markers are stripped from version names, and each
	section's lines are cleaned with _strip_header_comments().
	"""
	sections = []
	if not changelog:
		return sections
	release = None
	section_start = 0
	header_re = re.compile(r'^\*\ ?([-a-zA-Z0-9_.+]*)(?:\ .*)?$', re.M)
	for match in header_re.finditer(changelog):
		body = changelog[section_start:match.start()].splitlines()
		sections.append((release, _strip_header_comments(body)))
		section_start = match.end()
		release = match.group(1)
		if release.endswith('.ebuild'):
			release = release[:-7]
		if release.endswith('-r0'):
			release = release[:-3]
	# Final section: everything after the last header.
	body = changelog[section_start:].splitlines()
	sections.append((release, _strip_header_comments(body)))
	return sections
class PkgInfo(object):
	"""Simple class to hold instance attributes for current
	information about the pkg being printed.
	"""

	__slots__ = ("attr_display", "built", "cp",
		"ebuild_path", "fetch_symbol", "merge",
		"oldbest", "oldbest_list", "operation", "ordered", "previous_pkg",
		"repo_name", "repo_path_real", "slot", "sub_slot", "system", "use", "ver", "world")

	def __init__(self):
		# Boolean state flags.
		self.built = False
		self.ordered = False
		self.system = False
		self.world = False
		# Textual fields shown in the merge list.
		self.cp = ''
		self.ebuild_path = ''
		self.fetch_symbol = ''
		self.merge = ''
		self.oldbest = ''
		self.operation = ''
		self.repo_name = ''
		self.repo_path_real = ''
		self.slot = ''
		self.sub_slot = ''
		self.use = ''
		self.ver = ''
		# Miscellaneous state.
		self.oldbest_list = []
		self.previous_pkg = None
		self.attr_display = PkgAttrDisplay()
class PkgAttrDisplay(SlotObject):
	"""Fixed-width attribute column for one merge-list row.

	__str__ emits one character (or a space) per position:
	I=interactive, N=new / r=force reinstall, R=replace / S=new slot,
	F=fetch restricted / f=fetch restriction satisfied, U=new version,
	D=downgrade, followed by the mask character if any.
	"""

	__slots__ = ("downgrade", "fetch_restrict", "fetch_restrict_satisfied",
		"force_reinstall",
		"interactive", "mask", "new", "new_slot", "new_version", "replace")

	def __str__(self):
		output = []

		if self.interactive:
			output.append(colorize("WARN", "I"))
		else:
			output.append(" ")

		if self.new or self.force_reinstall:
			if self.force_reinstall:
				output.append(red("r"))
			else:
				output.append(green("N"))
		else:
			output.append(" ")

		if self.new_slot or self.replace:
			if self.replace:
				output.append(yellow("R"))
			else:
				output.append(green("S"))
		else:
			output.append(" ")

		if self.fetch_restrict or self.fetch_restrict_satisfied:
			if self.fetch_restrict_satisfied:
				output.append(green("f"))
			else:
				output.append(red("F"))
		else:
			output.append(" ")

		if self.new_version:
			output.append(turquoise("U"))
		else:
			output.append(" ")

		if self.downgrade:
			output.append(blue("D"))
		else:
			output.append(" ")

		if self.mask is not None:
			output.append(self.mask)

		return "".join(output)

	if sys.hexversion < 0x3000000:
		# Python 2: keep the text renderer as __unicode__ and have
		# __str__ return encoded bytes.
		__unicode__ = __str__

		def __str__(self):
			return _unicode_encode(self.__unicode__(),
				encoding=_encodings['content'])
| entoo/portage-src | pym/_emerge/resolver/output_helpers.py | Python | gpl-2.0 | 19,925 |
from helpers.manipulator_base import ManipulatorBase
class InsightManipulator(ManipulatorBase):
    """
    Handle Insight database writes.
    """
    @classmethod
    def updateMerge(self, new_insight, old_insight, auto_union=True):
        """
        Given an "old" and a "new" Insight object, replace the fields in the
        "old" Insight that are present in the "new" Insight, but keep fields from
        the "old" Insight that are null in the "new" insight.

        The literal string "None" in a new field acts as a deletion marker and
        clears the corresponding old field.  old_insight.dirty is set whenever
        a field actually changes.  Returns the (possibly modified) old_insight.
        """
        attrs = [
            'name',
            'year',
            'data_json',
        ]
        for attr in attrs:
            # Fetch once; the original repeated getattr() for every check.
            new_value = getattr(new_insight, attr)
            if new_value is not None:
                if new_value != getattr(old_insight, attr):
                    setattr(old_insight, attr, new_value)
                    old_insight.dirty = True

            if new_value == "None":
                # The string "None" explicitly requests clearing the field.
                if getattr(old_insight, attr, None) is not None:
                    setattr(old_insight, attr, None)
                    old_insight.dirty = True

        return old_insight
| the-blue-alliance/the-blue-alliance | old_py2/helpers/insight_manipulator.py | Python | mit | 1,096 |
#!/usr/bin/env python
# encoding: utf-8
from smisk.test import *
from smisk.inflection import inflection as en
class English(TestCase):
  """Spot-check the English inflector: pluralization, casing helpers,
  table/class-name conversions, foreign keys and ordinal suffixes."""

  def test_plural(self):
    assert en.pluralize(u'mouse') == u'mice'
    assert en.pluralize(u'train') == u'trains'
    assert en.pluralize(u'commotion') == u'commotion'
    assert en.pluralize(u'cat') == u'cats'

  def test_camel(self):
    assert en.camelize(u'moder_controller/barn') == u'ModerController.Barn'

  def test_human(self):
    # humanize drops a trailing "_id" and capitalizes the first word.
    assert en.humanize(u'employee_salary') == u'Employee salary'
    assert en.humanize(u'author_id') == u'Author'

  def test_demodule(self):
    assert en.demodulize(u'ActiveRecord.CoreExtensions.String.Inflection') == u'Inflection'
    assert en.demodulize(u'Inflection') == u'Inflection'

  def test_table(self):
    assert en.tableize(u'RawScaledScorer') == u'raw_scaled_scorers'
    assert en.tableize(u'egg_and_ham') == u'egg_and_hams'
    assert en.tableize(u'fancyCategory') == u'fancy_categories'

  def test_class(self):
    assert en.classify(u'egg_and_hams') == u'EggAndHam'
    assert en.classify(u'post') == u'Post'
    assert en.classify(u'categories') == u'Category'

  def test_foreignKey(self):
    # Second argument toggles the underscore before "id".
    assert en.foreignKey(u'Message') == u'message_id'
    assert en.foreignKey(u'Message', False) == u'messageid'
    assert en.foreignKey(u'admin.Post') == u'post_id'

  def test_ordinal(self):
    assert en.ordinalize(1) == u"1st"
    assert en.ordinalize(2) == u"2nd"
    assert en.ordinalize(3) == u"3rd"
    assert en.ordinalize(8) == u"8th"
    assert en.ordinalize(12) == u"12th"
    assert en.ordinalize(1002) == u"1002nd"
    assert en.ordinalize(9876) == u"9876th"

  def test_misc(self):
    assert en.underscore(u'ModerController.Barn') == u'moder_controller/barn'
#from smisk.inflection.sv import inflection as sv
#class Swedish(TestCase):
# def test_plural(self):
# assert sv.pluralize(u'mus') == u'möss'
# assert sv.pluralize(u'train') == u'trainer'
# assert sv.pluralize(u'post') == u'poster'
# assert sv.pluralize(u'person') == u'personer'
#
# def test_dual(self):
# def t(singular, plural):
# #print singular, u"->", sv.pluralize(singular) + u',', plural, u'->', sv.singularize(plural)
# assert sv.pluralize(singular) == plural
# assert sv.singularize(plural) == singular
# t(u"bil", u"bilar")
# t(u"båt", u"båtar")
# t(u"katt", u"katter")
# t(u"peng", u"pengar")
# t(u"man", u"män")
# t(u"person", u"personer")
# t(u"huvud", u"huvuden")
# t(u"folk", u"folk")
# t(u"vittne", u"vittnen")
# t(u"morsa", u"morsor")
# t(u"liten", u"små")
# t(u"stor", u"stora")
# t(u"ny", u"nya")
# t(u"rik", u"rika")
# t(u"dum", u"dumma")
# t(u"stum", u"stumma")
# t(u"kvinna", u"kvinnor")
# t(u"intressant", u"intressanta")
# t(u"given", u"givna")
# t(u"ven", u"vener")
# t(u"hand", u"händer")
# t(u"land", u"länder")
# t(u"kviga", u"kvigor")
# t(u"mun", u"munnar")
# t(u"ros", u"rosor")
# t(u"lus", u"löss")
# t(u"mus", u"möss")
# t(u"kust", u"kuster")
# t(u"lust", u"lustar")
# t(u"pojke", u"pojkar")
# t(u"flicka", u"flickor")
# t(u"snorkel", u"snorklar")
#
# def test_ordinal(self):
# assert sv.ordinalize(1) == u"1:a"
# assert sv.ordinalize(2) == u"2:a"
# assert sv.ordinalize(3) == u"3:e"
# assert sv.ordinalize(921.3) == u"921:a"
# assert sv.ordinalize(500) == u"500:e"
#
def suite():
  """Assemble and return the unittest suite for this module."""
  cases = [
    unittest.makeSuite(English),
    #unittest.makeSuite(Swedish),
  ]
  return unittest.TestSuite(cases)
def test():
  """Run the module's suite with a text runner and return the result."""
  return unittest.TextTestRunner().run(suite())
test()
| rsms/smisk | lib/smisk/test/inflection.py | Python | mit | 3,656 |
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
GiG G8 Driver
"""
import json
from libcloud.compute.base import NodeImage, NodeSize, Node
from libcloud.compute.base import NodeDriver, UuidMixin
from libcloud.compute.base import StorageVolume, NodeAuthSSHKey
from libcloud.compute.types import Provider, NodeState
from libcloud.common.gig_g8 import G8Connection
from libcloud.common.exceptions import BaseHTTPError
class G8ProvisionError(Exception):
    """Raised when provisioning on the G8 fails
    (e.g. no free public port could be found for an ssh forward)."""
    pass
class G8PortForward(UuidMixin):
    """A single public->private port mapping inside a G8 network."""

    def __init__(self, network, node_id, publicport,
                 privateport, protocol, driver):
        self.network = network
        self.node_id = node_id
        # Ports come back from the API as strings; normalize to int.
        self.publicport = int(publicport)
        self.privateport = int(privateport)
        self.protocol = protocol
        self.driver = driver
        UuidMixin.__init__(self)

    def destroy(self):
        """Delete this port forward via the driver that created it."""
        self.driver.ex_delete_portforward(self)
class G8Network(UuidMixin):
    """
    G8 Network object class.

    This class maps to a cloudspace
    """

    def __init__(self, id, name, cidr, publicipaddress, driver, extra=None):
        self.id = id
        self.name = name
        # May be None when constructed from a list call; resolved lazily
        # by the ``cidr`` property below.
        self._cidr = cidr
        self.driver = driver
        self.publicipaddress = publicipaddress
        self.extra = extra
        UuidMixin.__init__(self)

    @property
    def cidr(self):
        """
        Cidr is not part of the list result
        we will lazily fetch it with a get request
        """
        if self._cidr is None:
            networkdata = self.driver._api_request("/cloudspaces/get",
                                                   {"cloudspaceId": self.id})
            self._cidr = networkdata["privatenetwork"]
        return self._cidr

    def list_nodes(self):
        # Nodes (VMs) deployed inside this cloudspace.
        return self.driver.list_nodes(self)

    def destroy(self):
        # Deletes the cloudspace itself.
        return self.driver.ex_destroy_network(self)

    def list_portforwards(self):
        # All public->private port mappings of this cloudspace.
        return self.driver.ex_list_portforwards(self)

    def create_portforward(self, node, publicport,
                           privateport, protocol='tcp'):
        # Convenience wrapper around the driver-level call.
        return self.driver.ex_create_portforward(self, node, publicport,
                                                 privateport, protocol)
class G8NodeDriver(NodeDriver):
    """
    GiG G8 node driver
    """

    # Map G8 machine statuses (including transitional ones) onto
    # libcloud NodeState values.
    NODE_STATE_MAP = {'VIRTUAL': NodeState.PENDING,
                      'HALTED': NodeState.STOPPED,
                      'RUNNING': NodeState.RUNNING,
                      'DESTROYED': NodeState.TERMINATED,
                      'DELETED': NodeState.TERMINATED,
                      'PAUSED': NodeState.PAUSED,
                      'ERROR': NodeState.ERROR,
                      # transition states
                      'DEPLOYING': NodeState.PENDING,
                      'STOPPING': NodeState.STOPPING,
                      'MOVING': NodeState.MIGRATING,
                      'RESTORING': NodeState.PENDING,
                      'STARTING': NodeState.STARTING,
                      'PAUSING': NodeState.PENDING,
                      'RESUMING': NodeState.PENDING,
                      'RESETTING': NodeState.REBOOTING,
                      'DELETING': NodeState.TERMINATED,
                      'DESTROYING': NodeState.TERMINATED,
                      'ADDING_DISK': NodeState.RECONFIGURING,
                      'ATTACHING_DISK': NodeState.RECONFIGURING,
                      'DETACHING_DISK': NodeState.RECONFIGURING,
                      'ATTACHING_NIC': NodeState.RECONFIGURING,
                      'DETTACHING_NIC': NodeState.RECONFIGURING,
                      'DELETING_DISK': NodeState.RECONFIGURING,
                      'CHANGING_DISK_LIMITS': NodeState.RECONFIGURING,
                      'CLONING': NodeState.PENDING,
                      'RESIZING': NodeState.RECONFIGURING,
                      'CREATING_TEMPLATE': NodeState.PENDING,
                      }

    name = "GiG G8 Node Provider"
    website = 'https://gig.tech'
    type = Provider.GIG_G8
    connectionCls = G8Connection

    def __init__(self, user_id, key, api_url):
        # type (int, str, str) -> None
        """
        :param key: Token to use for api (jwt)
        :type key: ``str``
        :param user_id: Id of the account to connect to (accountId)
        :type user_id: ``int``
        :param api_url: G8 api url
        :type api_url: ``str``
        :rtype: ``None``
        """
        self._apiurl = api_url.rstrip("/")
        super(G8NodeDriver, self).__init__(key=key)
        self._account_id = user_id
        # Cached by the ``_location`` property on first access.
        self._location_data = None

    def _ex_connection_class_kwargs(self):
        # Pass the api url through to G8Connection.
        return {"url": self._apiurl}

    def _api_request(self, endpoint, params=None):
        # Every G8 API call is a POST with a JSON body.
        return self.connection.request(endpoint.lstrip("/"),
                                       data=json.dumps(params),
                                       method="POST").object

    @property
    def _location(self):
        # Lazily fetch and cache the first location exposed by this G8.
        if self._location_data is None:
            self._location_data = self._api_request("/locations/list")[0]
        return self._location_data

    def create_node(self, name, image, ex_network, ex_description,
                    size=None, auth=None, ex_create_attr=None,
                    ex_expose_ssh=False):
        # type (str, Image, G8Network, str, Size,
        #       Optional[NodeAuthSSHKey], Optional[Dict], bool) -> Node
        """
        Create a node.

        The `ex_create_attr` parameter can include the following dictionary
        key and value pairs:

        * `memory`: ``int`` Memory in MiB
          (only used if size is None and vcpus is passed
        * `vcpus`: ``int`` Amount of vcpus
          (only used if size is None and memory is passed)
        * `disk_size`: ``int`` Size of bootdisk
          defaults to minimumsize of the image
        * `user_data`: ``str`` for cloud-config data
        * `private_ip`: ``str`` Private Ip inside network
        * `data_disks`: ``list(int)`` Extra data disks to assign
          to vm list of disk sizes in GiB

        :param name: the name to assign the vm
        :type name: ``str``
        :param size: the plan size to create
                     mutual exclusive with `memory` `vcpus`
        :type size: :class:`NodeSize`
        :param image: which distribution to deploy on the vm
        :type image: :class:`NodeImage`
        :param ex_network: G8 Network to place vm in
        :type ex_network: :class:`G8Network`
        :param ex_description: Description of vm
        :type ex_description: ``str``
        :param auth: an SSH key
        :type auth: :class:`NodeAuthSSHKey`
        :param ex_create_attr: A dictionary of optional attributes for
                               vm creation
        :type ex_create_attr: ``dict``
        :param ex_expose_ssh: Create portforward for ssh port
        :type ex_expose_ssh: ``bool``
        :return: The newly created node.
        :rtype: :class:`Node`
        """
        params = {"name": name,
                  "imageId": int(image.id),
                  "cloudspaceId": int(ex_network.id),
                  "description": ex_description}
        ex_create_attr = ex_create_attr or {}
        if size:
            params["sizeId"] = int(size.id)
        else:
            # No plan size given: memory/vcpus must be passed explicitly.
            params["memory"] = ex_create_attr["memory"]
            params["vcpus"] = ex_create_attr["vcpus"]
        if "user_data" in ex_create_attr:
            params["userdata"] = ex_create_attr["user_data"]
        if "data_disks" in ex_create_attr:
            params["datadisks"] = ex_create_attr["data_disks"]
        if "private_ip" in ex_create_attr:
            params["privateIp"] = ex_create_attr["private_ip"]
        if "disk_size" in ex_create_attr:
            params["disksize"] = ex_create_attr["disk_size"]
        else:
            params["disksize"] = image.extra["min_disk_size"]
        if auth and isinstance(auth, NodeAuthSSHKey):
            # Merge the public key into the cloud-init userdata for root.
            userdata = params.setdefault("userdata", {})
            users = userdata.setdefault("users", [])
            root = None
            for user in users:
                if user["name"] == "root":
                    root = user
                    break
            else:
                root = {"name": "root", "shell": "/bin/bash"}
                users.append(root)
            keys = root.setdefault("ssh-authorized-keys", [])
            keys.append(auth.pubkey)
        elif auth:
            error = "Auth type {} is not implemented".format(type(auth))
            raise NotImplementedError(error)
        machineId = self._api_request("/machines/create", params)
        machine = self._api_request("/machines/get",
                                    params={"machineId": machineId})
        node = self._to_node(machine, ex_network)
        if ex_expose_ssh:
            port = self.ex_expose_ssh_node(node)
            node.extra["ssh_port"] = port
            node.extra["ssh_ip"] = ex_network.publicipaddress
        return node

    def _find_ssh_ports(self, ex_network, node):
        """Collect the public ports in use on ``ex_network`` and, if an
        ssh forward (private port 22) already exists for ``node``, its
        public port.

        :rtype: ``dict`` with keys ``node`` (``int`` or ``None``) and
                ``network`` (``list`` of ``int``)
        """
        forwards = ex_network.list_portforwards()
        usedports = []
        result = {"node": None, "network": usedports}
        for forward in forwards:
            usedports.append(forward.publicport)
            if forward.node_id == node.id and forward.privateport == 22:
                # Bugfix: report the *public* side of the ssh forward.
                # The private port is always 22 here, so storing
                # ``privateport`` made ex_expose_ssh_node return 22
                # instead of the actual public port (compare with
                # ``_get_ssh_port`` in list_nodes which uses publicport).
                result["node"] = forward.publicport
        return result

    def ex_expose_ssh_node(self, node):
        """
        Create portforward for ssh purposed

        :param node: Node to expose ssh for
        :type node: ``Node``
        :rtype: ``int``
        """
        network = node.extra["network"]
        ports = self._find_ssh_ports(network, node)
        if ports["node"]:
            # An ssh forward already exists; reuse it.
            return ports["node"]
        usedports = ports["network"]
        sshport = 2200
        endport = 3000
        while sshport < endport:
            while sshport in usedports:
                sshport += 1
            try:
                network.create_portforward(node, sshport, 22)
                node.extra["ssh_port"] = sshport
                node.extra["ssh_ip"] = network.publicipaddress
                break
            except BaseHTTPError as e:
                if e.code == 409:
                    # Bugfix: the port was grabbed concurrently; remember
                    # it and retry with the next free port instead of
                    # re-raising (the unconditional raise made the retry
                    # loop dead code).
                    usedports.append(sshport)
                else:
                    raise
        else:
            # Loop exhausted the port range without a successful create.
            raise G8ProvisionError("Failed to create portforward")
        return sshport

    def ex_create_network(self, name, private_network="192.168.103.0/24",
                          type="vgw"):
        # type (str, str, str) -> G8Network
        """
        Create network also known as cloudspace

        :param name: the name to assing to the network
        :type name: ``str``
        :param private_network: subnet used as private network
        :type private_network: ``str``
        :param type: type of the gateway vgw or routeros
        :type type: ``str``
        """
        # The current user needs explicit access to the new cloudspace.
        userinfo = self._api_request("../system/usermanager/whoami")
        params = {"accountId": self._account_id,
                  "privatenetwork": private_network,
                  "access": userinfo["name"],
                  "name": name,
                  "location": self._location["locationCode"],
                  "type": type}
        networkid = self._api_request("/cloudspaces/create", params)
        network = self._api_request("/cloudspaces/get",
                                    {"cloudspaceId": networkid})
        return self._to_network(network)

    def ex_destroy_network(self, network):
        # type (G8Network) -> bool
        """Delete a cloudspace. Always returns ``True`` on API success."""
        self._api_request("/cloudspaces/delete",
                          {"cloudspaceId": int(network.id)})
        return True

    def stop_node(self, node):
        # type (Node) -> bool
        """
        Stop virtual machine
        """
        node.state = NodeState.STOPPING
        self._api_request("/machines/stop", {"machineId": int(node.id)})
        node.state = NodeState.STOPPED
        return True

    def ex_list_portforwards(self, network):
        # type (G8Network) -> List[G8PortForward]
        """List all port forwards of a cloudspace."""
        data = self._api_request("/portforwarding/list",
                                 {"cloudspaceId": int(network.id)})
        forwards = []
        for forward in data:
            forwards.append(self._to_port_forward(forward, network))
        return forwards

    def ex_create_portforward(self, network, node, publicport,
                              privateport, protocol="tcp"):
        # type (G8Network, Node, int, int, str) -> G8PortForward
        """Create a public->private port mapping for ``node``."""
        params = {"cloudspaceId": int(network.id),
                  "machineId": int(node.id),
                  "localPort": privateport,
                  "publicPort": publicport,
                  "publicIp": network.publicipaddress,
                  "protocol": protocol}
        self._api_request("/portforwarding/create", params)
        return self._to_port_forward(params, network)

    def ex_delete_portforward(self, portforward):
        # type (G8PortForward) -> bool
        """Delete a port forward identified by its public ip/port."""
        params = {"cloudspaceId": int(portforward.network.id),
                  "publicIp": portforward.network.publicipaddress,
                  "publicPort": portforward.publicport,
                  "proto": portforward.protocol}
        self._api_request("/portforwarding/deleteByPort", params)
        return True

    def start_node(self, node):
        # type (Node) -> bool
        """
        Start virtual machine
        """
        node.state = NodeState.STARTING
        self._api_request("/machines/start", {"machineId": int(node.id)})
        node.state = NodeState.RUNNING
        return True

    def ex_list_networks(self):
        # type () -> List[G8Network]
        """
        Return the list of networks.

        :return: A list of network objects.
        :rtype: ``list`` of :class:`G8Network`
        """
        networks = []
        for network in self._api_request("/cloudspaces/list"):
            # The list endpoint returns cloudspaces of all accounts the
            # token can see; keep only those of our account.
            if network["accountId"] == self._account_id:
                networks.append(self._to_network(network))
        return networks

    def list_sizes(self):
        # type () -> List[Size]
        """
        Returns a list of node sizes as a cloud provider might have
        """
        location = self._location["locationCode"]
        sizes = []
        # _to_size returns one NodeSize per supported disk size.
        for size in self._api_request("/sizes/list", {"location": location}):
            sizes.extend(self._to_size(size))
        return sizes

    def list_nodes(self, ex_network=None):
        # type (Optional[G8Network]) -> List[Node]
        """
        List the nodes known to a particular driver;
        There are two default nodes created at the beginning
        """
        def _get_ssh_port(forwards, node):
            # Return the forward exposing ssh (private port 22), if any.
            for forward in forwards:
                if forward.node_id == node.id and forward.privateport == 22:
                    return forward

        if ex_network:
            networks = [ex_network]
        else:
            networks = self.ex_list_networks()
        nodes = []
        for network in networks:
            nodes_list = self._api_request("/machines/list",
                                           params={"cloudspaceId": network.id})
            forwards = network.list_portforwards()
            for nodedata in nodes_list:
                node = self._to_node(nodedata, network)
                sshforward = _get_ssh_port(forwards, node)
                if sshforward:
                    node.extra["ssh_port"] = sshforward.publicport
                    node.extra["ssh_ip"] = network.publicipaddress
                nodes.append(node)
        return nodes

    def reboot_node(self, node):
        # type (Node) -> bool
        """
        Reboot node
        returns True as if the reboot had been successful.
        """
        node.state = NodeState.REBOOTING
        self._api_request("/machines/reboot", {"machineId": int(node.id)})
        node.state = NodeState.RUNNING
        return True

    def destroy_node(self, node):
        # type (Node) -> bool
        """
        Destroy node
        """
        self._api_request("/machines/delete", {"machineId": int(node.id)})
        return True

    def list_images(self):
        # type () -> List[Image]
        """
        Returns a list of images as a cloud provider might have

        @inherits: :class:`NodeDriver.list_images`
        """
        images = []
        for image in self._api_request("/images/list",
                                       {"accountId": self._account_id}):
            images.append(self._to_image(image))
        return images

    def list_volumes(self):
        # type () -> List[StorageVolume]
        """List the usable disks of this account."""
        volumes = []
        for disk in self._api_request("/disks/list",
                                      {"accountId": self._account_id}):
            # Skip disks that are being created/deleted or are templates.
            if disk["status"] not in ["ASSIGNED", "CREATED"]:
                continue
            volumes.append(self._to_volume(disk))
        return volumes

    def create_volume(self, size, name, ex_description, ex_disk_type="D"):
        # type (int, str, str, Optional[str]) -> StorageVolume
        """
        Create volume

        :param size: Size of the volume to create in GiB
        :type size: ``int``
        :param name: Name of the volume
        :type name: ``str``
        :param ex_description: Description of the volume
        :type ex_description: ``str``
        :param ex_disk_type: Type of the disk depending on the G8
                             D for datadisk is always available
        :type ex_disk_type: ``str``
        :rtype: class:`StorageVolume`
        """
        params = {"size": size,
                  "name": name,
                  "type": ex_disk_type,
                  "description": ex_description,
                  "gid": self._location["gid"],
                  "accountId": self._account_id
                  }
        diskId = self._api_request("/disks/create", params)
        disk = self._api_request("/disks/get", {"diskId": diskId})
        return self._to_volume(disk)

    def destroy_volume(self, volume):
        # type (StorageVolume) -> bool
        """Delete a disk. Always returns ``True`` on API success."""
        self._api_request("/disks/delete", {"diskId": int(volume.id)})
        return True

    def attach_volume(self, node, volume):
        # type (Node, StorageVolume) -> bool
        """Attach a disk to a virtual machine."""
        params = {"machineId": int(node.id),
                  "diskId": int(volume.id)}
        self._api_request("/machines/attachDisk", params)
        return True

    def detach_volume(self, node, volume):
        # type (Node, StorageVolume) -> bool
        """Detach a disk from a virtual machine."""
        params = {"machineId": int(node.id),
                  "diskId": int(volume.id)}
        self._api_request("/machines/detachDisk", params)
        return True

    def _to_volume(self, data):
        # type (dict) -> StorageVolume
        # ``machineId`` is only present for attached disks.
        extra = {"type": data["type"], "node_id": data.get("machineId")}
        return StorageVolume(id=str(data["id"]), size=data["sizeMax"],
                             name=data["name"], driver=self,
                             extra=extra)

    def _to_node(self, nodedata, ex_network):
        # type (dict) -> Node
        state = self.NODE_STATE_MAP.get(nodedata["status"], NodeState.UNKNOWN)
        public_ips = []
        private_ips = []
        # Depending on the endpoint the nics are exposed either as
        # "nics" or "interfaces".
        nics = nodedata.get("nics", [])
        if not nics:
            nics = nodedata.get("interfaces", [])
        for nic in nics:
            if nic["type"] == "PUBLIC":
                # Public addresses include a /prefix; strip it.
                public_ips.append(nic["ipAddress"].split("/")[0])
            else:
                private_ips.append(nic["ipAddress"])
        extra = {"network": ex_network}
        for account in nodedata.get("accounts", []):
            extra["password"] = account["password"]
            extra["username"] = account["login"]
        return Node(id=str(nodedata['id']), name=nodedata['name'],
                    driver=self, public_ips=public_ips,
                    private_ips=private_ips, state=state, extra=extra)

    def _to_network(self, network):
        # type (dict) -> G8Network
        # cidr is passed as None; G8Network fetches it lazily on demand.
        return G8Network(str(network["id"]), network["name"], None,
                         network["externalnetworkip"], self)

    def _to_image(self, image):
        # type (dict) -> Image
        extra = {"min_disk_size": image["bootDiskSize"],
                 "min_memory": image["memory"],
                 }
        return NodeImage(id=str(image["id"]), name=image["name"],
                         driver=self, extra=extra)

    def _to_size(self, size):
        # type (dict) -> Size
        # A G8 size defines one cpu/memory combination with several
        # possible boot disk sizes; emit one NodeSize per disk size.
        sizes = []
        for disk in size["disks"]:
            sizes.append(NodeSize(id=str(size["id"]), name=size["name"],
                                  ram=size["memory"], disk=disk,
                                  driver=self, extra={"vcpus": size["vcpus"]},
                                  bandwidth=0, price=0))
        return sizes

    def _to_port_forward(self, data, ex_network):
        # type (dict, G8Network) -> G8PortForward
        return G8PortForward(ex_network, str(data["machineId"]),
                             data["publicPort"], data["localPort"],
                             data["protocol"], self)
if __name__ == "__main__":
import doctest
doctest.testmod()
| Kami/libcloud | libcloud/compute/drivers/gig_g8.py | Python | apache-2.0 | 22,013 |
"""This module implements functions for querying properties of the operating
system or for the specific process the code is running in.
"""
import os
import sys
import re
import multiprocessing
import subprocess
# check_output() only exists on Python 2.7+; provide a local
# re-implementation as a fallback for Python 2.6.
try:
    from subprocess import check_output as _execute_program
except ImportError:
    def _execute_program(*popenargs, **kwargs):
        # Replicates check_output() implementation from Python 2.7+.
        # Should only be used for Python 2.6.
        # NOTE(review): CalledProcessError only grew the ``output``
        # keyword in 2.7; on 2.6 (where this fallback runs) raising it
        # with output= would itself TypeError -- verify on 2.6.
        if 'stdout' in kwargs:
            raise ValueError(
                'stdout argument not allowed, it will be overridden.')
        process = subprocess.Popen(stdout=subprocess.PIPE,
                                   *popenargs, **kwargs)
        output, unused_err = process.communicate()
        retcode = process.poll()
        if retcode:
            cmd = kwargs.get("args")
            if cmd is None:
                cmd = popenargs[0]
            raise subprocess.CalledProcessError(retcode, cmd, output=output)
        return output
try:
import resource
except ImportError:
pass
def logical_processor_count():
    """Returns the number of logical processors in the system.

    Tries, in order: the multiprocessing module (Windows, BSD/MacOS X
    and POSIX systems), the Java runtime (Jython), and counting cpu
    pseudo devices (Solaris).  Falls back to 1 when nothing works.

    :rtype: ``int``
    """
    # The multiprocessing module provides support for Windows,
    # BSD systems (including MacOS X) and systems which support
    # the POSIX API for querying the number of CPUs.
    try:
        return multiprocessing.cpu_count()
    except NotImplementedError:
        pass

    # For Jython, we need to query the Java runtime environment.
    try:
        from java.lang import Runtime
        runtime = Runtime.getRuntime()
        res = runtime.availableProcessors()
        if res > 0:
            return res
    except ImportError:
        pass

    # Assuming that Solaris will support POSIX API for querying
    # the number of CPUs. Just in case though, work it out by
    # looking at the devices corresponding to the available CPUs.
    try:
        pseudoDevices = os.listdir('/devices/pseudo/')
        expr = re.compile('^cpuid@[0-9]+$')

        res = 0
        for pd in pseudoDevices:
            # PEP 8: compare against None with ``is not``, not ``!=``.
            if expr.match(pd) is not None:
                res += 1

        if res > 0:
            return res
    except OSError:
        pass

    # Fallback to assuming only a single CPU.
    return 1
def _linux_physical_processor_count(filename=None):
    """Return ``(num_physical_processors, num_physical_cores)`` parsed
    from ``/proc/cpuinfo`` (or *filename*).  Either element may be
    ``None`` when it cannot be determined.
    """
    # For Linux we can use information from '/proc/cpuinfo.

    # A line in the file that starts with 'processor' marks the
    # beginning of a section.
    #
    # Multi-core processors will have a 'processor' section for each
    # core. There is usually a 'physical id' field and a 'cpu cores'
    # field as well. The 'physical id' field in each 'processor'
    # section will have the same value for all cores in a physical
    # processor. The 'cpu cores' field for each 'processor' section will
    # provide the total number of cores for that physical processor.
    # The 'cpu cores' field is duplicated, so only remember the last

    filename = filename or '/proc/cpuinfo'

    processors = 0
    physical_processors = {}

    try:
        with open(filename, 'r') as fp:
            processor_id = None
            cores = None

            for line in fp:
                try:
                    key, value = line.split(':')
                    key = key.lower().strip()
                    value = value.strip()
                except ValueError:
                    # Not a "key: value" line; ignore it.
                    continue

                if key == 'processor':
                    processors += 1

                    # If this is not the first processor section
                    # and prior sections specified a physical ID
                    # and number of cores, we want to remember
                    # the number of cores corresponding to that
                    # physical core. Note that we may see details
                    # for the same physical ID more than once and
                    # thus we only end up remembering the number of
                    # cores from the last one we see.
                    if cores and processor_id:
                        physical_processors[processor_id] = cores

                    processor_id = None
                    cores = None

                elif key == 'physical id':
                    processor_id = value

                elif key == 'cpu cores':
                    cores = int(value)

        # When we have reached the end of the file, we now need to save
        # away the number of cores for the physical ID we saw in the
        # last processor section.
        if cores and processor_id:
            physical_processors[processor_id] = cores
    except Exception:
        # Best effort only: on any parse/IO problem fall through to the
        # defaults below.
        pass

    # With no per-physical-id data, a single processor entry still lets
    # us report (1, 1); otherwise the value is unknown (None).
    num_physical_processors = len(physical_processors) or (processors
            if processors == 1 else None)
    num_physical_cores = sum(physical_processors.values()) or (processors
            if processors == 1 else None)

    return (num_physical_processors, num_physical_cores)
def _darwin_physical_processor_count():
    """Return ``(num_physical_processors, num_physical_cores)`` queried
    via sysctl on MacOS X; either element may be ``None`` on failure.
    """
    # For MacOS X we can use sysctl.

    physical_processor_cmd = ['/usr/sbin/sysctl', '-n', 'hw.packages']

    try:
        num_physical_processors = int(_execute_program(physical_processor_cmd,
            stderr=subprocess.PIPE))
    except (subprocess.CalledProcessError, ValueError):
        num_physical_processors = None

    physical_core_cmd = ['/usr/sbin/sysctl', '-n', 'hw.physicalcpu']

    try:
        num_physical_cores = int(_execute_program(physical_core_cmd,
            stderr=subprocess.PIPE))
    except (subprocess.CalledProcessError, ValueError):
        num_physical_cores = None

    return (num_physical_processors, num_physical_cores)
def physical_processor_count():
    """Returns the number of physical processors and the number of physical
    cores in the system as a tuple. One or both values may be None, if a value
    cannot be determined.
    """
    # Dispatch on platform; anything other than Linux/MacOS X is unknown.
    if sys.platform == 'darwin':
        return _darwin_physical_processor_count()
    if sys.platform.startswith('linux'):
        return _linux_physical_processor_count()
    return (None, None)
def _linux_total_physical_memory(filename=None):
    """Return the total physical memory in MiB read from
    ``/proc/meminfo`` (or *filename*); returns ``None`` implicitly when
    the value cannot be parsed.
    """
    # For Linux we can use information from /proc/meminfo. Although the
    # units is given in the file, it is always in kilobytes so we do not
    # need to accomodate any other unit types beside 'kB'.

    filename = filename or '/proc/meminfo'

    try:
        parser = re.compile(r'^(?P<key>\S*):\s*(?P<value>\d*)\s*kB')

        with open(filename, 'r') as fp:
            for line in fp.readlines():
                match = parser.match(line)
                if not match:
                    continue
                # Bugfix: the original used ``match.groups(['key', 'value'])``
                # whose argument is the *default* value for non-participating
                # groups, not a list of group names -- it only returned the
                # right pair by accident.  ``group('key', 'value')`` fetches
                # the two named groups explicitly.
                key, value = match.group('key', 'value')
                if key == 'MemTotal':
                    memory_bytes = float(value) * 1024
                    return memory_bytes / (1024*1024)
    except Exception:
        # Best effort: fall through and return None.
        pass
def _darwin_total_physical_memory():
    """Return the total physical memory in MiB on MacOS X, or ``None``
    implicitly on failure.
    """
    # For MacOS X we can use sysctl. The value queried from sysctl is
    # always bytes.

    command = ['/usr/sbin/sysctl', '-n', 'hw.memsize']

    try:
        return float(_execute_program(command,
            stderr=subprocess.PIPE)) / (1024*1024)
    except subprocess.CalledProcessError:
        pass
    except ValueError:
        pass
def total_physical_memory():
    """Returns the total physical memory available in the system. Returns
    None if the value cannot be calculated.
    """
    # Only Linux and MacOS X are supported; other platforms yield None.
    if sys.platform == 'darwin':
        return _darwin_total_physical_memory()
    if sys.platform.startswith('linux'):
        return _linux_total_physical_memory()
def _linux_physical_memory_used(filename=None):
    """Return the resident set size of this process in MiB, read from
    ``/proc/<pid>/statm`` (or *filename*); returns 0 on any failure.

    Relies on the ``resource`` module, which is imported at module level
    inside a try/except -- on platforms where it is missing the NameError
    is swallowed by the broad except below.
    """
    # For Linux we can use information from the proc filesystem. We use
    # '/proc/statm' as it easier to parse than '/proc/status' file. The
    # value queried from the file is always in bytes.
    #
    #   /proc/[number]/statm
    #          Provides information about memory usage, measured
    #          in pages. The columns are:
    #
    #              size       total program size
    #                         (same as VmSize in /proc/[number]/status)
    #              resident   resident set size
    #                         (same as VmRSS in /proc/[number]/status)
    #              share      shared pages (from shared mappings)
    #              text       text (code)
    #              lib        library (unused in Linux 2.6)
    #              data       data + stack
    #              dt         dirty pages (unused in Linux 2.6)

    filename = filename or '/proc/%d/statm' % os.getpid()

    try:
        with open(filename, 'r') as fp:
            # Second column is the resident set size in pages.
            rss_pages = float(fp.read().split()[1])
            memory_bytes = rss_pages * resource.getpagesize()
            return memory_bytes / (1024*1024)
    except Exception:
        return 0
def physical_memory_used():
    """Returns the amount of physical memory used in MBs. Returns 0 if
    the value cannot be calculated.
    """
    # A value of 0 is returned by default rather than None as this value
    # can be used in metrics. As such has traditionally always been
    # returned as an integer to avoid checks at the point is used.

    if sys.platform.startswith('linux'):
        return _linux_physical_memory_used()

    # For all other platforms try using getrusage() if we have the
    # resource module available. The units returned can differ based on
    # platform. Assume 1024 byte blocks as default. Some platforms such
    # as Solaris will report zero for 'ru_maxrss', so we skip those.

    try:
        rusage = resource.getrusage(resource.RUSAGE_SELF)
    except NameError:
        # The ``resource`` module failed to import at module level.
        pass
    else:
        if sys.platform == 'darwin':
            # On MacOS X, despite the manual page saying the
            # value is in kilobytes, it is actually in bytes.
            memory_bytes = float(rusage.ru_maxrss)
            return memory_bytes / (1024*1024)

        elif rusage.ru_maxrss > 0:
            memory_kbytes = float(rusage.ru_maxrss)
            return memory_kbytes / 1024

    return 0
| GbalsaC/bitnamiP | venv/lib/python2.7/site-packages/newrelic-2.46.0.37/newrelic/common/system_info.py | Python | agpl-3.0 | 10,108 |
# tests.integration
# Integration testing - executes a complete simulation to look for errors.
#
# Author: Benjamin Bengfort <bengfort@cs.umd.edu>
# Created: Mon Apr 04 09:02:14 2016 -0400
#
# Copyright (C) 2016 University of Maryland
# For license information, see LICENSE.txt
#
# ID: integration.py [] benjamin@bengfort.com $
"""
Integration testing - executes a complete simulation to look for errors.
Note that this style of testing is officially run in:
tests/test_simulation/test_main.py
However, this package provides tools to easily allow import and play with the
integration tests for development purposes. (No tests will be run here).
"""
##########################################################################
## Imports
##########################################################################
import os
from cloudscope.simulation.main import ConsistencySimulation
##########################################################################
## Fixtures
##########################################################################
# Paths to load the test topologies
# (fixture files live next to this module in tests/fixtures/)
FIXTURES = os.path.join(os.path.dirname(__file__), "fixtures")
RAFT     = os.path.join(FIXTURES, "raft.json")
EVENTUAL = os.path.join(FIXTURES, "eventual.json")
TAG      = os.path.join(FIXTURES, "tag.json")

# Default Options for the simulations
# (getsim() copies these and merges any caller-supplied overrides)
OPTIONS  = {
    'max_sim_time': 100000,
    'objects': 10,
}
##########################################################################
## Simulation Loader
##########################################################################
def getsim(topology='tag', **kwargs):
    """Load a ConsistencySimulation for the named topology fixture.

    ``kwargs`` override the module-level OPTIONS defaults.  An unknown
    topology name raises KeyError.
    """
    # Resolve the fixture path for the requested topology.
    paths = {'tag': TAG, 'raft': RAFT, 'eventual': EVENTUAL}
    path = paths[topology.lower()]

    # Merge caller overrides on top of the defaults.
    options = dict(OPTIONS)
    options.update(kwargs)

    # Load and return the simulation.
    with open(path, 'r') as fobj:
        return ConsistencySimulation.load(fobj, **options)
if __name__ == '__main__':
    import argparse

    # Parse the arguments from the command line
    parser = argparse.ArgumentParser()
    parser.add_argument(
        'topology', choices=['tag', 'eventual', 'raft'], default='tag', nargs='?',
        help='Specify the simulation topology to load.'
    )

    args = parser.parse_args()
    sim = getsim(args.topology)
    sim.run()

    # Bugfix: use print as a function so the script also runs on
    # Python 3, where the ``print x`` statement is a SyntaxError.
    # (With a single argument the call form behaves identically on 2.)
    print(sim.results.results.keys())
| bbengfort/cloudscope | tests/integration.py | Python | mit | 2,410 |
from jawaf.conf import settings
from jawaf.management.base import BaseCommand
from jawaf.server import Jawaf
class Command(BaseCommand):
    """Run Jawaf"""

    def add_arguments(self, parser):
        """Register optional CLI overrides for the project settings."""
        parser.add_argument('--host', help='Server host')
        parser.add_argument('--port', help='Server port')
        parser.add_argument('--debug', help='Debug Mode')
        parser.add_argument('--workers', help='Workers')

    def handle(self, **options):
        """Start the Jawaf server, preferring CLI options over settings.

        Each option falls back to the corresponding settings value when
        not supplied on the command line.
        """
        print('... Starting Jawaf ...')
        waf = Jawaf(settings.PROJECT_NAME)
        # Optionally override settings with command line options:
        host = options['host'] if options['host'] else settings.HOST
        port = options['port'] if options['port'] else settings.PORT
        # --debug is a string flag; only the literal "true" (any case)
        # enables debug mode.
        debug = (options['debug'].lower() == 'true') if options['debug'] \
            else settings.DEBUG
        workers = int(options['workers']) if options['workers'] \
            else settings.WORKERS
        # Run the server
        waf.run(host=host, port=port, debug=debug, workers=workers)
| danpozmanter/jawaf | jawaf/management/commands/run.py | Python | bsd-3-clause | 1,049 |
# Copyright 2014-2016 Presslabs SRL
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import ABCMeta
from fuse import Operations, LoggingMixIn
class View(LoggingMixIn, Operations):
    """Abstract base for FUSE views: common constructor plumbing and a
    minimal default ``getattr`` implementation."""

    __metaclass__ = ABCMeta

    def __init__(self, *args, **kwargs):
        self.args = args
        # Every keyword argument becomes an attribute on the view
        # (callers are expected to pass uid, gid and mount_time, which
        # getattr() below relies on -- TODO confirm against callers).
        for attr in kwargs:
            setattr(self, attr, kwargs[attr])

    def getattr(self, path, fh=None):
        # Minimal stat dict; subclasses presumably extend this with
        # st_mode/st_size etc. for concrete entries.
        return {
            "st_uid": self.uid,
            "st_gid": self.gid,
            "st_ctime": self.mount_time,
            "st_mtime": self.mount_time,
        }
| PressLabs/gitfs | gitfs/views/view.py | Python | apache-2.0 | 1,072 |
#!/usr/bin/env python
# (c) 2012, Jan-Piet Mens <jpmens () gmail.com>
# (c) 2012-2014, Michael DeHaan <michael@ansible.com> and others
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import print_function
import os
import glob
import sys
import yaml
import re
import optparse
import datetime
import cgi
import warnings
from collections import defaultdict
from jinja2 import Environment, FileSystemLoader
from six import iteritems
from ansible.utils import module_docs
from ansible.utils.vars import merge_hash
from ansible.utils.unicode import to_bytes
from ansible.errors import AnsibleError
#####################################################################################
# constants and paths
# if a module is added in a version of Ansible older than this, don't print the version added information
# in the module documentation because everyone is assumed to be running something newer than this already.
# if a module is added in a version of Ansible older than this, don't print the version added information
# in the module documentation because everyone is assumed to be running something newer than this already.
# (NOTE: name is a historical typo for "TOO_OLD"; kept for compatibility.)
TO_OLD_TO_BE_NOTABLE = 1.3

# Get parent directory of the directory this script lives in
MODULEDIR=os.path.abspath(os.path.join(
    os.path.dirname(os.path.realpath(__file__)), os.pardir, 'lib', 'ansible', 'modules'
))

# The name of the DOCUMENTATION template
EXAMPLE_YAML=os.path.abspath(os.path.join(
    os.path.dirname(os.path.realpath(__file__)), os.pardir, 'examples', 'DOCUMENTATION.yml'
))

# Inline markup patterns used by rst_ify()/html_ify() below:
# I(...) italic, B(...) bold, M(...) module reference, U(...) URL, C(...) code.
_ITALIC = re.compile(r"I\(([^)]+)\)")
_BOLD   = re.compile(r"B\(([^)]+)\)")
_MODULE = re.compile(r"M\(([^)]+)\)")
_URL    = re.compile(r"U\(([^)]+)\)")
_CONST  = re.compile(r"C\(([^)]+)\)")

# Suffixes appended to deprecated / non-core module names in listings.
DEPRECATED = " (D)"
NOTCORE    = " (E)"
#####################################################################################
def rst_ify(text):
    ''' convert symbols like I(this is in italics) to valid restructured text '''
    # Apply each inline-markup substitution in the same fixed order.
    substitutions = (
        (_ITALIC, r"*\1*"),
        (_BOLD, r"**\1**"),
        (_MODULE, r":ref:`\1 <\1>`"),
        (_URL, r"\1"),
        (_CONST, r"``\1``"),
    )
    t = text
    try:
        for pattern, replacement in substitutions:
            t = pattern.sub(replacement, t)
    except Exception as e:
        raise AnsibleError("Could not process (%s) : %s" % (str(text), str(e)))

    return t
#####################################################################################
def html_ify(text):
    ''' convert symbols like I(this is in italics) to valid HTML '''

    # NOTE(review): cgi.escape is deprecated (removed in Python 3.8);
    # fine for the Python 2 toolchain this script targets, but would
    # need html.escape() on modern Python.
    t = cgi.escape(text)
    t = _ITALIC.sub("<em>" + r"\1" + "</em>", t)
    t = _BOLD.sub("<b>" + r"\1" + "</b>", t)
    t = _MODULE.sub("<span class='module'>" + r"\1" + "</span>", t)
    t = _URL.sub("<a href='" + r"\1" + "'>" + r"\1" + "</a>", t)
    t = _CONST.sub("<code>" + r"\1" + "</code>", t)

    return t
#####################################################################################
def rst_fmt(text, fmt):
    """Jinja2 helper: interpolate ``text`` into the %-style ``fmt`` string."""
    return fmt % text
#####################################################################################
def rst_xline(width, char="="):
    """Return a reST section underline: ``width`` copies of ``char``."""
    return char * width
#####################################################################################
def write_data(text, options, outputname, module):
    ''' dumps module output to a file or the screen, as requested '''

    if options.output_dir is not None:
        fname = os.path.join(options.output_dir, outputname % module)
        fname = fname.replace(".py", "")
        # Bugfix: use a context manager so the file handle is closed
        # even if the write raises (the original leaked the handle on
        # error because close() was only reached on success).
        with open(fname, 'w') as f:
            f.write(text.encode('utf-8'))
    else:
        print(text)
#####################################################################################
def list_modules(module_dir, depth=0):
    ''' returns a hash of categories, each category being a hash of module names to file paths '''
    # NOTE(review): `depth` is accepted but never used -- presumably a
    # leftover from an older recursive traversal; confirm before removing.
    categories = dict()
    module_info = dict()
    aliases = defaultdict(set)
    # * windows powershell modules have documentation stubs in python docstring
    # format (they are not executed) so skip the ps1 format files
    # * One glob level for every module level that we're going to traverse
    files = glob.glob("%s/*.py" % module_dir) + glob.glob("%s/*/*.py" % module_dir) + glob.glob("%s/*/*/*.py" % module_dir) + glob.glob("%s/*/*/*/*.py" % module_dir)
    for module_path in files:
        if module_path.endswith('__init__.py'):
            continue
        category = categories
        # Path of the module relative to module_dir, directory part only.
        mod_path_only = os.path.dirname(module_path[len(module_dir) + 1:])
        # Start at the second directory because we don't want the "vendor"
        # directories (core, extras)
        for new_cat in mod_path_only.split('/')[1:]:
            if new_cat not in category:
                category[new_cat] = dict()
            category = category[new_cat]
        module = os.path.splitext(os.path.basename(module_path))[0]
        if module in module_docs.BLACKLIST_MODULES:
            # Do not list blacklisted modules
            continue
        if module.startswith("_") and os.path.islink(module_path):
            # A leading-underscore symlink is an alias: record it against the
            # real module it points at and skip listing it as its own module.
            source = os.path.splitext(os.path.basename(os.path.realpath(module_path)))[0]
            module = module.replace("_","",1)
            aliases[source].add(module)
            continue
        category[module] = module_path
        module_info[module] = module_path
    # keep module tests out of becoming module docs
    if 'test' in categories:
        del categories['test']
    return module_info, categories, aliases
#####################################################################################
def generate_parser():
    """Build and return the optparse parser for the module-formatter CLI."""
    parser = optparse.OptionParser(
        version='%prog 1.0',
        usage='usage: %prog [options] arg1 arg2',
        description='Generate module documentation from metadata',
    )
    add = parser.add_option
    add("-A", "--ansible-version", action="store", dest="ansible_version",
        default="unknown", help="Ansible version number")
    add("-M", "--module-dir", action="store", dest="module_dir",
        default=MODULEDIR, help="Ansible library path")
    add("-T", "--template-dir", action="store", dest="template_dir",
        default="hacking/templates", help="directory containing Jinja2 templates")
    add("-t", "--type", action='store', dest='type', choices=['rst'],
        default='rst', help="Document type")
    add("-v", "--verbose", action='store_true', default=False, help="Verbose")
    add("-o", "--output-dir", action="store", dest="output_dir",
        default=None, help="Output directory for module files")
    add("-I", "--includes-file", action="store", dest="includes_file",
        default=None, help="Create a file containing list of processed modules")
    add('-V', action='version', help='Show version number and exit')
    return parser
#####################################################################################
def jinja2_environment(template_dir, typ):
    # Build the Jinja2 environment used to render module doc pages.
    # The "@{ ... }@" variable delimiters avoid clashing with literal
    # "{{ }}" text that appears inside Ansible documentation examples.
    env = Environment(loader=FileSystemLoader(template_dir),
        variable_start_string="@{",
        variable_end_string="}@",
        trim_blocks=True,
    )
    env.globals['xline'] = rst_xline
    if typ == 'rst':
        # Register the reST conversion helpers defined above as filters.
        env.filters['convert_symbols_to_format'] = rst_ify
        env.filters['html_ify'] = html_ify
        env.filters['fmt'] = rst_fmt
        env.filters['xline'] = rst_xline
        template = env.get_template('rst.j2')
        outputname = "%s_module.rst"  # per-module output filename pattern
    else:
        raise Exception("unknown module format type: %s" % typ)
    return env, template, outputname
#####################################################################################
def too_old(added):
    """Return True when *added* (a "major.minor[.patch]" version string) is
    older than TO_OLD_TO_BE_NOTABLE, i.e. too old to call out in the docs.

    Falsy input and unparseable version strings return False (with a
    warning for the unparseable case).
    """
    if not added:
        return False
    try:
        added_tokens = str(added).split(".")
        # Compare on major.minor only, so "1.5.4" compares as 1.5.
        readded = added_tokens[0] + "." + added_tokens[1]
        added_float = float(readded)
    except (ValueError, IndexError) as e:
        # IndexError covers versions with no dot (e.g. "2"); the original
        # code let that exception escape and crash the doc build.
        warnings.warn("Could not parse %s: %s" % (added, str(e)))
        return False
    return (added_float < TO_OLD_TO_BE_NOTABLE)
def process_module(module, options, env, template, outputname, module_map, aliases):
    """Render the doc page for one module.

    Returns the module's short description on success, "SKIPPED" for
    category stub entries, or None for non-.py files and alias symlinks.
    Exits the process on missing/invalid documentation.
    """
    fname = module_map[module]
    if isinstance(fname, dict):
        # A dict here means `module` is actually a sub-category, not a file.
        return "SKIPPED"
    basename = os.path.basename(fname)
    deprecated = False
    # ignore files with extensions
    if not basename.endswith(".py"):
        return
    elif module.startswith("_"):
        if os.path.islink(fname):
            return # ignore, its an alias
        # A leading underscore on a real file marks a deprecated module.
        deprecated = True
        module = module.replace("_","",1)
    print("rendering: %s" % module)
    # use ansible core library to parse out doc metadata YAML and plaintext examples
    doc, examples, returndocs = module_docs.get_docstring(fname, verbose=options.verbose)
    # crash if module is missing documentation and not explicitly hidden from docs index
    if doc is None:
        sys.stderr.write("*** ERROR: MODULE MISSING DOCUMENTATION: %s, %s ***\n" % (fname, module))
        sys.exit(1)
    if deprecated and 'deprecated' not in doc:
        sys.stderr.write("*** ERROR: DEPRECATED MODULE MISSING 'deprecated' DOCUMENTATION: %s, %s ***\n" % (fname, module))
        sys.exit(1)
    # Core vs. extras is inferred from the vendor directory in the path.
    if "/core/" in fname:
        doc['core'] = True
    else:
        doc['core'] = False
    if module in aliases:
        doc['aliases'] = aliases[module]
    all_keys = []
    if not 'version_added' in doc:
        sys.stderr.write("*** ERROR: missing version_added in: %s ***\n" % module)
        sys.exit(1)
    added = 0
    if doc['version_added'] == 'historical':
        del doc['version_added']
    else:
        added = doc['version_added']
    # don't show version added information if it's too old to be called out
    if too_old(added):
        del doc['version_added']
    if 'options' in doc and doc['options']:
        for (k,v) in iteritems(doc['options']):
            # don't show version added information if it's too old to be called out
            if 'version_added' in doc['options'][k] and too_old(doc['options'][k]['version_added']):
                del doc['options'][k]['version_added']
            if not 'description' in doc['options'][k]:
                raise AnsibleError("Missing required description for option %s in %s " % (k, module))
            if not 'required' in doc['options'][k]:
                raise AnsibleError("Missing required 'required' for option %s in %s " % (k, module))
            # Normalize description to a list so the template can iterate it.
            if not isinstance(doc['options'][k]['description'],list):
                doc['options'][k]['description'] = [doc['options'][k]['description']]
            all_keys.append(k)
    all_keys = sorted(all_keys)
    doc['option_keys'] = all_keys
    # Extra context the Jinja2 template expects.
    doc['filename'] = fname
    doc['docuri'] = doc['module'].replace('_', '-')
    doc['now_date'] = datetime.date.today().strftime('%Y-%m-%d')
    doc['ansible_version'] = options.ansible_version
    doc['plainexamples'] = examples #plain text
    if returndocs:
        try:
            doc['returndocs'] = yaml.safe_load(returndocs)
        except:
            print("could not load yaml: %s" % returndocs)
            raise
    else:
        doc['returndocs'] = None
    # here is where we build the table of contents...
    try:
        text = template.render(doc)
    except Exception as e:
        raise AnsibleError("Failed to render doc for %s: %s" % (fname, str(e)))
    write_data(text, options, outputname, module)
    return doc['short_description']
#####################################################################################
def print_modules(module, category_file, deprecated, core, options, env, template, outputname, module_map, aliases):
    # Write one toctree entry for `module` into the open category file,
    # tagging the display name with the DEPRECATED/NOTCORE suffix as needed.
    modstring = module
    if modstring.startswith('_'):
        # Strip the deprecation-marker underscore for display purposes.
        modstring = module[1:]
    modname = modstring
    if module in deprecated:
        modstring = modstring + DEPRECATED
    elif module not in core:
        modstring = modstring + NOTCORE
    # module_map[module] is a (path, short_description) tuple at this point.
    category_file.write("  %s - %s <%s_module>\n" % (to_bytes(modstring), to_bytes(rst_ify(module_map[module][1])), to_bytes(modname)))
def process_category(category, categories, options, env, template, outputname):
    """Write the "list_of_<category>_modules.rst" index page for one category,
    splitting entries into top-level modules and sub-category sections."""
    ### FIXME:
    # We no longer conceptually deal with a mapping of category names to
    # modules to file paths. Instead we want several different records:
    # (1) Mapping of module names to file paths (what's presently used
    # as categories['all']
    # (2) Mapping of category names to lists of module names (what you'd
    # presently get from categories[category_name][subcategory_name].keys()
    # (3) aliases (what's presently in categories['_aliases']
    #
    # list_modules() now returns those. Need to refactor this function and
    # main to work with them.
    module_map = categories[category]
    module_info = categories['all']
    aliases = {}
    if '_aliases' in categories:
        aliases = categories['_aliases']
    category_file_path = os.path.join(options.output_dir, "list_of_%s_modules.rst" % category)
    category_file = open(category_file_path, "w")
    print("*** recording category %s in %s ***" % (category, category_file_path))
    # start a new category file
    category = category.replace("_"," ")
    category = category.title()
    modules = []
    deprecated = []
    core = []
    # Partition the category's entries: dict values are sub-categories whose
    # members are classified; plain entries are classified directly.
    for module in module_map.keys():
        if isinstance(module_map[module], dict):
            for mod in (m for m in module_map[module].keys() if m in module_info):
                if mod.startswith("_"):
                    deprecated.append(mod)
                elif '/core/' in module_info[mod][0]:
                    core.append(mod)
        else:
            if module not in module_info:
                continue
            if module.startswith("_"):
                deprecated.append(module)
            elif '/core/' in module_info[module][0]:
                core.append(module)
        modules.append(module)
    # Sort ignoring the deprecation-marker underscore prefix.
    modules.sort(key=lambda k: k[1:] if k.startswith('_') else k)
    category_header = "%s Modules" % (category.title())
    underscores = "`" * len(category_header)
    category_file.write("""\
%s
%s
.. toctree:: :maxdepth: 1
""" % (category_header, underscores))
    sections = []
    for module in modules:
        if module in module_map and isinstance(module_map[module], dict):
            sections.append(module)
            continue
        else:
            print_modules(module, category_file, deprecated, core, options, env, template, outputname, module_info, aliases)
    sections.sort()
    for section in sections:
        category_file.write("\n%s\n%s\n\n" % (section.replace("_"," ").title(),'-' * len(section)))
        category_file.write(".. toctree:: :maxdepth: 1\n\n")
        # NOTE(review): dict.keys() followed by in-place .sort() only works on
        # Python 2 (where keys() returns a list); under Python 3 this would
        # need sorted(...) -- confirm target interpreter before changing.
        section_modules = module_map[section].keys()
        section_modules.sort(key=lambda k: k[1:] if k.startswith('_') else k)
        #for module in module_map[section]:
        for module in (m for m in section_modules if m in module_info):
            print_modules(module, category_file, deprecated, core, options, env, template, outputname, module_info, aliases)
    category_file.write("""\n\n
.. note::
    - %s: This marks a module as deprecated, which means a module is kept for backwards compatibility but usage is discouraged. The module documentation details page may explain more about this rationale.
    - %s: This marks a module as 'extras', which means it ships with ansible but may be a newer module and possibly (but not necessarily) less actively maintained than 'core' modules.
    - Tickets filed on modules are filed to different repos than those on the main open source project. Core module tickets should be filed at `ansible/ansible-modules-core on GitHub <http://github.com/ansible/ansible-modules-core>`_, extras tickets to `ansible/ansible-modules-extras on GitHub <http://github.com/ansible/ansible-modules-extras>`_
""" % (DEPRECATED, NOTCORE))
    category_file.close()
    # TODO: end a new category file
#####################################################################################
def validate_options(options):
    """Sanity-check parsed CLI options, exiting with status 1 on bad input."""
    module_dir = options.module_dir
    if not module_dir:
        print("--module-dir is required", file=sys.stderr)
        sys.exit(1)
    if not os.path.exists(module_dir):
        print("--module-dir does not exist: %s" % module_dir, file=sys.stderr)
        sys.exit(1)
    if not options.template_dir:
        print("--template-dir must be specified")
        sys.exit(1)
#####################################################################################
def main():
    """Entry point: parse options, render every module doc page, then write
    the per-category index pages and the master category list."""
    p = generate_parser()
    (options, args) = p.parse_args()
    validate_options(options)
    env, template, outputname = jinja2_environment(options.template_dir, options.type)
    mod_info, categories, aliases = list_modules(options.module_dir)
    # Stash the flat module map and aliases under reserved keys so
    # process_category() can reach them.
    categories['all'] = mod_info
    categories['_aliases'] = aliases
    category_names = [c for c in categories.keys() if not c.startswith('_')]
    category_names.sort()
    # Write master category list
    category_list_path = os.path.join(options.output_dir, "modules_by_category.rst")
    with open(category_list_path, "w") as category_list_file:
        category_list_file.write("Module Index\n")
        category_list_file.write("============\n")
        category_list_file.write("\n\n")
        category_list_file.write(".. toctree::\n")
        category_list_file.write("  :maxdepth: 1\n\n")
        for category in category_names:
            category_list_file.write("  list_of_%s_modules\n" % category)
    #
    # Import all the docs into memory
    #
    module_map = mod_info.copy()
    # NOTE(review): skipped_modules is never read -- presumably leftover
    # bookkeeping; confirm before removing.
    skipped_modules = set()
    for modname in module_map:
        result = process_module(modname, options, env, template, outputname, module_map, aliases)
        if result == 'SKIPPED':
            del categories['all'][modname]
        else:
            # Upgrade the entry from a bare path to (path, short_description).
            categories['all'][modname] = (categories['all'][modname], result)
    #
    # Render all the docs to rst via category pages
    #
    for category in category_names:
        process_category(category, categories, options, env, template, outputname)
if __name__ == '__main__':
    main()
| donckers/ansible | hacking/module_formatter.py | Python | gpl-3.0 | 18,660 |
import json
from decimal import Decimal
from django import test
from factories import (
workflow_models as w_factories,
indicators_models as i_factories
)
from tola_management.models import ProgramAuditLog
from indicators.models import Indicator, Result, DisaggregatedValue
class TestResultAuditLog(test.TestCase):
    """Tests that creating/updating indicator Results writes ProgramAuditLog
    entries, that logged numbers keep sane formatting (no exponents, correct
    decimal places), and that disaggregation diffs are recorded correctly."""
    @classmethod
    def setUpClass(cls):
        # Shared fixture: a program in one country with one disaggregation
        # type (5 labels), a high-access user, and a targeted indicator.
        super(TestResultAuditLog, cls).setUpClass()
        cls.country = w_factories.CountryFactory(country="Test Country", code="TC")
        cls.program = w_factories.RFProgramFactory(name="Test Program")
        cls.program.country.add(cls.country)
        cls.label_count = 5
        cls.disagg_type = i_factories.DisaggregationTypeFactory(
            disaggregation_type="Test Disagg Type",
            labels=[f"DisaggLabel{i}" for i in range(cls.label_count)],
            country=cls.country
        )
        cls.disagg_labels = cls.disagg_type.disaggregationlabel_set.order_by("id")
        cls.tola_user = w_factories.TolaUserFactory(country=cls.country)
        w_factories.grant_country_access(cls.tola_user, cls.country, 'basic_admin')
        w_factories.grant_program_access(cls.tola_user, cls.program, cls.country, role='high')
        cls.indicator = i_factories.RFIndicatorFactory(targets=20, program=cls.program)
    def setUp(self):
        self.client.force_login(user=self.tola_user.user)
    def test_audit_save(self):
        # Creating a result and then updating it should each add one log entry.
        target = self.indicator.periodictargets.first()
        result_data = {
            'achieved': 5,
            'target': target.id,
            'date_collected': self.program.reporting_period_start,
            'indicator': self.indicator.id
        }
        # Audit log entry should be triggered by result creation
        response = self.client.post(
            f'/indicators/result_add/{self.indicator.id}/', result_data, **{'HTTP_X_REQUESTED_WITH': 'XMLHttpRequest'})
        result = self.indicator.result_set.first()
        audits = ProgramAuditLog.objects.all()
        self.assertEqual(response.status_code, 200)
        self.assertEqual(result.achieved, 5)
        self.assertEqual(audits.count(), 1)
        # Audit log entry should be triggered by result update
        result_data.update({'result': result.id, 'achieved': 6})
        response_update = self.client.post(
            f'/indicators/result_update/{result.id}/', result_data, **{'HTTP_X_REQUESTED_WITH': 'XMLHttpRequest'})
        result.refresh_from_db()
        audits = ProgramAuditLog.objects.all()
        self.assertEqual(response_update.status_code, 200)
        self.assertEqual(result.achieved, 6)
        self.assertEqual(audits.count(), 2)
    def test_audit_number_format(self):
        """
        Now test that the audit log values are stored property (i.e. with the right number
        of decimal places, not as exponents, etc...
        """
        target = self.indicator.periodictargets.first()
        result_data = {
            'achieved': 7,
            'target': target.id,
            'date_collected': self.program.reporting_period_start,
            'indicator': self.indicator.id
        }
        response = self.client.post(
            f'/indicators/result_add/{self.indicator.id}/',
            result_data,
            **{'HTTP_X_REQUESTED_WITH': 'XMLHttpRequest'})
        result = self.indicator.result_set.first()
        self.assertEqual(response.status_code, 200)
        result_data.update({'result': result.id, 'achieved': 8.01})
        response = self.client.post(
            f'/indicators/result_update/{result.id}/',
            result_data,
            **{'HTTP_X_REQUESTED_WITH': 'XMLHttpRequest'})
        self.assertEqual(response.status_code, 200)
        self.assertEqual(ProgramAuditLog.objects.count(), 2)
        audit = ProgramAuditLog.objects.order_by("id").last()
        previous_entry = json.loads(audit.previous_entry)
        new_entry = json.loads(audit.new_entry)
        self.assertEqual(
            Decimal(previous_entry['value']),
            Decimal("7"),
            "Trailing zeros in prev values should be ignored")
        self.assertEqual(
            Decimal(new_entry['value']),
            Decimal("8.01"),
            "Decimals to two places should be respected in new values")
        self.assertEqual(ProgramAuditLog.objects.count(), 2)
        # Large round value: must not be logged in exponent notation.
        result_data.update({'result': result.id, 'achieved': 50000.00})
        response = self.client.post(
            f'/indicators/result_update/{result.id}/',
            result_data,
            **{'HTTP_X_REQUESTED_WITH': 'XMLHttpRequest'})
        self.assertEqual(response.status_code, 200)
        audit = ProgramAuditLog.objects.order_by("id").last()
        previous_entry = json.loads(audit.previous_entry)
        new_entry = json.loads(audit.new_entry)
        self.assertEqual(
            Decimal(previous_entry['value']),
            Decimal("8.01"),
            "Decimals to two places should be respected in prev values")
        self.assertEqual(
            Decimal(new_entry['value']),
            Decimal("50000.0"),
            "Values should not be saved as exponents in new values")
        self.assertEqual(ProgramAuditLog.objects.count(), 3)
        # Sub-unit value: must round-trip without distortion.
        result_data.update({'result': result.id, 'achieved': 0.05})
        response = self.client.post(
            f'/indicators/result_update/{result.id}/',
            result_data,
            **{'HTTP_X_REQUESTED_WITH': 'XMLHttpRequest'})
        self.assertEqual(response.status_code, 200)
        audit = ProgramAuditLog.objects.order_by("id").last()
        previous_entry = json.loads(audit.previous_entry)
        new_entry = json.loads(audit.new_entry)
        self.assertEqual(
            Decimal(previous_entry['value']),
            Decimal("50000"),
            "Values should not be saved as exponents in prev values")
        self.assertEqual(
            Decimal(new_entry['value']),
            Decimal(".05"),
            "Values less than one should be saved properly in new values")
        self.assertEqual(ProgramAuditLog.objects.count(), 4)
        result_data.update({'result': result.id, 'achieved': 60000.00})
        response = self.client.post(
            f'/indicators/result_update/{result.id}/',
            result_data,
            **{'HTTP_X_REQUESTED_WITH': 'XMLHttpRequest'})
        self.assertEqual(response.status_code, 200)
        audit = ProgramAuditLog.objects.order_by("id").last()
        previous_entry = json.loads(audit.previous_entry)
        self.assertEqual(
            Decimal(previous_entry['value']),
            Decimal(".05"),
            "Values less than one should be saved properly in prev values")
        self.assertEqual(ProgramAuditLog.objects.count(), 5)
        # Test if disaggregation number formats are correct.
        disagg_values_initial = [50000.00, 9981, 5.55, 14.5, 1.50]
        disagg_values_display = set(Decimal(k) for k in ["50000", "9981", "5.55", "14.5", "1.5"])
        #
        disagg_value_objects = []
        for index, disagg_value in enumerate(disagg_values_initial):
            disagg_value_object = DisaggregatedValue.objects.create(
                result=result,
                category=self.disagg_labels[index],
                value=disagg_value,
            )
            disagg_value_objects.append(disagg_value_object)
        logged_fields = result.logged_fields
        raw_logged_disagg_values = set([disagg['value'] for disagg in logged_fields['disaggregation_values'].values()])
        self.assertSetEqual(disagg_values_display, raw_logged_disagg_values)
    def test_disaggregation_display_data(self):
        # Creation should log every tracked field; a later update that only
        # touches disaggregations should diff just value + disaggregations.
        indicator = i_factories.RFIndicatorFactory(
            targets=20, program=self.program, target_frequency=Indicator.ANNUAL)
        indicator.disaggregation.add(self.disagg_type)
        target = indicator.periodictargets.first()
        result_data = {
            'achieved': 18,
            'periodic_target': target,
            'date_collected': target.start_date,
            'indicator': indicator
        }
        result = Result.objects.create(**result_data)
        ProgramAuditLog.log_result_created(self.tola_user.user, indicator, result, "a rationale")
        creation_log = ProgramAuditLog.objects.all().order_by('-pk')[0]
        diff_list = creation_log.diff_list
        diff_keys = set([diff['name'] for diff in diff_list])
        self.assertSetEqual(
            diff_keys, set(result.logged_fields.keys()),
            "Result creation should log all tracked fields, whether a value has been entered for the field or not"
        )
        result_create_data = result.logged_fields
        # Attach disaggregated values for a subset of labels only.
        indexes = [0, 2, 4]
        for label in [list(self.disagg_labels)[i] for i in indexes]:
            DisaggregatedValue.objects.create(
                category=label,
                value=(result.achieved/len(indexes)),
                result=result
            )
        ProgramAuditLog.log_result_updated(
            self.tola_user.user, indicator, result_create_data, result.logged_fields, 'abcdefg'
        )
        update_log = ProgramAuditLog.objects.all().order_by('-pk')[0]
        diff_list = update_log.diff_list
        diff_keys = [diff["name"] for diff in diff_list]
        self.assertSetEqual(
            set(diff_keys), {'disaggregation_values', 'value'},
            "Result value and disaggregation values should both be present when only disaggregation values have changed"
        )
        disagg_values = ""
        for diff in diff_list:
            if diff['name'] == "disaggregation_values":
                disagg_values = diff
                break
        self.assertEqual(
            len(disagg_values['prev']), len(indexes),
            "Only the disaggregation values that have changed should appear in the diff list"
        )
    def test_logged_field_order_counts(self):
        # The fields being tracked in the audit log (which is determined by a property of the model) should always
        # be present in the logged field order list (which is also a property of the model). The logged field
        # order list may contain more fields than are currently tracked because the models may change over time.
        models_to_test = [
            i_factories.IndicatorFactory(),
            i_factories.ResultFactory(),
            i_factories.LevelFactory(),
        ]
        for model in models_to_test:
            logged_field_set = set(model.logged_fields.keys())
            logged_fields_order_set = set(model.logged_field_order())
            self.assertEqual(len(logged_field_set - logged_fields_order_set), 0)
| mercycorps/TolaActivity | tola_management/tests/test_program_audit_log.py | Python | apache-2.0 | 10,601 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from comic_dl import globalFunctions
import os
class ReadComicsIO():
    """Downloader for readcomics.io: dispatches a series URL to full_series()
    and a single-chapter URL to single_chapter()."""
    def __init__(self, manga_url, download_directory, chapter_range, **kwargs):
        current_directory = kwargs.get("current_directory")
        conversion = kwargs.get("conversion")
        keep_files = kwargs.get("keep_files")
        self.logging = kwargs.get("log_flag")
        self.sorting = kwargs.get("sorting_order")
        self.comic_name = self.name_cleaner(manga_url)
        self.print_index = kwargs.get("print_index")
        if "/comic/" in manga_url:
            # https://readcomics.io/the-walking-dead/chapter-178/full
            self.full_series(comic_url=manga_url, comic_name=self.comic_name,
                sorting=self.sorting, download_directory=download_directory, chapter_range=chapter_range,
                conversion=conversion, keep_files=keep_files)
        else:
            # Chapter pages need the "/full" suffix to show all images at once.
            if "/full" not in manga_url:
                manga_url += "/full"
            self.single_chapter(manga_url, self.comic_name, download_directory, conversion=conversion,
                keep_files=keep_files)
    def name_cleaner(self, url):
        # The comic name is the first path segment of the URL,
        # e.g. ".../the-walking-dead/..." -> "The Walking Dead".
        manga_name = str(str(url).split("/")[3].strip().replace("_", " ").replace("-", " ").title())
        return manga_name
    def single_chapter(self, comic_url, comic_name, download_directory, conversion, keep_files):
        """Download every page image of one chapter, then optionally convert
        (cbz/pdf) the directory. Returns 0."""
        # Chapter label is the second path segment, e.g. "chapter-178" -> "Chapter 178".
        chapter_number = str(str(comic_url).split("/")[4].strip().replace("_", " ").replace("-", " ").title())
        source, cookies = globalFunctions.GlobalFunctions().page_downloader(manga_url=comic_url)
        img_list = []
        temp_list = source.find_all("div", {"class": "chapter-container"})
        for elements in temp_list:
            x = elements.findAll('img')
            for a in x:
                img_list.append(str(a['src']).strip())
        file_directory = globalFunctions.GlobalFunctions().create_file_directory(chapter_number, comic_name)
        directory_path = os.path.realpath(str(download_directory) + "/" + str(file_directory))
        if not os.path.exists(directory_path):
            os.makedirs(directory_path)
        links = []
        file_names = []
        # Zero-padded sequential file names keep pages sorted on disk.
        for current_chapter, image_link in enumerate(img_list):
            current_chapter += 1
            file_name = str(globalFunctions.GlobalFunctions().prepend_zeroes(current_chapter, len(img_list))) + ".jpg"
            file_names.append(file_name)
            links.append(image_link)
        globalFunctions.GlobalFunctions().multithread_download(chapter_number, comic_name, comic_url, directory_path,
            file_names, links, self.logging)
        globalFunctions.GlobalFunctions().conversion(directory_path, conversion, keep_files, comic_name,
            chapter_number)
        return 0
    def full_series(self, comic_url, comic_name, sorting, download_directory, chapter_range, conversion, keep_files):
        """Scrape the chapter list from a series page and download each chapter
        (optionally restricted by chapter_range and ordered by `sorting`)."""
        source, cookies = globalFunctions.GlobalFunctions().page_downloader(manga_url=comic_url)
        all_links = []
        chap_holder_div = source.find_all('ul', {'class': 'basic-list'})
        # print(comic_name)
        for single_node in chap_holder_div:
            x = single_node.findAll('a')
            for a in x:
                all_links.append(str(a['href']).strip())
        if chapter_range != "All":
            # -1 to shift the episode number accordingly to the INDEX of it. List starts from 0 xD!
            starting = int(str(chapter_range).split("-")[0]) - 1
            if str(chapter_range).split("-")[1].isdigit():
                ending = int(str(chapter_range).split("-")[1])
            else:
                ending = len(all_links)
            indexes = [x for x in range(starting, ending)]
            # Reversed so the selected slice downloads newest-first by default.
            all_links = [all_links[x] for x in indexes][::-1]
        else:
            all_links = all_links
        if not all_links:
            print("Couldn't Find the chapter list")
            return 1
        # all_links.pop(0)  # Because this website lists the next chapter, which is NOT available.
        if self.print_index:
            idx = 0
            for chap_link in all_links:
                idx = idx + 1
                print(str(idx) + ": " + str(chap_link))
            return
        if str(sorting).lower() in ['new', 'desc', 'descending', 'latest']:
            for chap_link in all_links:
                try:
                    self.single_chapter(comic_url=str(chap_link) + "/full", comic_name=comic_name,
                        download_directory=download_directory,
                        conversion=conversion, keep_files=keep_files)
                    # if chapter range contains "__EnD__" write new value to config.json
                    # @Chr1st-oo - modified condition due to some changes on automatic download and config.
                    if chapter_range != "All" and (chapter_range.split("-")[1] == "__EnD__" or len(chapter_range.split("-")) == 3):
                        globalFunctions.GlobalFunctions().addOne(comic_url)
                except Exception as e:
                    break  # break to continue processing other mangas
        elif str(sorting).lower() in ['old', 'asc', 'ascending', 'oldest', 'a']:
            for chap_link in all_links[::-1]:
                try:
                    self.single_chapter(comic_url=str(chap_link) + "/full", comic_name=comic_name,
                        download_directory=download_directory,
                        conversion=conversion, keep_files=keep_files)
                    # if chapter range contains "__EnD__" write new value to config.json
                    # @Chr1st-oo - modified condition due to some changes on automatic download and config.
                    if chapter_range != "All" and (chapter_range.split("-")[1] == "__EnD__" or len(chapter_range.split("-")) == 3):
                        globalFunctions.GlobalFunctions().addOne(comic_url)
                except Exception as e:
                    break  # break to continue processing other mangas
        return 0
| Xonshiz/comic-dl | comic_dl/sites/readComicsIO.py | Python | mit | 6,267 |
#!/usr/bin/python
#-*-coding: utf-8-*-
#
# resources.py
#
# Author: Miguel Angel Martinez <miguelang.martinezl@gmail.com>
#
import os
# Base directory against which relative resource names are resolved.
resourceBasePath = ''


def setBasePath(path):
    """Set the directory that relative resource names resolve against."""
    global resourceBasePath
    resourceBasePath = path


def getPath(name):
    """Return the normalized path of resource *name* under the base path."""
    joined = os.path.join(resourceBasePath, name)
    return os.path.normpath(joined)
| MartinezLopez/icue | src/util/resources.py | Python | gpl-2.0 | 335 |
from django.urls import reverse
from django.views.generic import TemplateView, CreateView, FormView
class CompletedPage(TemplateView):
    # Static "thank you" page shown after a successful contact submission.
    template_name = "contact_form/contact_completed.html"
class ContactFormMixin(object):
    """
    Form view that sends email when form is valid. You'll need
    to define your own form_class and template_name.
    """
    def form_valid(self, form):
        # Delegate delivery to the form itself (the request gives it access
        # to site/request metadata), then continue the normal redirect flow.
        form.send_email(self.request)
        return super(ContactFormMixin, self).form_valid(form)
    def get_success_url(self):
        # Namespaced URL: requires the app's urls to be included as "contact_form".
        return reverse("contact_form:completed")
class ContactFormView(ContactFormMixin, FormView):
    # Plain-form variant: define form_class and template_name in a subclass.
    pass
class ContactModelFormView(ContactFormMixin, CreateView):
    # ModelForm variant: also saves the submission before sending the email.
    pass
| madisona/django-contact-form | contact_form/views.py | Python | bsd-3-clause | 705 |
from . import *
@route('/')
def do_env(request):
    # Stream the WSGI environ as plain text, one "KEY: value" line per
    # entry, sorted by key. The generator keeps the response lazy.
    def _env():
        for x in sorted(request.environ.items()):
            yield '%s: %r\n' % x
    return Response(_env(), mimetype='text/plain')
| mikeboers/Nitrogen | example/controllers/env.py | Python | bsd-3-clause | 200 |
import sublime
import sublime_plugin
class EraseViewCommand(sublime_plugin.TextCommand):
    # Erase the first `size` characters of the view (size=0 erases nothing);
    # presumably used to clear output panels before re-filling them -- confirm callers.
    def run(self, edit, size=0):
        self.view.erase(edit, sublime.Region(0, size))
| klaascuvelier/sublime-phpunit | commands/erase_view_command.py | Python | bsd-3-clause | 178 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    # Renames Distribution's validation_* fields to their errors/http_* names
    # and adds the nullable http_content_length column.
    dependencies = [
        ('inventory', '0005_remove_distribution_validation_extension'),
    ]
    operations = [
        migrations.RenameField(
            model_name='distribution',
            old_name='validation_errors',
            new_name='errors',
        ),
        migrations.RenameField(
            model_name='distribution',
            old_name='validation_encoding',
            new_name='http_charset',
        ),
        migrations.RenameField(
            model_name='distribution',
            old_name='validation_content_type',
            new_name='http_content_type',
        ),
        migrations.RenameField(
            model_name='distribution',
            old_name='validation_headers',
            new_name='http_headers',
        ),
        migrations.AddField(
            model_name='distribution',
            name='http_content_length',
            # Nullable: existing rows have no recorded content length.
            field=models.BigIntegerField(null=True),
            preserve_default=True,
        ),
    ]
| opennorth/inventory | inventory/migrations/0006_auto_20150217_2002.py | Python | mit | 1,132 |
from __future__ import unicode_literals
import numpy as np
from numpy.testing import assert_array_equal
from sklearn.feature_extraction import FeatureHasher
from sklearn.utils.testing import (assert_raises, assert_true, assert_equal,
ignore_warnings, fails_if_pypy)
# Module-wide marker: FeatureHasher's compiled backend is unsupported on PyPy.
pytestmark = fails_if_pypy
def test_feature_hasher_dicts():
    # Dict input and the equivalent (key, value) pair stream must hash to
    # identical matrices.
    h = FeatureHasher(n_features=16)
    assert_equal("dict", h.input_type)
    raw_X = [{"foo": "bar", "dada": 42, "tzara": 37},
             {"foo": "baz", "gaga": u"string1"}]
    X1 = FeatureHasher(n_features=16).transform(raw_X)
    gen = (iter(d.items()) for d in raw_X)
    X2 = FeatureHasher(n_features=16, input_type="pair").transform(gen)
    assert_array_equal(X1.toarray(), X2.toarray())
@ignore_warnings(category=DeprecationWarning)  # non_negative is deprecated
def test_feature_hasher_strings():
    # String tokens (bytes and unicode mixed) hash to per-row counts; checks
    # shape, positivity under non_negative, row sums and nnz across widths.
    # mix byte and Unicode strings; note that "foo" is a duplicate in row 0
    raw_X = [["foo", "bar", "baz", "foo".encode("ascii")],
             ["bar".encode("ascii"), "baz", "quux"]]
    for lg_n_features in (7, 9, 11, 16, 22):
        n_features = 2 ** lg_n_features
        it = (x for x in raw_X)  # iterable
        h = FeatureHasher(n_features, non_negative=True, input_type="string")
        X = h.transform(it)
        assert_equal(X.shape[0], len(raw_X))
        assert_equal(X.shape[1], n_features)
        assert_true(np.all(X.data > 0))
        assert_equal(X[0].sum(), 4)
        assert_equal(X[1].sum(), 3)
        assert_equal(X.nnz, 6)
def test_feature_hasher_pairs():
    # Pair input: numeric values become the stored magnitudes (sign may flip
    # from hashing, hence the abs() before comparison).
    raw_X = (iter(d.items()) for d in [{"foo": 1, "bar": 2},
                                       {"baz": 3, "quux": 4, "foo": -1}])
    h = FeatureHasher(n_features=16, input_type="pair")
    x1, x2 = h.transform(raw_X).toarray()
    x1_nz = sorted(np.abs(x1[x1 != 0]))
    x2_nz = sorted(np.abs(x2[x2 != 0]))
    assert_equal([1, 2], x1_nz)
    assert_equal([1, 3, 4], x2_nz)
def test_feature_hasher_pairs_with_string_values():
    # String values in pairs count as magnitude 1; identical (key, value)
    # pairs must hash identically across rows.
    raw_X = (iter(d.items()) for d in [{"foo": 1, "bar": "a"},
                                       {"baz": u"abc", "quux": 4, "foo": -1}])
    h = FeatureHasher(n_features=16, input_type="pair")
    x1, x2 = h.transform(raw_X).toarray()
    x1_nz = sorted(np.abs(x1[x1 != 0]))
    x2_nz = sorted(np.abs(x2[x2 != 0]))
    assert_equal([1, 1], x1_nz)
    assert_equal([1, 1, 4], x2_nz)
    raw_X = (iter(d.items()) for d in [{"bax": "abc"},
                                       {"bax": "abc"}])
    x1, x2 = h.transform(raw_X).toarray()
    x1_nz = np.abs(x1[x1 != 0])
    x2_nz = np.abs(x2[x2 != 0])
    assert_equal([1], x1_nz)
    assert_equal([1], x2_nz)
    assert_array_equal(x1, x2)
def test_hash_empty_input():
    # Empty samples (list/tuple/exhausted iterator) produce all-zero rows.
    n_features = 16
    raw_X = [[], (), iter(range(0))]
    h = FeatureHasher(n_features=n_features, input_type="string")
    X = h.transform(raw_X)
    assert_array_equal(X.A, np.zeros((len(raw_X), n_features)))
def test_hasher_invalid_input():
    # Constructor and transform must reject bad input_type, non-positive or
    # non-integer n_features, and non-string sample items.
    assert_raises(ValueError, FeatureHasher, input_type="gobbledygook")
    assert_raises(ValueError, FeatureHasher, n_features=-1)
    assert_raises(ValueError, FeatureHasher, n_features=0)
    assert_raises(TypeError, FeatureHasher, n_features='ham')
    h = FeatureHasher(n_features=np.uint16(2 ** 6))
    assert_raises(ValueError, h.transform, [])
    assert_raises(Exception, h.transform, [[5.5]])
    assert_raises(Exception, h.transform, [[None]])
def test_hasher_set_params():
    # Test delayed input validation in fit (useful for grid search).
    hasher = FeatureHasher()
    hasher.set_params(n_features=np.inf)  # invalid, but only rejected at fit
    assert_raises(TypeError, hasher.fit)
def test_hasher_zeros():
    # Assert that no zeros are materialized in the output.
    X = FeatureHasher().transform([{'foo': 0}])
    assert_equal(X.data.shape, (0,))
@ignore_warnings(category=DeprecationWarning)
def test_hasher_alternate_sign():
    """Interaction of alternate_sign with the deprecated non_negative flag."""
    X = [list("Thequickbrownfoxjumped")]
    # alternate_sign alone: both signs appear in the output.
    Xt = FeatureHasher(alternate_sign=True, non_negative=False,
                       input_type='string').fit_transform(X)
    assert Xt.data.min() < 0 and Xt.data.max() > 0
    # non_negative=True forces strictly positive stored values.
    Xt = FeatureHasher(alternate_sign=True, non_negative=True,
                       input_type='string').fit_transform(X)
    assert Xt.data.min() > 0
    Xt = FeatureHasher(alternate_sign=False, non_negative=True,
                       input_type='string').fit_transform(X)
    assert Xt.data.min() > 0
    Xt_2 = FeatureHasher(alternate_sign=False, non_negative=False,
                         input_type='string').fit_transform(X)
    # With initially positive features, the non_negative option should
    # have no impact when alternate_sign=False
    assert_array_equal(Xt.data, Xt_2.data)
@ignore_warnings(category=DeprecationWarning)
def test_hash_collisions():
    """With n_features=1 every token collides into a single bucket."""
    X = [list("Thequickbrownfoxjumped")]
    Xt = FeatureHasher(alternate_sign=True, non_negative=False,
                       n_features=1, input_type='string').fit_transform(X)
    # check that some of the hashed tokens are added
    # with an opposite sign and cancel out
    assert abs(Xt.data[0]) < len(X[0])
    Xt = FeatureHasher(alternate_sign=True, non_negative=True,
                       n_features=1, input_type='string').fit_transform(X)
    assert abs(Xt.data[0]) < len(X[0])
    # Without alternate_sign, all tokens accumulate to the full count.
    Xt = FeatureHasher(alternate_sign=False, non_negative=True,
                       n_features=1, input_type='string').fit_transform(X)
    assert Xt.data[0] == len(X[0])
@ignore_warnings(category=DeprecationWarning)
def test_hasher_negative():
    """Negative feature values: non_negative=True must yield positive data."""
    X = [{"foo": 2, "bar": -4, "baz": -1}.items()]
    Xt = FeatureHasher(alternate_sign=False, non_negative=False,
                       input_type="pair").fit_transform(X)
    assert_true(Xt.data.min() < 0 and Xt.data.max() > 0)
    Xt = FeatureHasher(alternate_sign=False, non_negative=True,
                       input_type="pair").fit_transform(X)
    assert_true(Xt.data.min() > 0)
    Xt = FeatureHasher(alternate_sign=True, non_negative=False,
                       input_type="pair").fit_transform(X)
    assert_true(Xt.data.min() < 0 and Xt.data.max() > 0)
    Xt = FeatureHasher(alternate_sign=True, non_negative=True,
                       input_type="pair").fit_transform(X)
    assert_true(Xt.data.min() > 0)
| vortex-ape/scikit-learn | sklearn/feature_extraction/tests/test_feature_hasher.py | Python | bsd-3-clause | 6,259 |
"""Reads in an HTML file from the command line and pretty-prints it."""
from xml.dom.ext.reader import HtmlLib
from xml.dom import ext
def read_html_from_file(fileName):
    """Parse the HTML document at *fileName* and pretty-print it to stdout.

    Uses PyXML's HtmlLib reader.  The DOM tree is explicitly released before
    returning so the reader can reclaim the (cyclic) node structure.

    :param fileName: path or URI of the HTML file to read.
    """
    # Build a DOM tree from the file.
    reader = HtmlLib.Reader()
    dom_object = reader.fromUri(fileName)
    # Strip ignorable whitespace so the pretty-printer fully controls layout.
    ext.StripHtml(dom_object)
    # Pretty-print the tree to stdout.
    ext.PrettyPrint(dom_object)
    # Reclaim the DOM object (fix: removed the stray C-style semicolon).
    reader.releaseNode(dom_object)
if __name__ == '__main__':
    import sys
    # Usage: script.py FILE — pretty-print the HTML file named on the CLI.
    read_html_from_file(sys.argv[1])
| Pikecillo/genna | external/PyXML-0.8.4/demo/dom/dom_from_html_file.py | Python | gpl-2.0 | 585 |
from LTTL.Input import Input
from LTTL.Segmenter import concatenate
def main():
    """Concatenate three single-string LTTL segmentations and print them."""
    pieces = [
        Input('hello', 'str1'),
        Input('world', 'str2'),
        Input('!', 'str3'),
    ]
    combined = concatenate(pieces)
    print(combined.to_string())
# Script entry point.
if __name__ == '__main__':
    main()
| axanthos/LTTL | bugs/solved/bug_concatenate.py | Python | gpl-3.0 | 307 |
# Copyright (C) 2011 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''Unit tests for watchlistparser.py.'''
import logging
import sys
from webkitpy.common import webkitunittest
from webkitpy.common.system.outputcapture import OutputCapture
from webkitpy.common.watchlist.watchlistparser import WatchListParser
class WatchListParserTest(webkitunittest.TestCase):
    """Validation tests for WatchListParser.

    Each test feeds a deliberately malformed watch list (a dict literal
    serialized as a string) to the parser and asserts the exact diagnostic
    text it logs, including the "Perhaps it should be ..." suggestions
    produced for near-miss spellings.
    """
    def setUp(self):
        webkitunittest.TestCase.setUp(self)
        # Fresh parser instance for every test.
        self._watch_list_parser = WatchListParser()
    def test_bad_section(self):
        """An unknown top-level section name is reported."""
        watch_list = ('{"FOO": {}}')
        OutputCapture().assert_outputs(self, self._watch_list_parser.parse, args=[watch_list],
                                       expected_logs='Unknown section "FOO" in watch list.\n')
    def test_section_typo(self):
        """A near-miss section name gets a spelling suggestion."""
        watch_list = ('{"DEFINTIONS": {}}')
        OutputCapture().assert_outputs(self, self._watch_list_parser.parse, args=[watch_list],
                                       expected_logs='Unknown section "DEFINTIONS" in watch list.'
                                       + '\n\nPerhaps it should be DEFINITIONS.\n')
    def test_bad_definition(self):
        """A '|' character in a definition name is rejected."""
        watch_list = (
            '{'
            '    "DEFINITIONS": {'
            '        "WatchList1|A": {'
            '            "filename": r".*\\MyFileName\\.cpp",'
            '        },'
            '    },'
            '}')
        OutputCapture().assert_outputs(self, self._watch_list_parser.parse, args=[watch_list],
                                       expected_logs='Invalid character "|" in definition "WatchList1|A".\n')
    def test_bad_filename_regex(self):
        """An invalid "filename" regex is reported with the re error text."""
        watch_list = (
            '{'
            '    "DEFINITIONS": {'
            '        "WatchList1": {'
            '            "filename": r"*",'
            '            "more": r"RefCounted",'
            '        },'
            '    },'
            '    "CC_RULES": {'
            '        "WatchList1": ["levin@chromium.org"],'
            '    },'
            '}')
        OutputCapture().assert_outputs(self, self._watch_list_parser.parse, args=[watch_list],
                                       expected_logs='The regex "*" is invalid due to "nothing to repeat".\n')
    def test_bad_more_regex(self):
        """An invalid "more" regex is reported with the re error text."""
        watch_list = (
            '{'
            '    "DEFINITIONS": {'
            '        "WatchList1": {'
            '            "filename": r"aFileName\\.cpp",'
            '            "more": r"*",'
            '        },'
            '    },'
            '    "CC_RULES": {'
            '        "WatchList1": ["levin@chromium.org"],'
            '    },'
            '}')
        OutputCapture().assert_outputs(self, self._watch_list_parser.parse, args=[watch_list],
                                       expected_logs='The regex "*" is invalid due to "nothing to repeat".\n')
    def test_bad_match_type(self):
        """An unknown pattern type inside a definition is reported."""
        watch_list = (
            '{'
            '    "DEFINITIONS": {'
            '        "WatchList1": {'
            '            "nothing_matches_this": r".*\\MyFileName\\.cpp",'
            '            "filename": r".*\\MyFileName\\.cpp",'
            '        },'
            '    },'
            '    "CC_RULES": {'
            '        "WatchList1": ["levin@chromium.org"],'
            '    },'
            '}')
        OutputCapture().assert_outputs(self, self._watch_list_parser.parse, args=[watch_list],
                                       expected_logs='Unknown pattern type "nothing_matches_this" in definition "WatchList1".\n')
    def test_match_type_typo(self):
        """A near-miss pattern type gets a spelling suggestion."""
        watch_list = (
            '{'
            '    "DEFINITIONS": {'
            '        "WatchList1": {'
            '            "iflename": r".*\\MyFileName\\.cpp",'
            '            "more": r"RefCounted",'
            '        },'
            '    },'
            '    "CC_RULES": {'
            '        "WatchList1": ["levin@chromium.org"],'
            '    },'
            '}')
        OutputCapture().assert_outputs(self, self._watch_list_parser.parse, args=[watch_list],
                                       expected_logs='Unknown pattern type "iflename" in definition "WatchList1".'
                                       + '\n\nPerhaps it should be filename.\n')
    def test_empty_definition(self):
        """A definition with no patterns at all is flagged for deletion."""
        watch_list = (
            '{'
            '    "DEFINITIONS": {'
            '        "WatchList1": {'
            '        },'
            '    },'
            '    "CC_RULES": {'
            '        "WatchList1": ["levin@chromium.org"],'
            '    },'
            '}')
        OutputCapture().assert_outputs(self, self._watch_list_parser.parse, args=[watch_list],
                                       expected_logs='The definition "WatchList1" has no patterns, so it should be deleted.\n')
    def test_empty_cc_rule(self):
        """An empty CC rule is flagged; its definition then counts as unused."""
        watch_list = (
            '{'
            '    "DEFINITIONS": {'
            '        "WatchList1": {'
            '            "filename": r".*\\MyFileName\\.cpp",'
            '        },'
            '    },'
            '    "CC_RULES": {'
            '        "WatchList1": [],'
            '    },'
            '}')
        OutputCapture().assert_outputs(self, self._watch_list_parser.parse, args=[watch_list],
                                       expected_logs='A rule for definition "WatchList1" is empty, so it should be deleted.\n'
                                       + 'The following definitions are not used and should be removed: WatchList1\n')
    def test_cc_rule_with_invalid_email(self):
        """A CC email not listed in committers.py is reported."""
        watch_list = (
            '{'
            '    "DEFINITIONS": {'
            '        "WatchList1": {'
            '            "filename": r".*\\MyFileName\\.cpp",'
            '        },'
            '    },'
            '    "CC_RULES": {'
            '        "WatchList1": ["levin+bad+email@chromium.org"],'
            '    },'
            '}')
        OutputCapture().assert_outputs(self, self._watch_list_parser.parse, args=[watch_list],
                                       expected_logs='The email alias levin+bad+email@chromium.org which is'
                                       + ' in the watchlist is not listed as a contributor in committers.py\n')
    def test_empty_message_rule(self):
        """An empty message rule is flagged; its definition counts as unused."""
        watch_list = (
            '{'
            '    "DEFINITIONS": {'
            '        "WatchList1": {'
            '            "filename": r".*\\MyFileName\\.cpp",'
            '        },'
            '    },'
            '    "MESSAGE_RULES": {'
            '        "WatchList1": ['
            '        ],'
            '    },'
            '}')
        OutputCapture().assert_outputs(self, self._watch_list_parser.parse, args=[watch_list],
                                       expected_logs='A rule for definition "WatchList1" is empty, so it should be deleted.\n'
                                       + 'The following definitions are not used and should be removed: WatchList1\n')
    def test_unused_defintion(self):
        """A definition referenced by no rule is reported as unused."""
        watch_list = (
            '{'
            '    "DEFINITIONS": {'
            '        "WatchList1": {'
            '            "filename": r".*\\MyFileName\\.cpp",'
            '        },'
            '    },'
            '}')
        OutputCapture().assert_outputs(self, self._watch_list_parser.parse, args=[watch_list],
                                       expected_logs='The following definitions are not used and should be removed: WatchList1\n')
    def test_cc_rule_with_undefined_defintion(self):
        """A CC rule naming a definition that does not exist is reported."""
        watch_list = (
            '{'
            '    "CC_RULES": {'
            '        "WatchList1": ["levin@chromium.org"]'
            '    },'
            '}')
        OutputCapture().assert_outputs(self, self._watch_list_parser.parse, args=[watch_list],
                                       expected_logs='In section "CC_RULES", the following definitions are not used and should be removed: WatchList1\n')
    def test_message_rule_with_undefined_defintion(self):
        """A message rule naming a missing definition is reported."""
        watch_list = (
            '{'
            '    "MESSAGE_RULES": {'
            '        "WatchList1": ["The message."]'
            '    },'
            '}')
        OutputCapture().assert_outputs(self, self._watch_list_parser.parse, args=[watch_list],
                                       expected_logs='In section "MESSAGE_RULES", the following definitions are not used and should be removed: WatchList1\n')
    def test_cc_rule_with_undefined_defintion_with_suggestion(self):
        """A missing-definition report includes a near-miss suggestion."""
        watch_list = (
            '{'
            '    "DEFINITIONS": {'
            '        "WatchList1": {'
            '            "filename": r".*\\MyFileName\\.cpp",'
            '        },'
            '    },'
            '    "CC_RULES": {'
            '        "WatchList": ["levin@chromium.org"]'
            '    },'
            '    "MESSAGE_RULES": {'
            '        "WatchList1": ["levin@chromium.org"]'
            '    },'
            '}')
        OutputCapture().assert_outputs(self, self._watch_list_parser.parse, args=[watch_list],
                                       expected_logs='In section "CC_RULES", the following definitions are not used and should be removed: WatchList'
                                       + '\n\nPerhaps it should be WatchList1.\n')
| leighpauls/k2cro4 | third_party/WebKit/Tools/Scripts/webkitpy/common/watchlist/watchlistparser_unittest.py | Python | bsd-3-clause | 10,786 |
# -*- coding: utf-8 -*-
# Standard library
import logging
# PyQT
from qgis.PyQt.QtCore import pyqtSignal, QObject, pyqtSlot
# Plugin modules
from ..tools import IsogeoPlgTools
# ############################################################################
# ########## Globals ###############
# ##################################
# Module-wide logger shared across the plugin.
logger = logging.getLogger("IsogeoQgisPlugin")
# Shared helper instance (date formatting etc. via IsogeoPlgTools).
plg_tools = IsogeoPlgTools()
# ############################################################################
# ########## Classes ###############
# ##################################
class SharesParser(QObject):
    """Build the string informing the user about the shares feeding his plugin
    from the Isogeo API's response to a share request.
    """
    # Emitted with the finished HTML summary, or the sentinel "no_shares".
    shares_ready = pyqtSignal(str)
    def __init__(self, app_base_url: str):
        QObject.__init__(self)
        # Placeholder: presumably rebound to the real Qt translate callable by
        # the owner before send_share_info runs — TODO confirm against caller.
        self.tr = object
        self.app_base_url = app_base_url
    @pyqtSlot(list)
    def send_share_info(self, shares: list):
        """Slot connected to ApiRequester.shares_sig signal emitted when a response
        to a share request has been received from the Isogeo's API, validated and parsed.

        'shares' parameter correspond to the content of shares request's reply passed
        by the ApiRequester.handle_reply method (see modules/api/request.py).
        Once the string building from 'result' is done, the shares_ready signal is emitted
        passing this string to a connected slot : Isogeo.write_shares_info (see isogeo.py).

        :param list shares: list of shares feeding the application
        """
        logger.debug("Application properties provided by the Isogeo API.")
        content = shares
        if len(content) > 0:
            text = "<html>"  # opening html content
            # Isogeo application authenticated in the plugin
            app = content[0].get("applications")[0]
            text += self.tr(
                "<p>This plugin is authenticated as " "<a href='{}'>{}</a> and ",
                context=__class__.__name__,
            ).format(
                app.get("url", "https://help.isogeo.com/qgis/"),
                app.get("name", "Isogeo plugin for QGIS"),
            )
            # shares feeding the application
            if len(content) == 1:
                text += self.tr(
                    " powered by 1 share:</p></br>", context=__class__.__name__
                )
            else:
                text += self.tr(
                    " powered by {} shares:</p></br>", context=__class__.__name__
                ).format(len(content))
            # shares details
            for share in content:
                # share variables
                creator_name = share.get("_creator").get("contact").get("name")
                creator_email = share.get("_creator").get("contact").get("email")
                # Drops a 6-char tag prefix (presumably "owner:") — TODO confirm.
                creator_id = share.get("_creator").get("_tag")[6:]
                share_url = "{}/groups/{}/admin/shares/{}".format(
                    self.app_base_url, creator_id, share.get("_id")
                )
                # formatting text
                text += "<p><a href='{}'><b>{}</b></a></p>".format(
                    share_url, share.get("name")
                )
                text += self.tr(
                    "<p>Updated: {}</p>", context=__class__.__name__
                ).format(plg_tools.handle_date(share.get("_modified")))
                text += self.tr(
                    "<p>Contact: {} - {}</p>", context=__class__.__name__
                ).format(creator_name, creator_email)
                text += "<p><hr></p>"
            text += "</html>"
            self.shares_ready.emit(text)
        else:
            # No shares at all: notify with the sentinel value.
            logger.debug("The plugin is not powered by any shares.")
            self.shares_ready.emit("no_shares")
        return
# #############################################################################
# ##### Stand alone program ########
# ##################################
# No standalone behavior: this module is meant to be imported by the plugin.
if __name__ == "__main__":
    """Standalone execution."""
| isogeo/isogeo-plugin-qgis | modules/api/shares.py | Python | gpl-3.0 | 4,030 |
#!/usr/bin/env python3
#
# Generates a DocBook section documenting all PLCAPI methods on
# stdout.
#
# Mark Huang <mlhuang@cs.princeton.edu>
# Copyright (C) 2006 The Trustees of Princeton University
#
# dec 2018
# going for python3; xml.dom.minidom has changed a lot
# working around the changes in a rather quick & dirty way
import xml.dom.minidom
from xml.dom.minidom import Element, parseString
from PLC.Parameter import Parameter, Mixed, xmlrpc_type, python_type
# minidom can no longer create nodes detached from a document, so keep a
# shared throwaway document around as a node factory.
dom = parseString('<dummydoc/>')
def text_node(text):
    """Return a new DOM Text node wrapping *text*.

    Uses the module-level ``dom`` document as the node factory.  (Fix: the
    ``global dom`` declaration was removed — it is only needed for writes,
    not for read access to a module global.)
    """
    return dom.createTextNode(text)
class TextElement(Element):
    """<tagName>text</tagName>"""
    def __init__(self, tagName, text = None):
        Element.__init__(self, tagName)
        if text is not None:
            self.appendChild(text_node(text))
    def writexml(self, writer, indent="", addindent="", newl=""):
        # Serialize the whole element on a single line: write the indent,
        # then the element with minidom's per-child indentation suppressed,
        # then the trailing newline.
        writer.write(indent)
        Element.writexml(self, writer, "", "", "")
        writer.write(newl)
class simpleElement(TextElement):
    """Alias of TextElement kept for readability at call sites.

    Fix: the forwarding ``__init__`` (which only called ``super().__init__``)
    was redundant and has been removed; the inherited constructor is used
    directly, with identical behavior.
    """
class paraElement(simpleElement):
    """<para>text</para>"""
    def __init__(self, text=None):
        super().__init__('para', text)
class blockquoteElement(Element):
    """<blockquote><para>text...</para><para>...text</para></blockquote>"""
    def __init__(self, text = None):
        Element.__init__(self, 'blockquote')
        if text is not None:
            # Split on blank lines: strip each line first so that lines of
            # pure whitespace become empty and "\n\n" reliably marks a
            # paragraph break.
            lines = [line.strip() for line in text.strip().split("\n")]
            lines = "\n".join(lines)
            paragraphs = lines.split("\n\n")
            for paragraph in paragraphs:
                # One <para> child per paragraph of the input text.
                self.appendChild(paraElement(paragraph))
def param_type(param):
    """Return the XML-RPC type of a parameter."""
    if isinstance(param, Mixed) and len(param):
        # A Mixed parameter admits several alternative types.
        subtypes = [param_type(subparam) for subparam in param]
        return " or ".join(subtypes)
    elif isinstance(param, (list, tuple, set)) and len(param):
        # A non-empty sequence describes an array of its element types.
        return "array of " + " or ".join([param_type(subparam) for subparam in param])
    else:
        # Scalar / empty: map the Python type to its XML-RPC type name.
        return xmlrpc_type(python_type(param))
class paramElement(Element):
    """An optionally named parameter."""
    def __init__(self, name, param):
        # <listitem>
        Element.__init__(self, 'listitem')
        global dom
        description = dom.createElement('para')
        if name:
            # "name: " prefix, only for named parameters.
            description.appendChild(simpleElement('parameter', name))
            description.appendChild(text_node(": "))
        description.appendChild(text_node(param_type(param)))
        # Unwrap a one-element sequence to document its single element type.
        if isinstance(param, (list, tuple, set)) and len(param) == 1:
            param = param[0]
        if isinstance(param, Parameter):
            # Append the Parameter's doc text, then recurse on its type.
            description.appendChild(text_node(", " + param.doc))
            param = param.type
        self.appendChild(description)
        if isinstance(param, dict):
            # Nested struct: one list item per named field.
            itemizedlist = dom.createElement('itemizedlist')
            self.appendChild(itemizedlist)
            for name, subparam in param.items():
                itemizedlist.appendChild(paramElement(name, subparam))
        elif isinstance(param, (list, tuple, set)) and len(param):
            # Nested array: one unnamed list item per element type.
            itemizedlist = dom.createElement('itemizedlist')
            self.appendChild(itemizedlist)
            for subparam in param:
                itemizedlist.appendChild(paramElement(None, subparam))
class DocBook:
    """Render a list of PLCAPI method objects as DocBook <section> XML."""
    def __init__ (self, functions_list):
        # Method objects to document; each exposes name/status/args()/
        # accepts/returns/roles (see the PLC method classes).
        self.functions_list = functions_list
    def Process (self):
        """Print one DocBook <section> per non-deprecated method to stdout."""
        global dom
        for func in self.functions_list:
            method = func.name
            # Deprecated methods are omitted from the documentation.
            if func.status == "deprecated":
                continue
            (min_args, max_args, defaults) = func.args()
            # with python3 it looks like an element can't be created
            # outside of a document
            section = dom.createElement('section')
            section.setAttribute('id', func.name)
            section.appendChild(simpleElement('title', func.name))
            # Prototype: "Name (arg1, arg2, ...)" using the full arg list.
            prototype = "%s (%s)" % (method, ", ".join(max_args))
            para = paraElement('Prototype:')
            para.appendChild(blockquoteElement(prototype))
            section.appendChild(para)
            para = paraElement('Description:')
            para.appendChild(blockquoteElement(func.__doc__))
            section.appendChild(para)
            para = paraElement('Allowed Roles:')
            para.appendChild(blockquoteElement(", ".join(func.roles)))
            section.appendChild(para)
            section.appendChild(paraElement('Parameters:'))
            params = Element('itemizedlist')
            if func.accepts:
                # One documented parameter per accepted argument.
                for name, param, default in zip(max_args, func.accepts, defaults):
                    params.appendChild(paramElement(name, param))
            else:
                # No parameters: emit an explicit "None" item.
                listitem = Element('listitem')
                listitem.appendChild(paraElement('None'))
                params.appendChild(listitem)
            section.appendChild(params)
            section.appendChild(paraElement('Returns:'))
            returns = dom.createElement('itemizedlist')
            returns.appendChild(paramElement(None, func.returns))
            section.appendChild(returns)
            print(section.toxml())
| dreibh/planetlab-lxc-plcapi | doc/DocBook.py | Python | bsd-3-clause | 5,382 |
from urlparse import urlparse
from api_tests.nodes.views.test_node_contributors_list import NodeCRUDTestCase
from nose.tools import * # flake8: noqa
from api.base.settings.defaults import API_BASE
from framework.auth.core import Auth
from tests.base import fake
from osf_tests.factories import (
ProjectFactory,
CommentFactory,
RegistrationFactory,
WithdrawnRegistrationFactory,
)
class TestWithdrawnRegistrations(NodeCRUDTestCase):
    """API tests for withdrawn registrations.

    A withdrawn registration must expose only a limited set of fields and
    relationships (contributors remain visible); child/comment/file/log
    endpoints return 403, the node-detail endpoint 404, and deletion 405.
    """
    def setUp(self):
        super(TestWithdrawnRegistrations, self).setUp()
        # Register the public project, then withdraw the registration.
        self.registration = RegistrationFactory(creator=self.user, project=self.public_project)
        self.withdrawn_registration = WithdrawnRegistrationFactory(registration=self.registration, user=self.registration.creator)
        self.public_pointer_project = ProjectFactory(is_public=True)
        self.public_pointer = self.public_project.add_pointer(self.public_pointer_project,
                                                              auth=Auth(self.user),
                                                              save=True)
        self.withdrawn_url = '/{}registrations/{}/?version=2.2'.format(API_BASE, self.registration._id)
        self.withdrawn_registration.justification = 'We made a major error.'
        self.withdrawn_registration.save()
    def test_can_access_withdrawn_contributors(self):
        """Contributors stay visible on a withdrawn registration."""
        url = '/{}registrations/{}/contributors/'.format(API_BASE, self.registration._id)
        res = self.app.get(url, auth=self.user.auth, expect_errors=True)
        assert_equal(res.status_code, 200)
    def test_cannot_access_withdrawn_children(self):
        """Children endpoint is forbidden after withdrawal."""
        url = '/{}registrations/{}/children/'.format(API_BASE, self.registration._id)
        res = self.app.get(url, auth=self.user.auth, expect_errors=True)
        assert_equal(res.status_code, 403)
    def test_cannot_access_withdrawn_comments(self):
        """Comments endpoint is forbidden after withdrawal."""
        self.public_project = ProjectFactory(is_public=True, creator=self.user)
        self.public_comment = CommentFactory(node=self.public_project, user=self.user)
        url = '/{}registrations/{}/comments/'.format(API_BASE, self.registration._id)
        res = self.app.get(url, auth=self.user.auth, expect_errors=True)
        assert_equal(res.status_code, 403)
    def test_can_access_withdrawn_contributor_detail(self):
        """Individual contributor detail stays visible."""
        url = '/{}registrations/{}/contributors/{}/'.format(API_BASE, self.registration._id, self.user._id)
        res = self.app.get(url, auth=self.user.auth, expect_errors=True)
        assert_equal(res.status_code, 200)
    def test_cannot_return_a_withdrawn_registration_at_node_detail_endpoint(self):
        """A withdrawn registration is not addressable as a node."""
        url = '/{}nodes/{}/'.format(API_BASE, self.registration._id)
        res = self.app.get(url, auth=self.user.auth, expect_errors=True)
        assert_equal(res.status_code, 404)
    def test_cannot_delete_a_withdrawn_registration(self):
        """DELETE is not allowed on a withdrawn registration."""
        url = '/{}registrations/{}/'.format(API_BASE, self.registration._id)
        res = self.app.delete_json_api(url, auth=self.user.auth, expect_errors=True)
        self.registration.reload()
        assert_equal(res.status_code, 405)
    def test_cannot_access_withdrawn_files_list(self):
        """Files endpoint is forbidden after withdrawal."""
        url = '/{}registrations/{}/files/'.format(API_BASE, self.registration._id)
        res = self.app.get(url, auth=self.user.auth, expect_errors=True)
        assert_equal(res.status_code, 403)
    def test_cannot_access_withdrawn_node_links_detail(self):
        """Node-link detail endpoint is forbidden after withdrawal."""
        url = '/{}registrations/{}/node_links/{}/'.format(API_BASE, self.registration._id, self.public_pointer._id)
        res = self.app.get(url, auth=self.user.auth, expect_errors=True)
        assert_equal(res.status_code, 403)
    def test_cannot_access_withdrawn_node_links_list(self):
        """Node-links list endpoint is forbidden after withdrawal."""
        url = '/{}registrations/{}/node_links/'.format(API_BASE, self.registration._id)
        res = self.app.get(url, auth=self.user.auth, expect_errors=True)
        assert_equal(res.status_code, 403)
    def test_cannot_access_withdrawn_node_logs(self):
        """Logs endpoint is forbidden after withdrawal."""
        self.public_project = ProjectFactory(is_public=True, creator=self.user)
        url = '/{}registrations/{}/logs/'.format(API_BASE, self.registration._id)
        res = self.app.get(url, auth=self.user.auth, expect_errors=True)
        assert_equal(res.status_code, 403)
    def test_cannot_access_withdrawn_registrations_list(self):
        """Nested registrations list is forbidden after withdrawal."""
        self.registration.save()
        url = '/{}registrations/{}/registrations/'.format(API_BASE, self.registration._id)
        res = self.app.get(url, auth=self.user.auth, expect_errors=True)
        assert_equal(res.status_code, 403)
    def test_withdrawn_registrations_display_limited_fields(self):
        """Only the whitelisted attributes/relationships are serialized."""
        registration = self.registration
        res = self.app.get(self.withdrawn_url, auth=self.user.auth)
        assert_equal(res.status_code, 200)
        attributes = res.json['data']['attributes']
        registration.reload()
        # Hidden attributes are nulled out rather than omitted.
        expected_attributes = {
            'title': registration.title,
            'description': registration.description,
            'date_created': registration.date_created.isoformat().replace('+00:00', 'Z'),
            'date_registered': registration.registered_date.isoformat().replace('+00:00', 'Z'),
            'date_modified': registration.date_modified.isoformat().replace('+00:00', 'Z'),
            'withdrawal_justification': registration.retraction.justification,
            'public': None,
            'category': None,
            'registration': True,
            'fork': None,
            'collection': None,
            'tags': None,
            'withdrawn': True,
            'pending_withdrawal': None,
            'pending_registration_approval': None,
            'pending_embargo_approval': None,
            'embargo_end_date': None,
            'registered_meta': None,
            'current_user_permissions': None,
            'registration_supplement': registration.registered_schema.first().name
        }
        for attribute in expected_attributes:
            assert_equal(expected_attributes[attribute], attributes[attribute])
        contributors = urlparse(res.json['data']['relationships']['contributors']['links']['related']['href']).path
        assert_equal(contributors, '/{}registrations/{}/contributors/'.format(API_BASE, registration._id))
        # All other relationships must be stripped from the payload.
        assert_not_in('children', res.json['data']['relationships'])
        assert_not_in('comments', res.json['data']['relationships'])
        assert_not_in('node_links', res.json['data']['relationships'])
        assert_not_in('registrations', res.json['data']['relationships'])
        assert_not_in('parent', res.json['data']['relationships'])
        assert_not_in('forked_from', res.json['data']['relationships'])
        assert_not_in('files', res.json['data']['relationships'])
        assert_not_in('logs', res.json['data']['relationships'])
        assert_not_in('registered_by', res.json['data']['relationships'])
        assert_not_in('registered_from', res.json['data']['relationships'])
        assert_not_in('root', res.json['data']['relationships'])
    def test_field_specific_related_counts_ignored_if_hidden_field_on_withdrawn_registration(self):
        """related_counts on a hidden relationship is silently ignored."""
        url = '/{}registrations/{}/?related_counts=children'.format(API_BASE, self.registration._id)
        res = self.app.get(url, auth=self.user.auth)
        assert_equal(res.status_code, 200)
        assert_not_in('children', res.json['data']['relationships'])
        assert_in('contributors', res.json['data']['relationships'])
    def test_field_specific_related_counts_retrieved_if_visible_field_on_withdrawn_registration(self):
        """related_counts still works for the visible contributors field."""
        url = '/{}registrations/{}/?related_counts=contributors'.format(API_BASE, self.registration._id)
        res = self.app.get(url, auth=self.user.auth)
        assert_equal(res.status_code, 200)
        assert_equal(res.json['data']['relationships']['contributors']['links']['related']['meta']['count'], 1)
| monikagrabowska/osf.io | api_tests/registrations/views/test_withdrawn_registrations.py | Python | apache-2.0 | 7,865 |
from django.utils.translation import gettext_lazy as _
# Registry metadata for the "components" demo module.
ModuleTitle = _("components")
Title = _("Components")
Perms = False
Index = "None"
# Each Urls entry is a 4-tuple:
#   (relative URL, localized label, <always None here — third field's meaning
#    not shown in this module; confirm against the consumer of Urls>,
#    icon resource path as a "png://..." string, possibly empty).
Urls = (
    (
        "codeeditor?schtml=browser",
        _("Code editor"),
        None,
        """png://actions/format-justify-center.png""",
    ),
    ("d3?schtml=browser", _("D3"), None, """"""),
    (
        "spreadsheet?schtml=browser",
        _("Spreadsheet"),
        None,
        """png://apps/accessories-calculator.png""",
    ),
    (
        "pivottable?schtml=browser",
        _("Pivot table"),
        None,
        """png://mimetypes/x-office-spreadsheet.png""",
    ),
    (
        "leaflet?schtml=browser",
        _("Leaflet"),
        None,
        """png://apps/preferences-system-network-proxy.png""",
    ),
    (
        "video?schtml=browser",
        _("Video"),
        None,
        """png://mimetypes/video-x-generic.png""",
    ),
    (
        "wysiwyg?schtml=browser",
        _("Wysiwyg editor"),
        None,
        """png://mimetypes/x-office-document.png""",
    ),
    ("xterm?schtml=browser", _("Xterm"), None, """png://apps/utilities-terminal.png"""),
    (
        "calendar?schtml=browser",
        _("Calendar"),
        None,
        """png://actions/appointment-new.png""",
    ),
    (
        "mask?schtml=browser",
        _("Mask edit"),
        None,
        """png://actions/format-text-underline.png""",
    ),
    (
        "webrtc?schtml=browser",
        _("WebRTC"),
        None,
        """png://status/network-transmit-receive.png""",
    ),
    (
        "time?schtml=browser",
        _("Time edit"),
        None,
        """png://actions/appointment-new.png""",
    ),
    (
        "scrollaction?schtml=browser",
        _("Scroll actions"),
        None,
        """png://actions/go-down.png""",
    ),
    (
        "plotly?schtml=browser",
        _("Plotly"),
        None,
        """png://mimetypes/x-office-drawing-template.png""",
    ),
    (
        "test?schtml=browser",
        _("Test"),
        None,
        """png://actions/document-properties.png""",
    ),
    ("svg?schtml=browser", _("Svg"), None, """png://actions/edit-find-replace.png"""),
    ("select2?schtml=browser", _("Select2"), None, """png://actions/edit-find.png"""),
    ("db?schtml=browser", _("Db"), None, """png://actions/address-book-new.png"""),
    (
        "form?schtml=browser",
        _("Form"),
        None,
        """png://categories/preferences-desktop.png""",
    ),
)
UserParam = {}
| Splawik/pytigon | pytigon/prj/schpytigondemo/schcomponents_demo/__init__.py | Python | lgpl-3.0 | 2,497 |
import unittest
from packaging.version import parse
import sdafile
from sdafile.version import version
class Version(unittest.TestCase):
    """Sanity checks on the package version string."""
    def test_imports(self):
        """The top-level package re-exports the canonical version string."""
        self.assertEqual(version, sdafile.__version__)
    def test_pep_440(self):
        # parse() raises InvalidVersion when the string is not PEP 440.
        parse(version)
| enthought/sandia-data-archive | sdafile/tests/test_version.py | Python | bsd-3-clause | 347 |
import os
import tempfile
import shutil
import sys
import webtest
import time
import threading
from io import BytesIO
from pywb.webapp.pywb_init import create_wb_router
from pywb.manager.manager import main
import pywb.manager.autoindex
from pywb.warc.cdxindexer import main as cdxindexer_main
from pywb import get_test_dir
from pywb.framework.wsgi_wrappers import init_app
from pywb.webapp.views import J2TemplateView
from pytest import raises
from mock import patch
#=============================================================================
# Directory / file names used inside each pywb collection.
ARCHIVE_DIR = 'archive'
INDEX_DIR = 'indexes'
INDEX_FILE = 'index.cdxj'
AUTOINDEX_FILE = 'autoindex.cdxj'
#=============================================================================
# Module-level state set by setup_module(): the temp working directory for
# the whole test module, and the cwd to restore on teardown.
root_dir = None
orig_cwd = None
def setup_module():
    """Create a temp directory and chdir into it for the whole module."""
    global root_dir
    root_dir = tempfile.mkdtemp()
    global orig_cwd
    orig_cwd = os.getcwd()
    os.chdir(root_dir)
    # use actually set dir: re-read the cwd after chdir so root_dir matches
    # the path as the OS reports it (presumably to normalize symlinked temp
    # dirs, e.g. /tmp on macOS — confirm).
    root_dir = os.getcwd()
def teardown_module():
    """Restore the original working directory and delete the temp tree."""
    global orig_cwd
    os.chdir(orig_cwd)
    global root_dir
    shutil.rmtree(root_dir)
#=============================================================================
class TestManagedColls(object):
def setup(self):
global root_dir
self.root_dir = root_dir
def _create_app(self):
self.app = init_app(create_wb_router)
self.testapp = webtest.TestApp(self.app)
def _check_dirs(self, base, dirlist):
for dir_ in dirlist:
assert os.path.isdir(os.path.join(base, dir_))
def _get_sample_warc(self, name):
return os.path.join(get_test_dir(), 'warcs', name)
def teardown(self):
J2TemplateView.shared_jinja_env = None
@patch('waitress.serve', lambda *args, **kwargs: None)
def test_run_cli(self):
""" test new wayback cli interface
test autoindex error before collections inited
"""
from pywb.apps.cli import wayback
wayback([])
# Nothing to auto-index.. yet
with raises(SystemExit):
wayback(['-a'])
colls = os.path.join(self.root_dir, 'collections')
os.mkdir(colls)
pywb.manager.autoindex.keep_running = False
wayback(['-a'])
def test_create_first_coll(self):
""" Test first collection creation, with all required dirs
"""
main(['init', 'test'])
colls = os.path.join(self.root_dir, 'collections')
assert os.path.isdir(colls)
test = os.path.join(colls, 'test')
assert os.path.isdir(test)
self._check_dirs(test, [INDEX_DIR, ARCHIVE_DIR, 'static', 'templates'])
def test_add_warcs(self):
""" Test adding warc to new coll, check replay
"""
warc1 = self._get_sample_warc('example.warc.gz')
main(['add', 'test', warc1])
self._create_app()
resp = self.testapp.get('/test/20140103030321/http://example.com?example=1')
assert resp.status_int == 200
def test_another_coll(self):
""" Test adding warc to a new coll, check replay
"""
warc1 = self._get_sample_warc('example.warc.gz')
main(['init', 'foo'])
main(['add', 'foo', warc1])
self._create_app()
resp = self.testapp.get('/foo/20140103030321/http://example.com?example=1')
assert resp.status_int == 200
def test_add_more_warcs(self):
""" Test adding additional warcs, check replay of added content
"""
warc1 = self._get_sample_warc('iana.warc.gz')
warc2 = self._get_sample_warc('example-extra.warc')
main(['add', 'test', warc1, warc2])
# Spurrious file in collections
with open(os.path.join(self.root_dir, 'collections', 'blah'), 'w+b') as fh:
fh.write('foo\n')
with raises(IOError):
main(['add', 'test', 'non-existent-file.warc.gz'])
# check new cdx
self._create_app()
resp = self.testapp.get('/test/20140126200624/http://www.iana.org/')
assert resp.status_int == 200
def test_add_custom_nested_warcs(self):
    """ Test recursive indexing of custom created WARC hierarchy,
    warcs/A/..., warcs/B/sub/...
    Ensure CDX is relative to root archive dir, test replay
    """
    main(['init', 'nested'])

    nested_root = os.path.join(self.root_dir, 'collections', 'nested', ARCHIVE_DIR)
    nested_a = os.path.join(nested_root, 'A')
    nested_b = os.path.join(nested_root, 'B', 'sub')

    os.makedirs(nested_a)
    os.makedirs(nested_b)

    # copy warcs manually into the nested dirs, then index them explicitly
    warc1 = self._get_sample_warc('iana.warc.gz')
    warc2 = self._get_sample_warc('example.warc.gz')

    shutil.copy2(warc1, nested_a)
    shutil.copy2(warc2, nested_b)

    main(['index',
          'nested',
          os.path.join(nested_a, 'iana.warc.gz'),
          os.path.join(nested_b, 'example.warc.gz')
         ])

    nested_cdx = os.path.join(self.root_dir, 'collections', 'nested', INDEX_DIR, INDEX_FILE)
    with open(nested_cdx) as fh:
        nested_cdx_index = fh.read()

    # numeric values are presumably record offsets/lengths from the sample
    # warcs -- TODO confirm against cdx format; paths must be relative to
    # the archive root
    assert '1043' in nested_cdx_index
    assert '333' in nested_cdx_index
    assert 'B/sub/example.warc.gz' in nested_cdx_index

    assert '2258' in nested_cdx_index
    assert '334' in nested_cdx_index
    assert 'A/iana.warc.gz' in nested_cdx_index

    self._create_app()
    resp = self.testapp.get('/nested/20140126200624/http://www.iana.org/')
    assert resp.status_int == 200

    resp = self.testapp.get('/nested/20140103030321/http://example.com?example=1')
    assert resp.status_int == 200
def test_merge_vs_reindex_equality(self):
    """A full 'reindex' must produce exactly the same index as the
    incremental merge performed when warcs were added earlier."""
    index_dir = os.path.join(self.root_dir, 'collections', 'test', INDEX_DIR)
    index_path = os.path.join(index_dir, INDEX_FILE)
    backup_path = os.path.join(index_dir, 'index.bak')

    # preserve the incrementally-merged index, then rebuild from scratch
    shutil.copy(index_path, backup_path)
    main(['reindex', 'test'])

    with open(index_path) as fh:
        reindexed = fh.read()

    with open(backup_path) as fh:
        merged = fh.read()

    assert len(merged.splitlines()) == len(reindexed.splitlines())
    assert reindexed == merged
def test_add_static(self):
    """ Test adding static file to collection, check access
    """
    a_static = os.path.join(self.root_dir, 'collections', 'test', 'static', 'abc.js')

    # str written to a binary-mode file: Python 2 style
    with open(a_static, 'w+b') as fh:
        fh.write('/* Some JS File */')

    self._create_app()

    # per-collection static files are served under /static/<coll>/
    resp = self.testapp.get('/static/test/abc.js')
    assert resp.status_int == 200
    assert resp.content_type == 'application/javascript'
    assert '/* Some JS File */' in resp.body
def test_add_shared_static(self):
    """Place a static file in the root-level static/ dir and verify it
    is served via the shared /static/__shared/ route."""
    shared_css = os.path.join(self.root_dir, 'static', 'foo.css')
    with open(shared_css, 'w+b') as out:
        out.write('/* Some CSS File */')

    self._create_app()
    resp = self.testapp.get('/static/__shared/foo.css')

    assert resp.status_int == 200
    assert resp.content_type == 'text/css'
    assert '/* Some CSS File */' in resp.body
def test_add_title_metadata_index_page(self):
    """ Test adding title metadata to a collection, test
    retrieval on default index page
    """
    main(['metadata', 'foo', '--set', 'title=Collection Title'])

    self._create_app()
    resp = self.testapp.get('/')
    assert resp.status_int == 200
    assert resp.content_type == 'text/html'
    # home page shows each collection's title in parentheses
    assert '(Collection Title)' in resp.body
def test_other_metadata_search_page(self):
    """ Test setting additional metadata fields, check that they are
    rendered on the collection search page, and that malformed
    (non key=value) settings are rejected
    """
    main(['metadata', 'foo', '--set',
          'desc=Some Description Text',
          'other=custom value'])

    # metadata must be key=value pairs
    with raises(ValueError):
        main(['metadata', 'foo', '--set', 'name_only'])

    self._create_app()
    resp = self.testapp.get('/foo/')
    assert resp.status_int == 200
    assert resp.content_type == 'text/html'

    # title set by the earlier metadata test is still present
    assert 'Collection Title' in resp.body

    assert 'desc' in resp.body
    assert 'Some Description Text' in resp.body

    assert 'other' in resp.body
    assert 'custom value' in resp.body
def test_custom_template_search(self):
    """Drop a hand-written search.html into the collection's templates
    dir and confirm it replaces the default search page."""
    page_path = os.path.join(self.root_dir, 'collections', 'test',
                             'templates', 'search.html')
    with open(page_path, 'w+b') as out:
        out.write('pywb custom search page')

    self._create_app()
    resp = self.testapp.get('/test/')

    assert resp.status_int == 200
    assert resp.content_type == 'text/html'
    assert 'pywb custom search page' in resp.body
def test_custom_config(self):
    """ Test custom created config.yaml which overrides auto settings
    Template is relative to collection-specific dir
    Add custom metadata and test its presence in custom search page
    """
    config_path = os.path.join(self.root_dir, 'collections', 'test', 'config.yaml')
    # str written to a binary-mode file: Python 2 style
    with open(config_path, 'w+b') as fh:
        fh.write('search_html: ./templates/custom_search.html\n')
        fh.write('index_paths: ./cdx2/\n')

    custom_search = os.path.join(self.root_dir, 'collections', 'test',
                                 'templates', 'custom_search.html')

    # add metadata
    main(['metadata', 'test', '--set', 'some=value'])

    # custom search template renders user metadata as json
    with open(custom_search, 'w+b') as fh:
        fh.write('config.yaml overriden search page: ')
        fh.write('{{ wbrequest.user_metadata | tojson }}\n')

    # rename the index dir so it matches the overridden index_paths setting
    os.rename(os.path.join(self.root_dir, 'collections', 'test', INDEX_DIR),
              os.path.join(self.root_dir, 'collections', 'test', 'cdx2'))

    self._create_app()
    resp = self.testapp.get('/test/')
    assert resp.status_int == 200
    assert resp.content_type == 'text/html'
    assert 'config.yaml overriden search page: {"some": "value"}' in resp.body

    # replay still works via the renamed index dir
    resp = self.testapp.get('/test/20140103030321/http://example.com?example=1')
    assert resp.status_int == 200
def test_add_default_coll_templates(self):
    """ Test add default templates: collection,
    and overwrite collection template
    """
    # list available templates (output only, nothing asserted)
    main(['template', 'foo', '--list'])

    # Add collection template
    main(['template', 'foo', '--add', 'query_html'])
    assert os.path.isfile(os.path.join(self.root_dir, 'collections', 'foo', 'templates', 'query.html'))

    # overwrite -- force flag skips the confirmation prompt
    main(['template', 'foo', '--add', 'query_html', '-f'])
def test_add_modify_home_template(self):
    """ Test adding the shared home template, modifying it in place,
    and verifying the change is rendered on the home page
    """
    # Add shared template
    main(['template', '--add', 'home_html'])

    filename = os.path.join(self.root_dir, 'templates', 'index.html')
    assert os.path.isfile(filename)

    # read-modify-write via a single r+b handle
    with open(filename, 'r+b') as fh:
        buf = fh.read()
        buf = buf.replace('</html>', 'Custom Test Homepage</html>')
        fh.seek(0)
        fh.write(buf)

    self._create_app()
    resp = self.testapp.get('/')
    assert resp.content_type == 'text/html'
    assert 'Custom Test Homepage</html>' in resp.body, resp.body
@patch('pywb.manager.manager.get_input', lambda x: 'y')
def test_add_template_input_yes(self):
    """ Test answer 'y' to overwrite prompt -- overwrite succeeds
    """
    main(['template', 'foo', '--add', 'query_html'])
@patch('pywb.manager.manager.get_input', lambda x: 'n')
def test_add_template_input_no(self):
    """ Test answer 'n' to overwrite prompt -- raises IOError
    """
    with raises(IOError):
        main(['template', 'foo', '--add', 'query_html'])
@patch('pywb.manager.manager.get_input', lambda x: 'other')
def test_add_template_input_other(self):
    """ Test unrecognized answer to overwrite prompt -- treated as 'no'
    """
    with raises(IOError):
        main(['template', 'foo', '--add', 'query_html'])
@patch('pywb.manager.manager.get_input', lambda x: 'no')
def test_remove_not_confirm(self):
    """ Test answer 'no' to remove prompt -- template is kept
    """
    # don't remove -- not confirmed
    with raises(IOError):
        main(['template', 'foo', '--remove', 'query_html'])
@patch('pywb.manager.manager.get_input', lambda x: 'yes')
def test_remove_confirm(self):
    """ Test answer 'yes' to remove prompt -- template is removed
    """
    # remove -- confirm
    main(['template', 'foo', '--remove', 'query_html'])
def test_no_templates(self):
    """After deleting a collection's templates dir, the app must fall
    back to the built-in default search page."""
    tmpl_dir = os.path.join(self.root_dir, 'collections', 'foo', 'templates')
    shutil.rmtree(tmpl_dir)

    self._create_app()
    resp = self.testapp.get('/foo/')

    assert resp.status_int == 200
    assert resp.content_type == 'text/html'
    assert 'pywb custom search page' not in resp.body
def test_list_colls(self):
    """ Test collection listing, printed to stdout
    """
    orig_stdout = sys.stdout
    # capture stdout via BytesIO -- Python 2 style (stdout is bytes)
    buff = BytesIO()
    sys.stdout = buff
    try:
        main(['list'])
    finally:
        # always restore stdout even if 'list' raises
        sys.stdout = orig_stdout

    output = sorted(buff.getvalue().splitlines())
    # header line plus the three collections created by earlier tests
    assert len(output) == 4
    assert 'Collections:' in output
    assert '- foo' in output
    assert '- nested' in output
    assert '- test' in output
def test_convert_cdx(self):
    """ Create non-surt cdx, then convert to cdxj
    """
    migrate_dir = os.path.join(self.root_dir, '_migrate')
    os.mkdir(migrate_dir)

    # index all sample warcs into plain .cdx files
    cdxindexer_main(['-u', migrate_dir, self._get_sample_warc('')])

    # try one file with -9
    cdxindexer_main(['-u', '-9', migrate_dir, self._get_sample_warc('example.warc.gz')])

    cdxs = os.listdir(migrate_dir)
    assert all(x.endswith('.cdx') for x in cdxs)

    # user declines conversion -> files unchanged
    @patch('pywb.manager.manager.get_input', lambda x: 'blah')
    def do_migrate_no():
        main(['convert-cdx', migrate_dir])

    do_migrate_no()

    assert os.listdir(migrate_dir) == cdxs

    # user confirms -> every .cdx becomes a .cdxj
    @patch('pywb.manager.manager.get_input', lambda x: 'y')
    def do_migrate_yes():
        main(['convert-cdx', migrate_dir])

    do_migrate_yes()

    cdxjs = os.listdir(migrate_dir)

    assert len(cdxs) == len(cdxjs)
    assert all(x.endswith('.cdxj') for x in cdxjs)

    # converted index is in surt + json format
    with open(os.path.join(migrate_dir, 'iana.cdxj')) as fh:
        assert fh.readline().startswith('org,iana)/ 20140126200624 {"url": "http://www.iana.org/",')

    # Nothing else to migrate -- second run is a no-op
    main(['convert-cdx', migrate_dir])
def test_auto_index(self):
    """ Test autoindex: a background thread copies warcs into the
    collection while the autoindex loop runs in the main thread;
    verify the index is written, then rewritten on a second pass
    """
    main(['init', 'auto'])

    auto_dir = os.path.join(self.root_dir, 'collections', 'auto')
    archive_dir = os.path.join(auto_dir, ARCHIVE_DIR)

    archive_sub_dir = os.path.join(archive_dir, 'sub')
    os.makedirs(archive_sub_dir)

    pywb.manager.autoindex.keep_running = True

    # copies sample warcs after a delay, then clears keep_running so
    # the autoindex loop in the main thread terminates
    def do_copy():
        try:
            time.sleep(1)
            shutil.copy(self._get_sample_warc('example.warc.gz'), archive_dir)
            shutil.copy(self._get_sample_warc('example-extra.warc'), archive_sub_dir)
            time.sleep(1)
        finally:
            pywb.manager.autoindex.keep_running = False

    thread = threading.Thread(target=do_copy)
    thread.daemon = True
    thread.start()

    # blocks until the copier thread clears keep_running
    main(['autoindex'])

    thread.join()

    index_file = os.path.join(auto_dir, INDEX_DIR, AUTOINDEX_FILE)
    assert os.path.isfile(index_file)

    with open(index_file) as fh:
        index = fh.read()

    # both the top-level and the nested warc must have been indexed
    assert '"example.warc.gz' in index
    assert '"sub/example-extra.warc' in index, index

    mtime = os.path.getmtime(index_file)

    # Update: remove the index and run again, this time naming the coll
    pywb.manager.autoindex.keep_running = True
    os.remove(index_file)

    thread = threading.Thread(target=do_copy)
    thread.daemon = True
    thread.start()

    main(['autoindex', 'auto'])

    thread.join()

    # assert file was updated (rewritten by the second autoindex pass)
    assert os.path.getmtime(index_file) > mtime
def test_err_template_remove(self):
    """ Test various error conditions for templates:
    invalid template name, no collection for collection template
    no template file found
    """
    # no such template name
    with raises(KeyError):
        main(['template', 'foo', '--remove', 'blah_html'])

    # collection needed for a collection-level template
    with raises(IOError):
        main(['template', '--remove', 'query_html'])

    # already removed by the earlier confirm test
    with raises(IOError):
        main(['template', 'foo', '--remove', 'query_html'])
def test_err_no_such_coll(self):
    """Adding a warc to a collection that was never created must fail
    with an IOError."""
    sample_warc = self._get_sample_warc('example.warc.gz')

    with raises(IOError):
        main(['add', 'bar', sample_warc])
def test_err_wrong_warcs(self):
    """ Test error conditions for 'index': empty warc list,
    warc path outside the collection, non-existent warc
    """
    warc1 = self._get_sample_warc('example.warc.gz')
    invalid_warc = os.path.join(self.root_dir, 'collections', 'test', ARCHIVE_DIR, 'invalid.warc.gz')

    # Empty warc list, argparse calls exit
    with raises(SystemExit):
        main(['index', 'test'])

    # Wrong paths not in collection
    with raises(IOError):
        main(['index', 'test', warc1])

    # Non-existent warc inside the archive dir
    with raises(IOError):
        main(['index', 'test', invalid_warc])
def test_err_invalid_name(self):
    """Collection names containing path traversal or special
    characters must be rejected with ValueError."""
    for bad_name in ('../abc%', '45^23'):
        with raises(ValueError):
            main(['init', bad_name])
def test_err_missing_dirs(self):
    """ Test various errors with missing warcs dir,
    missing cdx dir, non dir cdx file, and missing collections root
    """
    colls = os.path.join(self.root_dir, 'collections')

    # No Statics -- ignorable, app still starts
    shutil.rmtree(os.path.join(colls, 'foo', 'static'))
    self._create_app()

    # No WARCS dir -- 'add' fails
    warcs_path = os.path.join(colls, 'foo', ARCHIVE_DIR)
    shutil.rmtree(warcs_path)

    with raises(IOError):
        main(['add', 'foo', 'somewarc'])

    # No CDX dir -- app init fails
    cdx_path = os.path.join(colls, 'foo', INDEX_DIR)
    shutil.rmtree(cdx_path)

    with raises(Exception):
        self._create_app()

    # CDX a file not a dir -- app init also fails
    # (str written to a binary-mode file: Python 2 style)
    with open(cdx_path, 'w+b') as fh:
        fh.write('foo\n')

    with raises(Exception):
        self._create_app()

    shutil.rmtree(colls)

    # No Collections root to list
    with raises(IOError):
        main(['list'])

    # No Collections -- app starts, but collection routes 404
    self._create_app()
    resp = self.testapp.get('/test/', status=404)
    assert resp.status_int == 404
| machawk1/pywb | tests/test_auto_colls.py | Python | gpl-3.0 | 19,169 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.