code stringlengths 2 1.05M | repo_name stringlengths 5 104 | path stringlengths 4 251 | language stringclasses 1 value | license stringclasses 15 values | size int32 2 1.05M |
|---|---|---|---|---|---|
from ceph_deploy.util import templates
from ceph_deploy.lib import remoto
from ceph_deploy.hosts.common import map_components
from ceph_deploy.util.paths import gpg
# Ceph packages that ship as single (non-split) packages; component names
# requested by the caller are mapped against this list by map_components().
NON_SPLIT_PACKAGES = ['ceph-osd', 'ceph-mon', 'ceph-mds']
def rpm_dist(distro):
    """
    Return the RPM "dist" tag (e.g. ``el7``) for *distro*.

    Recognized Enterprise-Linux distros at major release 6 or newer map to
    ``el<major>``; everything else falls back to the historical ``el6``.
    """
    release = distro.normalized_release
    is_el_family = distro.normalized_name in ('redhat', 'centos', 'scientific')
    if is_el_family and release.int_major >= 6:
        return 'el' + release.major
    return 'el6'
def repository_url_part(distro):
    """
    Resolve the distro-specific fragment used in repository URLs.

    Historically everything CentOS, RHEL, and Scientific was mapped to
    `el6` urls, but repositories for `rhel` should map to, say, `rhel6`
    or `rhel7`.  Inspect the `distro` object and pick the right fragment,
    falling back to `el6` when all else fails.
    """
    release = distro.normalized_release
    if release.int_major >= 6:
        name = distro.normalized_name
        if name == 'redhat':
            return 'rhel' + release.major
        if name in ('centos', 'scientific'):
            return 'el' + release.major
    # unknown distro or release older than 6: keep the historical default
    return 'el6'
def install(distro, version_kind, version, adjust_repos, **kw):
    """
    Install Ceph packages on a remote CentOS/RHEL host.

    :param distro: connected distro object for the remote host
    :param version_kind: one of 'stable', 'testing' or 'dev'
    :param version: Ceph version (or dev branch ref) to install
    :param adjust_repos: when True, configure repo files and GPG keys
        on the remote host before installing
    ``kw['components']`` optionally selects which package components to
    install; they are mapped through NON_SPLIT_PACKAGES.
    """
    packages = map_components(
        NON_SPLIT_PACKAGES,
        kw.pop('components', [])
    )
    logger = distro.conn.logger
    release = distro.release
    machine = distro.machine_type
    repo_part = repository_url_part(distro)
    dist = rpm_dist(distro)
    distro.packager.clean()
    # Get EPEL installed before we continue:
    if adjust_repos:
        distro.packager.install('epel-release')
        distro.packager.install('yum-plugin-priorities')
        distro.conn.remote_module.enable_yum_priority_obsoletes()
        logger.warning('check_obsoletes has been enabled for Yum priorities plugin')
    # stable/testing packages are signed with the 'release' key;
    # dev builds with the 'autobuild' key
    if version_kind in ['stable', 'testing']:
        key = 'release'
    else:
        key = 'autobuild'
    if adjust_repos:
        if version_kind != 'dev':
            distro.packager.add_repo_gpg_key(gpg.url(key))
            if version_kind == 'stable':
                url = 'http://ceph.com/rpm-{version}/{repo}/'.format(
                    version=version,
                    repo=repo_part,
                )
            elif version_kind == 'testing':
                url = 'http://ceph.com/rpm-testing/{repo}/'.format(repo=repo_part)
            # Install the ceph-release package, which drops the repo file
            # on the remote host (--replacepkgs makes this idempotent).
            remoto.process.run(
                distro.conn,
                [
                    'rpm',
                    '-Uvh',
                    '--replacepkgs',
                    '{url}noarch/ceph-release-1-0.{dist}.noarch.rpm'.format(url=url, dist=dist),
                ],
            )
        if version_kind == 'dev':
            # dev builds come from gitbuilder; write the repo file directly
            # instead of installing a ceph-release package
            logger.info('skipping install of ceph-release package')
            logger.info('repo file will be created manually')
            mirror_install(
                distro,
                'http://gitbuilder.ceph.com/ceph-rpm-centos{release}-{machine}-basic/ref/{version}/'.format(
                    release=release.split(".", 1)[0],
                    machine=machine,
                    version=version),
                gpg.url(key),
                adjust_repos=True,
                extra_installs=False
            )
        # set the right priority
        logger.warning('ensuring that /etc/yum.repos.d/ceph.repo contains a high priority')
        distro.conn.remote_module.set_repo_priority(['Ceph', 'Ceph-noarch', 'ceph-source'])
        logger.warning('altered ceph.repo priorities to contain: priority=1')
    if packages:
        distro.packager.install(packages)
def mirror_install(distro, repo_url, gpg_url, adjust_repos, extra_installs=True, **kw):
    """
    Configure a Ceph mirror repository on the remote host and optionally
    install the selected packages from it.

    :param repo_url: base URL of the mirror (trailing slashes are stripped)
    :param gpg_url: URL of the GPG key used to sign the mirror packages
    :param adjust_repos: when True, write the repo file and import the key
    :param extra_installs: when False, only configure repos without
        installing packages
    """
    packages = map_components(NON_SPLIT_PACKAGES, kw.pop('components', []))
    # Remove trailing slashes so the repo template can append paths cleanly
    repo_url = repo_url.strip('/')
    distro.packager.clean()
    if adjust_repos:
        distro.packager.add_repo_gpg_key(gpg_url)
        repo_file = templates.ceph_repo.format(
            repo_url=repo_url,
            gpg_url=gpg_url
        )
        distro.conn.remote_module.write_yum_repo(repo_file)
        # set the right priority: yum needs the priorities plugin so the
        # Ceph repo wins over the distro's own packages
        if distro.packager.name == 'yum':
            distro.packager.install('yum-plugin-priorities')
            distro.conn.remote_module.set_repo_priority(['Ceph', 'Ceph-noarch', 'ceph-source'])
            distro.conn.logger.warning('altered ceph.repo priorities to contain: priority=1')
    if extra_installs and packages:
        distro.packager.install(packages)
def repo_install(distro, reponame, baseurl, gpgkey, **kw):
    """
    Configure a custom (user supplied) repository on the remote host and
    optionally install Ceph packages from it.

    :param reponame: name of the repo; also used for the .repo file name
    :param baseurl: repository base URL (trailing slashes are stripped)
    :param gpgkey: GPG key URL, or a falsy value to skip key import
    Recognized ``kw`` options: ``name``, ``enabled``, ``gpgcheck``,
    ``install_ceph``, ``proxy``, ``priority``; anything else is forwarded
    verbatim to the repo file template.
    """
    packages = map_components(
        NON_SPLIT_PACKAGES,
        kw.pop('components', [])
    )
    logger = distro.conn.logger
    # Get some defaults
    name = kw.pop('name', '%s repo' % reponame)
    enabled = kw.pop('enabled', 1)
    gpgcheck = kw.pop('gpgcheck', 1)
    install_ceph = kw.pop('install_ceph', False)
    proxy = kw.pop('proxy', '')  # will get ignored if empty
    _type = 'repo-md'
    baseurl = baseurl.strip('/')  # Remove trailing slashes
    distro.packager.clean()
    if gpgkey:
        distro.packager.add_repo_gpg_key(gpgkey)
    # Remaining kw entries are passed through to the template as extra
    # repo file options.
    repo_content = templates.custom_repo(
        reponame=reponame,
        name=name,
        baseurl=baseurl,
        enabled=enabled,
        gpgcheck=gpgcheck,
        _type=_type,
        gpgkey=gpgkey,
        proxy=proxy,
        **kw
    )
    distro.conn.remote_module.write_yum_repo(
        repo_content,
        "%s.repo" % reponame
    )
    repo_path = '/etc/yum.repos.d/{reponame}.repo'.format(reponame=reponame)
    # set the right priority
    # NOTE: 'priority' is read with get() (not pop()) so it was also
    # forwarded to the repo file template above.
    if kw.get('priority'):
        if distro.packager.name == 'yum':
            distro.packager.install('yum-plugin-priorities')
        distro.conn.remote_module.set_repo_priority([reponame], repo_path)
        logger.warning('altered {reponame}.repo priorities to contain: priority=1'.format(
            reponame=reponame)
        )
    # Some custom repos do not need to install ceph
    if install_ceph and packages:
        distro.packager.install(packages)
| branto1/ceph-deploy | ceph_deploy/hosts/centos/install.py | Python | mit | 6,289 |
from __future__ import absolute_import
import os
from celery import Celery
from django.conf import settings
# Default to the local settings module when none is configured in the
# environment (must happen before the Celery app reads Django settings).
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'assembly.settings.local')
app = Celery('assembly')
# Pull CELERY_* configuration keys from the Django settings module.
app.config_from_object('django.conf:settings')
# Discover tasks.py modules in every installed Django app; the lambda
# defers settings access until after Django is configured.
app.autodiscover_tasks(lambda: settings.INSTALLED_APPS)
app.conf.update(
CELERY_RESULT_BACKEND='djcelery.backends.database:DatabaseBackend',
) | Karolain/cms | assembly/celery.py | Python | mit | 408 |
#!/usr/bin/env python
# Legacy-style Django management entry point: requires a settings.py next
# to this script and supports both old and new Django management APIs.
try:
    import settings  # Assumed to be in the same directory.
except ImportError:
    import sys
    sys.stderr.write("Error: Can't find the file 'settings.py' in the directory containing %r. It appears you've customized things.\nYou'll have to run django-admin.py, passing it your settings module.\n(If the file settings.py does indeed exist, it's causing an ImportError somehow.)\n" % __file__)
    sys.exit(1)
if __name__ == "__main__":
    # Newer Django exposes execute_from_command_line; fall back to the
    # deprecated execute_manager on older releases.
    try:
        from django.core.management import execute_from_command_line
        execute_from_command_line()
    except ImportError:
        from django.core.management import execute_manager
        execute_manager(settings)
| CBien/django-alert | test_project/manage.py | Python | mit | 698 |
import unittest
import ctypes
import gc
# Callback prototypes used by the tests below: int(int) and int(int, ulonglong).
MyCallback = ctypes.CFUNCTYPE(ctypes.c_int, ctypes.c_int)
OtherCallback = ctypes.CFUNCTYPE(ctypes.c_int, ctypes.c_int, ctypes.c_ulonglong)
import _ctypes_test
# Helper DLL shipped with CPython's ctypes test suite.
dll = ctypes.CDLL(_ctypes_test.__file__)
class RefcountTestCase(unittest.TestCase):
    """Verify that ctypes callback wrappers do not leak references to the
    wrapped Python callables."""

    def test_1(self):
        from sys import getrefcount as grc
        f = dll._testfunc_callback_i_if
        f.restype = ctypes.c_int
        f.argtypes = [ctypes.c_int, MyCallback]
        def callback(value):
            #print "called back with", value
            return value
        # baseline: the local name plus getrefcount's own argument
        self.assertEqual(grc(callback), 2)
        cb = MyCallback(callback)
        # the CFuncPtr wrapper must add at least one reference
        self.assertTrue(grc(callback) > 2)
        result = f(-10, cb)
        self.assertEqual(result, -18)
        cb = None
        gc.collect()
        # dropping the wrapper must return the refcount to baseline
        self.assertEqual(grc(callback), 2)

    def test_refcount(self):
        from sys import getrefcount as grc
        def func(*args):
            pass
        # this is the standard refcount for func
        self.assertEqual(grc(func), 2)
        # the CFuncPtr instance holds at least one refcount on func:
        f = OtherCallback(func)
        self.assertTrue(grc(func) > 2)
        # and may release it again
        del f
        self.assertTrue(grc(func) >= 2)
        # but now it must be gone
        gc.collect()
        self.assertTrue(grc(func) == 2)
        # callbacks stored in Structure fields must behave the same way
        class X(ctypes.Structure):
            _fields_ = [("a", OtherCallback)]
        x = X()
        x.a = OtherCallback(func)
        # the CFuncPtr instance holds at least one refcount on func:
        self.assertTrue(grc(func) > 2)
        # and may release it again
        del x
        self.assertTrue(grc(func) >= 2)
        # and now it must be gone again
        gc.collect()
        self.assertEqual(grc(func), 2)
        f = OtherCallback(func)
        # the CFuncPtr instance holds at least one refcount on func:
        self.assertTrue(grc(func) > 2)
        # create a reference cycle; only the cyclic GC can reclaim it
        f.cycle = f
        del f
        gc.collect()
        self.assertEqual(grc(func), 2)
class AnotherLeak(unittest.TestCase):
    """Regression test: invoking a callback must not leak references to its
    result type."""

    def test_callback(self):
        import sys
        proto = ctypes.CFUNCTYPE(ctypes.c_int, ctypes.c_int, ctypes.c_int)
        def func(a, b):
            return a * b * 2
        f = proto(func)
        gc.collect()
        # the refcount of the restype (c_int) must be unchanged by a call
        a = sys.getrefcount(ctypes.c_int)
        f(1, 2)
        self.assertEqual(sys.getrefcount(ctypes.c_int), a)
if __name__ == '__main__':
unittest.main()
| sauloal/pycluster | pypy-1.9_64/lib-python/2.7/ctypes/test/test_refcounts.py | Python | mit | 2,502 |
#
# Copyright (c) 2015 Red Hat
# Licensed under The MIT License (MIT)
# http://opensource.org/licenses/MIT
#
from collections import OrderedDict
import logging
import re
import sys
import json
from django.conf import settings
from django.utils.encoding import smart_text
from contrib import drf_introspection
from django.db.models.fields import NOT_PROVIDED
from django.core.urlresolvers import NoReverseMatch
from django.core.exceptions import FieldDoesNotExist
from rest_framework.renderers import BrowsableAPIRenderer
from rest_framework.utils import formatting
from rest_framework.reverse import reverse
from rest_framework import serializers, relations, fields
from pdc.apps.utils.utils import urldecode
"""
## Writing documentation in docstrings
Docstrings of each method will be available in browsable API as documentation.
These features are available to simplify writing the comments:
* the content is formatted as Markdown
* %(HOST_NAME)s and %(API_ROOT)s macros will be replaced by host name and URL
fragment for API, respectively
* %(FILTERS)s will be replaced a by a list of available query string filters
* %(SERIALIZER)s will be replaced by a code block with details about
serializer
* %(WRITABLE_SERIALIZER)s will do the same, but without read-only fields
* $URL:route-name:arg1:arg2...$ will be replaced by absolute URL
* $LINK:route-name:arg1:...$ will be replaced by a clickable link with
relative URL pointing to the specified place; arguments for LINK will be
wrapped in braces automatically
When the URL specification can not be resolve, "BAD URL" will be displayed on
the page and details about the error will be logged to the error log.
"""
PDC_APIROOT_DOC = """
The REST APIs make it possible to programmatic access the data in Product Definition Center(a.k.a. PDC).
Create new Product, import rpms and query components with contact informations, and more.
The REST API identifies users using Token which will be generated for all authenticated users.
**Please remember to use your token as HTTP header for every requests that need authentication.**
If you want to record the reason for change, you can add Header (-H "PDC-Change-Comment: reasonforchange") in request.
Responses are available in JSON format.
**NOTE:** in order to use secure HTTPS connections, you'd better to add server's certificate as trusted.
"""
URL_SPEC_RE = re.compile(r'\$(?P<type>URL|LINK):(?P<details>[^$]+)\$')
class ReadOnlyBrowsableAPIRenderer(BrowsableAPIRenderer):
    """
    Browsable API renderer that never renders HTML edit forms and augments
    the template context with generated documentation: an overview plus
    per-method descriptions built from the view's docstrings.
    """
    template = "browsable_api/api.html"
    # View methods whose docstrings are rendered as per-method documentation.
    methods_mapping = (
        'list',
        'retrieve',
        'create',
        'bulk_create',
        'update',
        'destroy',
        'bulk_destroy',
        'partial_update',
        'bulk_update',
        # Token Auth methods
        'obtain',
        'refresh',
    )

    def get_raw_data_form(self, data, view, method, request):
        # Read-only browsable API: never offer a raw data form.
        return None

    def get_rendered_html_form(self, data, view, method, request):
        # Read-only browsable API: never offer a rendered HTML form.
        return None

    def get_context(self, data, accepted_media_type, renderer_context):
        """
        Build the template context, dropping every edit form prepared by
        the base class and adding the 'overview' documentation entry.
        """
        self.request = renderer_context['request']
        super_class = super(ReadOnlyBrowsableAPIRenderer, self)
        super_retval = super_class.get_context(data, accepted_media_type,
                                               renderer_context)
        if super_retval is not None:
            # Remove all write-related forms from the context.
            del super_retval['put_form']
            del super_retval['post_form']
            del super_retval['delete_form']
            del super_retval['options_form']
            del super_retval['raw_data_put_form']
            del super_retval['raw_data_post_form']
            del super_retval['raw_data_patch_form']
            del super_retval['raw_data_put_or_patch_form']
            super_retval['display_edit_forms'] = False
            super_retval['version'] = "1.0"
            view = renderer_context['view']
            super_retval['overview'] = self.get_overview(view)
        return super_retval

    def get_overview(self, view):
        """Return the formatted top-level documentation for the view."""
        if view.__class__.__name__ == 'APIRoot':
            # The API root uses a fixed document instead of a docstring.
            return self.format_docstring(None, None, PDC_APIROOT_DOC)
        overview = view.__doc__ or ''
        return self.format_docstring(view, '<overview>', overview)

    def get_description(self, view, *args):
        """Return an OrderedDict mapping method name -> formatted docs for
        every documented method in methods_mapping."""
        if view.__class__.__name__ == 'APIRoot':
            return ''
        description = OrderedDict()
        for method in self.methods_mapping:
            func = getattr(view, method, None)
            docstring = func and func.__doc__ or ''
            if docstring:
                description[method] = self.format_docstring(view, method, docstring)
        return description

    def format_docstring(self, view, method, docstring):
        """
        Expand documentation macros (%(FILTERS)s, %(SERIALIZER)s, ...) and
        $URL$/$LINK$ references in a docstring, then render it as Markdown.
        """
        macros = settings.BROWSABLE_DOCUMENT_MACROS
        if view:
            macros['FILTERS'] = get_filters(view)
            # Serializer descriptions are expensive to build; compute them
            # only when the docstring actually uses the macro.
            if '%(SERIALIZER)s' in docstring:
                macros['SERIALIZER'] = get_serializer(view, include_read_only=True)
            if '%(WRITABLE_SERIALIZER)s' in docstring:
                macros['WRITABLE_SERIALIZER'] = get_serializer(view, include_read_only=False)
            if hasattr(view, 'docstring_macros'):
                macros.update(view.docstring_macros)
        string = formatting.dedent(docstring)
        formatted = string % macros
        formatted = self.substitute_urls(view, method, formatted)
        string = smart_text(formatted)
        return formatting.markup_description(string)

    def substitute_urls(self, view, method, text):
        """
        Replace $URL:route:args$ with an absolute URL and $LINK:route:args$
        with a clickable relative Markdown link.  Unresolvable specifiers
        render as 'BAD URL' and are logged with a traceback.
        """
        def replace_url(match):
            type = match.groupdict()['type']
            parts = match.groupdict()['details'].split(':')
            url_name = parts[0]
            args = parts[1:]
            if type == 'LINK':
                # LINK args become brace-wrapped placeholders in the URL.
                args = ['{%s}' % arg for arg in args]
            try:
                if type == 'LINK':
                    url = reverse(url_name, args=args)
                    return '[`%s`](%s)' % (urldecode(url), url)
                return reverse(url_name, args=args, request=self.request)
            except NoReverseMatch:
                logger = logging.getLogger(__name__)
                logger.error('Bad URL specifier <%s> in %s.%s'
                             % (match.group(0), view.__class__.__name__, method),
                             exc_info=sys.exc_info())
                return 'BAD URL'
        return URL_SPEC_RE.sub(replace_url, text)
# Rendered filter documentation cached per view (filters never change at
# runtime, so the cache is valid for the process lifetime).
FILTERS_CACHE = {}
# Maps django-filter filter class names to the type name shown in the docs.
FILTER_DEFS = {
    'CharFilter': 'string',
    'NullableCharFilter': 'string | null',
    'BooleanFilter': 'bool',
    'CaseInsensitiveBooleanFilter': 'bool',
    'ActiveReleasesFilter': 'bool',
    'MultiIntFilter': 'int',
}
# Human-readable descriptions for filter lookup types.
LOOKUP_TYPES = {
    'icontains': 'case insensitive, substring match',
    'contains': 'substring match',
    'iexact': 'case insensitive',
}
def get_filters(view):
    """
    For a given view set returns which query filters are available for it a
    Markdown formatted list. The list does not include query filters specified
    on serializer or query arguments used for paging.
    """
    # Results are cached per view; see FILTERS_CACHE above.
    if view in FILTERS_CACHE:
        return FILTERS_CACHE[view]
    allowed_keys = drf_introspection.get_allowed_query_params(view)
    filter_class = getattr(view, 'filter_class', None)
    filterset = filter_class() if filter_class is not None else None
    filterset_fields = filterset.filters if filterset is not None else []
    filter_fields = set(getattr(view, 'filter_fields', []))
    extra_query_params = set(getattr(view, 'extra_query_params', []))
    filters = []
    for key in sorted(allowed_keys):
        if key in filterset_fields:
            # filter defined in FilterSet: we can document its value type
            # (from FILTER_DEFS) and lookup semantics (from LOOKUP_TYPES)
            filter = filterset_fields.get(key)
            filter_type = FILTER_DEFS.get(filter.__class__.__name__, 'string')
            lookup_type = LOOKUP_TYPES.get(filter.lookup_type)
            if lookup_type:
                lookup_type = ', %s' % lookup_type
            filters.append(' * `%s` (%s%s)' % (key, filter_type, lookup_type or ''))
        elif key in filter_fields or key in extra_query_params:
            # filter defined in viewset directly; type depends on model, not easily available
            filters.append(' * `%s`' % key)
        # else filter defined somewhere else and not relevant here (e.g.
        # serializer or pagination settings).
    filters = '\n'.join(filters)
    FILTERS_CACHE[view] = filters
    return filters
# Serializer documentation cached per (view, include_read_only) pair.
SERIALIZERS_CACHE = {}
# Maps DRF field class names to the type name shown in the documentation.
SERIALIZER_DEFS = {
    'BooleanField': 'boolean',
    'NullBooleanField': 'boolean',
    'CharField': 'string',
    'IntegerField': 'int',
    'HyperlinkedIdentityField': 'url',
    'DateTimeField': 'datetime',
    'DateField': 'date',
    'StringRelatedField': 'string',
    'ReadOnlyField': 'data',
    'EmailField': 'email address',
    'SlugField': 'string',
    'URLField': 'url',
}
def _get_type_from_str(str, default=None):
"""
Convert docstring into object suitable for inclusion as documentation. It
tries to parse the docstring as JSON, falling back on provided default
value.
"""
if str:
try:
return json.loads(str)
except ValueError:
pass
return default if default is not None else str
def _get_details_for_slug(serializer, field_name, field):
"""
For slug field, we ideally want to get Model.field format. However, in some
cases getting the model name is not possible, and only field name is
displayed.
"""
model = ''
if hasattr(field, 'queryset') and field.queryset:
model = field.queryset.model.__name__ + '.'
return '%s%s' % (model, field.slug_field)
def get_field_type(serializer, field_name, field, include_read_only):
    """
    Try to describe a field type.

    Dispatches on the DRF field class: many-valued fields recurse on the
    child and wrap the result in a list; known scalar fields map through
    SERIALIZER_DEFS; method fields use the method docstring; nested
    serializers recurse via describe_serializer.  Logs and returns
    'UNKNOWN' for anything it cannot describe.
    """
    if isinstance(field, (relations.ManyRelatedField, serializers.ListSerializer)):
        # Many field, recurse on child and make it a list
        if isinstance(field, relations.ManyRelatedField):
            field = field.child_relation
        else:
            field = field.child
        return [get_field_type(serializer, field_name, field, include_read_only)]
    if field.__class__.__name__ in SERIALIZER_DEFS:
        return SERIALIZER_DEFS[field.__class__.__name__]
    elif isinstance(field, serializers.SlugRelatedField):
        return _get_details_for_slug(serializer, field_name, field)
    elif isinstance(field, serializers.SerializerMethodField):
        # For method fields try to use docstring of the method.
        method_name = field.method_name or 'get_{field_name}'.format(field_name=field_name)
        method = getattr(serializer, method_name, None)
        if method:
            docstring = getattr(method, '__doc__')
            return _get_type_from_str(docstring, docstring or 'method')
    elif not include_read_only and hasattr(field, 'writable_doc_format'):
        # Write-oriented description takes precedence in writable docs.
        return _get_type_from_str(field.writable_doc_format)
    elif hasattr(field, 'doc_format'):
        return _get_type_from_str(field.doc_format)
    elif isinstance(field, serializers.BaseSerializer):
        # Nested serializer: describe it recursively.
        return describe_serializer(field, include_read_only)
    logger = logging.getLogger(__name__)
    logger.error('Undocumented field %s' % field)
    return 'UNKNOWN'
def get_default_value(serializer, field_name, field):
    """
    Try to get default value for a field and format it nicely.

    Precedence: the default's own `doc_format` attribute, then the model
    field's default (when the serializer default is `fields.empty`), then
    the serializer default itself.  Returns None when no default exists.
    """
    value = field.default
    if hasattr(value, 'doc_format'):
        # Default objects may describe themselves via `doc_format`.
        # NOTE: `basestring` makes this module Python 2 only.
        return (value.doc_format
                if isinstance(value.doc_format, basestring)
                else str(value.doc_format))
    if value == fields.empty:
        # Try to get default from model field.
        try:
            default = serializer.Meta.model._meta.get_field(field_name).default
            return default if default != NOT_PROVIDED else None
        except (FieldDoesNotExist, AttributeError):
            return None
    return value
def describe_serializer(serializer, include_read_only):
    """
    Try to get description of a serializer. It tries to inspect all fields
    separately, if the serializer does not have fields, it falls back to
    `doc_format` class attribute (if present). If all fails, an error is
    logged.

    Returns a dict mapping "field_name (notes)" -> type description, the
    serializer's `doc_format`, or the literal string 'data'.
    """
    data = {}
    if hasattr(serializer, 'get_fields'):
        # NOTE: iteritems() makes this module Python 2 only.
        for field_name, field in serializer.get_fields().iteritems():
            notes = []
            if field.read_only:
                notes.append('read-only')
                if not include_read_only:
                    # Writable-only view: skip read-only fields entirely.
                    continue
            elif not field.required:
                notes.append('optional')
                default = json.dumps(get_default_value(serializer, field_name, field))
                # Suppress "default=null" for nullable fields; it is implied.
                if not (default is None and field.allow_null):
                    notes.append('default=%s' % default)
            if field.allow_null:
                notes.append('nullable')
            notes = ' (%s)' % ', '.join(notes) if notes else ''
            data[field_name + notes] = get_field_type(serializer, field_name, field, include_read_only)
        return data
    elif hasattr(serializer.__class__, 'doc_format'):
        return serializer.doc_format
    else:
        logger = logging.getLogger(__name__)
        logger.error('Failed to get details for serializer %s' % serializer)
        return 'data'
def get_serializer(view, include_read_only):
    """
    For given view, return a Markdown code block with JSON description of the
    serializer. If `include_read_only` is `False`, only writable fields will be
    included.

    Results are cached per (view, include_read_only); returns None when the
    view has no serializer or its construction fails.
    """
    if (view, include_read_only) in SERIALIZERS_CACHE:
        return SERIALIZERS_CACHE[(view, include_read_only)]
    if not hasattr(view, 'get_serializer'):
        return None
    try:
        serializer = view.get_serializer()
        desc = json.dumps(describe_serializer(serializer, include_read_only),
                          indent=4, sort_keys=True)
        # Indent every line by four spaces to form a Markdown code block.
        doc = '\n'.join('    %s' % line for line in desc.split('\n'))
    except AssertionError:
        # Even when `get_serializer` is present, it may raise exception.
        doc = None
    SERIALIZERS_CACHE[(view, include_read_only)] = doc
    return doc
| lao605/product-definition-center | pdc/apps/common/renderers.py | Python | mit | 14,144 |
# -*- coding: utf-8 -*-
""" Translation API
@copyright: 2012-14 (c) Sahana Software Foundation
@license: MIT
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
import os
import parser
import token
from gluon import current
from gluon.languages import read_dict, write_dict
"""
List of classes with description :
TranslateAPI : API class to retrieve strings and files by module
TranslateGetFiles : Class to traverse the eden directory and
categorize files based on module
TranslateParseFiles : Class to extract strings to translate from code files
TranslateReadFiles : Class to open a file, read its contents and build
a parse tree (for .py files) or use regex
(for html/js files) to obtain a list of strings
by calling methods from TranslateParseFiles
Strings : Class to manipulate strings and their files
Pootle : Class to synchronise a Pootle server's translation
with the local one
TranslateReportStatus : Class to report the translated percentage of each
language file for each module. It also updates
these percentages as and when required
"""
# =============================================================================
class TranslateAPI:
    """
    API class for the Translation module to get
    files, modules and strings individually
    """

    # Modules whose strings always belong to the core application
    core_modules = ["auth", "default", "errors", "appadmin"]

    def __init__(self):
        # Group all application files by the module they belong to
        self.grp = TranslateGetFiles()
        self.grp.group_files(current.request.folder)

    # ---------------------------------------------------------------------
    @staticmethod
    def get_langcodes():
        """ Return a list of language codes """
        lang_list = []
        langdir = os.path.join(current.request.folder, "languages")
        files = os.listdir(langdir)
        for f in files:
            # Strip the ".py" extension to obtain the language code
            lang_list.append(f[:-3])
        return lang_list

    # ---------------------------------------------------------------------
    def get_modules(self):
        """ Return a list of modules """
        return self.grp.modlist

    # ---------------------------------------------------------------------
    def get_strings_by_module(self, module):
        """ Return a list of strings corresponding to a module

            Each entry is a tuple of ("<file>:<line>", string).
        """
        grp = self.grp
        d = grp.d
        if module in d.keys():
            fileList = d[module]
        else:
            current.log.warning("Module '%s' doesn't exist!" % module)
            return []
        modlist = grp.modlist
        strings = []
        sappend = strings.append
        R = TranslateReadFiles()
        findstr = R.findstr
        for f in fileList:
            # Use the parse-tree extractor for .py, regex for .html/.js
            if f.endswith(".py") == True:
                tmpstr = findstr(f, "ALL", modlist)
            elif f.endswith(".html") == True or \
                 f.endswith(".js") == True:
                tmpstr = R.read_html_js(f)
            else:
                tmpstr = []
            for s in tmpstr:
                sappend(("%s:%s" % (f, str(s[0])), s[1]))
        # Handle "special" files separately
        # (they contain strings belonging to multiple modules)
        fileList = d["special"]
        for f in fileList:
            if f.endswith(".py") == True:
                tmpstr = findstr(f, module, modlist)
                for s in tmpstr:
                    sappend(("%s:%s" % (f, str(s[0])), s[1]))
        return strings

    # ---------------------------------------------------------------------
    def get_strings_by_file(self, filename):
        """ Return a list of strings in a given file

            Each entry is a tuple of ("<file>:<line>", string).
        """
        if os.path.isfile(filename):
            filename = os.path.abspath(filename)
        else:
            print "'%s' is not a valid file path!" % filename
            return []
        R = TranslateReadFiles()
        strings = []
        sappend = strings.append
        tmpstr = []
        if filename.endswith(".py") == True:
            tmpstr = R.findstr(filename, "ALL", self.grp.modlist)
        elif filename.endswith(".html") == True or \
             filename.endswith(".js") == True:
            tmpstr = R.read_html_js(filename)
        else:
            print "Please enter a '.py', '.js' or '.html' file path"
            return []
        for s in tmpstr:
            sappend(("%s:%s" % (filename, str(s[0])), s[1]))
        return strings
# =============================================================================
class TranslateGetFiles:
    """ Class to group files by modules """

    def __init__(self):
        """
        Set up a dictionary to hold files belonging to a particular
        module with the module name as the key. Files which contain
        strings belonging to more than one module are grouped under
        the "special" key.
        """
        # Initialize to an empty list for each module
        d = {}
        modlist = self.get_module_list(current.request.folder)
        for m in modlist:
            d[m] = []
        # List of files belonging to 'core' module
        d["core"] = []
        # 'special' files which contain strings from more than one module
        d["special"] = []
        self.d = d
        self.modlist = modlist
        # Directories which are not required to be searched
        self.rest_dirs = ["languages", "docs", "tests",
                          "test", ".git", "uploads", "private"]

    # ---------------------------------------------------------------------
    @staticmethod
    def get_module_list(dir):
        """
        Returns a list of modules using files in /controllers/
        as point of reference

        NOTE(review): parameter shadows the `dir` builtin; kept for
        interface compatibility.
        """
        mod = []
        mappend = mod.append
        cont_dir = os.path.join(dir, "controllers")
        mod_files = os.listdir(cont_dir)
        for f in mod_files:
            if f[0] != ".":
                # Strip extension
                mappend(f[:-3])
        # Add Modules which aren't in controllers
        mod += ["support",
                "translate",
                ]
        return mod

    # ---------------------------------------------------------------------
    def group_files(self, currentDir, curmod="", vflag=0):
        """
        Recursive function to group Eden files into respective modules

        :param curmod: module name inherited from the parent /views dir
        :param vflag: set once traversal has entered a /views directory
        """
        appname = current.request.application
        path = os.path
        currentDir = path.abspath(currentDir)
        base_dir = path.basename(currentDir)
        if base_dir in self.rest_dirs:
            return
        # If current directory is '/views', set vflag
        if base_dir == "views":
            vflag = 1
        d = self.d
        files = os.listdir(currentDir)
        for f in files:
            # skip hidden files, bytecode and test modules
            if f.startswith(".") or f.endswith(".pyc") or f in ("test.py", "tests.py"):
                continue
            curFile = path.join(currentDir, f)
            if path.isdir(curFile):
                # If the current directory is /views,
                # categorize files based on the directory name
                if vflag:
                    self.group_files(curFile, f, vflag)
                else:
                    self.group_files(curFile, curmod, vflag)
            else:
                # If in /appname/views, categorize by parent directory name
                if vflag:
                    base = curmod
                # Categorize file as "special" as it contains strings
                # belonging to various modules
                elif f in ("s3menus.py", "s3cfg.py", "000_config.py", "config.py"):
                    base = "special"
                else:
                    # Remove extension ('.py')
                    base = path.splitext(f)[0]
                    # If file has "s3" as prefix, remove "s3" to get module name
                    if "s3" in base:
                        base = base[2:]
                    # If file is inside /models and file name is
                    # of the form var_module.py, remove the "var_" prefix
                    #elif base_dir == "models" and "_" in base:
                    #    base = base.split("_")[1]
                # If base refers to a module, append to corresponding list
                if base in d.keys():
                    d[base].append(curFile)
                else:
                    # Append it to "core" files list
                    d["core"].append(curFile)
# =============================================================================
class TranslateParseFiles:
"""
Class to extract strings to translate from code files
"""
    def __init__(self):
        """ Initializes all object variables

            The flags below implement a small state machine that walks the
            token stream of a parse tree looking for T(...) / M(...) calls.
        """
        self.cflag = 0       # To indicate if next element is a class
        self.fflag = 0       # To indicate if next element is a function
        self.sflag = 0       # To indicate 'T' has just been found
        self.tflag = 0       # To indicate we are currently inside T(...)
        self.mflag = 0       # To indicate we are currently inside M(...)
        self.bracket = 0     # Acts as a counter for parenthesis in T(...)
        self.outstr = ""     # Collects all the data inside T(...)
        self.class_name = ""  # Stores the current class name
        self.func_name = ""   # Stores the current function name
        self.mod_name = ""    # Stores module that the string may belong to
        self.findent = -1     # Stores indentation level in menus.py
# ---------------------------------------------------------------------
    def parseList(self, entry, tmpstr):
        """ Recursive function to extract strings from a parse tree

            Appends every STRING token found in `entry` to `tmpstr`.
        """
        if isinstance(entry, list):
            id = entry[0]
            value = entry[1]
            if isinstance(value, list):
                # interior node: recurse into every child element
                parseList = self.parseList
                for element in entry:
                    parseList(element, tmpstr)
            else:
                # leaf node: collect string tokens
                if token.tok_name[id] == "STRING":
                    tmpstr.append(value)
# ---------------------------------------------------------------------
    def parseConfig(self, spmod, strings, entry, modlist):
        """ Function to extract strings from config.py / 000_config.py

            :param spmod: the module whose strings are requested
                          ("core" for strings not owned by a module)
            :param strings: output list of (line_number, string) tuples
            :param entry: current parse-tree node
            :param modlist: list of all known module names
        """
        if isinstance(entry, list):
            id = entry[0]
            value = entry[1]
            # If the element is not a root node,
            # go deeper into the tree using dfs
            if isinstance(value, list):
                parseConfig = self.parseConfig
                for element in entry:
                    parseConfig(spmod, strings, element, modlist)
            else:
                if self.fflag == 1 and token.tok_name[id] == "NAME":
                    # Here, func_name stores the module_name of the form
                    # deployment.settings.module_name.variable
                    self.func_name = value
                    self.fflag = 0
                # Set flag to store the module name from
                # deployment_settings.module_name
                elif token.tok_name[id] == "NAME" and \
                     (value == "deployment_settings" or \
                      value == "settings"):
                    self.fflag = 1
                # Get module name from deployment_setting.modules list
                elif self.tflag == 0 and self.func_name == "modules" and \
                     token.tok_name[id] == "STRING":
                    if value[1:-1] in modlist:
                        self.mod_name = value[1:-1]
                # If 'T' is encountered, set sflag
                elif token.tok_name[id] == "NAME" and value == "T":
                    self.sflag = 1
                # If sflag is set and '(' is found, set tflag
                elif self.sflag == 1:
                    if token.tok_name[id] == "LPAR":
                        self.tflag = 1
                        self.bracket = 1
                    self.sflag = 0
                # Check if inside 'T()'
                elif self.tflag == 1:
                    # If '(' is encountered, append it to outstr
                    if token.tok_name[id] == "LPAR":
                        self.bracket += 1
                        if self.bracket > 1:
                            self.outstr += "("
                    elif token.tok_name[id] == "RPAR":
                        self.bracket -= 1
                        # If it's not the last ')' of 'T()',
                        # append to outstr
                        if self.bracket > 0:
                            self.outstr += ")"
                        # If it's the last ')', add string to list
                        else:
                            if spmod == "core":
                                # core: strings not claimed by any module
                                if self.func_name != "modules" and \
                                   self.func_name not in modlist:
                                    strings.append((entry[2], self.outstr))
                            elif (self.func_name == "modules" and \
                                  self.mod_name == spmod) or \
                                 (self.func_name == spmod):
                                strings.append((entry[2], self.outstr))
                            self.outstr = ""
                            self.tflag = 0
                    # If we are inside 'T()', append value to outstr
                    elif self.bracket > 0:
                        self.outstr += value
# ---------------------------------------------------------------------
    def parseS3cfg(self, spmod, strings, entry, modlist):
        """
            Extract translatable strings from s3cfg.py.

            Walks a parser.st2list() parse tree depth-first; leaf entries
            are [token_id, value, line_number] triples.  Text wrapped in
            T(...) is accumulated (honouring nested parentheses) and
            appended to `strings` when the closing ')' of the T() call
            is reached.

            @param spmod: the requested module ("core" = strings that do
                          not belong to a specific module)
            @param strings: list of (line_number, string) tuples,
                            extended in-place
            @param entry: the current parse-tree node
            @param modlist: a list of all modules in Eden
        """
        if isinstance(entry, list):
            id = entry[0]
            value = entry[1]
            if isinstance(value, list):
                # Non-leaf node: recurse depth-first into the subtree
                parseS3cfg = self.parseS3cfg
                for element in entry:
                    parseS3cfg(spmod, strings, element, modlist)
            else:
                # If value is a function name, store it in func_name
                if self.fflag == 1:
                    self.func_name = value
                    self.fflag = 0
                # If value is 'def', set fflag to store func_name next
                elif token.tok_name[id] == "NAME" and value == "def":
                    self.fflag = 1
                # If 'T' is encountered, set sflag
                elif token.tok_name[id] == "NAME" and value == "T":
                    self.sflag = 1
                elif self.sflag == 1:
                    # 'T' only starts a capture if followed by '('
                    if token.tok_name[id] == "LPAR":
                        self.tflag = 1
                        self.bracket = 1
                    self.sflag = 0
                elif self.tflag == 1:
                    # Inside 'T(...)': track parenthesis nesting depth
                    if token.tok_name[id] == "LPAR":
                        self.bracket += 1
                        if self.bracket > 1:
                            self.outstr += "("
                    elif token.tok_name[id] == "RPAR":
                        self.bracket -= 1
                        if self.bracket > 0:
                            self.outstr += ")"
                        else:
                            # Closing ')' of T(): decide where the string goes.
                            # func_name is expected to look like
                            # "get_<module>_<setting>" - presumably set by the
                            # enclosing "def" handler above; verify with s3cfg.py
                            # If core module is requested
                            if spmod == "core":
                                # If extracted data doesn't belong
                                # to any other module, append to list
                                if "_" not in self.func_name or \
                                   self.func_name.split("_")[1] not in modlist:
                                    strings.append((entry[2], self.outstr))
                            # If 'module' in 'get_module_variable()'
                            # is the requested module, append to list
                            elif "_" in self.func_name and \
                                 self.func_name.split("_")[1] == spmod:
                                strings.append((entry[2], self.outstr))
                            self.outstr = ""
                            self.tflag = 0
                    elif self.bracket > 0:
                        # Any other token inside T(...) is part of the string
                        self.outstr += value
# ---------------------------------------------------------------------
    def parseMenu(self, spmod, strings, entry, level):
        """
            Extract translatable strings from menus.py.

            Walks a parser.st2list() parse tree depth-first; leaf entries
            are [token_id, value, line_number] triples.  Strings inside
            T(...) and string literals inside M(...) are collected.
            The class/function tracking (cflag/fflag/findent) is used to
            attribute strings to the S3OptionsMenu method (= module) in
            which they appear.

            @param spmod: the requested module ("core" = strings outside
                          the S3OptionsMenu class)
            @param strings: list of (line_number, string) tuples,
                            extended in-place
            @param entry: the current parse-tree node
            @param level: current tree depth (used as an indentation proxy
                          to recognise method definitions)
        """
        if isinstance(entry, list):
            id = entry[0]
            value = entry[1]
            if isinstance(value, list):
                # Non-leaf node: recurse depth-first, one level deeper
                parseMenu = self.parseMenu
                for element in entry:
                    parseMenu(spmod, strings, element, level + 1)
            else:
                # If value is a class name, store it in class_name
                if self.cflag == 1:
                    self.class_name = value
                    self.cflag = 0
                # If value is 'class', set cflag to store class name next
                elif token.tok_name[id] == "NAME" and value == "class":
                    self.cflag = 1
                elif self.fflag == 1:
                    # Here func_name is used to store the function names
                    # which are in 'S3OptionsMenu' class
                    self.func_name = value
                    self.fflag = 0
                # If value is "def" and it's the first function in the
                # S3OptionsMenu class or its indentation level is equal
                # to the first function in 'S3OptionsMenu class', then
                # set fflag and store the indentation level in findent
                elif token.tok_name[id] == "NAME" and value == "def" and \
                     (self.findent == -1 or level == self.findent):
                    if self.class_name == "S3OptionsMenu":
                        self.findent = level
                        self.fflag = 1
                    else:
                        # A def outside S3OptionsMenu: no current function
                        self.func_name = ""
                # If current element is 'T', set sflag
                elif token.tok_name[id] == "NAME" and value == "T":
                    self.sflag = 1
                elif self.sflag == 1:
                    # 'T' only starts a capture if followed by '('
                    if token.tok_name[id] == "LPAR":
                        self.tflag = 1
                        self.bracket = 1
                    self.sflag = 0
                # If inside 'T()', extract the data accordingly
                elif self.tflag == 1:
                    if token.tok_name[id] == "LPAR":
                        self.bracket += 1
                        if self.bracket > 1:
                            self.outstr += "("
                    elif token.tok_name[id] == "RPAR":
                        self.bracket -= 1
                        if self.bracket > 0:
                            self.outstr += ")"
                        else:
                            # If the requested module is 'core' and
                            # extracted data doesn't lie inside the
                            # S3OptionsMenu class, append it to list
                            if spmod == "core":
                                if self.func_name == "":
                                    strings.append((entry[2], self.outstr))
                            # If the function name (in S3OptionsMenu class)
                            # is equal to the module requested,
                            # then append it to list
                            elif self.func_name == spmod:
                                strings.append((entry[2], self.outstr))
                            self.outstr = ""
                            self.tflag = 0
                    elif self.bracket > 0:
                        # Any other token inside T(...) is part of the string
                        self.outstr += value
                else:
                    # Get strings inside 'M()'
                    # If value is 'M', set mflag
                    if token.tok_name[id] == "NAME" and value == "M":
                        self.mflag = 1
                    elif self.mflag == 1:
                        # If mflag is set and argument inside is a string,
                        # append it to list
                        if token.tok_name[id] == "STRING":
                            if spmod == "core":
                                if self.func_name == "":
                                    strings.append((entry[2], value))
                            elif self.func_name == spmod:
                                strings.append((entry[2], value))
                        # If current argument in 'M()' is of type arg = var
                        # or if ')' is found, unset mflag
                        elif token.tok_name[id] == "EQUAL" or \
                             token.tok_name[id] == "RPAR":
                            self.mflag = 0
# ---------------------------------------------------------------------
    def parseAll(self, strings, entry):
        """
            Extract all translatable strings from a file.

            Walks a parser.st2list() parse tree depth-first; leaf entries
            are [token_id, value, line_number] triples.  Unlike the
            module-specific parsers, every string inside T(...) or M(...)
            is collected, with no module filtering.

            @param strings: list of (line_number, string) tuples,
                            extended in-place
            @param entry: the current parse-tree node
        """
        if isinstance(entry, list):
            id = entry[0]
            value = entry[1]
            if isinstance(value, list):
                # Non-leaf node: recurse depth-first into the subtree
                parseAll = self.parseAll
                for element in entry:
                    parseAll(strings, element)
            else:
                # If current element is 'T', set sflag
                if token.tok_name[id] == "NAME" and value == "T":
                    self.sflag = 1
                elif self.sflag == 1:
                    # 'T' only starts a capture if followed by '('
                    if token.tok_name[id] == "LPAR":
                        self.tflag = 1
                        self.bracket = 1
                    self.sflag = 0
                # If inside 'T', extract data accordingly
                elif self.tflag == 1:
                    if token.tok_name[id] == "LPAR":
                        self.bracket += 1
                        if self.bracket > 1:
                            self.outstr += "("
                    elif token.tok_name[id] == "RPAR":
                        self.bracket -= 1
                        if self.bracket > 0:
                            self.outstr += ")"
                        else:
                            # Closing ')' of T(): emit the collected string
                            strings.append((entry[2], self.outstr))
                            self.outstr = ""
                            self.tflag = 0
                    elif self.bracket > 0:
                        # Any other token inside T(...) is part of the string
                        self.outstr += value
                else:
                    # If current element is 'M', set mflag
                    if token.tok_name[id] == "NAME" and value == "M":
                        self.mflag = 1
                    elif self.mflag == 1:
                        # If inside 'M()', extract string accordingly
                        if token.tok_name[id] == "STRING":
                            strings.append((entry[2], value))
                        elif token.tok_name[id] == "EQUAL" or \
                             token.tok_name[id] == "RPAR":
                            self.mflag = 0
# =============================================================================
class TranslateReadFiles:
    """
        Class to read code files and extract translatable strings
        from them, using the parsers in TranslateParseFiles.
    """

    # ---------------------------------------------------------------------
    @staticmethod
    def findstr(fileName, spmod, modlist):
        """
            Using the methods in TranslateParseFiles to extract the strings
            fileName -> the file to be used for extraction
            spmod -> the required module ("ALL" = every string in the file)
            modlist -> a list of all modules in Eden

            @return: list of (location, string) tuples, with
                     deployment_settings.* variables resolved to their
                     current values; None if the file cannot be opened,
                     [] if it cannot be parsed
        """
        try:
            f = open(fileName)
        except:
            # Fall back to a path relative to this module
            path = os.path.split(__file__)[0]
            fileName = os.path.join(path, fileName)
            try:
                f = open(fileName)
            except:
                return

        # Read all contents of file
        fileContent = f.read()
        f.close()

        # Remove CL-RF and NOEOL characters
        fileContent = "%s\n" % fileContent.replace("\r", "")

        try:
            st = parser.suite(fileContent)
        except:
            # Not valid Python source
            return []

        # Create a parse tree list for traversal
        stList = parser.st2list(st, line_info=1)

        P = TranslateParseFiles()

        # List which holds the extracted strings
        strings = []

        if spmod == "ALL":
            # If all strings are to be extracted, call ParseAll()
            parseAll = P.parseAll
            for element in stList:
                parseAll(strings, element)
        else:
            # Handle cases for special files which contain
            # strings belonging to different modules
            # NOTE(review): appname is assigned but never used here
            appname = current.request.application
            fileName = os.path.basename(fileName)
            if fileName == "s3menus.py":
                parseMenu = P.parseMenu
                for element in stList:
                    parseMenu(spmod, strings, element, 0)
            elif fileName == "s3cfg.py":
                parseS3cfg = P.parseS3cfg
                for element in stList:
                    parseS3cfg(spmod, strings, element, modlist)
            elif fileName in ("000_config.py", "config.py"):
                parseConfig = P.parseConfig
                for element in stList:
                    parseConfig(spmod, strings, element, modlist)

        # Extract strings from deployment_settings.variable() calls
        final_strings = []
        fsappend = final_strings.append
        settings = current.deployment_settings
        for (loc, s) in strings:
            if s[0] != '"' and s[0] != "'":
                # This is a variable, not a string literal
                if "settings." in s:
                    # Convert the call to a standard form
                    s = s.replace("current.deployment_settings", "settings")
                    s = s.replace("()", "")
                    l = s.split(".")
                    obj = settings

                    # Get the actual value by walking the attribute chain
                    for atr in l[1:]:
                        try:
                            obj = getattr(obj, atr)()
                        except:
                            current.log.warning("Can't find this deployment_setting, maybe a crud.settings", atr)
                        else:
                            s = obj
                    fsappend((loc, s))
                else:
                    #@ToDo : Get the value of non-settings variables
                    pass
            else:
                fsappend((loc, s))

        return final_strings

    # ---------------------------------------------------------------------
    @staticmethod
    def read_html_js(filename):
        """
            Function to read and extract strings from html/js files
            using regular expressions

            @param filename: the html/js file to scan
            @return: list of (linenumber, string) tuples for every
                     quoted literal found inside a T(...) call
        """
        import re
        # Matches any Python-style string literal following "T("
        PY_STRING_LITERAL_RE = r'(?<=[^\w]T\()(?P<name>'\
                               + r"[uU]?[rR]?(?:'''(?:[^']|'{1,2}(?!'))*''')|"\
                               + r"(?:'(?:[^'\\]|\\.)*')|"\
                               + r'(?:"""(?:[^"]|"{1,2}(?!"))*""")|'\
                               + r'(?:"(?:[^"\\]|\\.)*"))'
        regex_trans = re.compile(PY_STRING_LITERAL_RE, re.DOTALL)
        findall = regex_trans.findall

        html_js_file = open(filename)
        linecount = 0
        strings = []
        sappend = strings.append

        for line in html_js_file:
            linecount += 1
            occur = findall(line)
            for s in occur:
                sappend((linecount, s))

        html_js_file.close()
        return strings

    # ---------------------------------------------------------------------
    @staticmethod
    def get_user_strings():
        """
            Function to return the list of user-supplied strings

            @return: list of ("User supplied", string) tuples read from
                     uploads/user_strings.txt (empty if the file is absent)
        """
        user_file = os.path.join(current.request.folder, "uploads",
                                 "user_strings.txt")

        strings = []
        COMMENT = "User supplied"

        if os.path.exists(user_file):
            f = open(user_file, "r")
            for line in f:
                line = line.replace("\n", "").replace("\r", "")
                strings.append((COMMENT, line))
            f.close()

        return strings

    # ---------------------------------------------------------------------
    @staticmethod
    def merge_user_strings_file(newstrings):
        """
            Function to merge the existing file of user-supplied strings
            with newly uploaded strings

            @param newstrings: iterable of raw lines (including newline)
                               to append if not already present
        """
        user_file = os.path.join(current.request.folder, "uploads",
                                 "user_strings.txt")

        oldstrings = []
        oappend = oldstrings.append

        if os.path.exists(user_file):
            f = open(user_file, "r")
            for line in f:
                oappend(line)
            f.close()

        # Append user strings if not already present
        f = open(user_file, "a")
        for s in newstrings:
            if s not in oldstrings:
                f.write(s)
        f.close()

    # ---------------------------------------------------------------------
    @staticmethod
    def get_database_strings(all_template_flag):
        """
            Function to get database strings from csv files
            which are to be considered for translation.

            @param all_template_flag: truthy to scan every template's
                                      tasks.cfg, otherwise only the
                                      currently deployed template
            @return: list of ("path:linenumber", string) tuples
        """
        from s3import import S3BulkImporter

        # List of database strings
        database_strings = []
        template_list = []
        tappend = template_list.append
        base_dir = current.request.folder
        path = os.path

        # If all templates flag is set we look in all templates' tasks.cfg file
        if all_template_flag:
            template_dir = path.join(base_dir, "private", "templates")
            files = os.listdir(template_dir)
            # template_list will have the list of all templates
            for f in files:
                curFile = path.join(template_dir, f)
                baseFile = path.basename(curFile)
                if path.isdir(curFile):
                    tappend(baseFile)
        else:
            # Set current template.
            tappend(current.deployment_settings.base.template)

        # Use bulk importer class to parse tasks.cfg in template folder
        bi = S3BulkImporter()
        S = Strings()
        read_csv = S.read_csv
        for template in template_list:
            pth = path.join(base_dir, "private", "templates", template)
            if path.exists(path.join(pth, "tasks.cfg")) == False:
                continue
            bi.load_descriptor(pth)

            s3db = current.s3db
            for csv in bi.tasks:
                # Ignore special import files
                if csv[0] != 1:
                    continue

                # csv is in format: prefix, tablename, path of csv file
                # assuming represent.translate is always on primary key id
                translate = False
                fieldname = "%s_%s_id" % (csv[1], csv[2])
                if hasattr(s3db, fieldname) == False:
                    continue
                reusable_field = s3db.get(fieldname)
                if reusable_field:
                    represent = reusable_field.attr.represent
                    if hasattr(represent, "translate"):
                        translate = represent.translate

                # If translate attribute is set to True
                if translate:
                    if hasattr(represent, "fields") == False:
                        # Only name field is considered
                        fields = ["name"]
                    else:
                        # List of fields is retrieved from represent.fields
                        fields = represent.fields

                    # Consider it for translation (csv[3])
                    csv_path = csv[3]
                    try:
                        data = read_csv(csv_path)
                    except IOError:
                        # Phantom
                        continue
                    title_row = data[0]
                    idx = 0
                    idxlist = []
                    idxappend = idxlist.append
                    # Find the columns whose headers match the fields
                    for e in title_row:
                        if e.lower() in fields:
                            idxappend(idx)
                        idx += 1

                    if idxlist:
                        # Line number of string retrieved.
                        line_number = 1
                        for row in data[1:]:
                            line_number += 1
                            # If string is not empty
                            for idx in idxlist:
                                try:
                                    if row[idx] != "":
                                        loc = "%s:%s" % (csv_path, line_number)
                                        database_strings.append((loc, row[idx]))
                                except:
                                    current.log.error("CSV row incomplete", csv_path)

        return database_strings
# =============================================================================
class Strings:
    """ Class to manipulate strings and their files """

    # ---------------------------------------------------------------------
    @staticmethod
    def remove_quotes(Strings):
        """
            Function to remove single or double quotes around the strings

            @param Strings: list of 2-tuples of strings
            @return: a new list with one pair of matching surrounding
                     quotes stripped from each element
        """
        l = []
        lappend = l.append

        for (d1, d2) in Strings:
            if (d1[0] == '"' and d1[-1] == '"') or \
               (d1[0] == "'" and d1[-1] == "'"):
                d1 = d1[1:-1]
            if (d2[0] == '"' and d2[-1] == '"') or \
               (d2[0] == "'" and d2[-1] == "'"):
                d2 = d2[1:-1]
            lappend((d1, d2))

        return l

    # ---------------------------------------------------------------------
    @staticmethod
    def remove_duplicates(Strings):
        """
            Function to club all the duplicate strings into one row
            with ";" separated locations

            @param Strings: list of (location, string) tuples
            @return: list of (locations, string) tuples, one per unique
                     string, locations joined with ";"
        """
        uniq = {}
        appname = current.request.application

        for (loc, data) in Strings:
            uniq[data] = ""

        for (loc, data) in Strings:
            # Remove the prefix from the filename
            loc = loc.split(appname, 1)[1]

            if uniq[data] != "":
                uniq[data] = uniq[data] + ";" + loc
            else:
                uniq[data] = loc

        l = []
        lappend = l.append

        for data in uniq.keys():
            lappend((uniq[data], data))

        return l

    # ---------------------------------------------------------------------
    @staticmethod
    def remove_untranslated(lang_code):
        """
            Function to remove all untranslated strings from a lang_code.py

            @param lang_code: code of the web2py language file to rewrite
                              in-place
        """
        w2pfilename = os.path.join(current.request.folder, "languages",
                                   "%s.py" % lang_code)

        data = read_dict(w2pfilename)

        #try:
        #    # Python 2.7
        #    # - won't even compile
        #    data = {k: v for k, v in data.iteritems() if k != v}
        #except:
        # Python 2.6
        newdata = {}
        for k, v in data.iteritems():
            if k != v:
                # Bugfix: was "new_data[k] = v" (and "data = new_data"),
                # which raised NameError since only "newdata" exists
                newdata[k] = v
        data = newdata

        write_dict(w2pfilename, data)

    # ---------------------------------------------------------------------
    def export_file(self, langfile, modlist, filelist, filetype, all_template_flag):
        """
            Function to get the strings by module(s)/file(s), merge with
            those strings from existing w2p language file which are already
            translated and call the "write_xls()" method if the
            default filetype "xls" is chosen. If "po" is chosen, then the
            "write_po()" method is called.

            @param langfile: name of the web2py language file (e.g. "de.py")
            @param modlist: list of modules to export strings for
                            (extended in-place with core/dependent modules)
            @param filelist: list of individual files to export strings for
            @param filetype: "xls" or "po"
            @param all_template_flag: 1 to include strings of all templates
            @return: the constructed file contents (for download)
        """
        request = current.request
        settings = current.deployment_settings

        langcode = langfile[:-3]
        langfile = os.path.join(request.folder, "languages", langfile)

        # If the language file doesn't exist, create it
        if not os.path.exists(langfile):
            f = open(langfile, "w")
            f.write("")
            f.close()

        NewStrings = []
        A = TranslateAPI()

        if all_template_flag == 1:
            # Select All Templates
            A.grp.group_files(os.path.join(request.folder, "private", "templates"))
        else:
            # A specific template is selected
            template_folder = os.path.join(request.folder, "private", "templates", settings.get_template())
            A.grp.group_files(template_folder)

        R = TranslateReadFiles()

        # Select Modules

        # Core Modules are always included
        core_modules = ["auth", "default"]
        for mod in core_modules:
            modlist.append(mod)

        # appadmin and error are part of admin
        if "admin" in modlist:
            modlist.append("appadmin")
            modlist.append("error")

        # Select dependent modules
        models = current.models
        for mod in modlist:
            if hasattr(models, mod):
                obj = getattr(models, mod)
                # Currently only inv module has a depends list
                if hasattr(obj, "depends"):
                    for element in obj.depends:
                        if element not in modlist:
                            modlist.append(element)

        get_strings_by_module = A.get_strings_by_module
        for mod in modlist:
            NewStrings += get_strings_by_module(mod)

        # Retrieve strings in a file
        get_strings_by_file = A.get_strings_by_file
        for f in filelist:
            NewStrings += get_strings_by_file(f)

        # Remove quotes
        NewStrings = self.remove_quotes(NewStrings)
        # Add database strings
        NewStrings += R.get_database_strings(all_template_flag)
        # Add user-supplied strings
        NewStrings += R.get_user_strings()
        # Remove duplicates
        NewStrings = self.remove_duplicates(NewStrings)
        NewStrings.sort(key=lambda tup: tup[1])

        # Retrieve strings from existing w2p language file
        OldStrings = self.read_w2p(langfile)
        OldStrings.sort(key=lambda tup: tup[0])

        # Merge those strings which were already translated earlier
        # (both lists are sorted, so walk them in lockstep)
        Strings = []
        sappend = Strings.append
        i = 0
        lim = len(OldStrings)

        for (l, s) in NewStrings:
            while i < lim and OldStrings[i][0] < s:
                i += 1

            if i != lim and OldStrings[i][0] == s and \
               OldStrings[i][1].startswith("*** ") == False:
                sappend((l, s, OldStrings[i][1]))
            else:
                sappend((l, s, ""))

        if filetype == "xls":
            # Create excel file
            return self.write_xls(Strings, langcode)
        elif filetype == "po":
            # Create pootle file
            return self.write_po(Strings)

    # ---------------------------------------------------------------------
    @staticmethod
    def read_csv(fileName):
        """
            Function to read a CSV file and return a list of rows

            @param fileName: path of the CSV file
            @return: list of rows (each a list of column strings)
        """
        import csv
        csv.field_size_limit(2**20)  # 1 Mb

        data = []
        dappend = data.append
        f = open(fileName, "rb")
        transReader = csv.reader(f)
        for row in transReader:
            dappend(row)
        f.close()
        return data

    # ---------------------------------------------------------------------
    @staticmethod
    def read_w2p(fileName):
        """
            Function to read a web2py language file and
            return a list of translation string pairs

            @param fileName: path of the language file
            @return: list of (original, translation) tuples
        """
        data = read_dict(fileName)

        # Convert to list of tuples
        # @ToDo: Why?
        strings = []
        sappend = strings.append
        for s in data:
            sappend((s, data[s]))
        return strings

    # ---------------------------------------------------------------------
    @staticmethod
    def write_csv(fileName, data):
        """
            Function to write a list of rows into a csv file

            @param fileName: path of the CSV file to (over)write
            @param data: list of (location, source, target) rows
        """
        import csv

        f = open(fileName, "wb")

        # Quote all the elements while writing
        transWriter = csv.writer(f, delimiter=" ",
                                 quotechar='"', quoting = csv.QUOTE_ALL)
        transWriter.writerow(["location", "source", "target"])
        for row in data:
            transWriter.writerow(row)

        f.close()

    # ---------------------------------------------------------------------
    def write_po(self, data):
        """
            Returns a ".po" file constructed from given strings

            @param data: list of (location, source, target) rows
            @return: the po file contents (for download)
        """
        from subprocess import call
        from tempfile import NamedTemporaryFile
        from gluon.contenttype import contenttype

        f = NamedTemporaryFile(delete=False)
        csvfilename = "%s.csv" % f.name
        self.write_csv(csvfilename, data)

        g = NamedTemporaryFile(delete=False)
        pofilename = "%s.po" % g.name
        # Shell needed on Win32
        # @ToDo: Copy relevant parts of Translate Toolkit internally to avoid external dependencies
        call(["csv2po", "-i", csvfilename, "-o", pofilename], shell=True)

        # Modify headers to return the po file for download
        filename = "trans.po"
        disposition = "attachment; filename=\"%s\"" % filename
        response = current.response
        response.headers["Content-Type"] = contenttype(".po")
        response.headers["Content-disposition"] = disposition

        # Read the generated file back; close the handle
        # (the original code leaked the open file object)
        # @ToDo: also remove the temporary csv/po files
        h = open(pofilename, "r")
        try:
            return h.read()
        finally:
            h.close()

    # ---------------------------------------------------------------------
    def write_w2p(self, csvfiles, lang_code, option):
        """
            Function to merge multiple translated csv files into one
            and then merge/overwrite the existing w2p language file

            @param csvfiles: list of translated csv files
            @param lang_code: code of the language file to update
            @param option: "m" to merge with the existing file,
                           anything else to overwrite it
        """
        w2pfilename = os.path.join(current.request.folder, "languages",
                                   "%s.py" % lang_code)

        # Dictionary to store translated strings
        # with untranslated string as the key
        data = {}

        errors = 0
        for f in csvfiles:
            newdata = self.read_csv(f)
            # Test: 2 cols or 3?
            cols = len(newdata[0])
            if cols == 1:
                raise SyntaxError("CSV file needs to have at least 2 columns!")
            elif cols == 2:
                # 1st column is source, 2nd is target
                for row in newdata:
                    data[row[0]] = row[1]
            else:
                # 1st column is location, 2nd is source, 3rd is target
                for row in newdata:
                    data[row[1]] = row[2]

        if option == "m":
            # Merge strings with existing .py file
            keys = data.keys()
            olddata = read_dict(w2pfilename)
            for s in olddata:
                if s not in keys:
                    data[s] = olddata[s]

        write_dict(w2pfilename, data)

    # ---------------------------------------------------------------------
    @staticmethod
    def write_xls(Strings, langcode):
        """
            Function to create a spreadsheet (.xls file) of strings with
            location, original string and translated string as columns

            @param Strings: list of (location, source, target) rows
            @param langcode: the language code (used for the filename)
            @return: the xls file contents (for download)
        """
        try:
            from cStringIO import StringIO    # Faster, where available
        except:
            from StringIO import StringIO
        import xlwt

        from gluon.contenttype import contenttype

        # Define spreadsheet properties
        wbk = xlwt.Workbook("utf-8")
        sheet = wbk.add_sheet("Translate")
        style = xlwt.XFStyle()
        font = xlwt.Font()
        font.name = "Times New Roman"
        style.font = font

        sheet.write(0, 0, "location", style)
        sheet.write(0, 1, "source", style)
        sheet.write(0, 2, "target", style)

        row_num = 1

        # Write the data to spreadsheet
        for (loc, d1, d2) in Strings:
            d2 = d2.decode("string-escape").decode("utf-8")
            sheet.write(row_num, 0, loc, style)
            try:
                sheet.write(row_num, 1, d1, style)
            except:
                current.log.warning("Invalid source string!", loc)
                sheet.write(row_num, 1, "", style)
            sheet.write(row_num, 2, d2, style)
            row_num += 1

        # Set column width
        for colx in range(0, 3):
            sheet.col(colx).width = 15000

        # Initialize output
        output = StringIO()

        # Save the spreadsheet
        wbk.save(output)

        # Modify headers to return the xls file for download
        filename = "%s.xls" % langcode
        disposition = "attachment; filename=\"%s\"" % filename
        response = current.response
        response.headers["Content-Type"] = contenttype(".xls")
        response.headers["Content-disposition"] = disposition

        output.seek(0)
        return output.read()
# =============================================================================
class Pootle:
    """
        Class to synchronise a Pootle server's translation with the local
        one

        @ToDo: Before uploading file to Pootle, ensure all relevant
               untranslated strings are present.
    """

    # ---------------------------------------------------------------------
    def upload(self, lang_code, filename):
        """
            Upload a po language file to the Pootle server

            @param lang_code: the language code (e.g. "de" or "pt_br")
            @param filename: path of the .po file to upload
        """
        import mechanize
        import re

        br = mechanize.Browser()
        br.addheaders = [("User-agent", "Firefox")]

        br.set_handle_equiv(False)
        # Ignore robots.txt
        br.set_handle_robots(False)
        # Don't add Referer (sic) header
        br.set_handle_referer(False)

        settings = current.deployment_settings
        username = settings.get_L10n_pootle_username()
        if username is False:
            current.log.error("No login information found")
            return

        pootle_url = settings.get_L10n_pootle_url()
        login_url = "%saccounts/login" % pootle_url
        try:
            br.open(login_url)
        except:
            # Bugfix: message read "Connecton Error" (typo),
            # now consistent with download()
            current.log.error("Connection Error")
            return

        br.select_form("loginform")
        br.form["username"] = username
        br.form["password"] = settings.get_L10n_pootle_password()
        br.submit()

        current_url = br.geturl()
        if current_url.endswith("login/"):
            current.log.error("Login Error")
            return

        # NOTE(review): the pattern uses the *unconverted* lang_code while
        # the link below uses the converted one - looks intentional (the po
        # file on the server keeps the short code), but verify
        pattern = "<option value=(.+?)>%s.po" % lang_code

        # Process lang_code (if of form ab_cd --> convert to ab_CD)
        if len(lang_code) > 2:
            lang_code = "%s_%s" % (lang_code[:2], lang_code[-2:].upper())

        link = "%s%s/eden/" % (pootle_url, lang_code)

        page_source = br.open(link).read()
        # Use Regex to extract the value for field : "upload to"
        regex = re.search(pattern, page_source)
        result = regex.group(0)
        result = re.split(r'[="]', result)
        upload_code = result[2]

        try:
            br.select_form("uploadform")
            # If user is not admin then overwrite option is not there
            br.form.find_control(name="overwrite").value = ["overwrite"]
            br.form.find_control(name ="upload_to").value = [upload_code]
            # Bugfix: was "file_name" (undefined name -> NameError,
            # silently swallowed by the bare except below)
            br.form.add_file(open(filename), "text/plain", filename)
            br.submit()
        except:
            current.log.error("Error in Uploading form")
            return

    # ---------------------------------------------------------------------
    def download(self, lang_code):
        """
            Download a language file from Pootle and convert it to
            web2py format

            @param lang_code: the language code
            @return: tuple (postrings, pystrings) of sorted
                     (original, translation) lists, or False on error

            @ToDo: Allow selection between different variants of language files
        """
        import requests
        import zipfile
        try:
            from cStringIO import StringIO    # Faster, where available
        except:
            from StringIO import StringIO
        from subprocess import call
        from tempfile import NamedTemporaryFile

        code = lang_code
        # Process lang_code (if of form ab_cd --> convert to ab_CD)
        if len(lang_code) > 2:
            code = "%s_%s" % (lang_code[:2], lang_code[-2:].upper())

        pootle_url = current.deployment_settings.get_L10n_pootle_url()
        link = "%s%s/eden/export/zip" % (pootle_url, code)
        try:
            r = requests.get(link)
        except:
            current.log.error("Connection Error")
            return False

        # Bugfix: StringIO is the class itself here (imported with
        # "from cStringIO import StringIO"), so "StringIO.StringIO"
        # raised AttributeError
        zipf = zipfile.ZipFile(StringIO(r.content))
        zipf.extractall()
        file_name_po = "%s.po" % lang_code
        file_name_py = "%s.py" % lang_code

        f = NamedTemporaryFile(delete=False)
        w2pfilename = "%s.py" % f.name

        call(["po2web2py", "-i", file_name_po, "-o", w2pfilename])

        S = Strings()
        path = os.path.join(current.request.folder, "languages", file_name_py)
        pystrings = S.read_w2p(path)
        pystrings.sort(key=lambda tup: tup[0])

        postrings = S.read_w2p(w2pfilename)
        # Remove untranslated strings
        postrings = [tup for tup in postrings if tup[0] != tup[1]]
        postrings.sort(key=lambda tup: tup[0])

        os.unlink(file_name_po)
        os.unlink(w2pfilename)
        return (postrings, pystrings)

    # ---------------------------------------------------------------------
    def merge_strings(self, postrings, pystrings, preference):
        """
            Merge strings from a PO file and a Py file

            @param postrings: sorted list of (original, translation)
                              tuples from Pootle (modified in-place)
            @param pystrings: sorted list of (original, translation)
                              tuples from web2py (modified in-place)
            @param preference: True to give Pootle's translations
                               preference, False for web2py's
            @return: pystrings if preference is True, else postrings
        """
        lim_po = len(postrings)
        lim_py = len(pystrings)
        i = 0
        j = 0

        # Store strings which are missing from pootle
        extra = []
        eappend = extra.append

        # Both lists are sorted by original string: merge in lockstep
        while i < lim_py and j < lim_po:
            if pystrings[i][0] < postrings[j][0]:
                if preference == False:
                    eappend(pystrings[i])
                i += 1
            elif pystrings[i][0] > postrings[j][0]:
                j += 1

            # pystrings[i] == postrings[j]
            else:
                # Pootle is being given preference
                if preference:
                    # Check if string is not empty
                    if postrings[j][1] and not postrings[j][1].startswith("***"):
                        pystrings[i] = postrings[j]
                # Py is being given preference
                else:
                    if pystrings[i][1] and not pystrings[i][1].startswith("***"):
                        postrings[j] = pystrings[i]
                i += 1
                j += 1

        if preference:
            return pystrings

        else:
            # Add strings which were left
            while i < lim_py:
                extra.append(pystrings[i])
                i += 1

            # Add extra strings to Pootle list
            for st in extra:
                postrings.append(st)

            postrings.sort(key=lambda tup: tup[0])
            return postrings

    # ---------------------------------------------------------------------
    def merge_pootle(self, preference, lang_code):
        """
            Synchronise the local language file with the Pootle server

            @param preference: True to prefer Pootle's translations,
                               False to prefer the local ones
            @param lang_code: the language code
        """
        # Returns a tuple (postrings, pystrings)
        ret = self.download(lang_code)
        if not ret:
            return

        from subprocess import call
        from tempfile import NamedTemporaryFile

        # Returns pystrings if preference was True else returns postrings
        ret = self.merge_strings(ret[0], ret[1], preference)

        S = Strings()

        data = []
        dappend = data.append

        temp_csv = NamedTemporaryFile(delete=False)
        csvfilename = "%s.csv" % temp_csv.name

        if preference:
            # Only python file has been changed
            for i in ret:
                dappend(("", i[0], i[1].decode("string-escape")))

            S.write_csv(csvfilename, data)
            # overwrite option
            S.write_w2p([csvfilename], lang_code, "o")

            os.unlink(csvfilename)

        else:
            # Only Pootle file has been changed
            for i in ret:
                dappend(("", i[0], i[1].decode("string-escape")))

            S.write_csv(csvfilename, data)

            temp_po = NamedTemporaryFile(delete=False)
            pofilename = "%s.po" % temp_po.name
            # Shell needed on Win32
            # @ToDo: Copy relevant parts of Translate Toolkit internally to avoid external dependencies
            call(["csv2po", "-i", csvfilename, "-o", pofilename], shell=True)

            self.upload(lang_code, pofilename)

            # Clean up extra created files
            os.unlink(csvfilename)
            os.unlink(pofilename)
# =============================================================================
class TranslateReportStatus(object):
    """
        Class to report the percentage of translated strings for
        each module for a given language.
    """

    # -------------------------------------------------------------------------
    @classmethod
    def create_master_file(cls):
        """
            Create master file of strings and their distribution in modules

            Pickles (all_strings, {module: string indices}) into
            uploads/temp.pkl and marks all per-language string counts
            in the translate_percentage table as dirty.
        """
        try:
            import cPickle as pickle
        except:
            import pickle

        # Instantiate the translateAPI
        api = TranslateAPI()

        # Generate list of modules
        modules = api.get_modules()
        modules.append("core")

        # The list of all strings
        all_strings = []
        addstring = all_strings.append

        # Dictionary of {module: indices of strings used in this module}
        indices = {}

        # Helper dict for fast lookups
        string_indices = {}

        index = 0
        get_strings_by_module = api.get_strings_by_module
        for module in modules:
            module_indices = []
            addindex = module_indices.append

            strings = get_strings_by_module(module)
            for (origin, string) in strings:
                # Remove outermost quotes around the string
                if (string[0] == '"' and string[-1] == '"') or\
                   (string[0] == "'" and string[-1] == "'"):
                    string = string[1:-1]

                string_index = string_indices.get(string)
                if string_index is None:
                    # New string: assign it the next index
                    string_indices[string] = index
                    addstring(string)
                    addindex(index)
                    index += 1
                else:
                    # Already seen: reference the existing index
                    addindex(string_index)

            indices[module] = module_indices

        # Save all_strings and string_dict as pickle objects in a file
        data_file = os.path.join(current.request.folder,
                                 "uploads",
                                 "temp.pkl")
        f = open(data_file, "wb")
        pickle.dump(all_strings, f)
        pickle.dump(indices, f)
        f.close()

        # Mark all string counts as dirty
        ptable = current.s3db.translate_percentage
        current.db(ptable.id > 0).update(dirty=True)

    # -------------------------------------------------------------------------
    @classmethod
    def update_string_counts(cls, lang_code):
        """
            Update the translation percentages for all modules for a given
            language.

            @param lang_code: the language code

            @ToDo: Generate fresh .py files with all relevant strings for this
                   (since we don't store untranslated strings)
        """
        try:
            import cPickle as pickle
        except:
            import pickle

        base_dir = current.request.folder

        # Read the language file
        langfile = "%s.py" % lang_code
        langfile = os.path.join(base_dir, "languages", langfile)
        lang_strings = read_dict(langfile)

        # Retrieve the data stored in master file
        data_file = os.path.join(base_dir, "uploads", "temp.pkl")
        f = open(data_file, "rb")
        all_strings = pickle.load(f)
        string_dict = pickle.load(f)
        f.close()

        db = current.db
        ptable = current.s3db.translate_percentage

        translated = set()
        addindex = translated.add
        for index, string in enumerate(all_strings):
            translation = lang_strings.get(string)
            # "*** " prefix marks an untranslated placeholder
            if translation is None or translation[:4] == "*** ":
                continue
            # en-gb translations are identical to the source by design
            elif translation != string or lang_code == "en-gb":
                addindex(index)

        for module, indices in string_dict.items():
            all_indices = set(indices)
            num_untranslated = len(all_indices - translated)
            num_translated = len(all_indices) - num_untranslated

            data = dict(code = lang_code,
                        module = module,
                        translated = num_translated,
                        untranslated = num_untranslated,
                        dirty=False)

            # Upsert the per-module count for this language
            query = (ptable.code == lang_code) & \
                    (ptable.module == module)
            record = db(query).select(ptable._id, limitby=(0, 1)).first()
            if record:
                record.update_record(**data)
            else:
                ptable.insert(**data)

        return

    # -------------------------------------------------------------------------
    @classmethod
    def get_translation_percentages(cls, lang_code):
        """
            Get the percentages of translated strings per module for
            the given language code.

            @param lang_code: the language code
            @return: dict {module: percentage}, plus a "complete_file"
                     entry for the whole language file
        """
        pickle_file = os.path.join(current.request.folder,
                                   "uploads",
                                   "temp.pkl")
        # If master file doesn't exist, create it
        if not os.path.exists(pickle_file):
            cls.create_master_file()

        db = current.db
        ptable = current.s3db.translate_percentage

        query = (ptable.code == lang_code)
        fields = ("dirty", "translated", "untranslated", "module")

        rows = db(query).select(*fields)
        if not rows or rows.first().dirty:
            # Update the string counts
            cls.update_string_counts(lang_code)
            rows = db(query).select(*fields)

        percentage = {}
        total_strings = 0
        total_translated = 0
        total_untranslated = 0
        for row in rows:
            num_translated = row.translated
            num_untranslated = row.untranslated
            total_strings += num_translated + num_untranslated
            if not num_untranslated:
                percentage[row.module] = 100
            else:
                total = num_translated + num_untranslated
                total_translated += num_translated
                total_untranslated += num_untranslated
                percentage[row.module] = \
                        round((float(num_translated) / total) * 100, 2)

        if not total_untranslated:
            percentage["complete_file"] = 100
        else:
            percentage["complete_file"] = \
                round((float(total_translated) / (total_strings)) * 100, 2)
        return percentage
# END =========================================================================
| collective/eden | modules/s3/s3translate.py | Python | mit | 64,871 |
# Re-export the public components of the detachment-limited erosion package.
from .generate_detachment_ltd_erosion import DetachmentLtdErosion
from .generate_erosion_by_depth_slope import DepthSlopeProductErosion

# Names exported by "from ... import *".
__all__ = ["DetachmentLtdErosion", "DepthSlopeProductErosion"]
| landlab/landlab | landlab/components/detachment_ltd_erosion/__init__.py | Python | mit | 200 |
#!/usr/bin/env python
from __future__ import division, print_function, absolute_import
import numpy as np
from numpy.testing import (assert_allclose, assert_, assert_raises,
assert_array_equal)
import pywt
# Check that float32, float64, complex64, complex128 are preserved.
# Other real types get converted to float64.
# complex256 gets converted to complex128
dtypes_in = [np.int8, np.float16, np.float32, np.float64, np.complex64,
             np.complex128]
dtypes_out = [np.float64, np.float32, np.float32, np.float64, np.complex64,
              np.complex128]

# test complex256 as well if it is available
try:
    dtypes_in += [np.complex256, ]
    dtypes_out += [np.complex128, ]
except AttributeError:
    # np.complex256 only exists on platforms with an extended-precision
    # long double; skip it elsewhere
    pass
def test_dwt_idwt_basic():
    """Single-level db2 dwt matches reference coefficients and round-trips."""
    x = [3, 7, 1, 1, -2, 5, 4, 6]
    cA, cD = pywt.dwt(x, 'db2')
    # Reference coefficient values (regression values for 'db2')
    cA_expect = [5.65685425, 7.39923721, 0.22414387, 3.33677403, 7.77817459]
    cD_expect = [-2.44948974, -1.60368225, -4.44140056, -0.41361256,
                 1.22474487]
    assert_allclose(cA, cA_expect)
    assert_allclose(cD, cD_expect)

    x_roundtrip = pywt.idwt(cA, cD, 'db2')
    assert_allclose(x_roundtrip, x, rtol=1e-10)

    # mismatched dtypes OK
    x_roundtrip2 = pywt.idwt(cA.astype(np.float64), cD.astype(np.float32),
                             'db2')
    assert_allclose(x_roundtrip2, x, rtol=1e-7, atol=1e-7)
    assert_(x_roundtrip2.dtype == np.float64)
def test_idwt_mixed_complex_dtype():
    """idwt round-trips complex data and promotes mixed complex dtypes."""
    x = np.arange(8).astype(float)
    x = x + 1j*x[::-1]
    cA, cD = pywt.dwt(x, 'db2')

    x_roundtrip = pywt.idwt(cA, cD, 'db2')
    assert_allclose(x_roundtrip, x, rtol=1e-10)

    # mismatched dtypes OK
    x_roundtrip2 = pywt.idwt(cA.astype(np.complex128), cD.astype(np.complex64),
                             'db2')
    assert_allclose(x_roundtrip2, x, rtol=1e-7, atol=1e-7)
    assert_(x_roundtrip2.dtype == np.complex128)
def test_dwt_idwt_dtypes():
    """dwt/idwt must map each input dtype to the documented output dtype."""
    wavelet = pywt.Wavelet('haar')
    for dtype_input, dtype_expected in zip(dtypes_in, dtypes_out):
        data = np.ones(4, dtype=dtype_input)
        errmsg = "wrong dtype returned for {0} input".format(dtype_input)

        approx, detail = pywt.dwt(data, wavelet)
        assert_(approx.dtype == detail.dtype == dtype_expected,
                "dwt: " + errmsg)

        reconstructed = pywt.idwt(approx, detail, wavelet)
        assert_(reconstructed.dtype == dtype_expected, "idwt: " + errmsg)
def test_dwt_idwt_basic_complex():
    """db2 dwt of complex data: coefficients scale linearly and round-trip."""
    x = np.asarray([3, 7, 1, 1, -2, 5, 4, 6])
    x = x + 0.5j*x

    cA, cD = pywt.dwt(x, 'db2')
    # Same reference values as the real case, scaled by (1 + 0.5j)
    cA_expect = np.asarray([5.65685425, 7.39923721, 0.22414387, 3.33677403,
                            7.77817459])
    cA_expect = cA_expect + 0.5j*cA_expect
    cD_expect = np.asarray([-2.44948974, -1.60368225, -4.44140056, -0.41361256,
                            1.22474487])
    cD_expect = cD_expect + 0.5j*cD_expect
    assert_allclose(cA, cA_expect)
    assert_allclose(cD, cD_expect)

    x_roundtrip = pywt.idwt(cA, cD, 'db2')
    assert_allclose(x_roundtrip, x, rtol=1e-10)
def test_dwt_idwt_partial_complex():
    """idwt with only cA or only cD reconstructs the two additive parts."""
    x = np.asarray([3, 7, 1, 1, -2, 5, 4, 6])
    x = x + 0.5j*x

    cA, cD = pywt.dwt(x, 'haar')
    cA_rec_expect = np.array([5.0+2.5j, 5.0+2.5j, 1.0+0.5j, 1.0+0.5j,
                              1.5+0.75j, 1.5+0.75j, 5.0+2.5j, 5.0+2.5j])
    cA_rec = pywt.idwt(cA, None, 'haar')
    assert_allclose(cA_rec, cA_rec_expect)

    cD_rec_expect = np.array([-2.0-1.0j, 2.0+1.0j, 0.0+0.0j, 0.0+0.0j,
                              -3.5-1.75j, 3.5+1.75j, -1.0-0.5j, 1.0+0.5j])
    cD_rec = pywt.idwt(None, cD, 'haar')
    assert_allclose(cD_rec, cD_rec_expect)

    # partial reconstructions sum back to the original signal
    assert_allclose(cA_rec + cD_rec, x)
def test_dwt_wavelet_kwd():
    """dwt accepts the wavelet as a keyword argument (Wavelet object)."""
    x = np.array([3, 7, 1, 1, -2, 5, 4, 6])
    w = pywt.Wavelet('sym3')
    cA, cD = pywt.dwt(x, wavelet=w, mode='constant')
    # Reference values for sym3 with 'constant' edge mode
    cA_expect = [4.38354585, 3.80302657, 7.31813271, -0.58565539, 4.09727044,
                 7.81994027]
    cD_expect = [-1.33068221, -2.78795192, -3.16825651, -0.67715519,
                 -0.09722957, -0.07045258]
    assert_allclose(cA, cA_expect)
    assert_allclose(cD, cD_expect)
def test_dwt_coeff_len():
    """dwt_coeff_len accepts a filter length or a Wavelet equivalently;
    only 'periodization' yields the shorter coefficient length."""
    data = np.array([3, 7, 1, 1, -2, 5, 4, 6])
    wavelet = pywt.Wavelet('sym3')
    all_modes = pywt.Modes.modes

    expected = [4 if mode == 'periodization' else 6 for mode in all_modes]

    # Passing the decomposition filter length explicitly ...
    lengths_from_int = [pywt.dwt_coeff_len(len(data), wavelet.dec_len, mode)
                        for mode in all_modes]
    assert_allclose(lengths_from_int, expected)

    # ... must give the same result as passing the Wavelet object.
    lengths_from_wavelet = [pywt.dwt_coeff_len(len(data), wavelet, mode)
                            for mode in all_modes]
    assert_allclose(lengths_from_wavelet, expected)
def test_idwt_none_input():
    """Passing None for cA or cD equals passing all-zero coefficients;
    passing None for both is rejected."""
    coeffs = [1, 2, 0, 1]
    zeros = [0, 0, 0, 0]

    from_none = pywt.idwt(coeffs, None, 'db2', 'symmetric')
    from_zeros = pywt.idwt(coeffs, zeros, 'db2', 'symmetric')
    assert_allclose(from_none, from_zeros, rtol=1e-15, atol=1e-15)

    from_none = pywt.idwt(None, coeffs, 'db2', 'symmetric')
    from_zeros = pywt.idwt(zeros, coeffs, 'db2', 'symmetric')
    assert_allclose(from_none, from_zeros, rtol=1e-15, atol=1e-15)

    # Only one argument at a time can be None
    assert_raises(ValueError, pywt.idwt, None, None, 'db2', 'symmetric')
def test_idwt_invalid_input():
    """idwt rejects coefficient arrays shorter than the filter requires."""
    # Minimum coefficient length is 4 for 'db4'; three values must raise.
    too_short = [1, 2, 4]
    assert_raises(ValueError, pywt.idwt, too_short, [4, 1, 3], 'db4',
                  'symmetric')
def test_dwt_single_axis():
    """A 2-D dwt along axis -1 equals independent row-by-row 1-D dwt."""
    rows = [[3, 7, 1, 1],
            [-2, 5, 4, 6]]

    cA, cD = pywt.dwt(rows, 'db2', axis=-1)

    for idx, row in enumerate(rows):
        cA_row, cD_row = pywt.dwt(row, 'db2')
        assert_allclose(cA[idx], cA_row)
        assert_allclose(cD[idx], cD_row)
def test_idwt_single_axis():
    """idwt along axis -1 reconstructs each row of a 2-D (complex) input."""
    x = [[3, 7, 1, 1],
         [-2, 5, 4, 6]]

    x = np.asarray(x)
    x = x + 1j*x   # test with complex data
    cA, cD = pywt.dwt(x, 'db2', axis=-1)

    x0 = pywt.idwt(cA[0], cD[0], 'db2', axis=-1)
    x1 = pywt.idwt(cA[1], cD[1], 'db2', axis=-1)

    assert_allclose(x[0], x0)
    assert_allclose(x[1], x1)
def test_dwt_invalid_input():
    """dwt raises ValueError when the input is too short for the mode."""
    x = np.arange(1)
    assert_raises(ValueError, pywt.dwt, x, 'db2', 'reflect')
    assert_raises(ValueError, pywt.dwt, x, 'haar', 'antireflect')
def test_dwt_axis_arg():
    """For a 2-D input, axis=1 and axis=-1 select the same axis."""
    data = [[3, 7, 1, 1],
            [-2, 5, 4, 6]]

    from_negative = pywt.dwt(data, 'db2', axis=-1)
    from_positive = pywt.dwt(data, 'db2', axis=1)

    for coeff_neg, coeff_pos in zip(from_negative, from_positive):
        assert_allclose(coeff_neg, coeff_pos)
def test_dwt_axis_invalid_input():
    """dwt raises ValueError when the (default) transform axis is too short."""
    x = np.ones((3,1))
    assert_raises(ValueError, pywt.dwt, x, 'db2', 'reflect')
def test_idwt_axis_arg():
    """idwt gives identical results for axis=1 and axis=-1 on 2-D input."""
    x = [[3, 7, 1, 1],
         [-2, 5, 4, 6]]

    cA, cD = pywt.dwt(x, 'db2', axis=1)

    x_ = pywt.idwt(cA, cD, 'db2', axis=-1)
    x = pywt.idwt(cA, cD, 'db2', axis=1)

    assert_allclose(x_, x)
def test_dwt_idwt_axis_excess():
    """dwt/idwt raise ValueError for an axis beyond the input's rank."""
    x = [[3, 7, 1, 1],
         [-2, 5, 4, 6]]
    # can't transform over axes that aren't there
    assert_raises(ValueError,
                  pywt.dwt, x, 'db2', 'symmetric', axis=2)

    assert_raises(ValueError,
                  pywt.idwt, [1, 2, 4], [4, 1, 3], 'db2', 'symmetric', axis=1)
def test_error_on_continuous_wavelet():
    """dwt/idwt reject continuous wavelets (by name or by object)."""
    # A ValueError is raised if a Continuous wavelet is selected
    data = np.ones((32, ))
    for cwave in ['morl', pywt.DiscreteContinuousWavelet('morl')]:
        assert_raises(ValueError, pywt.dwt, data, cwave)

        cA, cD = pywt.dwt(data, 'db1')
        assert_raises(ValueError, pywt.idwt, cA, cD, cwave)
def test_dwt_zero_size_axes():
    """dwt raises ValueError for empty inputs (1-D and n-D code paths)."""
    # raise on empty input array
    assert_raises(ValueError, pywt.dwt, [], 'db2')

    # >1D case uses a different code path so check there as well
    x = np.ones((1, 4))[0:0, :]  # 2D with a size zero axis
    assert_raises(ValueError, pywt.dwt, x, 'db2', axis=0)
def test_pad_1d():
    """pywt.pad: every 1-D signal-extension mode against reference output,
    and equivalence of scalar/tuple/list pad_width spellings."""
    x = [1, 2, 3]
    assert_array_equal(pywt.pad(x, (4, 6), 'periodization'),
                       [1, 2, 3, 3, 1, 2, 3, 3, 1, 2, 3, 3, 1, 2])
    assert_array_equal(pywt.pad(x, (4, 6), 'periodic'),
                       [3, 1, 2, 3, 1, 2, 3, 1, 2, 3, 1, 2, 3])
    assert_array_equal(pywt.pad(x, (4, 6), 'constant'),
                       [1, 1, 1, 1, 1, 2, 3, 3, 3, 3, 3, 3, 3])
    assert_array_equal(pywt.pad(x, (4, 6), 'zero'),
                       [0, 0, 0, 0, 1, 2, 3, 0, 0, 0, 0, 0, 0])
    assert_array_equal(pywt.pad(x, (4, 6), 'smooth'),
                       [-3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
    assert_array_equal(pywt.pad(x, (4, 6), 'symmetric'),
                       [3, 3, 2, 1, 1, 2, 3, 3, 2, 1, 1, 2, 3])
    assert_array_equal(pywt.pad(x, (4, 6), 'antisymmetric'),
                       [3, -3, -2, -1, 1, 2, 3, -3, -2, -1, 1, 2, 3])
    assert_array_equal(pywt.pad(x, (4, 6), 'reflect'),
                       [1, 2, 3, 2, 1, 2, 3, 2, 1, 2, 3, 2, 1])
    assert_array_equal(pywt.pad(x, (4, 6), 'antireflect'),
                       [-3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9])

    # equivalence of various pad_width formats
    assert_array_equal(pywt.pad(x, 4, 'periodic'),
                       pywt.pad(x, (4, 4), 'periodic'))

    assert_array_equal(pywt.pad(x, (4, ), 'periodic'),
                       pywt.pad(x, (4, 4), 'periodic'))

    assert_array_equal(pywt.pad(x, [(4, 4)], 'periodic'),
                       pywt.pad(x, (4, 4), 'periodic'))
def test_pad_errors():
    """pywt.pad rejects negative widths, wrong-length widths, bad modes."""
    data = [1, 2, 3]

    # negative pad width
    assert_raises(ValueError, pywt.pad, data, -2, 'periodic')

    # wrong length pad width
    assert_raises(ValueError, pywt.pad, data, (1, 1, 1), 'periodic')

    # invalid mode name
    assert_raises(ValueError, pywt.pad, data, 2, 'bad_mode')
def test_pad_nd():
    """n-D pywt.pad equals applying the 1-D pad separably along each axis."""
    for ndim in [2, 3]:
        x = np.arange(4**ndim).reshape((4, ) * ndim)
        if ndim == 2:
            pad_widths = [(2, 1), (2, 3)]
        else:
            pad_widths = [(2, 1), ] * ndim
        for mode in pywt.Modes.modes:
            xp = pywt.pad(x, pad_widths, mode)

            # expected result is the same as applying along axes separably
            xp_expected = x.copy()
            for ax in range(ndim):
                xp_expected = np.apply_along_axis(pywt.pad,
                                                  ax,
                                                  xp_expected,
                                                  pad_widths=[pad_widths[ax]],
                                                  mode=mode)
            assert_array_equal(xp, xp_expected)
| PyWavelets/pywt | pywt/tests/test_dwt_idwt.py | Python | mit | 10,352 |
# Time: O(n)
# Space: O(1)
#
# Say you have an array for which the ith element is
# the price of a given stock on day i.
#
# Design an algorithm to find the maximum profit.
# You may complete as many transactions as you like
# (ie, buy one and sell one share of the stock multiple times).
# However, you may not engage in multiple transactions at the same time
# (ie, you must sell the stock before you buy again).
class Solution:
    # @param prices, a list of integer
    # @return an integer
    def maxProfit(self, prices):
        """Return the maximum profit with unlimited transactions.

        Greedy: with no transaction limit, the optimum is simply the
        sum of every positive day-to-day price difference.
        Handles empty and single-element inputs (profit 0).
        """
        profit = 0
        # range() instead of xrange() keeps this Python 2/3 compatible
        for i in range(len(prices) - 1):
            profit += max(0, prices[i + 1] - prices[i])
        return profit

    def maxProfit2(self, prices):
        """One-expression variant of maxProfit (same greedy idea)."""
        # range(len(prices) - 1) avoids the needless list copy prices[:-1]
        return sum(map(lambda x: max(prices[x + 1] - prices[x], 0),
                       range(len(prices) - 1)))
if __name__ == "__main__":
    # Quick manual check; expected output: 7.
    # print(...) works in both Python 2 (parenthesized expression) and 3.
    result = Solution().maxProfit([3, 2, 1, 4, 2, 5, 6])
    print(result)
| yiwen-luo/LeetCode | Python/best-time-to-buy-and-sell-stock-ii.py | Python | mit | 912 |
# -*- coding: utf-8 -*-
# Natural Language Toolkit: IBM Model 3
#
# Copyright (C) 2001-2013 NLTK Project
# Authors: Chin Yee Lee, Hengfeng Li, Ruxin Hou, Calvin Tanujaya Lim
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
"""
Translation model that considers how a word can be aligned to
multiple words in another language.
IBM Model 3 improves on Model 2 by directly modeling the phenomenon
where a word in one language may be translated into zero or more words
in another. This is expressed by the fertility probability,
n(phi | source word).
If a source word translates into more than one word, it is possible to
generate sentences that have the same alignment in multiple ways. This
is modeled by a distortion step. The distortion probability, d(j|i,l,m),
predicts a target word position, given its aligned source word's
position. The distortion probability replaces the alignment probability
of Model 2.
The fertility probability is not applicable for NULL. Target words that
align to NULL are assumed to be distributed uniformly in the target
sentence. The existence of these words is modeled by p1, the probability
that a target word produced by a real source word requires another
target word that is produced by NULL.
The EM algorithm used in Model 3 is:
E step - In the training data, collect counts, weighted by prior
probabilities.
(a) count how many times a source language word is translated
into a target language word
(b) count how many times a particular position in the target
sentence is aligned to a particular position in the source
sentence
(c) count how many times a source word is aligned to phi number
of target words
(d) count how many times NULL is aligned to a target word
M step - Estimate new probabilities based on the counts from the E step
Because there are too many possible alignments, only the most probable
ones are considered. First, the best alignment is determined using prior
probabilities. Then, a hill climbing approach is used to find other good
candidates.
Notations:
i: Position in the source sentence
Valid values are 0 (for NULL), 1, 2, ..., length of source sentence
j: Position in the target sentence
Valid values are 1, 2, ..., length of target sentence
l: Number of words in the source sentence, excluding NULL
m: Number of words in the target sentence
s: A word in the source language
t: A word in the target language
phi: Fertility, the number of target words produced by a source word
p1: Probability that a target word produced by a source word is
accompanied by another target word that is aligned to NULL
p0: 1 - p1
References:
Philipp Koehn. 2010. Statistical Machine Translation.
Cambridge University Press, New York.
Peter E Brown, Stephen A. Della Pietra, Vincent J. Della Pietra, and
Robert L. Mercer. 1993. The Mathematics of Statistical Machine
Translation: Parameter Estimation. Computational Linguistics, 19 (2),
263-311.
"""
from __future__ import division
from collections import defaultdict
from math import factorial
from nltk.translate import AlignedSent
from nltk.translate import Alignment
from nltk.translate import IBMModel
from nltk.translate import IBMModel2
from nltk.translate.ibm_model import Counts
import warnings
class IBMModel3(IBMModel):
    """
    Translation model that considers how a word can be aligned to
    multiple words in another language

    >>> bitext = []
    >>> bitext.append(AlignedSent(['klein', 'ist', 'das', 'haus'], ['the', 'house', 'is', 'small']))
    >>> bitext.append(AlignedSent(['das', 'haus', 'war', 'ja', 'groß'], ['the', 'house', 'was', 'big']))
    >>> bitext.append(AlignedSent(['das', 'buch', 'ist', 'ja', 'klein'], ['the', 'book', 'is', 'small']))
    >>> bitext.append(AlignedSent(['ein', 'haus', 'ist', 'klein'], ['a', 'house', 'is', 'small']))
    >>> bitext.append(AlignedSent(['das', 'haus'], ['the', 'house']))
    >>> bitext.append(AlignedSent(['das', 'buch'], ['the', 'book']))
    >>> bitext.append(AlignedSent(['ein', 'buch'], ['a', 'book']))
    >>> bitext.append(AlignedSent(['ich', 'fasse', 'das', 'buch', 'zusammen'], ['i', 'summarize', 'the', 'book']))
    >>> bitext.append(AlignedSent(['fasse', 'zusammen'], ['summarize']))

    >>> ibm3 = IBMModel3(bitext, 5)

    >>> print(round(ibm3.translation_table['buch']['book'], 3))
    1.0
    >>> print(round(ibm3.translation_table['das']['book'], 3))
    0.0
    >>> print(round(ibm3.translation_table['ja'][None], 3))
    1.0

    >>> print(round(ibm3.distortion_table[1][1][2][2], 3))
    1.0
    >>> print(round(ibm3.distortion_table[1][2][2][2], 3))
    0.0
    >>> print(round(ibm3.distortion_table[2][2][4][5], 3))
    0.75

    >>> print(round(ibm3.fertility_table[2]['summarize'], 3))
    1.0
    >>> print(round(ibm3.fertility_table[1]['book'], 3))
    1.0

    >>> print(ibm3.p1)
    0.054...

    >>> test_sentence = bitext[2]
    >>> test_sentence.words
    ['das', 'buch', 'ist', 'ja', 'klein']
    >>> test_sentence.mots
    ['the', 'book', 'is', 'small']
    >>> test_sentence.alignment
    Alignment([(0, 0), (1, 1), (2, 2), (3, None), (4, 3)])
    """

    def __init__(self, sentence_aligned_corpus, iterations,
                 probability_tables=None):
        """
        Train on ``sentence_aligned_corpus`` and create a lexical
        translation model, a distortion model, a fertility model, and a
        model for generating NULL-aligned words.

        Translation direction is from ``AlignedSent.mots`` to
        ``AlignedSent.words``.

        :param sentence_aligned_corpus: Sentence-aligned parallel corpus
        :type sentence_aligned_corpus: list(AlignedSent)

        :param iterations: Number of iterations to run training algorithm
        :type iterations: int

        :param probability_tables: Optional. Use this to pass in custom
            probability values. If not specified, probabilities will be
            set to a uniform distribution, or some other sensible value.
            If specified, all the following entries must be present:
            ``translation_table``, ``alignment_table``,
            ``fertility_table``, ``p1``, ``distortion_table``.
            See ``IBMModel`` for the type and purpose of these tables.
        :type probability_tables: dict[str]: object
        """
        super(IBMModel3, self).__init__(sentence_aligned_corpus)
        self.reset_probabilities()

        if probability_tables is None:
            # Get translation and alignment probabilities from IBM Model 2
            ibm2 = IBMModel2(sentence_aligned_corpus, iterations)
            self.translation_table = ibm2.translation_table
            self.alignment_table = ibm2.alignment_table
            self.set_uniform_probabilities(sentence_aligned_corpus)
        else:
            # Set user-defined probabilities
            self.translation_table = probability_tables['translation_table']
            self.alignment_table = probability_tables['alignment_table']
            self.fertility_table = probability_tables['fertility_table']
            self.p1 = probability_tables['p1']
            self.distortion_table = probability_tables['distortion_table']

        for n in range(0, iterations):
            self.train(sentence_aligned_corpus)

    def reset_probabilities(self):
        """Clear all probability tables, including the distortion table."""
        super(IBMModel3, self).reset_probabilities()
        self.distortion_table = defaultdict(
            lambda: defaultdict(lambda: defaultdict(lambda: defaultdict(
                lambda: self.MIN_PROB))))
        """
        dict[int][int][int][int]: float. Probability(j | i,l,m).
        Values accessed as ``distortion_table[j][i][l][m]``.
        """

    def set_uniform_probabilities(self, sentence_aligned_corpus):
        """Initialize distortion uniformly and fertility heuristically."""
        # d(j | i,l,m) = 1 / m for all i, j, l, m
        l_m_combinations = set()
        for aligned_sentence in sentence_aligned_corpus:
            l = len(aligned_sentence.mots)
            m = len(aligned_sentence.words)
            if (l, m) not in l_m_combinations:
                l_m_combinations.add((l, m))
                initial_prob = 1 / float(m)
                if initial_prob < IBMModel.MIN_PROB:
                    warnings.warn("A target sentence is too long (" + str(m) +
                                  " words). Results may be less accurate.")
                for j in range(1, m + 1):
                    for i in range(0, l + 1):
                        self.distortion_table[j][i][l][m] = initial_prob

        # simple initialization, taken from GIZA++
        self.fertility_table[0] = defaultdict(lambda: 0.2)
        self.fertility_table[1] = defaultdict(lambda: 0.65)
        self.fertility_table[2] = defaultdict(lambda: 0.1)
        self.fertility_table[3] = defaultdict(lambda: 0.04)
        MAX_FERTILITY = 10
        initial_fert_prob = 0.01 / (MAX_FERTILITY - 4)
        for phi in range(4, MAX_FERTILITY):
            self.fertility_table[phi] = defaultdict(lambda: initial_fert_prob)

        self.p1 = 0.5

    def train(self, parallel_corpus):
        """Run one EM iteration over ``parallel_corpus``."""
        counts = Model3Counts()
        for aligned_sentence in parallel_corpus:
            l = len(aligned_sentence.mots)
            m = len(aligned_sentence.words)

            # Sample the alignment space
            sampled_alignments, best_alignment = self.sample(aligned_sentence)
            # Record the most probable alignment
            aligned_sentence.alignment = Alignment(
                best_alignment.zero_indexed_alignment())

            # E step (a): Compute normalization factors to weigh counts
            total_count = self.prob_of_alignments(sampled_alignments)

            # E step (b): Collect counts
            for alignment_info in sampled_alignments:
                count = self.prob_t_a_given_s(alignment_info)
                normalized_count = count / total_count

                for j in range(1, m + 1):
                    counts.update_lexical_translation(
                        normalized_count, alignment_info, j)
                    counts.update_distortion(
                        normalized_count, alignment_info, j, l, m)

                counts.update_null_generation(normalized_count, alignment_info)
                counts.update_fertility(normalized_count, alignment_info)

        # M step: Update probabilities with maximum likelihood estimates
        # If any probability is less than MIN_PROB, clamp it to MIN_PROB
        existing_alignment_table = self.alignment_table
        self.reset_probabilities()
        self.alignment_table = existing_alignment_table  # don't retrain

        self.maximize_lexical_translation_probabilities(counts)
        self.maximize_distortion_probabilities(counts)
        self.maximize_fertility_probabilities(counts)
        self.maximize_null_generation_probabilities(counts)

    def maximize_distortion_probabilities(self, counts):
        """M step for distortion: relative frequency, clamped at MIN_PROB."""
        MIN_PROB = IBMModel.MIN_PROB
        for j, i_s in counts.distortion.items():
            for i, src_sentence_lengths in i_s.items():
                for l, trg_sentence_lengths in src_sentence_lengths.items():
                    for m in trg_sentence_lengths:
                        estimate = (counts.distortion[j][i][l][m] /
                                    counts.distortion_for_any_j[i][l][m])
                        self.distortion_table[j][i][l][m] = max(estimate,
                                                                MIN_PROB)

    def prob_t_a_given_s(self, alignment_info):
        """
        Probability of target sentence and an alignment given the
        source sentence

        Returns MIN_PROB as soon as the running product underflows it,
        to avoid wasted work and zero probabilities.
        """
        src_sentence = alignment_info.src_sentence
        trg_sentence = alignment_info.trg_sentence
        l = len(src_sentence) - 1  # exclude NULL
        m = len(trg_sentence) - 1
        p1 = self.p1
        p0 = 1 - p1

        probability = 1.0
        MIN_PROB = IBMModel.MIN_PROB

        # Combine NULL insertion probability
        null_fertility = alignment_info.fertility_of_i(0)
        probability *= (pow(p1, null_fertility) *
                        pow(p0, m - 2 * null_fertility))
        if probability < MIN_PROB:
            return MIN_PROB

        # Compute combination (m - null_fertility) choose null_fertility
        for i in range(1, null_fertility + 1):
            probability *= (m - null_fertility - i + 1) / i
            if probability < MIN_PROB:
                return MIN_PROB

        # Combine fertility probabilities
        for i in range(1, l + 1):
            fertility = alignment_info.fertility_of_i(i)
            probability *= (factorial(fertility) *
                            self.fertility_table[fertility][src_sentence[i]])
            if probability < MIN_PROB:
                return MIN_PROB

        # Combine lexical and distortion probabilities
        for j in range(1, m + 1):
            t = trg_sentence[j]
            i = alignment_info.alignment[j]
            s = src_sentence[i]

            probability *= (self.translation_table[t][s] *
                            self.distortion_table[j][i][l][m])
            if probability < MIN_PROB:
                return MIN_PROB

        return probability
class Model3Counts(Counts):
    """
    Accumulator for counts collected during Model 3 EM training.

    Extends the base ``Counts`` with distortion counts:
    ``distortion[j][i][l][m]`` and its marginal over j,
    ``distortion_for_any_j[i][l][m]``.
    """
    def __init__(self):
        super(Model3Counts, self).__init__()

        def _zero():
            return 0.0

        # Nested defaultdicts so unseen (j, i, l, m) entries read as 0.0
        self.distortion = defaultdict(
            lambda: defaultdict(lambda: defaultdict(lambda: defaultdict(
                _zero))))
        self.distortion_for_any_j = defaultdict(
            lambda: defaultdict(lambda: defaultdict(_zero)))

    def update_distortion(self, count, alignment_info, j, l, m):
        """Add ``count`` for target position ``j`` aligned to its source."""
        src_position = alignment_info.alignment[j]
        self.distortion[j][src_position][l][m] += count
        self.distortion_for_any_j[src_position][l][m] += count
| nelango/ViralityAnalysis | model/lib/nltk/translate/ibm3.py | Python | mit | 13,875 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Add an integer ``weight`` field (default 0) to the Premise model."""

    dependencies = [
        ('premises', '0030_report_reason'),
    ]

    operations = [
        migrations.AddField(
            model_name='premise',
            name='weight',
            field=models.IntegerField(default=0),
            preserve_default=True,
        ),
    ]
| bahattincinic/arguman.org | web/premises/migrations/0031_premise_weight.py | Python | mit | 431 |
if __name__ == '__main__':
    import os
    import sys

    # Port for the remote debugger connection, supplied by the test runner.
    port = int(sys.argv[1])
    root_dirname = os.path.dirname(os.path.dirname(__file__))

    if root_dirname not in sys.path:
        sys.path.append(root_dirname)

    print('before pydevd.settrace')
    # NOTE: the `port` keyword only works because a custom sitecustomize.py
    # remaps the builtin breakpoint() to pydevd.settrace for this test.
    breakpoint(port=port)  # Set up through custom sitecustomize.py
    print('after pydevd.settrace')
    # Deliberate marker string matched by the test harness (typo included).
    print('TEST SUCEEDED!')
| Elizaveta239/PyDev.Debugger | tests_python/resources/_debugger_case_breakpoint_remote_no_import.py | Python | epl-1.0 | 406 |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'Form_NewProject.ui'
#
# Created: Mon Sep 9 21:29:21 2013
# by: PyQt4 UI code generator 4.8.6
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
    _fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
    # PyQt4 API v2 / Python 3 builds have no QString: pass strings through.
    _fromUtf8 = lambda s: s
class Ui_NewProject(object):
    """UI scaffolding for the "New Project" dialog.

    Generated by pyuic4 from Form_NewProject.ui — do not edit by hand;
    regenerate from the .ui file instead (manual changes will be lost).
    """
    def setupUi(self, NewProject):
        """Build all widgets and layouts and wire the dialog signals."""
        NewProject.setObjectName(_fromUtf8("NewProject"))
        NewProject.resize(572, 232)
        NewProject.setWindowTitle(QtGui.QApplication.translate("NewProject", "New Project", None, QtGui.QApplication.UnicodeUTF8))
        icon = QtGui.QIcon()
        icon.addPixmap(QtGui.QPixmap(_fromUtf8(":/images/logo_icon.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        NewProject.setWindowIcon(icon)
        self.verticalLayout = QtGui.QVBoxLayout(NewProject)
        self.verticalLayout.setObjectName(_fromUtf8("verticalLayout"))
        # "Project settings" group box: name, directory and save options
        self.groupBox = QtGui.QGroupBox(NewProject)
        self.groupBox.setTitle(QtGui.QApplication.translate("NewProject", "Project settings", None, QtGui.QApplication.UnicodeUTF8))
        self.groupBox.setObjectName(_fromUtf8("groupBox"))
        self.gridLayout = QtGui.QGridLayout(self.groupBox)
        self.gridLayout.setObjectName(_fromUtf8("gridLayout"))
        self.label_2 = QtGui.QLabel(self.groupBox)
        self.label_2.setText(QtGui.QApplication.translate("NewProject", "Project name:", None, QtGui.QApplication.UnicodeUTF8))
        self.label_2.setObjectName(_fromUtf8("label_2"))
        self.gridLayout.addWidget(self.label_2, 0, 0, 1, 1)
        self.ProjectName = QtGui.QLineEdit(self.groupBox)
        self.ProjectName.setText(_fromUtf8(""))
        self.ProjectName.setObjectName(_fromUtf8("ProjectName"))
        self.gridLayout.addWidget(self.ProjectName, 0, 1, 1, 2)
        self.label = QtGui.QLabel(self.groupBox)
        self.label.setText(QtGui.QApplication.translate("NewProject", "Project directory:", None, QtGui.QApplication.UnicodeUTF8))
        self.label.setObjectName(_fromUtf8("label"))
        self.gridLayout.addWidget(self.label, 1, 0, 1, 1)
        self.ProjectPath = QtGui.QLineEdit(self.groupBox)
        self.ProjectPath.setObjectName(_fromUtf8("ProjectPath"))
        self.gridLayout.addWidget(self.ProjectPath, 1, 1, 1, 1)
        self.NewProject_browser = QtGui.QToolButton(self.groupBox)
        self.NewProject_browser.setText(QtGui.QApplication.translate("NewProject", "...", None, QtGui.QApplication.UnicodeUTF8))
        self.NewProject_browser.setToolButtonStyle(QtCore.Qt.ToolButtonTextOnly)
        self.NewProject_browser.setObjectName(_fromUtf8("NewProject_browser"))
        self.gridLayout.addWidget(self.NewProject_browser, 1, 2, 1, 1)
        self.checkBox_WorkdirFiles = QtGui.QCheckBox(self.groupBox)
        self.checkBox_WorkdirFiles.setText(QtGui.QApplication.translate("NewProject", "Save nvrams including EtherSwitch VLANs and crypto keys", None, QtGui.QApplication.UnicodeUTF8))
        self.checkBox_WorkdirFiles.setChecked(False)
        self.checkBox_WorkdirFiles.setObjectName(_fromUtf8("checkBox_WorkdirFiles"))
        self.gridLayout.addWidget(self.checkBox_WorkdirFiles, 2, 0, 1, 3)
        self.unbaseImages = QtGui.QCheckBox(self.groupBox)
        self.unbaseImages.setText(QtGui.QApplication.translate("NewProject", "Unbase images when saving (required to share a project that uses Qemu)", None, QtGui.QApplication.UnicodeUTF8))
        self.unbaseImages.setObjectName(_fromUtf8("unbaseImages"))
        self.gridLayout.addWidget(self.unbaseImages, 3, 0, 1, 2)
        self.checkBox_SaveCaptures = QtGui.QCheckBox(self.groupBox)
        self.checkBox_SaveCaptures.setText(QtGui.QApplication.translate("NewProject", "Save traffic captures", None, QtGui.QApplication.UnicodeUTF8))
        self.checkBox_SaveCaptures.setObjectName(_fromUtf8("checkBox_SaveCaptures"))
        self.gridLayout.addWidget(self.checkBox_SaveCaptures, 4, 0, 1, 2)
        self.verticalLayout.addWidget(self.groupBox)
        # Bottom row: Open / Recent Files buttons, spacer, OK/Cancel box
        self.horizontalLayout = QtGui.QHBoxLayout()
        self.horizontalLayout.setObjectName(_fromUtf8("horizontalLayout"))
        self.pushButtonOpenProject = QtGui.QPushButton(NewProject)
        self.pushButtonOpenProject.setText(QtGui.QApplication.translate("NewProject", "&Open a Project", None, QtGui.QApplication.UnicodeUTF8))
        self.pushButtonOpenProject.setObjectName(_fromUtf8("pushButtonOpenProject"))
        self.horizontalLayout.addWidget(self.pushButtonOpenProject)
        self.pushButtonRecentFiles = QtGui.QPushButton(NewProject)
        self.pushButtonRecentFiles.setText(QtGui.QApplication.translate("NewProject", "&Recent Files", None, QtGui.QApplication.UnicodeUTF8))
        self.pushButtonRecentFiles.setObjectName(_fromUtf8("pushButtonRecentFiles"))
        self.horizontalLayout.addWidget(self.pushButtonRecentFiles)
        spacerItem = QtGui.QSpacerItem(168, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
        self.horizontalLayout.addItem(spacerItem)
        self.buttonBox = QtGui.QDialogButtonBox(NewProject)
        self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
        self.buttonBox.setStandardButtons(QtGui.QDialogButtonBox.Cancel|QtGui.QDialogButtonBox.Ok)
        self.buttonBox.setObjectName(_fromUtf8("buttonBox"))
        self.horizontalLayout.addWidget(self.buttonBox)
        self.verticalLayout.addLayout(self.horizontalLayout)

        self.retranslateUi(NewProject)
        QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL(_fromUtf8("accepted()")), NewProject.accept)
        QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL(_fromUtf8("rejected()")), NewProject.reject)
        QtCore.QMetaObject.connectSlotsByName(NewProject)

    def retranslateUi(self, NewProject):
        # All translatable strings are set inline in setupUi above.
        pass
import svg_resources_rc
| dlintott/gns3 | src/GNS3/Ui/Form_NewProject.py | Python | gpl-2.0 | 5,837 |
# $Id$
#
import inc_const as const
# Single auto-answering pjsua instance: SRTP mandatory (--use-srtp 2),
# SDES keying tried first (--srtp-keying=0), UDP only (--no-tcp).
PJSUA = ["--null-audio --max-calls=1 --auto-answer=200 --no-tcp --srtp-secure 0 --use-srtp 2 --srtp-keying=0"]

# Log lines expected from instance 0 — presumably the call first negotiates
# SDES and is then re-INVITEd to DTLS-SRTP keying (see scenario file name);
# verify against the SIPP scenario.
PJSUA_EXPECTS = [[0, "SRTP uses keying method SDES", ""],
		 [0, "SRTP uses keying method DTLS-SRTP", ""]
		]
| asterisk/pjproject | tests/pjsua/scripts-sipp/uac-srtp-sdes-reinv-dtls.py | Python | gpl-2.0 | 258 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2012-2013 University of Dundee & Open Microscopy Environment
# All Rights Reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
from builtins import str
from builtins import range
from builtins import object
import os
import uuid
import shutil
import logging
import tempfile
from scc.git import get_github, get_token_or_user
from subprocess import Popen
sandbox_url = "https://github.com/ome/snoopys-sandbox.git"
class SandboxTest(object):
def setup_method(self, method):
# Basic logging configuration so if a test fails we can see
# the statements at WARN or ERROR at least.
logging.basicConfig()
self.method = method.__name__
self.cwd = os.getcwd()
self.token = get_token_or_user(local=False)
self.gh = get_github(self.token, dont_ask=True)
self.user = self.gh.get_login()
self.path = tempfile.mkdtemp("", "sandbox-", ".")
self.path = os.path.abspath(self.path)
try:
with open(os.devnull, 'w') as dev_null:
p = Popen(["git", "clone", "-q", sandbox_url, self.path],
stdout=dev_null, stderr=dev_null)
assert p.wait() == 0
self.sandbox = self.gh.git_repo(self.path)
self.origin_remote = "origin"
except Exception:
try:
shutil.rmtree(self.path)
finally:
# Return to cwd regardless.
os.chdir(self.cwd)
raise
# If we succeed, then we change to this dir.
os.chdir(self.path)
def shortDescription(self):
return None
def init_submodules(self):
"""
Fetch submodules after cloning the repository
"""
try:
with open(os.devnull, 'w') as dev_null:
p = Popen(["git", "submodule", "update", "--init"],
stdout=dev_null, stderr=dev_null)
assert p.wait() == 0
except Exception:
os.chdir(self.path)
raise
def uuid(self):
"""
Return a string representing a uuid.uuid4
"""
return str(uuid.uuid4())
def fake_branch(self, head="master", commits=None):
"""
Return a local branch with a list of commits, defaults to a single
commit adding a unique file
"""
name = self.uuid()
if commits is None:
commits = [(name, "hi")]
self.sandbox.new_branch(name, head=head)
for n in range(len(commits)):
fname, txt = commits[n]
fname = os.path.join(self.path, fname)
with open(fname, 'w') as f:
f.write(txt)
self.sandbox.add(fname)
self.sandbox.commit("%d: Writing %s" % (n, name))
self.sandbox.get_status()
return name
def add_remote(self):
"""
Add the remote of the authenticated Github user
"""
if self.user not in self.sandbox.list_remotes():
remote_url = "https://%s:x-oauth-basic@github.com/%s/%s.git" \
% (self.token, self.user, self.sandbox.origin.name)
self.sandbox.add_remote(self.user, remote_url)
def rename_origin_remote(self, new_name):
"""
Rename the remote used for the upstream repository
"""
self.sandbox.call("git", "remote", "rename", self.origin_remote,
new_name)
self.origin_remote = new_name
def push_branch(self, branch):
"""
Push a local branch to GitHub
"""
self.add_remote()
self.sandbox.push_branch(branch, remote=self.user)
def open_pr(self, branch, base, description=None):
"""
Push a local branch and open a PR against the selected base
"""
self.push_branch(branch)
if description is None:
description = ("This is a call to Sandbox.open_pr by %s" %
self.method)
new_pr = self.sandbox.origin.open_pr(
title="test %s" % branch,
description=description,
base=base,
head="%s:%s" % (self.user, branch))
return new_pr
    def teardown_method(self, method):
        # Cleanup is layered with nested try/finally so each step runs even
        # when an earlier one fails: remote sandbox first, then the local
        # checkout, and finally the original working directory is restored.
        try:
            self.sandbox.cleanup()
        finally:
            try:
                shutil.rmtree(self.path)
            finally:
                # Return to cwd regardless.
                os.chdir(self.cwd)
| sbesson/snoopycrimecop | test/integration/Sandbox.py | Python | gpl-2.0 | 5,211 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2008-2010 Zuza Software Foundation
#
# This file is part of Virtaal.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
import gtk
import gobject
import pango
from translate.lang import factory
from virtaal.common import pan_app
from virtaal.support.simplegeneric import generic
from virtaal.views import markup, rendering
from virtaal.views.theme import current_theme
@generic
def compute_optimal_height(widget, width):
    """Size *widget* vertically for the given *width*.

    Dispatched on the widget's type via ``simplegeneric``; this base
    implementation should never be reached directly.
    """
    raise NotImplementedError()
@compute_optimal_height.when_type(gtk.Widget)
def gtk_widget_compute_optimal_height(widget, width):
    # Fallback for plain widgets: nothing to compute.
    pass
@compute_optimal_height.when_type(gtk.Container)
def gtk_container_compute_optimal_height(widget, width):
    """Recursively size every visible child with the full available width."""
    if not widget.props.visible:
        return
    for child in widget.get_children():
        compute_optimal_height(child, width)
@compute_optimal_height.when_type(gtk.Table)
def gtk_table_compute_optimal_height(widget, width):
    """Size each table child at half of the available width."""
    for child in widget.get_children():
        # width / 2 because we use half of the available width
        compute_optimal_height(child, width / 2)
@compute_optimal_height.when_type(gtk.TextView)
def gtk_textview_compute_optimal_height(widget, width):
    """Reserve vertical space for the text view's (possibly estimated) content."""
    if not widget.props.visible:
        return
    buf = widget.get_buffer()
    # For border calculations, see gtktextview.c:gtk_text_view_size_request in the GTK source
    border = 2 * widget.border_width - 2 * widget.parent.border_width
    if widget.style_get_property("interior-focus"):
        border += 2 * widget.style_get_property("focus-line-width")
    buftext = buf.props.text
    # A good way to test height estimation is to use it for all units and
    # compare the reserved space to the actual space needed to display a unit.
    # To use height estimation for all units (not just empty units), use:
    #if True:
    if not buftext:
        # Empty target: estimate from the source text, adjusted by the
        # target language's typical length factor.
        text = getattr(widget, '_source_text', u"")
        if text:
            lang = factory.getlanguage(pan_app.settings.language["targetlang"])
            buftext = lang.alter_length(text)
            buftext = markup.escape(buftext)
    _w, h = rendering.make_pango_layout(widget, buftext, width - border).get_pixel_size()
    if h == 0:
        # No idea why this bug happens, but it often happens for the first unit
        # directly after the file is opened. For now we try to guess a more
        # useful default than 0. This should look much better than 0, at least.
        h = 28
    parent = widget.parent
    # Shadowed scrolled windows need extra room for the shadow thickness.
    if isinstance(parent, gtk.ScrolledWindow) and parent.get_shadow_type() != gtk.SHADOW_NONE:
        border += 2 * parent.rc_get_style().ythickness
    widget.parent.set_size_request(-1, h + border)
@compute_optimal_height.when_type(gtk.Label)
def gtk_label_compute_optimal_height(widget, width):
    """Request exactly the height the label's markup needs at *width*."""
    if widget.get_text().strip() == "":
        # Whitespace-only labels collapse to zero height.
        widget.set_size_request(width, 0)
    else:
        _w, h = rendering.make_pango_layout(widget, widget.get_label(), width).get_pixel_size()
        widget.set_size_request(width, h)
class StoreCellRenderer(gtk.GenericCellRenderer):
    """
    Cell renderer for a unit based on the C{UnitRenderer} class from Virtaal's
    pre-MVC days.
    """
    __gtype_name__ = "StoreCellRenderer"
    # GObject properties: the unit being shown, and whether the cell is in
    # edit mode (edit mode delegates sizing to the cell editor widget).
    __gproperties__ = {
        "unit": (
            object,
            "The unit",
            "The unit that this renderer is currently handling",
            gobject.PARAM_READWRITE
        ),
        "editable": (
            bool,
            "editable",
            "A boolean indicating whether this unit is currently editable",
            False,
            gobject.PARAM_READWRITE
        ),
    }
    # "editing-done" carries (path, must_advance, modified); "modified" is
    # re-emitted whenever the underlying editor reports a change.
    __gsignals__ = {
        "editing-done": (
            gobject.SIGNAL_RUN_FIRST, gobject.TYPE_NONE,
            (gobject.TYPE_STRING, gobject.TYPE_BOOLEAN, gobject.TYPE_BOOLEAN)
        ),
        "modified": (gobject.SIGNAL_RUN_FIRST, gobject.TYPE_NONE, ())
    }
    ROW_PADDING = 10
    """The number of pixels between rows."""
    # INITIALIZERS #
    def __init__(self, view):
        """Create a renderer bound to *view* (the owning store view)."""
        gtk.GenericCellRenderer.__init__(self)
        self.set_property('mode', gtk.CELL_RENDERER_MODE_EDITABLE)
        self.view = view
        self.__unit = None
        self.editable = False
        # Pango layouts cached by compute_cell_height() and reused when
        # painting in on_render().
        self.source_layout = None
        self.target_layout = None
    # ACCESSORS #
    def _get_unit(self):
        return self.__unit
    def _set_unit(self, value):
        # Fuzzy units get a distinctive background colour from the theme.
        if value.isfuzzy():
            self.props.cell_background = current_theme['fuzzy_row_bg']
            self.props.cell_background_set = True
        else:
            self.props.cell_background_set = False
        self.__unit = value
    unit = property(_get_unit, _set_unit, None, None)
    # INTERFACE METHODS #
    def do_set_property(self, pspec, value):
        setattr(self, pspec.name, value)
    def do_get_property(self, pspec):
        return getattr(self, pspec.name)
    def do_get_size(self, widget, _cell_area):
        """Return (x_offset, y_offset, width, height) for the cell."""
        #TODO: store last unitid and computed dimensions
        # NOTE(review): the 32px subtraction presumably accounts for window
        # chrome/scrollbar width -- confirm against the actual allocation.
        width = widget.get_toplevel().get_allocation().width - 32
        if width < -1:
            width = -1
        if self.editable:
            # In edit mode the editor widget determines the height.
            editor = self.view.get_unit_celleditor(self.unit)
            editor.set_size_request(width, -1)
            editor.show()
            compute_optimal_height(editor, width)
            parent_height = widget.get_allocation().height
            if parent_height < -1:
                parent_height = widget.size_request()[1]
            if parent_height > 0:
                self.check_editor_height(editor, width, parent_height)
            _width, height = editor.size_request()
            height += self.ROW_PADDING
        else:
            height = self.compute_cell_height(widget, width)
        #height = min(height, 600)
        y_offset = self.ROW_PADDING / 2
        return 0, y_offset, width, height
    def do_start_editing(self, _event, tree_view, path, _bg_area, cell_area, _flags):
        """Initialize and return the editor widget."""
        editor = self.view.get_unit_celleditor(self.unit)
        editor.set_size_request(cell_area.width, cell_area.height)
        # Connect editor signals only once; the editor may be reused.
        if not getattr(self, '_editor_editing_done_id', None):
            self._editor_editing_done_id = editor.connect("editing-done", self._on_editor_done)
        if not getattr(self, '_editor_modified_id', None):
            self._editor_modified_id = editor.connect("modified", self._on_modified)
        return editor
    def on_render(self, window, widget, _background_area, cell_area, _expose_area, _flags):
        """Paint the cached source and target layouts side by side."""
        # While editing, the editor widget covers the cell; nothing to paint.
        if self.editable:
            return True
        x_offset, y_offset, width, _height = self.do_get_size(widget, cell_area)
        x = cell_area.x + x_offset
        y = cell_area.y + y_offset
        source_x = x
        target_x = x
        # Source left / target right for LTR locales; mirrored for RTL.
        if widget.get_direction() == gtk.TEXT_DIR_LTR:
            target_x += width/2
        else:
            source_x += (width/2) + 10
        widget.get_style().paint_layout(window, gtk.STATE_NORMAL, False,
                cell_area, widget, '', source_x, y, self.source_layout)
        widget.get_style().paint_layout(window, gtk.STATE_NORMAL, False,
                cell_area, widget, '', target_x, y, self.target_layout)
    # METHODS #
    def _get_pango_layout(self, widget, text, width, font_description):
        '''Gets the Pango layout used in the cell in a TreeView widget.'''
        # We can't use widget.get_pango_context() because we'll end up
        # overwriting the language and font settings if we don't have a
        # new one
        layout = pango.Layout(widget.create_pango_context())
        layout.set_font_description(font_description)
        layout.set_wrap(pango.WRAP_WORD_CHAR)
        layout.set_width(width * pango.SCALE)
        #XXX - plurals?
        text = text or u""
        layout.set_markup(markup.markuptext(text))
        return layout
    def compute_cell_height(self, widget, width):
        """Build the source/target layouts and return the row height in pixels."""
        lang_controller = self.view.controller.main_controller.lang_controller
        srclang = lang_controller.source_lang.code
        tgtlang = lang_controller.target_lang.code
        # Each of the two columns gets half of the available width.
        self.source_layout = self._get_pango_layout(widget, self.unit.source, width / 2,
                rendering.get_source_font_description())
        self.source_layout.get_context().set_language(rendering.get_language(srclang))
        self.target_layout = self._get_pango_layout(widget, self.unit.target, width / 2,
                rendering.get_target_font_description())
        self.target_layout.get_context().set_language(rendering.get_language(tgtlang))
        # This makes no sense, but has the desired effect to align things correctly for
        # both LTR and RTL languages:
        if widget.get_direction() == gtk.TEXT_DIR_RTL:
            self.source_layout.set_alignment(pango.ALIGN_RIGHT)
            self.target_layout.set_alignment(pango.ALIGN_RIGHT)
            self.target_layout.set_auto_dir(False)
        _layout_width, source_height = self.source_layout.get_pixel_size()
        _layout_width, target_height = self.target_layout.get_pixel_size()
        return max(source_height, target_height) + self.ROW_PADDING
    def check_editor_height(self, editor, width, parentheight):
        """Clamp the editor's text boxes so the editor fits in *parentheight*."""
        notesheight = 0
        for note in editor._widgets['notes'].values():
            notesheight += note.size_request()[1]
        maxheight = parentheight - notesheight
        if maxheight < 0:
            return
        visible_textboxes = []
        for textbox in (editor._widgets['sources'] + editor._widgets['targets']):
            if textbox.props.visible:
                visible_textboxes.append(textbox)
        # Divide the remaining space evenly among the visible text boxes.
        max_tb_height = maxheight / len(visible_textboxes)
        for textbox in visible_textboxes:
            if textbox.props.visible and textbox.parent.size_request()[1] > max_tb_height:
                textbox.parent.set_size_request(-1, max_tb_height)
                #logging.debug('%s.set_size_request(-1, %d)' % (textbox.parent, max_tb_height))
    # EVENT HANDLERS #
    def _on_editor_done(self, editor):
        # Forward the editor's completion to listeners of this renderer.
        self.emit("editing-done", editor.get_data("path"), editor.must_advance, editor.is_modified())
        return True
    def _on_modified(self, widget):
        self.emit("modified")
| unho/virtaal | virtaal/views/widgets/storecellrenderer.py | Python | gpl-2.0 | 10,840 |
# $Id$
#
import inc_const as const
# Arguments for the single pjsua instance driven by this SIPP scenario:
# one call, no sound device, UDP only; $SIPP_URI is substituted by the
# test driver.
PJSUA = ["--null-audio --max-calls=1 --no-tcp $SIPP_URI"]
# Expected output -- presumably (instance index, must-match pattern,
# must-not-match pattern); confirm against the scripts-sipp runner.
PJSUA_EXPECTS = [[0, "Audio updated", ""]]
| ismangil/pjproject | tests/pjsua/scripts-sipp/uas-answer-183-without-to-tag.py | Python | gpl-2.0 | 138 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Adds UserProfile.last_transaction, an optional timestamp (null until
    # set) -- presumably the time of the user's most recent transaction;
    # confirm against the accounts app code that writes it.
    dependencies = [
        ('accounts', '0019_auto_20160518_0048'),
    ]
    operations = [
        migrations.AddField(
            model_name='userprofile',
            name='last_transaction',
            field=models.DateTimeField(null=True, blank=True),
        ),
    ]
| JulianVolodia/Politikon | accounts/migrations/0020_userprofile_last_transaction.py | Python | gpl-2.0 | 428 |
# -*- coding: utf-8 -*-
# This file is part of Invenio.
# Copyright (C) 2006, 2007, 2008, 2010, 2011 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Template for the external collections search."""
__revision__ = "$Id$"
import cgi
from invenio.config import CFG_SITE_LANG
from invenio.messages import gettext_set_language
from invenio.urlutils import create_html_link
class Template:
    """Template class for the external collection search. To be loaded with template.load()"""
    def __init__(self):
        pass
    def external_collection_seealso_box(self, lang, links,
            prolog_start='<table class="externalcollectionsbox"><tr><th colspan="2" class="externalcollectionsboxheader">',
            prolog_end='</th></tr><tr><td class="externalcollectionsboxbody">',
            column_separator='</td><td class="externalcollectionsboxbody">',
            link_separator= '<br />', epilog='</td></tr></table>'):
        """Creates the box that proposes links to other useful search engines like Google.
        lang: string - The language to display in
        links: list of string - List of links to display in the box
        prolog_start, prolog_end, column_separator, link_separator, epilog': strings -
            default HTML code for the specified position in the box"""
        _ = gettext_set_language(lang)
        out = ""
        if links:
            out += '<a name="externalcollectionsbox"></a>'
            out += prolog_start
            out += _("Haven't found what you were looking for? Try your search on other servers:")
            out += prolog_end
            # Split the links into two roughly equal columns; the first
            # column gets the extra link when the count is odd.
            nb_out_links_in_one_column = len(links)/2 + len(links) % 2
            out += link_separator.join(links[:nb_out_links_in_one_column])
            out += column_separator
            out += link_separator.join(links[nb_out_links_in_one_column:])
            out += epilog
        return out
    def external_collection_overview(self, lang=CFG_SITE_LANG, engine_list=()):
        """Prints results overview box with links to particular collections below.
        lang: The language to display
        engine_list: The external engines to be used"""
        if len(engine_list) < 1:
            return ""
        _ = gettext_set_language(lang)
        out = """
<table class="externalcollectionsresultsbox">
  <thead>
    <tr>
      <th class="externalcollectionsresultsboxheader"><strong>%s</strong></th>
    </tr>
  </thead>
  <tbody>
    <tr>
      <td class="externalcollectionsresultsboxbody"> """ % _("External collections results overview:")
        for engine in engine_list:
            # The anchor name must match the one emitted alongside each
            # engine's results section.
            internal_name = get_link_name(engine.name)
            name = _(engine.name)
            out += """<strong><a href="#%(internal_name)s">%(name)s</a></strong><br />""" % locals()
        out += """
    </td>
    </tr>
  </tbody>
</table>
"""
        return out
def print_info_line(req,
                    html_external_engine_name_box,
                    html_external_engine_nb_results_box,
                    html_external_engine_nb_seconds_box):
    """Write to *req* an information line about one external engine's results.

    The three html_* arguments are pre-built HTML snippets for the engine
    name, the result count, and the elapsed time.
    """
    fragments = (
        '<table class="externalcollectionsresultsbox"><tr>',
        '<td class="externalcollectionsresultsboxheader">',
        '<big><strong>' + html_external_engine_name_box + '</strong></big>',
        ' ',
        html_external_engine_nb_results_box,
        '</td><td class="externalcollectionsresultsboxheader" width="20%" align="right">',
        '<small>' + html_external_engine_nb_seconds_box + '</small>',
        '</td></tr></table><br />',
    )
    for fragment in fragments:
        req.write(fragment)
def print_timeout(req, lang, engine, name, url):
    """Write an information line reporting that an external engine timed out."""
    _ = gettext_set_language(lang)
    # Anchor so the overview box can link directly to this engine's section.
    req.write('<a name="%s"></a>' % get_link_name(engine.name))
    # The same link appears in the header line and in the fallback text.
    engine_link = create_html_link(url, {}, name, {}, False, False)
    print_info_line(req, engine_link, '', _('Search timed out.'))
    message = _("The external search engine has not responded in time. You can check its results here:")
    req.write("%s %s<br />" % (message, engine_link))
def get_link_name(name):
    """Return a hex token derived from *name*'s hash, used as an HTML anchor."""
    return hex(abs(hash(name)))
def print_results(req, lang, pagegetter, infos, current_time, print_search_info=True, print_body=True):
    """Print results of a given search engine.
    current_time is actually the duration, expressed in seconds of execution of request.
    """
    _ = gettext_set_language(lang)
    # infos is a (url, engine) pair prepared by the caller.
    url = infos[0]
    engine = infos[1]
    internal_name = get_link_name(engine.name)
    name = _(engine.name)
    base_url = engine.base_url
    # Parse the already-fetched page held by the pagegetter.
    results = engine.parser.parse_and_get_results(pagegetter.data)
    html_tit = make_url(name, base_url)
    if print_search_info:
        num = format_number(engine.parser.parse_num_results())
        if num:
            if num == '0':
                html_num = _('No results found.')
                html_sec = ''
            else:
                html_num = '<strong>' + \
                           make_url(_('%s results found') % num, url) + \
                           '</strong>'
                html_sec = '(' + _('%s seconds') % ('%2.2f' % current_time) + ')'
        else:
            # format_number() returned None/'' -- no usable result count.
            html_num = _('No results found.')
            html_sec = ''
        req.write('<a name="%(internal_name)s"></a>' % locals())
        print_info_line(req,
                        html_tit,
                        html_num,
                        html_sec)
    if print_body:
        for result in results:
            req.write(result.html + '<br />')
        if not results:
            req.write(_('No results found.') + '<br />')
def make_url(name, url):
    """Return *name* wrapped in an HTML link to *url*, or plain *name* when
    *url* is empty/None."""
    if not url:
        return name
    return '<a href="' + cgi.escape(url) + '">' + name + '</a>'
def format_number(num, separator=','):
    """Format a number by separating thousands with a separator (by default a comma)

    The input may be anything int() accepts, including strings with
    surrounding whitespace.  None is returned when the input cannot be
    parsed (callers test the result for truthiness).

    >>> format_number(10)
    '10'
    >>> format_number(10000)
    '10,000'
    >>> format_number(' 000213212424249 ', '.')
    '213.212.424.249'
    """
    try:
        num = int(num)
    except (TypeError, ValueError):
        # Narrowed from a bare "except:" so real errors are not swallowed.
        return None
    if num == 0:
        return '0'
    if num < 0:
        # Preserve historical behaviour: negative values were never grouped
        # and came back as an empty string.
        return ''
    # Split into base-1000 groups, least significant first.
    groups = []
    while num > 0:
        groups.append(num % 1000)
        num //= 1000  # floor division: identical on Python 2 and 3
    groups.reverse()
    # Most significant group unpadded, the rest zero-padded to 3 digits.
    # (Replaces the fragile strip('0') post-processing of the original.)
    formatted = ["%d" % groups[0]] + ["%03d" % group for group in groups[1:]]
    return separator.join(formatted)
| jmacmahon/invenio | modules/websearch/lib/websearch_external_collections_templates.py | Python | gpl-2.0 | 7,238 |
# Rekall Memory Forensics
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Authors:
# Michael Cohen <scudette@google.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or (at
# your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
"""This file implements an xls renderer based on the openpyxl project.
We produce xls (Excel spreadsheet files) with the output from Rekall plugins.
"""
import time
import openpyxl
from openpyxl import styles
from openpyxl.styles import colors
from openpyxl.styles import fills
from rekall import utils
from rekall.ui import renderer
from rekall.ui import text
# pylint: disable=unexpected-keyword-arg,no-value-for-parameter
# pylint: disable=redefined-outer-name
# Bold text for table header cells.
HEADER_STYLE = styles.Style(font=styles.Font(bold=True))
# Solid red fill marking section separator rows.
SECTION_STYLE = styles.Style(
    fill=styles.PatternFill(
        fill_type=fills.FILL_SOLID, start_color=styles.Color(colors.RED)))
# Top-aligned, non-wrapping cells for free-form format() output.
FORMAT_STYLE = styles.Style(
    alignment=styles.Alignment(vertical="top", wrap_text=False))
class XLSObjectRenderer(renderer.ObjectRenderer):
    """By default the XLS renderer delegates to the text renderer."""
    renders_type = "object"
    renderers = ["XLSRenderer"]
    # Optional openpyxl style applied to cells written by this renderer.
    STYLE = None
    def _GetDelegateObjectRenderer(self, item):
        # Reuse the single TextRenderer owned by the XLSRenderer so cell
        # contents match the plain-text output.
        return self.ForTarget(item, "TextRenderer")(
            session=self.session, renderer=self.renderer.delegate_text_renderer)
    def RenderHeader(self, worksheet, column):
        """Write *column*'s name into the current header cell (bold)."""
        cell = worksheet.cell(
            row=worksheet.current_row, column=worksheet.current_column)
        cell.value = column.name
        cell.style = HEADER_STYLE
        # Advance the pointer by 1 cell.
        worksheet.current_column += 1
    def RenderCell(self, value, worksheet, **options):
        # By default just render a single value into the current cell.
        cell = worksheet.cell(
            row=worksheet.current_row, column=worksheet.current_column)
        cell.value = self.GetData(value, **options)
        if self.STYLE:
            cell.style = self.STYLE
        # Advance the pointer by 1 cell.
        worksheet.current_column += 1
    def GetData(self, value, **options):
        """Convert *value* into something openpyxl can store in a cell."""
        # Numbers pass through unchanged so the spreadsheet treats them as
        # numeric; everything else is rendered to text via the delegate.
        if isinstance(value, (int, float, long)):
            return value
        return unicode(self._GetDelegateObjectRenderer(value).render_row(
            value, **options))
class XLSColumn(text.TextColumn):
    """A table column that resolves its XLS object renderer up front."""
    def __init__(self, type=None, table=None, renderer=None, session=None,
                 **options):
        super(XLSColumn, self).__init__(table=table, renderer=renderer,
                                        session=session, **options)
        # When the column declares a type, look up its renderer once here
        # instead of per cell.
        if type:
            self.object_renderer = self.renderer.get_object_renderer(
                type=type, target_renderer="XLSRenderer", **options)
class XLSTable(text.TextTable):
    """A table that writes its header and rows into the current worksheet."""
    column_class = XLSColumn
    def render_header(self):
        """Write one header cell per column, then move to the next row."""
        current_ws = self.renderer.current_ws
        for column in self.columns:
            # Prefer the column's pre-resolved renderer; fall back to the
            # generic object renderer.
            if column.object_renderer:
                object_renderer = column.object_renderer
            else:
                object_renderer = XLSObjectRenderer(
                    session=self.session, renderer=self.renderer)
            object_renderer.RenderHeader(self.renderer.current_ws, column)
        current_ws.current_row += 1
        current_ws.current_column = 1
    def render_row(self, row=None, highlight=None, **options):
        """Render each value of *row* into its own cell, then advance a row."""
        merged_opts = self.options.copy()
        merged_opts.update(options)
        # Write one cell per value in the row.
        current_ws = self.renderer.current_ws
        for item in row:
            # Get the object renderer for the item.
            object_renderer = self.renderer.get_object_renderer(
                target=item, type=merged_opts.get("type"), **merged_opts)
            object_renderer.RenderCell(item, current_ws, **options)
        current_ws.current_row += 1
        current_ws.current_column = 1
class XLSRenderer(renderer.BaseRenderer):
    """A Renderer for xls files."""
    name = "xls"
    table_class = XLSTable
    tablesep = ""
    def __init__(self, output=None, **kwargs):
        """Open (or create) the target workbook.

        output: workbook path; defaults to the session's "output"
        parameter, falling back to a timestamped filename.
        """
        super(XLSRenderer, self).__init__(**kwargs)
        # Make a single delegate text renderer for reuse. Most of the time we
        # will just replicate the output from the TextRenderer inside the
        # spreadsheet cell.
        self.delegate_text_renderer = text.TextRenderer(session=self.session)
        self.output = output or self.session.GetParameter("output")
        # If no output filename was give, just make a name based on the time
        # stamp.
        # NOTE(review): "== None" is kept deliberately -- rekall parameters
        # may be NoneObject instances which compare equal to None without
        # being the None singleton.
        if self.output == None:
            self.output = "%s.xls" % time.ctime()
        try:
            # Append to an existing workbook when the file already exists.
            self.wb = openpyxl.load_workbook(self.output)
            self.current_ws = self.wb.create_sheet()
        except IOError:
            self.wb = openpyxl.Workbook()
            self.current_ws = self.wb.active
    def start(self, plugin_name=None, kwargs=None):
        """Begin rendering a plugin run into its own worksheet."""
        super(XLSRenderer, self).start(plugin_name=plugin_name, kwargs=kwargs)
        # Make a new worksheet for this run.
        if self.current_ws is None:
            self.current_ws = self.wb.create_sheet()
        ws = self.current_ws
        ws.title = plugin_name or ""
        # Cursor used by the cell-writing helpers.
        ws.current_row = 1
        ws.current_column = 1
        return self
    def flush(self):
        """Finish the current worksheet and persist the workbook to disk."""
        super(XLSRenderer, self).flush()
        self.current_ws = None
        # Write the spreadsheet to a file.
        self.wb.save(self.output)
    def section(self, name=None, **_):
        """Render a red section row spanning the first 10 columns."""
        ws = self.current_ws
        for i in range(10):
            cell = ws.cell(row=ws.current_row, column=i + 1)
            if i == 0:
                cell.value = name
            cell.style = SECTION_STYLE
        ws.current_row += 1
        ws.current_column = 1
    def format(self, formatstring, *data):
        """Write a formatted message into the current cell."""
        worksheet = self.current_ws
        # Support both %-style and str.format-style format strings.
        if "%" in formatstring:
            data = formatstring % data
        else:
            data = formatstring.format(*data)
        cell = worksheet.cell(
            row=worksheet.current_row, column=worksheet.current_column)
        cell.value = data
        cell.style = FORMAT_STYLE
        worksheet.current_column += 1
        # A newline in the message also moves the cursor to the next row.
        if "\n" in data:
            worksheet.current_row += 1
            worksheet.current_column = 1
    def table_header(self, *args, **options):
        """Emit the table header row via the XLSTable."""
        super(XLSRenderer, self).table_header(*args, **options)
        self.table.render_header()
# Following here are object specific renderers.
class XLSEProcessRenderer(XLSObjectRenderer):
    """Expands an EPROCESS into three columns (address, name and PID)."""
    renders_type = "_EPROCESS"
    def RenderHeader(self, worksheet, column):
        # One bold header cell per expanded column.
        for heading in ["_EPROCESS", "Name", "PID"]:
            cell = worksheet.cell(
                row=worksheet.current_row, column=worksheet.current_column)
            cell.value = heading
            cell.style = HEADER_STYLE
            worksheet.current_column += 1
    def RenderCell(self, item, worksheet, **options):
        # Write offset, process name and pid, each through its own renderer.
        for value in ["%#x" % item.obj_offset, item.name, item.pid]:
            object_renderer = self.ForTarget(value, self.renderer)(
                session=self.session, renderer=self.renderer, **options)
            object_renderer.RenderCell(value, worksheet, **options)
class XLSStringRenderer(XLSObjectRenderer):
    """Render rekall String objects as plain byte strings."""
    renders_type = "String"
    def GetData(self, item, **_):
        return utils.SmartStr(item)
class XLSStructRenderer(XLSObjectRenderer):
    """Hex format struct's offsets."""
    renders_type = "Struct"
    def GetData(self, item, **_):
        # Show where the struct lives, not its contents.
        return "%#x" % item.obj_offset
class XLSPointerRenderer(XLSObjectRenderer):
    """Renders the address of the pointer target as a hex string."""
    renders_type = "Pointer"
    def GetData(self, item, **_):
        result = item.v()
        # NOTE(review): "== None" is kept deliberately -- rekall values may
        # compare equal to None (NoneObject) without being the None
        # singleton, so "is None" would change behaviour.
        if result == None:
            return "-"
        return "%#x" % result
class XLSNativeTypeRenderer(XLSObjectRenderer):
    """Renders native types as python objects."""
    renders_type = "NativeType"
    def GetData(self, item, **options):
        result = item.v()
        # "!= None" is deliberate: rekall values may compare equal to None
        # (NoneObject) without being the None singleton.  Falls through to
        # an implicit None return otherwise.
        if result != None:
            return result
class XLS_UNICODE_STRING_Renderer(XLSNativeTypeRenderer):
    # Same treatment as other native types, just bound to _UNICODE_STRING.
    renders_type = "_UNICODE_STRING"
class XLSNoneObjectRenderer(XLSObjectRenderer):
    """Render missing/invalid (NoneObject) values as a "-" placeholder."""
    renders_type = "NoneObject"
    def GetData(self, item, **_):
        # The concrete NoneObject carries no renderable value.
        del item
        return "-"
class XLSDateTimeRenderer(XLSObjectRenderer):
    """Renders timestamps as python datetime objects."""
    renders_type = "UnixTimeStamp"
    # Excel-side display format applied to the datetime cells.
    STYLE = styles.Style(number_format='MM/DD/YYYY HH:MM:SS')
    def GetData(self, item, **options):
        # A zero epoch is treated as "no timestamp": leave the cell empty.
        if item.v() == 0:
            return None
        return item.as_datetime()
| dsweet04/rekall | rekall-core/rekall/plugins/renderers/xls.py | Python | gpl-2.0 | 9,365 |
import pytest
import sys
import logging
from sqlalchemy import create_engine
import zk.model.meta as zkmeta
import zkpylons.model.meta as pymeta
from zkpylons.config.routing import make_map
from paste.deploy import loadapp
from webtest import TestApp
from paste.fixture import Dummy_smtplib
from .fixtures import ConfigFactory
from ConfigParser import ConfigParser
# Get settings from config file, only need it once
ini = ConfigParser()
ini_filename = "test.ini"
ini.read(ini_filename)
# Logging displayed by passing -s to pytest
#logging.basicConfig(level=logging.DEBUG, stream=sys.stdout)
@pytest.yield_fixture
def map():
    """Route map built from a minimal pylons config.

    NOTE(review): the fixture name shadows the builtin map(); kept because
    tests request it by this name.
    """
    config = {
        'pylons.paths' : { 'controllers' : None },
        'debug' : True,
    }
    yield make_map(config)
@pytest.yield_fixture
def app():
    """WebTest application wrapping the WSGI app loaded from test.ini."""
    wsgiapp = loadapp('config:'+ini_filename, relative_to=".")
    app = TestApp(wsgiapp)
    yield app
class DoubleSession(object):
    """Mirror session operations across both ORM session registries.

    The zkpylons -> zk migration left two scoped-session objects in play:
    some modules use zk.model.meta.Session while others use
    zkpylons.model.meta.Session (and some use relative paths).  Tests must
    keep both configured identically, so write-type operations are applied
    to both registries in order, while reads are served from the first.
    """
    def __init__(self, session1, session2):
        self.s1 = session1
        self.s2 = session2
    def remove(self):
        for session in (self.s1, self.s2):
            session.remove()
    def configure(self, engine):
        # Bind both registries first, then disable autoflush on both,
        # preserving the original call ordering.
        for session in (self.s1, self.s2):
            session.configure(bind=engine)
        for session in (self.s1, self.s2):
            session.configure(autoflush=False)
    def commit(self):
        for session in (self.s1, self.s2):
            session.commit()
    # TODO: Maybe expire_all or refresh would be better
    def expunge_all(self):
        for session in (self.s1, self.s2):
            session.expunge_all()
    def query(self, cls):
        # Reads only need one registry; use the first.
        return self.s1.query(cls)
    def execute(self, *args, **kwargs):
        return self.s1.execute(*args, **kwargs)
# Minimal site configuration required for basic pages to render.
base_general_config = {
    'sponsors' : {"top":[],"slideshow":[]},
    'account_creation' : True,
    'cfp_status' : "open",
    'conference_status' : "open",
}
# Registration settings: which personal-info fields are requested.
base_rego_config = {
    'personal_info' : {"phone":"yes","home_address":"yes"}
}
@pytest.yield_fixture
def db_session():
    """Recreate the database schema with baseline config; yields a DoubleSession."""
    # Set up SQLAlchemy to provide DB access
    dsess = DoubleSession(zkmeta.Session, pymeta.Session)
    # Clean up old sessions if they exist
    dsess.remove()
    engine = create_engine(ini.get("app:main", "sqlalchemy.url"))
    # Drop all data to establish known state, mostly to prevent primary-key conflicts
    engine.execute("drop schema if exists public cascade")
    engine.execute("create schema public")
    zkmeta.Base.metadata.create_all(engine)
    dsess.configure(engine)
    # Create basic config values, to allow basic pages to render
    for key, val in base_general_config.iteritems():
        ConfigFactory(key=key, value=val)
    for key, val in base_rego_config.iteritems():
        ConfigFactory(category='rego', key=key, value=val)
    dsess.commit()
    # Run the actual test
    yield dsess
    # No rollback, for functional tests we have to actually commit to DB
@pytest.yield_fixture
def smtplib():
    """Install paste's Dummy_smtplib so no real mail is sent; reset afterwards."""
    Dummy_smtplib.install()
    yield Dummy_smtplib
    if Dummy_smtplib.existing:
        Dummy_smtplib.existing.reset()
| zookeepr/zookeepr | zkpylons/tests/functional/conftest.py | Python | gpl-2.0 | 3,543 |
#!/usr/bin/env python
# Copyright 2007, Michael J. Harms
# This program is distributed under General Public License v. 3. See the file
# COPYING for a copy of the license.
__author__ = "Michael J. Harms"
__date__ = "070709"
__description__ = \
"""
pdb_dist-filter.py
Takes pdb file and calculates the distance between "residue" "atom" and all
other atoms of this type in the pdb file with "column" matching "select".
"""
from math import sqrt
def extractCoor(line):
    """
    Take a line out of a pdb and extract its x, y, z coordinates (three
    fixed 8-character fields starting at column 30).
    """
    return [float(line[30+8*i:38+8*i]) for i in range(3)]

def distFilter(pdb, residue, atom="N", column=[60, 66], select=" NA"):
    """
    Calculate the distance between "residue" "atom" and the "atom" of every
    residue whose line[column[0]:column[1]] matches "select".  Default is
    to compare the nitrogen of "residue" to nitrogens of residues whose
    b-factor column reads " NA".

    pdb      list of pdb-file lines
    residue  residue number (int) of the reference atom
    atom     atom name to compare (default "N"); column is never mutated,
             so the mutable default list is safe here
    column   [start, stop] character slice used for filtering
    select   exact string the slice must equal

    Returns a list of distances; raises IOError on bad input.
    """
    # Atom names are a left-justified, three-character fixed-width field.
    atom = "%-3s" % (atom.strip())

    # Keep only ATOM records of the requested atom type.
    pdb = [l for l in pdb if l[0:4] == "ATOM" and l[13:16] == atom]
    if len(pdb) == 0:
        err = "pdb file does not contain any atoms of type \"%s\"" % atom
        raise IOError(err)

    # Reference coordinates from the requested residue (columns 22-25).
    res_coord = [extractCoor(l) for l in pdb if l[22:26] == "%4i" % residue][0]

    # Validate the column specification up front.  BUGFIX: the original
    # error path raised a misspelled "IOErro" (NameError) and additionally
    # re-indexed the bad column while formatting its message, which raised
    # IndexError again before any IOError could be produced.
    try:
        start, stop = column[0], column[1]
    except IndexError:
        err = "Invalid column definition: %r" % (column,)
        raise IOError(err)

    # Coordinates of every line whose filter column matches "select".
    na_coord = [extractCoor(l) for l in pdb if l[start:stop] == select]
    if len(na_coord) == 0:
        err = "Column line[%i:%i] does not contain any \"%s\" entries!" % \
              (start, stop, select)
        raise IOError(err)

    # Euclidean distance between the reference atom and each selected atom.
    dist = []
    for c in na_coord:
        dist.append(sqrt(sum([(c[i] - res_coord[i])**2 for i in range(3)])))

    return dist
def main():
    """
    Takes command line arguments and calls distFilter
    """
    # NOTE: Python 2 script (print statement below); cmdline is the
    # project's option-parsing helper.
    from helper import cmdline
    # Parse command line
    cmdline.initializeParser(__description__,__date__)
    cmdline.addOption(short_flag="r",
                      long_flag="residue",
                      action="store",
                      default=None,
                      help="residue for comparison (REQUIRED).")
    cmdline.addOption(short_flag="c",
                      long_flag="column_input",
                      action="store",
                      default=[60,66," NA"],
                      help="X Y SELECT; take lines in which column " + \
                           "defined by line[X:Y] matches SELECT",
                      nargs=3)
    cmdline.addOption(short_flag="a",
                      long_flag="atom",
                      default="N",
                      action="store",
                      help="Atom type to compare")
    file_list, options = cmdline.parseCommandLine()
    # Make sure that a residue is specified
    if options.residue == None:
        err = "Residue must be specified with -r flag!"
        cmdline.parser.error(err)
    # Make sure arguments are sane
    try:
        residue = int(options.residue)
        column = [int(options.column_input[0]),int(options.column_input[1])]
        select = options.column_input[2]
        atom = options.atom
    except ValueError:
        err = "Mangled arguments!"
        cmdline.parser.error(err)
    # Run script for all files in file_list
    for pdb_file in file_list:
        # Read in pdb file
        f = open(pdb_file,'r')
        pdb = f.readlines()
        f.close()
        # Perform analysis
        dist = distFilter(pdb,residue,atom,column,select)
        # dump output to stdout
        out = ["%10i%10.3F\n" % (i,d) for i, d in enumerate(dist)]
        out.insert(0,"%10s%10s\n" % (" ","dist"))
        print "".join(out)

# Parse command line if called from command line
if __name__ == "__main__":
    main()
| AndreaEdwards/pdbtools | pdb_dist-filter.py | Python | gpl-3.0 | 4,013 |
# Copyright (c) 2018, Frappe Technologies Pvt. Ltd. and Contributors
# See license.txt
import unittest
import frappe
import erpnext
test_dependencies = ["Employee", "Leave Type", "Leave Policy"]
class TestLeavePeriod(unittest.TestCase):
    # Intentionally empty: no unit tests implemented here yet; the module
    # currently only provides the create_leave_period() helper below.
    pass
def create_leave_period(from_date, to_date, company=None):
    """Return an active Leave Period covering the span, creating it if needed.

    company defaults to the site's default company.
    """
    company = company or erpnext.get_default_company()
    filters = dict(company=company,
        from_date=from_date,
        to_date=to_date,
        is_active=1)
    existing = frappe.db.get_value('Leave Period', filters, 'name')
    if existing:
        return frappe.get_doc("Leave Period", existing)
    return frappe.get_doc({
        "doctype": "Leave Period",
        "company": company,
        "from_date": from_date,
        "to_date": to_date,
        "is_active": 1
    }).insert()
| mhbu50/erpnext | erpnext/hr/doctype/leave_period/test_leave_period.py | Python | gpl-3.0 | 775 |
# -*- encoding: utf-8 -*-
"""
Usage::
hammer capsule [OPTIONS] SUBCOMMAND [ARG] ...
Parameters::
SUBCOMMAND subcommand
[ARG] ... subcommand arguments
Subcommands::
content Manage the capsule content
create Create a capsule
delete Delete a capsule
import-classes Import puppet classes from puppet Capsule.
info Show a capsule
list List all capsules
refresh-features Refresh capsule features
update Update a capsule
"""
from robottelo.cli.base import Base
class Capsule(Base):
    """CLI wrapper around Foreman's ``hammer capsule`` command tree."""

    command_base = 'capsule'

    @classmethod
    def content_add_lifecycle_environment(cls, options):
        """Add lifecycle environments to the capsule."""
        cls.command_sub = 'content add-lifecycle-environment'
        return cls.execute(cls._construct_command(options), output_format='csv')

    @classmethod
    def content_available_lifecycle_environments(cls, options):
        """List the lifecycle environments not attached to the capsule."""
        cls.command_sub = 'content available-lifecycle-environments'
        return cls.execute(cls._construct_command(options), output_format='csv')

    @classmethod
    def content_info(cls, options):
        """Get current capsule synchronization status."""
        cls.command_sub = 'content info'
        # Note: this subcommand emits JSON, unlike the other content commands.
        return cls.execute(cls._construct_command(options), output_format='json')

    @classmethod
    def content_lifecycle_environments(cls, options):
        """List the lifecycle environments attached to the capsule."""
        cls.command_sub = 'content lifecycle-environments'
        return cls.execute(cls._construct_command(options), output_format='csv')

    @classmethod
    def content_remove_lifecycle_environment(cls, options):
        """Remove lifecycle environments from the capsule."""
        cls.command_sub = 'content remove-lifecycle-environment'
        return cls.execute(cls._construct_command(options), output_format='csv')

    @classmethod
    def content_synchronization_status(cls, options):
        """Get current capsule synchronization status."""
        cls.command_sub = 'content synchronization-status'
        return cls.execute(cls._construct_command(options), output_format='csv')

    @classmethod
    def content_synchronize(cls, options):
        """Synchronize the content to the capsule."""
        cls.command_sub = 'content synchronize'
        return cls.execute(cls._construct_command(options), output_format='csv')

    @classmethod
    def import_classes(cls, options):
        """Import puppet classes from puppet Capsule."""
        cls.command_sub = 'import-classes'
        return cls.execute(cls._construct_command(options), output_format='csv')

    @classmethod
    def refresh_features(cls, options):
        """Refresh capsule features."""
        cls.command_sub = 'refresh-features'
        return cls.execute(cls._construct_command(options), output_format='csv')
| ares/robottelo | robottelo/cli/capsule.py | Python | gpl-3.0 | 3,537 |
#!/usr/bin/env python
import freenect
import cv
import numpy as np
# Pre-create the display windows so the freenect callbacks can blit into them.
cv.NamedWindow('Depth')
cv.NamedWindow('RGB')
def display_depth(dev, data, timestamp):
data -= np.min(data.ravel())
data *= 65536 / np.max(data.ravel())
image = cv.CreateImageHeader((data.shape[1], data.shape[0]),
cv.IPL_DEPTH_16U,
1)
cv.SetData(image, data.tostring(),
data.dtype.itemsize * data.shape[1])
cv.ShowImage('Depth', image)
cv.WaitKey(5)
def display_rgb(dev, data, timestamp):
    """freenect video callback: show the frame in the 'RGB' window."""
    height = data.shape[0]
    width = data.shape[1]
    frame = cv.CreateImageHeader((width, height), cv.IPL_DEPTH_8U, 3)
    # HighGUI expects BGR byte order, so reverse the channel axis.
    bgr = data[:, :, ::-1]
    cv.SetData(frame, bgr.tostring(), data.dtype.itemsize * 3 * width)
    cv.ShowImage('RGB', frame)
    # Give HighGUI a slice of time to pump its event loop and repaint.
    cv.WaitKey(5)
# Blocks here: freenect pumps depth/video frames into the callbacks above
# until interrupted.
freenect.runloop(depth=display_depth,
                 video=display_rgb)
| cnr-isti-vclab/meshlab | src/external/openkinect/wrappers/python/demo_cv_async.py | Python | gpl-3.0 | 988 |
# -*- coding: utf-8 -*-
from south.v2 import DataMigration
class Migration(DataMigration):
    """One-off data migration: pin every 'Zip Code' boundary to sort_order 5.

    The ``models`` dict below is South's frozen snapshot of the schema at the
    time this migration was written; it must not be edited by hand.
    """

    def forwards(self, orm):
        # Bulk UPDATE through the frozen ORM; per-row save() hooks do not run.
        orm.Boundary.objects.filter(category='Zip Code').update(sort_order=5)

    def backwards(self, orm):
        # Irreversible: the previous sort_order values were not recorded.
        pass

    # Frozen model definitions used by South to construct the historical
    # `orm` object passed to forwards()/backwards().  Auto-generated.
    models = {
        u'auth.group': {
            'Meta': {'object_name': 'Group'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        u'auth.permission': {
            'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        u'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        u'treemap.audit': {
            'Meta': {'object_name': 'Audit'},
            'action': ('django.db.models.fields.IntegerField', [], {}),
            'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'current_value': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'db_index': 'True'}),
            'field': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'instance': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['treemap.Instance']", 'null': 'True', 'blank': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'db_index': 'True'}),
            'model_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'db_index': 'True'}),
            'previous_value': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
            'ref': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['treemap.Audit']", 'null': 'True'}),
            'requires_auth': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'db_index': 'True', 'blank': 'True'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['treemap.User']"})
        },
        u'treemap.benefitcurrencyconversion': {
            'Meta': {'object_name': 'BenefitCurrencyConversion'},
            'co2_lb_to_currency': ('django.db.models.fields.FloatField', [], {}),
            'currency_symbol': ('django.db.models.fields.CharField', [], {'max_length': '5'}),
            'electricity_kwh_to_currency': ('django.db.models.fields.FloatField', [], {}),
            'h20_gal_to_currency': ('django.db.models.fields.FloatField', [], {}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'natural_gas_kbtu_to_currency': ('django.db.models.fields.FloatField', [], {}),
            'nox_lb_to_currency': ('django.db.models.fields.FloatField', [], {}),
            'o3_lb_to_currency': ('django.db.models.fields.FloatField', [], {}),
            'pm10_lb_to_currency': ('django.db.models.fields.FloatField', [], {}),
            'sox_lb_to_currency': ('django.db.models.fields.FloatField', [], {}),
            'voc_lb_to_currency': ('django.db.models.fields.FloatField', [], {})
        },
        u'treemap.boundary': {
            'Meta': {'object_name': 'Boundary'},
            'category': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'geom': ('django.contrib.gis.db.models.fields.MultiPolygonField', [], {'srid': '3857', 'db_column': "u'the_geom_webmercator'"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'sort_order': ('django.db.models.fields.IntegerField', [], {})
        },
        u'treemap.fieldpermission': {
            'Meta': {'unique_together': "((u'model_name', u'field_name', u'role', u'instance'),)", 'object_name': 'FieldPermission'},
            'field_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'instance': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['treemap.Instance']"}),
            'model_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'permission_level': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'role': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['treemap.Role']"})
        },
        u'treemap.instance': {
            'Meta': {'object_name': 'Instance'},
            'basemap_data': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'basemap_type': ('django.db.models.fields.CharField', [], {'default': "u'google'", 'max_length': '255'}),
            'boundaries': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['treemap.Boundary']", 'null': 'True', 'blank': 'True'}),
            'bounds': ('django.contrib.gis.db.models.fields.MultiPolygonField', [], {'srid': '3857'}),
            'config': ('treemap.json_field.JSONField', [], {'blank': 'True'}),
            'default_role': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'default_role'", 'to': u"orm['treemap.Role']"}),
            'eco_benefits_conversion': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['treemap.BenefitCurrencyConversion']", 'null': 'True', 'blank': 'True'}),
            'geo_rev': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_public': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'itree_region_default': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
            'logo': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
            'url_name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
            'users': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['treemap.User']", 'null': 'True', 'through': u"orm['treemap.InstanceUser']", 'blank': 'True'})
        },
        u'treemap.instanceuser': {
            'Meta': {'object_name': 'InstanceUser'},
            'admin': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'instance': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['treemap.Instance']"}),
            'reputation': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'role': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['treemap.Role']"}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['treemap.User']"})
        },
        u'treemap.itreecodeoverride': {
            'Meta': {'unique_together': "((u'instance_species', u'region'),)", 'object_name': 'ITreeCodeOverride'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'instance_species': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['treemap.Species']"}),
            'itree_code': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'region': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['treemap.ITreeRegion']"})
        },
        u'treemap.itreeregion': {
            'Meta': {'object_name': 'ITreeRegion'},
            'code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '40'}),
            'geometry': ('django.contrib.gis.db.models.fields.MultiPolygonField', [], {'srid': '3857'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
        },
        u'treemap.mapfeature': {
            'Meta': {'object_name': 'MapFeature'},
            'address_city': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'address_street': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'address_zip': ('django.db.models.fields.CharField', [], {'max_length': '30', 'null': 'True', 'blank': 'True'}),
            'feature_type': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'geom': ('django.contrib.gis.db.models.fields.PointField', [], {'srid': '3857', 'db_column': "u'the_geom_webmercator'"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'instance': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['treemap.Instance']"}),
            'readonly': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'udfs': ('treemap.udf.UDFField', [], {'db_index': 'True', 'blank': 'True'})
        },
        u'treemap.plot': {
            'Meta': {'object_name': 'Plot', '_ormbases': [u'treemap.MapFeature']},
            'length': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
            u'mapfeature_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['treemap.MapFeature']", 'unique': 'True', 'primary_key': 'True'}),
            'owner_orig_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'width': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'})
        },
        u'treemap.reputationmetric': {
            'Meta': {'object_name': 'ReputationMetric'},
            'action': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'approval_score': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'denial_score': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'direct_write_score': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'instance': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['treemap.Instance']"}),
            'model_name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
        },
        u'treemap.role': {
            'Meta': {'object_name': 'Role'},
            'default_permission': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'instance': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['treemap.Instance']", 'null': 'True', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'rep_thresh': ('django.db.models.fields.IntegerField', [], {})
        },
        u'treemap.species': {
            'Meta': {'object_name': 'Species'},
            'bloom_period': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'common_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'cultivar': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'fact_sheet': ('django.db.models.fields.URLField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'fall_conspicuous': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
            'flower_conspicuous': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
            'fruit_period': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'gender': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
            'genus': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'instance': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['treemap.Instance']"}),
            'max_dbh': ('django.db.models.fields.IntegerField', [], {'default': '200'}),
            'max_height': ('django.db.models.fields.IntegerField', [], {'default': '800'}),
            'native_status': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
            'other': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'otm_code': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'palatable_human': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
            'plant_guide': ('django.db.models.fields.URLField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'species': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'udfs': ('treemap.udf.UDFField', [], {'db_index': 'True', 'blank': 'True'}),
            'wildlife_value': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'})
        },
        u'treemap.staticpage': {
            'Meta': {'object_name': 'StaticPage'},
            'content': ('django.db.models.fields.TextField', [], {}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'instance': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['treemap.Instance']"}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        u'treemap.tree': {
            'Meta': {'object_name': 'Tree'},
            'canopy_height': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
            'date_planted': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
            'date_removed': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
            'diameter': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
            'height': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'instance': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['treemap.Instance']"}),
            'plot': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['treemap.Plot']"}),
            'readonly': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'species': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['treemap.Species']", 'null': 'True', 'blank': 'True'}),
            'udfs': ('treemap.udf.UDFField', [], {'db_index': 'True', 'blank': 'True'})
        },
        u'treemap.treephoto': {
            'Meta': {'object_name': 'TreePhoto'},
            'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
            'instance': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['treemap.Instance']"}),
            'thumbnail': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
            'tree': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['treemap.Tree']"})
        },
        u'treemap.user': {
            'Meta': {'object_name': 'User'},
            'allow_email_contact': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'unique': 'True', 'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'firstname': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '255', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'lastname': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '255', 'blank': 'True'}),
            'organization': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '255', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'photo': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'thumbnail': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        u'treemap.userdefinedcollectionvalue': {
            'Meta': {'object_name': 'UserDefinedCollectionValue'},
            'data': ('django_hstore.fields.DictionaryField', [], {}),
            'field_definition': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['treemap.UserDefinedFieldDefinition']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model_id': ('django.db.models.fields.IntegerField', [], {})
        },
        u'treemap.userdefinedfielddefinition': {
            'Meta': {'object_name': 'UserDefinedFieldDefinition'},
            'datatype': ('django.db.models.fields.TextField', [], {}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'instance': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['treemap.Instance']"}),
            'iscollection': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'model_type': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
        }
    }

    complete_apps = ['treemap']
    symmetrical = True
| johnsonc/OTM2 | opentreemap/treemap/migrations/0061_change_zip_code_sort_order.py | Python | gpl-3.0 | 20,859 |
"""
Copyright 2008-2016 Free Software Foundation, Inc.
This file is part of GNU Radio
GNU Radio Companion is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
GNU Radio Companion is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
"""
import os
import sys
from . import ParseXML, Messages, Constants
from .Config import Config
from .Element import Element
from .generator import Generator
from .FlowGraph import FlowGraph
from .Connection import Connection
from .Block import Block
from .Port import Port
from .Param import Param
from .utils import odict, extract_docs
class Platform(Element):
    """Root model object for GRC: owns the block library, domains and
    connection templates, and serves as the factory for flow graphs.

    The class-level attributes below are factory classes; subclasses (e.g.
    the GUI platform) override them to substitute their own implementations.
    """

    Config = Config
    Generator = Generator
    FlowGraph = FlowGraph
    Connection = Connection
    Block = Block
    Port = Port
    Param = Param

    is_platform = True

    def __init__(self, *args, **kwargs):
        """ Make a platform for GNU Radio """
        Element.__init__(self)

        self.config = self.Config(*args, **kwargs)

        # key -> {match: docstring} populated asynchronously by the extractor.
        self.block_docstrings = {}
        self.block_docstrings_loaded_callback = lambda: None  # dummy to be replaced by BlockTreeWindow
        self._docstring_extractor = extract_docs.SubprocessLoader(
            callback_query_result=self._save_docstring_extraction_result,
            callback_finished=lambda: self.block_docstrings_loaded_callback()
        )

        # Create a dummy flow graph for the blocks
        self._flow_graph = Element(self)
        self._flow_graph.connections = []

        self.blocks = odict()
        self._blocks_n = odict()
        self._block_categories = {}
        self.domains = {}
        self.connection_templates = {}

        # Tracks files currently being generated to detect cyclic hier blocks.
        self._auto_hier_block_generate_chain = set()

        self.build_block_library()

    def __str__(self):
        return 'Platform - {}({})'.format(self.config.key, self.config.name)

    @staticmethod
    def find_file_in_paths(filename, paths, cwd):
        """Checks the provided paths relative to cwd for a certain filename"""
        if not os.path.isdir(cwd):
            cwd = os.path.dirname(cwd)
        if isinstance(paths, str):
            paths = (p for p in paths.split(':') if p)

        for path in paths:
            path = os.path.expanduser(path)
            if not os.path.isabs(path):
                path = os.path.normpath(os.path.join(cwd, path))
            file_path = os.path.join(path, filename)
            if os.path.exists(os.path.normpath(file_path)):
                return file_path
        # Implicitly returns None when the file is found in none of the paths.

    def load_and_generate_flow_graph(self, file_path):
        """Loads a flow graph from file and generates it"""
        Messages.set_indent(len(self._auto_hier_block_generate_chain))
        Messages.send('>>> Loading: %r\n' % file_path)
        if file_path in self._auto_hier_block_generate_chain:
            Messages.send('    >>> Warning: cyclic hier_block dependency\n')
            return False
        self._auto_hier_block_generate_chain.add(file_path)
        try:
            flow_graph = self.get_new_flow_graph()
            flow_graph.grc_file_path = file_path
            # Other, nested hier_blocks might be auto-loaded here
            flow_graph.import_data(self.parse_flow_graph(file_path))
            flow_graph.rewrite()
            flow_graph.validate()

            if not flow_graph.is_valid():
                raise Exception('Flowgraph invalid')
            if not flow_graph.get_option('generate_options').startswith('hb'):
                raise Exception('Not a hier block')
        except Exception as e:
            Messages.send('>>> Load Error: {}: {}\n'.format(file_path, str(e)))
            return False
        finally:
            self._auto_hier_block_generate_chain.discard(file_path)
            Messages.set_indent(len(self._auto_hier_block_generate_chain))

        try:
            Messages.send('>>> Generating: {}\n'.format(file_path))
            generator = self.Generator(flow_graph, file_path)
            generator.write()
        except Exception as e:
            Messages.send('>>> Generate Error: {}: {}\n'.format(file_path, str(e)))
            return False

        # Register the freshly generated hier block's XML with the library.
        self.load_block_xml(generator.get_file_path_xml())
        return True

    def build_block_library(self):
        """load the blocks and block tree from the search paths"""
        self._docstring_extractor.start()
        # Reset
        self.blocks.clear()
        self._blocks_n.clear()
        self._block_categories.clear()
        self.domains.clear()
        self.connection_templates.clear()
        ParseXML.xml_failures.clear()

        # Try to parse and load blocks
        for xml_file in self.iter_xml_files():
            try:
                if xml_file.endswith("block_tree.xml"):
                    self.load_category_tree_xml(xml_file)
                elif xml_file.endswith('domain.xml'):
                    self.load_domain_xml(xml_file)
                else:
                    self.load_block_xml(xml_file)
            except ParseXML.XMLSyntaxError as e:
                # DTD validation failures are silently skipped here; details
                # remain available in ParseXML.xml_failures.
                # print >> sys.stderr, 'Warning: Block validation failed:\n\t%s\n\tIgnoring: %s' % (e, xml_file)
                pass
            except Exception as e:
                print >> sys.stderr, 'Warning: XML parsing failed:\n\t%r\n\tIgnoring: %s' % (e, xml_file)

        # Add blocks to block tree
        for key, block in self.blocks.iteritems():
            category = self._block_categories.get(key, block.category)
            # Blocks with empty categories are hidden
            if not category:
                continue
            root = category[0]
            if root.startswith('[') and root.endswith(']'):
                # A bracketed root category is taken verbatim (brackets stripped).
                category[0] = root[1:-1]
            else:
                category.insert(0, Constants.DEFAULT_BLOCK_MODULE_NAME)
            block.category = category

        self._docstring_extractor.finish()
        # self._docstring_extractor.wait()

    def iter_xml_files(self):
        """Iterator for block descriptions and category trees"""
        for block_path in self.config.block_paths:
            if os.path.isfile(block_path):
                yield block_path
            elif os.path.isdir(block_path):
                for dirpath, dirnames, filenames in os.walk(block_path):
                    for filename in sorted(filter(lambda f: f.endswith('.xml'), filenames)):
                        yield os.path.join(dirpath, filename)

    def load_block_xml(self, xml_file):
        """Load block description from xml file"""
        # Validate and import
        ParseXML.validate_dtd(xml_file, Constants.BLOCK_DTD)
        n = ParseXML.from_file(xml_file).find('block')
        n['block_wrapper_path'] = xml_file  # inject block wrapper path
        # Get block instance and add it to the list of blocks
        block = self.Block(self._flow_graph, n)
        key = block.get_key()
        if key in self.blocks:
            print >> sys.stderr, 'Warning: Block with key "{}" already exists.\n\tIgnoring: {}'.format(key, xml_file)
        else:  # Store the block
            self.blocks[key] = block
            self._blocks_n[key] = n

        # Queue the block's imports/make for asynchronous docstring extraction.
        self._docstring_extractor.query(
            block.get_key(),
            block.get_imports(raw=True),
            block.get_make(raw=True)
        )

    def load_category_tree_xml(self, xml_file):
        """Validate and parse category tree file and add it to list"""
        ParseXML.validate_dtd(xml_file, Constants.BLOCK_TREE_DTD)
        xml = ParseXML.from_file(xml_file)
        path = []

        def load_category(cat_n):
            # Depth-first walk; `path` carries the current category chain.
            path.append(cat_n.find('name').strip())
            for block_key in cat_n.findall('block'):
                if block_key not in self._block_categories:
                    self._block_categories[block_key] = list(path)
            for sub_cat_n in cat_n.findall('cat'):
                load_category(sub_cat_n)
            path.pop()

        load_category(xml.find('cat'))

    def load_domain_xml(self, xml_file):
        """Load a domain properties and connection templates from XML"""
        ParseXML.validate_dtd(xml_file, Constants.DOMAIN_DTD)
        n = ParseXML.from_file(xml_file).find('domain')

        key = n.find('key')
        if not key:
            print >> sys.stderr, 'Warning: Domain with emtpy key.\n\tIgnoring: {}'.format(xml_file)
            return
        if key in self.domains:  # test against repeated keys
            print >> sys.stderr, 'Warning: Domain with key "{}" already exists.\n\tIgnoring: {}'.format(key, xml_file)
            return

        #to_bool = lambda s, d: d if s is None else s.lower() not in ('false', 'off', '0', '')
        def to_bool(s, d):
            # Parse an XML boolean string; `d` is the default when absent.
            if s is not None:
                return s.lower() not in ('false', 'off', '0', '')
            return d

        color = n.find('color') or ''
        try:
            import gtk  # ugly but handy
            gtk.gdk.color_parse(color)
        except (ValueError, ImportError):
            if color:  # no color is okay, default set in GUI
                print >> sys.stderr, 'Warning: Can\'t parse color code "{}" for domain "{}" '.format(color, key)
                color = None

        self.domains[key] = dict(
            name=n.find('name') or key,
            multiple_sinks=to_bool(n.find('multiple_sinks'), True),
            multiple_sources=to_bool(n.find('multiple_sources'), False),
            color=color
        )
        # Connection templates are keyed by (source_domain, sink_domain).
        for connection_n in n.findall('connection'):
            key = (connection_n.find('source_domain'), connection_n.find('sink_domain'))
            if not all(key):
                print >> sys.stderr, 'Warning: Empty domain key(s) in connection template.\n\t{}'.format(xml_file)
            elif key in self.connection_templates:
                print >> sys.stderr, 'Warning: Connection template "{}" already exists.\n\t{}'.format(key, xml_file)
            else:
                self.connection_templates[key] = connection_n.find('make') or ''

    def _save_docstring_extraction_result(self, key, docstrings):
        # Callback from the extractor subprocess: filter out SWIG '_sptr'
        # artifacts and collapse blank lines before caching.
        docs = {}
        for match, docstring in docstrings.iteritems():
            if not docstring or match.endswith('_sptr'):
                continue
            docstring = docstring.replace('\n\n', '\n').strip()
            docs[match] = docstring
        self.block_docstrings[key] = docs

    ##############################################
    # Access
    ##############################################
    def parse_flow_graph(self, flow_graph_file):
        """
        Parse a saved flow graph file.
        Ensure that the file exists, and passes the dtd check.

        Args:
            flow_graph_file: the flow graph file

        Returns:
            nested data
        @throws exception if the validation fails
        """
        flow_graph_file = flow_graph_file or self.config.default_flow_graph
        open(flow_graph_file, 'r').close()  # Test open
        ParseXML.validate_dtd(flow_graph_file, Constants.FLOW_GRAPH_DTD)
        return ParseXML.from_file(flow_graph_file)

    def get_new_flow_graph(self):
        return self.FlowGraph(platform=self)

    def get_blocks(self):
        return self.blocks.values()

    def get_new_block(self, flow_graph, key):
        # Instantiate from the cached parse tree for the given block key.
        return self.Block(flow_graph, n=self._blocks_n[key])

    def get_colors(self):
        return [(name, color) for name, key, sizeof, color in Constants.CORE_TYPES]
| stwunsch/gnuradio | grc/core/Platform.py | Python | gpl-3.0 | 11,876 |
# Linktastic Module
# - A python2/3 compatible module that can create hardlinks/symlinks on windows-based systems
#
# Linktastic is distributed under the MIT License. The follow are the terms and conditions of using Linktastic.
#
# The MIT License (MIT)
# Copyright (c) 2012 Solipsis Development
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
# associated documentation files (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial
# portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
# LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import subprocess
from subprocess import CalledProcessError
import os
# On Windows, prepare STARTUPINFO flags so the "cmd" windows spawned for
# mklink below stay hidden.
if os.name == 'nt':
    info = subprocess.STARTUPINFO()
    info.dwFlags |= subprocess.STARTF_USESHOWWINDOW
# Prevent spaces from messing with us!
def _escape_param(param):
return '"%s"' % param
# Private function to create link on nt-based systems
def _link_windows(src, dest):
    """Create a hard link *dest* -> *src* via ``cmd /C mklink /H``.

    Raises IOError with mklink's decoded output if the command fails.
    NOTE(review): depends on the module-level ``info`` STARTUPINFO, which
    only exists when ``os.name == 'nt'``.
    """
    try:
        subprocess.check_output(
            'cmd /C mklink /H %s %s' % (_escape_param(dest), _escape_param(src)),
            stderr=subprocess.STDOUT, startupinfo=info)
    except CalledProcessError as err:
        raise IOError(err.output.decode('utf-8'))
    # TODO, find out what kind of messages Windows sends us from mklink
    # print(stdout)
    # assume if they ret-coded 0 we're good
def _symlink_windows(src, dest):
    """Create a file symlink *dest* -> *src* via ``cmd /C mklink``.

    Raises IOError with mklink's decoded output if the command fails.
    """
    try:
        subprocess.check_output(
            'cmd /C mklink %s %s' % (_escape_param(dest), _escape_param(src)),
            stderr=subprocess.STDOUT, startupinfo=info)
    except CalledProcessError as err:
        raise IOError(err.output.decode('utf-8'))
    # TODO, find out what kind of messages Windows sends us from mklink
    # print(stdout)
    # assume if they ret-coded 0 we're good
def _dirlink_windows(src, dest):
    """Create a directory link *dest* -> *src* via ``cmd /C mklink /J``.

    NOTE(review): ``/J`` creates an NTFS *junction*, while
    ``_junctionlink_windows`` below uses ``/D`` (directory symlink) --
    the flags look swapped relative to the function names; confirm
    against callers before changing.
    """
    try:
        subprocess.check_output(
            'cmd /C mklink /J %s %s' % (_escape_param(dest), _escape_param(src)),
            stderr=subprocess.STDOUT, startupinfo=info)
    except CalledProcessError as err:
        raise IOError(err.output.decode('utf-8'))
    # TODO, find out what kind of messages Windows sends us from mklink
    # print(stdout)
    # assume if they ret-coded 0 we're good
def _junctionlink_windows(src, dest):
    """Create a directory link *dest* -> *src* via ``cmd /C mklink /D``.

    NOTE(review): ``/D`` creates a directory *symlink*, not a junction;
    see the swapped-flags note on ``_dirlink_windows``.
    """
    try:
        subprocess.check_output(
            'cmd /C mklink /D %s %s' % (_escape_param(dest), _escape_param(src)),
            stderr=subprocess.STDOUT, startupinfo=info)
    except CalledProcessError as err:
        raise IOError(err.output.decode('utf-8'))
    # TODO, find out what kind of messages Windows sends us from mklink
    # print(stdout)
    # assume if they ret-coded 0 we're good
# Create a hard link to src named as dest
# This version of link, unlike os.link, supports nt systems as well
def link(src, dest):
    """Create a hard link named *dest* pointing at *src*.

    Unlike ``os.link`` this also works on Windows, where it shells out
    to ``mklink /H``.
    """
    if os.name != 'nt':
        os.link(src, dest)
        return
    _link_windows(src, dest)
# Create a symlink to src named as dest, but don't fail if you're on nt
def symlink(src, dest):
    """Create a symbolic link named *dest* pointing at *src* (NT-aware)."""
    if os.name != 'nt':
        os.symlink(src, dest)
        return
    _symlink_windows(src, dest)
# Create a symlink to src named as dest, but don't fail if you're on nt
def dirlink(src, dest):
    """Link a directory: delegates to the ``mklink /J`` helper on
    Windows, and to ``os.symlink`` everywhere else."""
    if os.name != 'nt':
        os.symlink(src, dest)
        return
    _dirlink_windows(src, dest)
# Create a symlink to src named as dest, but don't fail if you're on nt
def junctionlink(src, dest):
    """Link a directory: delegates to the ``mklink /D`` helper on
    Windows, and to ``os.symlink`` everywhere else."""
    if os.name != 'nt':
        os.symlink(src, dest)
        return
    _junctionlink_windows(src, dest)
# Copyright (c) 2017, Frappe Technologies Pvt. Ltd. and contributors
# For license information, please see license.txt
import frappe
from frappe import _
def execute(filters=None):
    """Build the Share Ledger report.

    Returns a ``(columns, data)`` tuple for the given filters.  A
    ``date`` filter is mandatory (frappe.throw otherwise); data rows are
    only produced when a ``shareholder`` filter is also present.
    """
    if not filters:
        filters = {}
    if not filters.get("date"):
        frappe.throw(_("Please select date"))

    columns = get_columns(filters)
    date = filters.get("date")

    data = []
    # The original used an inverted "if not shareholder: pass / else:"
    # block; flattened to a positive guard with identical behavior.
    if filters.get("shareholder"):
        transfers = get_all_transfers(date, filters.get("shareholder"))
        for transfer in transfers:
            if transfer.transfer_type == 'Transfer':
                # Label the transfer from the selected shareholder's
                # point of view ("to X" / "from X").
                if transfer.from_shareholder == filters.get("shareholder"):
                    transfer.transfer_type += ' to {}'.format(transfer.to_shareholder)
                else:
                    transfer.transfer_type += ' from {}'.format(transfer.from_shareholder)
            row = [filters.get("shareholder"), transfer.date, transfer.transfer_type,
                transfer.share_type, transfer.no_of_shares, transfer.rate, transfer.amount,
                transfer.company, transfer.name]
            data.append(row)
    return columns, data
def get_columns(filters):
    """Return the report column definitions.

    Each entry uses the Frappe report shorthand
    ``label:fieldtype/options:width``; *filters* is accepted for
    interface symmetry but not used.
    """
    columns = [
        _("Shareholder") + ":Link/Shareholder:150",
        _("Date") + ":Date:100",
        _("Transfer Type") + "::140",
        _("Share Type") + "::90",
        _("No of Shares") + "::90",
        _("Rate") + ":Currency:90",
        _("Amount") + ":Currency:90",
        _("Company") + "::150",
        _("Share Transfer") + ":Link/Share Transfer:90"
    ]
    return columns
def get_all_transfers(date, shareholder):
    """Fetch every Share Transfer involving *shareholder* up to *date*.

    Matches rows where the shareholder appears on either side of the
    transfer, ordered chronologically.  User-supplied values go through
    parameterized SQL (%(...)s); ``condition`` is a static placeholder
    interpolated via str.format so a company filter can be re-enabled.
    """
    condition = ' '
    # if company:
    #     condition = 'AND company = %(company)s '
    return frappe.db.sql("""SELECT * FROM `tabShare Transfer`
        WHERE (DATE(date) <= %(date)s AND from_shareholder = %(shareholder)s {condition})
        OR (DATE(date) <= %(date)s AND to_shareholder = %(shareholder)s {condition})
        ORDER BY date""".format(condition=condition),
        {'date': date, 'shareholder': shareholder}, as_dict=1)
| mhbu50/erpnext | erpnext/accounts/report/share_ledger/share_ledger.py | Python | gpl-3.0 | 1,816 |
import frappe
from frappe.model.utils.rename_field import rename_field
def execute():
    """Patch: rename ``discharge_date`` to ``discharge_datetime`` on the
    Inpatient Record doctype.

    Guarded by has_column so re-running the patch is a no-op.
    """
    frappe.reload_doc("Healthcare", "doctype", "Inpatient Record")
    if frappe.db.has_column("Inpatient Record", "discharge_date"):
        rename_field("Inpatient Record", "discharge_date", "discharge_datetime")
| frappe/erpnext | erpnext/patches/v13_0/rename_discharge_date_in_ip_record.py | Python | gpl-3.0 | 291 |
import numpy as np
def CG(A, X, B, maxiter=20, tolerance=1.0e-10, verbose=False):
    """Solve X*A=B using the conjugate gradient method.

    ``X`` and ``B`` are ``ndarray``s of shape ``(m, nx, ny, nz)``
    corresponding to matrices of size ``m*n`` (``n=nx*ny*nz``) and
    ``A`` is a callable representing an ``n*n`` matrix::

      A(X, Y)

    will store ``X*A`` in the output array ``Y``.

    NOTE(review): ``A`` is also expected to provide a ``sum`` method
    used to reduce per-band scalars (inferred from the ``A.sum(...)``
    calls below -- presumably a domain-decomposition reduction; confirm
    against callers).

    On return ``X`` is the solution to ``X*A=B`` within ``tolerance``
    and ``(iterations, error)`` is returned; raises ArithmeticError if
    no convergence within *maxiter* iterations.  (Python 2 module: note
    the ``print`` statement below.)
    """
    m = len(X)
    shape = (m, 1, 1, 1)
    R = np.empty(X.shape, X.dtype.char)
    Q = np.empty(X.shape, X.dtype.char)
    A(X, R)
    # Initial residual R = X*A - B and first search direction P = R.
    R -= B
    P = R.copy()
    # c1 holds the per-band squared residual norms, reduced via A.sum.
    c1 = A.sum(np.reshape([abs(np.vdot(r, r)) for r in R], shape))
    for i in range(maxiter):
        error = sum(c1.ravel())
        if verbose:
            print 'CG-%d: %e' % (i, error)
        if error < tolerance:
            return i, error
        A(P, Q)
        #alpha = c1 / reshape([vdot(p, q) for p, q in zip(P, Q)], shape)
        alpha = c1 / A.sum(np.reshape([np.vdot(q,p)
                                       for p, q in zip(P, Q)], shape))
        # Step along the search direction and update the residual.
        X -= alpha * P
        R -= alpha * Q
        c0 = c1
        c1 = A.sum(np.reshape([abs(np.vdot(r, r)) for r in R], shape))
        beta = c1 / c0
        # New conjugate direction: P = R + beta * P (done in place).
        P *= beta
        P += R
    raise ArithmeticError('Did not converge!')
| qsnake/gpaw | gpaw/utilities/cg.py | Python | gpl-3.0 | 1,346 |
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
import bpy
from bpy.props import StringProperty, EnumProperty
from sverchok.node_tree import SverchCustomTreeNode
from sverchok.data_structure import updateNode
# Warning, changing this node without modifying the update system might break functionlaity
# bl_idname and var_name is used by the update system
class WifiOutNode(bpy.types.Node, SverchCustomTreeNode):
    ''' WifiOutNode: mirrors the inputs of a linked WifiInNode as outputs '''
    bl_idname = 'WifiOutNode'
    bl_label = 'Wifi out'
    bl_icon = 'OUTLINER_OB_EMPTY'
    # Name of the WifiInNode variable this node is linked to ('' = unlinked).
    var_name = StringProperty(name='var_name',
                              default='')
    def avail_var_name(self, context):
        # Enum callback: every WifiInNode variable name in this node tree.
        ng = self.id_data
        out = [(n.var_name, n.var_name, "") for n in ng.nodes
               if n.bl_idname == 'WifiInNode']
        if out:
            out.sort(key=lambda n: n[0])
        return out
    var_names = EnumProperty(items=avail_var_name, name="var names")
    def set_var_name(self):
        # Link this node to the WifiInNode currently selected in var_names
        # and rebuild the output sockets.
        self.var_name = self.var_names
        ng = self.id_data
        wifi_dict = {node.var_name: node
                     for node in ng.nodes
                     if node.bl_idname == 'WifiInNode'}
        self.outputs.clear()
        if self.var_name in wifi_dict:
            self.outputs.clear()
            node = wifi_dict[self.var_name]
            self.update()
        else:
            self.outputs.clear()
    def reset_var_name(self):
        # Unlink: forget the variable and drop all output sockets.
        self.var_name = ""
        self.outputs.clear()
    def draw_buttons(self, context, layout):
        # UI: show Link/Unlink controls depending on the current link state.
        op_name = 'node.sverchok_text_callback'
        if self.var_name:
            row = layout.row()
            row.label(text="Var:")
            row.label(text=self.var_name)
            op = layout.operator(op_name, text='Unlink')
            op.fn_name = "reset_var_name"
        else:
            layout.prop(self, "var_names")
            op = layout.operator(op_name, text='Link')
            op.fn_name = "set_var_name"
    def sv_init(self, context):
        pass
    def gen_var_name(self):
        #from socket
        # Recover var_name from the first output socket name, e.g. "x[0]" -> "x".
        if self.outputs:
            n = self.outputs[0].name.rstrip("[0]")
            self.var_name = n
    def update(self):
        # Keep output sockets in sync (type, count, order) with the linked
        # WifiInNode's input sockets.
        if not self.var_name and self.outputs:
            self.gen_var_name()
        ng = self.id_data
        wifi_dict = {node.var_name: node
                     for name, node in ng.nodes.items()
                     if node.bl_idname == 'WifiInNode'}
        node = wifi_dict.get(self.var_name)
        if node:
            inputs = node.inputs
            outputs = self.outputs
            # match socket type
            for idx, i_o in enumerate(zip(inputs, outputs)):
                i_socket, o_socket = i_o
                if i_socket.links:
                    f_socket = i_socket.links[0].from_socket
                    if f_socket.bl_idname != o_socket.bl_idname:
                        # Type changed upstream: replace the socket and move
                        # the new one back into position idx.
                        outputs.remove(o_socket)
                        outputs.new(f_socket.bl_idname, i_socket.name)
                        outputs.move(len(self.outputs)-1, idx)
            # adjust number of inputs
            # (WifiIn keeps one spare trailing input, hence len(inputs)-1)
            while len(outputs) != len(inputs)-1:
                if len(outputs) > len(inputs)-1:
                    outputs.remove(outputs[-1])
                else:
                    n = len(outputs)
                    socket = inputs[n]
                    if socket.links:
                        s_type = socket.links[0].from_socket.bl_idname
                    else:
                        s_type = 'StringsSocket'
                    s_name = socket.name
                    outputs.new(s_type, s_name)
    def process(self):
        # Copy data from the linked WifiInNode's inputs to our outputs.
        ng = self.id_data
        wifi_dict = {node.var_name: node
                     for name, node in ng.nodes.items()
                     if node.bl_idname == 'WifiInNode'}
        node = wifi_dict.get(self.var_name)
        # transfer data
        for in_socket, out_socket in zip(node.inputs, self.outputs):
            if in_socket.is_linked and out_socket.is_linked:
                data = in_socket.sv_get(deepcopy=False)
                out_socket.sv_set(data)
out_socket.sv_set(data)
def register():
    """Register the node class with Blender."""
    bpy.utils.register_class(WifiOutNode)
def unregister():
    """Unregister the node class from Blender."""
    bpy.utils.unregister_class(WifiOutNode)
| elfnor/sverchok | nodes/layout/wifi_out.py | Python | gpl-3.0 | 4,974 |
# This code has been taken from http://www.assembla.com/spaces/datatables_demo/wiki
from django.db.models import Q
from django.template.loader import render_to_string
from django.http import HttpResponse
from django.utils.cache import add_never_cache_headers
from django.utils import simplejson
import os
from django.conf import settings
import logging, traceback
log = logging.getLogger(__name__)
#TODO: Fero def prepare_datatables_list
def prepare_datatables_queryset(request, querySet, columnIndexNameMap, *args):
    """
    Retrieve querySet to be displayed in datatables..
    Usage:
        querySet: query set to draw data from.
        columnIndexNameMap: field names in order to be displayed.
    Return a tuple:
        querySet: data to be displayed after this request
        datatables parameters: a dict which includes
        - iTotalRecords: total data before filtering
        - iTotalDisplayRecords: total data after filtering

    Note: this is Python-2-era code (simplejson, bytes/str mixing below).
    """
    try:
        iTotalRecords = querySet.count() #count how many records are in queryset before matching final criteria
    except:
        # NOTE(review): bare except used as duck-typing dispatch -- anything
        # without a working .count() (e.g. a plain list) falls back to the
        # list-based implementation.  A narrower except would be safer.
        return prepare_datatables_list(request, querySet, columnIndexNameMap, *args)
    cols = int(request.GET.get('iColumns',0)) # Get the number of columns
    iDisplayLength = min(int(request.GET.get('iDisplayLength',10)),100) #Safety measure. If someone messes with iDisplayLength manually, we clip it to the max value of 100.
    startRecord = int(request.GET.get('iDisplayStart',0)) # Where the data starts from (page)
    endRecord = startRecord + iDisplayLength # where the data ends (end of page)
    # Ordering data: collect the ORDER BY fields requested by datatables,
    # honoring per-column bSortable flags and the asc/desc direction.
    iSortingCols = int(request.GET.get('iSortingCols',0))
    asortingCols = []
    if iSortingCols:
        for sortedColIndex in range(0, iSortingCols):
            sortedColID = int(request.GET.get('iSortCol_'+str(sortedColIndex),0))
            if request.GET.get('bSortable_{0}'.format(sortedColID), 'false') == 'true':  # make sure the column is sortable first
                sortedColName = columnIndexNameMap[sortedColID]
                sortingDirection = request.GET.get('sSortDir_'+str(sortedColIndex), 'asc')
                if sortingDirection == 'desc':
                    sortedColName = '-'+sortedColName
                asortingCols.append(sortedColName)
        querySet = querySet.order_by(*asortingCols)
    # Determine which columns are searchable
    searchableColumns = []
    for col in range(0,cols):
        if request.GET.get('bSearchable_{0}'.format(col), False) == 'true': searchableColumns.append(columnIndexNameMap[col])
    # Apply filtering by value sent by user (global search box): OR the
    # icontains filters over all searchable columns.
    # NOTE(review): .encode('utf-8') yields bytes; comparing against ''
    # and building kwargs from it only works on Python 2 -- confirm
    # before porting.
    customSearch = request.GET.get('sSearch', '').encode('utf-8');
    if customSearch != '':
        outputQ = None
        first = True
        for searchableColumn in searchableColumns:
            kwargz = {searchableColumn+"__icontains" : customSearch}
            outputQ = outputQ | Q(**kwargz) if outputQ else Q(**kwargz)
        querySet = querySet.filter(outputQ)
    # Individual column search: AND the per-column icontains filters.
    # NOTE(review): `False > ''` is a Python-2-only comparison.
    outputQ = None
    for col in range(0,cols):
        if request.GET.get('sSearch_{0}'.format(col), False) > '' and request.GET.get('bSearchable_{0}'.format(col), False) == 'true':
            kwargz = {columnIndexNameMap[col]+"__icontains" : request.GET['sSearch_{0}'.format(col)]}
            outputQ = outputQ & Q(**kwargz) if outputQ else Q(**kwargz)
    if outputQ: querySet = querySet.filter(outputQ)
    iTotalDisplayRecords = querySet.count() #count how many records match the final criteria
    if endRecord > startRecord:
        querySet = querySet[startRecord:endRecord] #get the slice
    return querySet, {
        'iTotalRecords' : iTotalRecords,
        'iTotalDisplayRecords' : iTotalDisplayRecords,
    }
def prepare_datatables_list(request, queryList, columnIndexNameMap, *args):
    """Prepare a plain list of objects for a datatables response.

    Unlike prepare_datatables_queryset, no ordering, searching or
    paging is applied: the list is returned untouched together with a
    parameter dict in which both ``iTotalRecords`` and
    ``iTotalDisplayRecords`` equal the list length.
    """
    total = len(queryList)
    params = {
        'iTotalRecords': total,
        'iTotalDisplayRecords': total,
    }
    return queryList, params
def render_datatables(request, records, dt_params, jsonTemplatePath, moreData=None):
    """
    Render datatables..
    Usage:
        querySet: query set to draw data from.
        dt_params: encapsulate datatables parameters. DataTables reference: http://www.datatables.net/ref
        jsonTemplatePath: template file to generate custom json from.

    The template is rendered with locals(), so it can reference sEcho,
    records, moreData and the totals directly.
    """
    sEcho = int(request.GET.get('sEcho',0)) # required echo response
    iTotalRecords = dt_params["iTotalRecords"]
    iTotalDisplayRecords = dt_params["iTotalDisplayRecords"]
    jstonString = render_to_string(jsonTemplatePath, locals()) #prepare the JSON with the response, consider using : from django.template.defaultfilters import escapejs
    # NOTE(review): mimetype= is the pre-Django-1.7 keyword (content_type today).
    response = HttpResponse(jstonString, mimetype="application/javascript")
    #prevent from caching datatables result
    add_never_cache_headers(response)
    return response
def render_datatables_automagic(request, querySet, columnIndexNameMap, iTotalRecords, iTotalDisplayRecords, moreData=None):
    """
    Render datatables..
    Usage:
        querySet: query set to draw data from.
        dt_params: encapsulate datatables parameters. DataTables reference: http://www.datatables.net/ref
        columnIndexNameMap: field names in order to be displayed.
    other parameters follows datatables specifications: http://www.datatables.net/ref

    Builds the aaData matrix itself (str() coercion of every cell) and
    serializes the whole payload with simplejson instead of a template.
    """
    sEcho = int(request.GET.get('sEcho',0)) # required echo response
    # Pass sColumns
    # NOTE(review): keys()/sort() as a list is Python-2-only; on Python 3
    # dict.keys() has no .sort().
    keys = columnIndexNameMap.keys()
    keys.sort()
    colitems = [columnIndexNameMap[key] for key in keys]
    sColumns = ",".join(map(str,colitems))
    aaData = []
    a = querySet.values()
    for row in a:
        rowkeys = row.keys()
        rowvalues = row.values()
        rowlist = []
        # O(columns * fields) matching of column names to row fields.
        for col in range(0,len(colitems)):
            for idx, val in enumerate(rowkeys):
                if val == colitems[col]:
                    rowlist.append(str(rowvalues[idx]))
        aaData.append(rowlist)
    response_dict = {}
    response_dict.update({'aaData':aaData})
    response_dict.update({'sEcho': sEcho, 'iTotalRecords': iTotalRecords, 'iTotalDisplayRecords':iTotalDisplayRecords, 'sColumns':sColumns})
    response_dict.update({'moreData':moreData})
    response = HttpResponse(simplejson.dumps(response_dict), mimetype='application/javascript')
    #prevent from caching datatables result
    add_never_cache_headers(response)
    return response
#Needed to insert images in report
def pisa_fetch_resources(uri, rel):
path = os.path.join(settings.MEDIA_ROOT, uri.replace(settings.MEDIA_URL, ""))
return path
#------------------------------------------------------------------------------
# Author: Luca Ferroni
# License: AGPLv3
from django.contrib.admin import helpers
from django.utils.safestring import mark_safe
from django.template import RequestContext
from django.shortcuts import render_to_response
from django.template import Template
from django.template.response import TemplateResponse
from django.views.generic import View
from django.utils.decorators import method_decorator
from django.contrib.auth.decorators import login_required
# Naive implementation to be tuned as data protocol exchange for Ajax requests
# Minimal inline templates for the Ajax success/error payloads; the HTTP
# status is carried inside the rendered context for the client to read.
template_success = Template("""
<div id="response" class="success" {{ extra_attrs }}>{{ msg }}</div>
""")
template_error = Template("""
<div id="response" class="error" {{ extra_attrs }}>{{ msg }}</div>
""")
# NOTE(review): 505 is "HTTP Version Not Supported"; an internal error is
# normally 500 -- confirm clients depend on this value before changing it.
HTTP_ERROR_INTERNAL = 505
HTTP_SUCCESS = 200
HTTP_REDIRECT = 302
def response_error(request, msg="error", on_complete=""):
    """Render the error payload with HTTP_ERROR_INTERNAL metadata.

    *msg* may be an exception instance: its type and unicode form are
    exposed in the context.  (Python 2 code: uses ``unicode``.)
    """
    context = {
        'msg' : msg,
        'http_status_code' : HTTP_ERROR_INTERNAL,
        'exception_type' : type(msg),
        'exception_msg' : unicode(msg),
    }
    if on_complete:
        context['extra_attrs'] = 'on_complete="%s"' % on_complete
    return TemplateResponse(request, template_error, context)
def response_success(request, msg="ok", on_complete=""):
    """Render the success payload with HTTP_SUCCESS metadata.

    Optional *on_complete* is exposed as an extra attribute on the
    response div for client-side callbacks.
    """
    context = {
        'msg' : msg,
        'http_status_code' : HTTP_SUCCESS,
    }
    if on_complete:
        context['extra_attrs'] = 'on_complete="%s"' % on_complete
    return TemplateResponse(request, template_success, context)
def response_redirect(request, url):
    """Render a success payload flagged with a 302 status code.

    NOTE(review): *url* is currently unused and no msg is passed, so the
    client only receives the status code -- confirm whether the redirect
    target was meant to be included in the context.
    """
    context = {
        'http_status_code' : HTTP_REDIRECT,
    }
    return TemplateResponse(request, template_success, context)
#--------------------------------------------------------------------------------
class ResponseWrappedView(View):
    """Wrap the dispatcher in order to apply Ajax protocol for data exchange.
    Used also as entry point for logging.
    Now can be implemented also as a Middleware.
    Let's see if we need some more customization or not...
    """
    def dispatch(self, request, *args, **kwargs):
        # Log every request with view name, HTTP method, user and args.
        view_name = self.__class__.__name__.lower()
        method = request.method.upper()
        log.debug("%s:%s user %s args=%s kw=%s" % (
            view_name, method, request.user, args, kwargs
        ))
        try:
            rv = super(ResponseWrappedView, self).dispatch(request, *args, **kwargs)
        except Exception as e:
            # Log the traceback; for Ajax requests turn the exception into
            # the standard error payload, otherwise re-raise so Django's
            # usual error handling applies.
            log.debug("%s:%s user %s exception raised %s tb=%s" % (
                view_name, method, request.user, e, traceback.format_exc()
            ))
            if request.is_ajax():
                rv = response_error(request, msg=e)
            else:
                raise
        return rv
class LoginRequiredView(ResponseWrappedView):
    # Same behavior as ResponseWrappedView, but only for authenticated
    # users (login_required applied to dispatch).
    @method_decorator(login_required)
    def dispatch(self, *args, **kwargs):
        return super(LoginRequiredView, self).dispatch(*args, **kwargs)
| OrlyMar/gasistafelice | gasistafelice/lib/views_support.py | Python | agpl-3.0 | 10,577 |
# -*- coding: utf-8 -*-
# ____________
# ___/ | \_____________ _ _ ___
# / ___/ | _ __ _ _| | ___ __ _ __| | \
# / \___/ ______/ | '_ \ || | |__/ _ \/ _` / _` | \
# \ ◯ | | .__/\_, |____\___/\__,_\__,_| /
# \_______\ /_______|_| |__/________________________/
# \ /
# \/
import _locale
import logging
import locale
import os
import pkg_resources
import semver
import sys
import traceback
# Info
APPID = "pyload"
PKGNAME = "pyload-ng"
PKGDIR = pkg_resources.resource_filename(__name__, "")
USERHOMEDIR = os.path.expanduser("~")
# Run with the user's home directory as cwd so relative paths land there.
os.chdir(USERHOMEDIR)
__version__ = pkg_resources.get_distribution(PKGNAME).parsed_version.base_version
__version_info__ = semver.parse_version_info(__version__)
# Locale
locale.setlocale(locale.LC_ALL, "")
if os.name == "nt":
    # NOTE(review): forces en_US/utf_8_sig on Windows -- presumably a
    # workaround for locale names getdefaultlocale cannot parse; confirm.
    _locale._getdefaultlocale = lambda *args: ["en_US", "utf_8_sig"]
# Exception logger
exc_logger = logging.getLogger("exception")  # fed by the excepthook below
def excepthook(exc_type, exc_value, exc_traceback):
    """Route uncaught exceptions to the exception logger.

    KeyboardInterrupt is forwarded to the default handler so Ctrl-C
    still terminates normally.
    """
    if issubclass(exc_type, KeyboardInterrupt):
        sys.__excepthook__(exc_type, exc_value, exc_traceback)
        return
    summary = traceback.format_exception_only(exc_type, exc_value)[-1]
    exc_logger.exception(
        summary, exc_info=(exc_type, exc_value, exc_traceback)
    )
sys.excepthook = excepthook
del excepthook
# Cleanup: drop helper names so they do not leak from the package namespace.
# NOTE(review): ``sys`` is deleted below although the installed excepthook
# still references the module-global ``sys`` -- it would raise NameError
# when invoked for a KeyboardInterrupt; confirm whether this is intended.
del _locale
del locale
del logging
del os
del pkg_resources
del semver
del sys
| vuolter/pyload | src/pyload/__init__.py | Python | agpl-3.0 | 1,504 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0009_auto_20160303_2340'),
('manager', '0005_auto_20160303_2008'),
]
operations = [
migrations.RenameField(
model_name='setting',
old_name='unit',
new_name='repetition_unit',
),
migrations.RenameField(
model_name='workoutlog',
old_name='unit',
new_name='repetition_unit',
),
migrations.AddField(
model_name='setting',
name='weight_unit',
field=models.ForeignKey(verbose_name='Unit', to='core.WeightUnit', default=1),
),
migrations.AddField(
model_name='workoutlog',
name='weight_unit',
field=models.ForeignKey(verbose_name='Unit', to='core.WeightUnit', default=1),
),
]
| DeveloperMal/wger | wger/manager/migrations/0006_auto_20160303_2138.py | Python | agpl-3.0 | 986 |
"""
Tests course_creators.admin.py.
"""
import mock
import django
from django.contrib.admin.sites import AdminSite
from django.contrib.auth.models import User
from django.core import mail
from django.http import HttpRequest
from django.test import TestCase
from course_creators.admin import CourseCreatorAdmin
from course_creators.models import CourseCreator
from student import auth
from student.roles import CourseCreatorRole
def mock_render_to_string(template_name, context):
    """Return a string that encodes template_name and context"""
    encoded = (template_name, context)
    return "{0}".format(encoded)
class CourseCreatorAdminTest(TestCase):
    """
    Tests for course creator admin.
    """
    def setUp(self):
        """ Test case setup """
        super(CourseCreatorAdminTest, self).setUp()
        self.user = User.objects.create_user('test_user', 'test_user+courses@edx.org', 'foo')
        self.table_entry = CourseCreator(user=self.user)
        self.table_entry.save()
        self.admin = User.objects.create_user('Mark', 'admin+courses@edx.org', 'foo')
        self.admin.is_staff = True
        self.request = HttpRequest()
        self.request.user = self.admin
        self.creator_admin = CourseCreatorAdmin(self.table_entry, AdminSite())
        self.studio_request_email = 'mark@marky.mark'
        # Feature flags applied per-test via mock.patch.dict.
        self.enable_creator_group_patch = {
            "ENABLE_CREATOR_GROUP": True,
            "STUDIO_REQUEST_EMAIL": self.studio_request_email
        }
    @mock.patch('course_creators.admin.render_to_string', mock.Mock(side_effect=mock_render_to_string, autospec=True))
    @mock.patch('django.contrib.auth.models.User.email_user')
    def test_change_status(self, email_user):
        """
        Tests that updates to state impact the creator group maintained in authz.py and that e-mails are sent.
        """
        def change_state_and_verify_email(state, is_creator):
            """ Changes user state, verifies creator status, and verifies e-mail is sent based on transition """
            self._change_state(state)
            self.assertEqual(is_creator, auth.user_has_role(self.user, CourseCreatorRole()))
            # render_to_string is mocked, so the expected subject/body are
            # the str((template, context)) encodings.
            context = {'studio_request_email': self.studio_request_email}
            if state == CourseCreator.GRANTED:
                template = 'emails/course_creator_granted.txt'
            elif state == CourseCreator.DENIED:
                template = 'emails/course_creator_denied.txt'
            else:
                template = 'emails/course_creator_revoked.txt'
            email_user.assert_called_with(
                mock_render_to_string('emails/course_creator_subject.txt', context),
                mock_render_to_string(template, context),
                self.studio_request_email
            )
        with mock.patch.dict('django.conf.settings.FEATURES', self.enable_creator_group_patch):
            # User is initially unrequested.
            self.assertFalse(auth.user_has_role(self.user, CourseCreatorRole()))
            change_state_and_verify_email(CourseCreator.GRANTED, True)
            change_state_and_verify_email(CourseCreator.DENIED, False)
            change_state_and_verify_email(CourseCreator.GRANTED, True)
            change_state_and_verify_email(CourseCreator.PENDING, False)
            change_state_and_verify_email(CourseCreator.GRANTED, True)
            change_state_and_verify_email(CourseCreator.UNREQUESTED, False)
            change_state_and_verify_email(CourseCreator.DENIED, False)
    @mock.patch('course_creators.admin.render_to_string', mock.Mock(side_effect=mock_render_to_string, autospec=True))
    def test_mail_admin_on_pending(self):
        """
        Tests that the admin account is notified when a user is in the 'pending' state.
        """
        def check_admin_message_state(state, expect_sent_to_admin, expect_sent_to_user):
            """ Changes user state and verifies e-mail sent to admin address only when pending. """
            mail.outbox = []
            self._change_state(state)
            # If a message is sent to the user about course creator status change, it will be the first
            # message sent. Admin message will follow.
            base_num_emails = 1 if expect_sent_to_user else 0
            if expect_sent_to_admin:
                # TODO: Remove Django 1.11 upgrade shim
                # SHIM: Usernames come back as unicode in 1.10+, remove this shim post-upgrade
                if django.VERSION < (1, 10):
                    context = {'user_name': 'test_user', 'user_email': u'test_user+courses@edx.org'}
                else:
                    context = {'user_name': u'test_user', 'user_email': u'test_user+courses@edx.org'}
                self.assertEquals(base_num_emails + 1, len(mail.outbox), 'Expected admin message to be sent')
                sent_mail = mail.outbox[base_num_emails]
                self.assertEquals(
                    mock_render_to_string('emails/course_creator_admin_subject.txt', context),
                    sent_mail.subject
                )
                self.assertEquals(
                    mock_render_to_string('emails/course_creator_admin_user_pending.txt', context),
                    sent_mail.body
                )
                self.assertEquals(self.studio_request_email, sent_mail.from_email)
                self.assertEqual([self.studio_request_email], sent_mail.to)
            else:
                self.assertEquals(base_num_emails, len(mail.outbox))
        with mock.patch.dict('django.conf.settings.FEATURES', self.enable_creator_group_patch):
            # E-mail message should be sent to admin only when new state is PENDING, regardless of what
            # previous state was (unless previous state was already PENDING).
            # E-mail message sent to user only on transition into and out of GRANTED state.
            check_admin_message_state(CourseCreator.UNREQUESTED, expect_sent_to_admin=False, expect_sent_to_user=False)
            check_admin_message_state(CourseCreator.PENDING, expect_sent_to_admin=True, expect_sent_to_user=False)
            check_admin_message_state(CourseCreator.GRANTED, expect_sent_to_admin=False, expect_sent_to_user=True)
            check_admin_message_state(CourseCreator.DENIED, expect_sent_to_admin=False, expect_sent_to_user=True)
            check_admin_message_state(CourseCreator.GRANTED, expect_sent_to_admin=False, expect_sent_to_user=True)
            check_admin_message_state(CourseCreator.PENDING, expect_sent_to_admin=True, expect_sent_to_user=True)
            check_admin_message_state(CourseCreator.PENDING, expect_sent_to_admin=False, expect_sent_to_user=False)
            check_admin_message_state(CourseCreator.DENIED, expect_sent_to_admin=False, expect_sent_to_user=True)
    def _change_state(self, state):
        """ Helper method for changing state """
        self.table_entry.state = state
        self.creator_admin.save_model(self.request, self.table_entry, None, True)
    def test_add_permission(self):
        """
        Tests that staff cannot add entries
        """
        self.assertFalse(self.creator_admin.has_add_permission(self.request))
    def test_delete_permission(self):
        """
        Tests that staff cannot delete entries
        """
        self.assertFalse(self.creator_admin.has_delete_permission(self.request))
    def test_change_permission(self):
        """
        Tests that only staff can change entries
        """
        self.assertTrue(self.creator_admin.has_change_permission(self.request))
        self.request.user = self.user
        self.assertFalse(self.creator_admin.has_change_permission(self.request))
    def test_rate_limit_login(self):
        # Verify that repeated failed admin logins eventually return 403
        # (rate limiting), rather than the login form (200).
        with mock.patch.dict('django.conf.settings.FEATURES', {'ENABLE_CREATOR_GROUP': True}):
            post_params = {'username': self.user.username, 'password': 'wrong_password'}
            # try logging in 30 times, the default limit in the number of failed
            # login attempts in one 5 minute period before the rate gets limited
            for _ in xrange(30):
                response = self.client.post('/admin/login/', post_params)
                self.assertEquals(response.status_code, 200)
            response = self.client.post('/admin/login/', post_params)
            # Since we are using the default rate limit behavior, we are
            # expecting this to return a 403 error to indicate that there have
            # been too many attempts
            self.assertEquals(response.status_code, 403)
| procangroup/edx-platform | cms/djangoapps/course_creators/tests/test_admin.py | Python | agpl-3.0 | 8,540 |
""" Overrides for Docker-based devstack. """
from .devstack import * # pylint: disable=wildcard-import, unused-wildcard-import
# Docker does not support the syslog socket at /dev/log. Rely on the console.
# Docker does not support the syslog socket at /dev/log. Rely on the console.
LOGGING['handlers']['local'] = LOGGING['handlers']['tracking'] = {
    'class': 'logging.NullHandler',
}
LOGGING['loggers']['tracking']['handlers'] = ['console']
# Service hostnames/ports as exposed on the devstack Docker network.
LMS_BASE = 'edx.devstack.lms:18000'
CMS_BASE = 'edx.devstack.studio:18010'
SITE_NAME = LMS_BASE
LMS_ROOT_URL = 'http://{}'.format(LMS_BASE)
LMS_INTERNAL_ROOT_URL = LMS_ROOT_URL
ECOMMERCE_PUBLIC_URL_ROOT = 'http://localhost:18130'
ECOMMERCE_API_URL = 'http://edx.devstack.ecommerce:18130/api/v2'
COMMENTS_SERVICE_URL = 'http://edx.devstack.forum:4567'
ENTERPRISE_API_URL = '{}/enterprise/api/v1/'.format(LMS_INTERNAL_ROOT_URL)
CREDENTIALS_INTERNAL_SERVICE_URL = 'http://edx.devstack.credentials:18150'
CREDENTIALS_PUBLIC_SERVICE_URL = 'http://localhost:18150'
OAUTH_OIDC_ISSUER = '{}/oauth2'.format(LMS_ROOT_URL)
JWT_AUTH.update({
    'JWT_SECRET_KEY': 'lms-secret',
    'JWT_ISSUER': OAUTH_OIDC_ISSUER,
    'JWT_AUDIENCE': 'lms-key',
})
# Feature toggles tuned for local development.
FEATURES.update({
    'AUTOMATIC_AUTH_FOR_TESTING': True,
    'ENABLE_COURSEWARE_SEARCH': False,
    'ENABLE_COURSE_DISCOVERY': False,
    'ENABLE_DASHBOARD_SEARCH': False,
    'ENABLE_DISCUSSION_SERVICE': True,
    'SHOW_HEADER_LANGUAGE_SELECTOR': True,
    'ENABLE_ENTERPRISE_INTEGRATION': False,
})
# Optional marketing-site integration; paths are relative to MARKETING_SITE_ROOT.
ENABLE_MKTG_SITE = os.environ.get('ENABLE_MARKETING_SITE', False)
MARKETING_SITE_ROOT = os.environ.get('MARKETING_SITE_ROOT', 'http://localhost:8080')
MKTG_URLS = {
    'ABOUT': '/about',
    'ACCESSIBILITY': '/accessibility',
    'AFFILIATES': '/affiliates',
    'BLOG': '/blog',
    'CAREERS': '/careers',
    'CONTACT': '/contact',
    'COURSES': '/course',
    'DONATE': '/donate',
    'ENTERPRISE': '/enterprise',
    'FAQ': '/student-faq',
    'HONOR': '/edx-terms-service',
    'HOW_IT_WORKS': '/how-it-works',
    'MEDIA_KIT': '/media-kit',
    'NEWS': '/news-announcements',
    'PRESS': '/press',
    'PRIVACY': '/edx-privacy-policy',
    'ROOT': MARKETING_SITE_ROOT,
    'SCHOOLS': '/schools-partners',
    'SITE_MAP': '/sitemap',
    'TOS': '/edx-terms-service',
    'TOS_AND_HONOR': '/edx-terms-service',
    'WHAT_IS_VERIFIED_CERT': '/verified-certificate',
}
CREDENTIALS_SERVICE_USERNAME = 'credentials_worker'
COURSE_CATALOG_API_URL = 'http://edx.devstack.discovery:18381/api/v1/'
| lduarte1991/edx-platform | lms/envs/devstack_docker.py | Python | agpl-3.0 | 2,419 |
# -*- encoding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2015 Compassion CH (http://www.compassion.ch)
# Releasing children from poverty in Jesus' name
# @author: Albert SHENOUDA <albert.shenouda@efrei.net>
#
# The licence is in the file __openerp__.py
#
##############################################################################
from openerp.addons.recurring_contract.tests.test_base_contract \
import test_base_contract
import logging
logger = logging.getLogger(__name__)
class test_base_module(test_base_contract):
    """Shared setUp for contract_compassion tests.

    Wires the test income/expense accounts onto the demo products used by
    sponsorship tests, and provides a helper to fully pay an invoice by
    creating and reconciling a bank move.
    """

    def setUp(self):
        # Retrieve of income account
        super(test_base_module, self).setUp()
        self.property_account_income = self.env['account.account'].search([
            ('type', '=', 'other'),
            ('name', '=', 'Property Account Income Test')]).ids[0]
        self.property_account_expense = self.env['account.account'].search([
            ('type', '=', 'other'),
            ('name', '=', 'Property Account Expense Test')
        ]).ids[0]
        # Retrieve and modification of products
        product_obj = self.env['product.product']
        self.product_sp = product_obj.search(
            [('name', '=', 'Sponsorship')])
        self.product_gf = product_obj.search(
            [('name', '=', 'General Fund')])
        self.product_bf = product_obj.search(
            [('name', '=', 'Birthday Gift')])
        self.product_fg = product_obj.search(
            [('name', '=', 'Family Gift')])
        # Each product may be absent depending on the installed demo data,
        # hence the guards before writing the test accounts.
        if self.product_sp:
            self.product_sp[0].write({
                'property_account_income': self.property_account_income,
                'property_account_expense': self.property_account_expense,
            })
        if self.product_gf:
            self.product_gf[0].write({
                'property_account_income': self.property_account_income,
                'property_account_expense': self.property_account_expense,
            })
        if self.product_bf:
            self.product_bf[0].write({
                'property_account_income': self.property_account_income,
                'property_account_expense': self.property_account_expense,
            })
        if self.product_fg:
            self.product_fg[0].write({
                'property_account_income': self.property_account_income,
                'property_account_expense': self.property_account_expense,
            })
        # Add of account for id's 1 product
        product = self.env['product.product'].browse(1)
        product.property_account_income = self.property_account_income

    def _pay_invoice(self, invoice):
        """Pay *invoice* in full.

        Creates a move on the 'TBNK' bank journal with a debit on the bank
        account and a credit on the partner's receivable for the invoice
        total, validates the move, then reconciles it against the invoice's
        receivable move line.
        """
        bank_journal = self.env['account.journal'].search(
            [('code', '=', 'TBNK')])[0]
        move_obj = self.env['account.move']
        move_line_obj = self.env['account.move.line']
        account_id = invoice.partner_id.property_account_receivable.id
        move = move_obj.create({
            'journal_id': bank_journal.id
        })
        # Bank side: money received.
        move_line_obj.create({
            'name': 'BNK-' + invoice.number,
            'move_id': move.id,
            'partner_id': invoice.partner_id.id,
            'account_id': bank_journal.default_debit_account_id.id,
            'debit': invoice.amount_total,
            'journal_id': bank_journal.id,
            'period_id': invoice.period_id.id,
            'date': invoice.date_due
        })
        # Receivable side: clears the customer's debt.
        mv_line = move_line_obj.create({
            'name': 'PAY-' + invoice.number,
            'move_id': move.id,
            'partner_id': invoice.partner_id.id,
            'account_id': account_id,
            'credit': invoice.amount_total,
            'journal_id': invoice.journal_id.id,
            'period_id': invoice.period_id.id,
            'date': invoice.date_due
        })
        move.button_validate()
        # Reconcile the invoice's receivable line with the payment line.
        to_reconcile = move_line_obj.search([
            ('move_id', '=', invoice.move_id.id),
            ('account_id', '=', account_id)]) + mv_line
        to_reconcile.reconcile()
| MickSandoz/compassion-modules | contract_compassion/tests/test_base_module.py | Python | agpl-3.0 | 4,143 |
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import SUPERUSER_ID
from odoo.http import request, route
from odoo.addons.bus.controllers.main import BusController
class MailChatController(BusController):
    """Bus controller extension providing anonymous-capable chat endpoints."""

    def _default_request_uid(self):
        """ For Anonymous people, they receive the access right of SUPERUSER_ID since they have NO access (auth=none)
            !!! Each time a method from this controller is call, there is a check if the user (who can be anonymous and Sudo access)
            can access to the resource.
        """
        return request.session.uid and request.session.uid or SUPERUSER_ID

    # --------------------------
    # Extends BUS Controller Poll
    # --------------------------
    def _poll(self, dbname, channels, last, options):
        """Subscribe the logged-in user's mail channels and personal/needaction
        channels before delegating to the base bus poll."""
        if request.session.uid:
            partner_id = request.env.user.partner_id.id
            if partner_id:
                channels = list(channels)  # do not alter original list
                for mail_channel in request.env['mail.channel'].search([('channel_partner_ids', 'in', [partner_id])]):
                    channels.append((request.db, 'mail.channel', mail_channel.id))
                # personal and needaction channel
                channels.append((request.db, 'res.partner', partner_id))
                channels.append((request.db, 'ir.needaction', partner_id))
        return super(MailChatController, self)._poll(dbname, channels, last, options)

    # --------------------------
    # Anonymous routes (Common Methods)
    # --------------------------
    @route('/mail/chat_post', type="json", auth="none")
    def mail_chat_post(self, uuid, message_content, **kwargs):
        """Post *message_content* in the channel identified by *uuid*.

        Works for anonymous visitors (auth=none): access rights come from
        _default_request_uid(). Returns the new message id, or False.
        """
        request_uid = self._default_request_uid()
        # find the author from the user session, which can be None
        author_id = False  # message_post accept 'False' author_id, but not 'None'
        if request.session.uid:
            author_id = request.env['res.users'].sudo().browse(request.session.uid).partner_id.id
        # post a message without adding followers to the channel. email_from=False avoid to get author from email data
        mail_channel = request.env["mail.channel"].sudo(request_uid).search([('uuid', '=', uuid)], limit=1)
        message = mail_channel.sudo(request_uid).with_context(mail_create_nosubscribe=True).message_post(author_id=author_id, email_from=False, body=message_content, message_type='comment', subtype='mail.mt_comment', content_subtype='plaintext', **kwargs)
        return message and message.id or False

    @route(['/mail/chat_history'], type="json", auth="none")
    def mail_chat_history(self, uuid, last_id=False, limit=20):
        """Fetch up to *limit* messages older than *last_id* from the channel
        identified by *uuid*; empty list when the channel does not exist."""
        request_uid = self._default_request_uid()
        channel = request.env["mail.channel"].sudo(request_uid).search([('uuid', '=', uuid)], limit=1)
        if not channel:
            return []
        else:
            return channel.sudo(request_uid).channel_fetch_message(last_id, limit)
| Aravinthu/odoo | addons/mail/controllers/bus.py | Python | agpl-3.0 | 3,026 |
import psutil
def is_up(ps_name):
    """Return True if a process named *ps_name* is currently running.

    :param ps_name: exact process name to look for (as reported by
        ``psutil.Process.name()``).
    :returns: True on the first match, False otherwise.

    Fixes over the previous implementation:
    - uses ``any``-style short-circuiting instead of materialising a full
      list of booleans for every PID on the system;
    - tolerates the race where a process exits between the PID listing and
      the ``Process`` inspection (previously raised ``NoSuchProcess``).

    NOTE(review): ``psutil.get_pid_list()`` is deprecated in newer psutil
    releases in favour of ``psutil.pids()`` -- kept for compatibility with
    the pinned psutil version; confirm before upgrading.
    """
    for pid in psutil.get_pid_list():
        try:
            if psutil.Process(pid).name() == ps_name:
                return True
        except psutil.NoSuchProcess:
            # Process died between listing PIDs and inspecting it; skip it.
            continue
    return False
| knmkr/perGENIE | pergenie/lib/utils/service.py | Python | agpl-3.0 | 127 |
"""Deprecated import support. Auto-generated by import_shims/generate_shims.sh."""
# pylint: disable=redefined-builtin,wrong-import-position,wildcard-import,useless-suppression,line-too-long
from import_shims.warn import warn_deprecated_import
warn_deprecated_import('lms_initialization.apps', 'lms.djangoapps.lms_initialization.apps')
from lms.djangoapps.lms_initialization.apps import *
| eduNEXT/edunext-platform | import_shims/lms/lms_initialization/apps.py | Python | agpl-3.0 | 392 |
# Runs in the browser under Brython: fetch the source currently typed into
# the page's editor widget, apply Reeborg's preprocessing transforms, and
# execute it with the English Reeborg commands in scope (the * import).
from browser import window
from preprocess import transform
from reeborg_en import *  # NOQA

src = transform(window.editor.getValue())
exec(src)
| code4futuredotorg/reeborg_tw | src/python/editor.py | Python | agpl-3.0 | 145 |
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Wt(CMakePackage):
    """Wt, C++ Web Toolkit.

    Wt is a C++ library for developing web applications."""

    homepage = "http://www.webtoolkit.eu/wt"
    url = "https://github.com/emweb/wt/archive/3.3.7.tar.gz"

    version('3.3.7', '09858901f2dcf5c3d36a9237daba3e3f')
    version('master', branch='master',
            git='https://github.com/emweb/wt.git')

    # wt builds in parallel, but requires more than 5 GByte RAM per -j <njob>
    # which most machines do not provide and crash the build
    parallel = False

    variant('openssl', default=True,
            description='SSL and WebSockets support in the built-in httpd, '
            'the HTTP(S) client, and additional cryptographic '
            'hashes in the authentication module')
    variant('libharu', default=True, description='painting to PDF')
    # variant('graphicsmagick', default=True,
    #         description='painting to PNG, GIF')
    variant('sqlite', default=False, description='create SQLite3 DBO')
    variant('mariadb', default=False, description='create MariaDB/MySQL DBO')
    variant('postgresql', default=False, description='create PostgreSQL DBO')
    # variant('firebird', default=False, description='create Firebird DBO')
    variant('pango', default=True,
            description='improved font support in PDF and raster image '
            'painting')
    variant('zlib', default=True,
            description='compression in the built-in httpd')
    # variant('fastcgi', default=False,
    #         description='FastCGI connector via libfcgi++')

    depends_on('boost@1.46.1:')
    depends_on('openssl', when='+openssl')
    depends_on('libharu', when='+libharu')
    depends_on('sqlite', when='+sqlite')
    depends_on('mariadb', when='+mariadb')
    depends_on('postgresql', when='+postgresql')
    depends_on('pango', when='+pango')
    depends_on('zlib', when='+zlib')

    def cmake_args(self):
        """Translate the enabled variants into Wt's CMake options.

        NOTE(review): the 'zlib' variant only controls the dependency; no
        corresponding CMake flag is passed here -- confirm whether Wt's
        build picks zlib up automatically.
        """
        spec = self.spec
        cmake_args = [
            '-DBUILD_EXAMPLES:BOOL=OFF',
            '-DCONNECTOR_FCGI:BOOL=OFF',
            '-DENABLE_OPENGL:BOOL=OFF',
            '-DENABLE_QT4:BOOL=OFF'
        ]
        cmake_args.extend([
            '-DENABLE_SSL:BOOL={0}'.format((
                'ON' if '+openssl' in spec else 'OFF')),
            '-DENABLE_HARU:BOOL={0}'.format((
                'ON' if '+libharu' in spec else 'OFF')),
            '-DENABLE_PANGO:BOOL={0}'.format((
                'ON' if '+pango' in spec else 'OFF')),
            '-DENABLE_SQLITE:BOOL={0}'.format((
                'ON' if '+sqlite' in spec else 'OFF')),
            '-DENABLE_MYSQL:BOOL={0}'.format((
                'ON' if '+mariadb' in spec else 'OFF')),
            # BUG FIX: the variant is declared as 'postgresql' above, but the
            # check here used '+postgres', so ENABLE_POSTGRES was always OFF.
            '-DENABLE_POSTGRES:BOOL={0}'.format((
                'ON' if '+postgresql' in spec else 'OFF'))
        ])
        return cmake_args
| EmreAtes/spack | var/spack/repos/builtin/packages/wt/package.py | Python | lgpl-2.1 | 4,106 |
#!/usr/bin/env python
import os, sys, codecs, re
def usage():
    # Print usage information and terminate (Python 2 script: print is a
    # statement here, so no docstring conventions from py3 tooling apply).
    print "Usage info for extract_references.py"
    print " extract_references.py ref_sgml ref_prefix"
    print
    sys.exit()
def main():
    # Split a multi-reference SGML file (argv[1]) into one plain-text file
    # per reference set, named "<argv[2]>_ref.<i>", one segment per line.
    # Tags are matched case-insensitively but the emitted text keeps the
    # original case.
    if (len(sys.argv) < 3 or sys.argv[1] == "-h"):
        usage()
    sgml = codecs.open(sys.argv[1], "r", "utf-8")
    prefix = sys.argv[2]

    doc_pattern = re.compile('.* docid="([^"]*).*"')
    seg_pattern = re.compile('.* id="([^"]*)".*')

    ref_sets = []
    cur_ref_set = []
    cur_doc = ""
    cur_seg = ""
    cur_txt = ""
    for line in sgml.readlines():
        # line_tc keeps the true case for output; line is lowercased only
        # for tag matching.
        line_tc = line.strip()
        line = line_tc.lower()
        if ("<doc " in line):
            cur_doc = doc_pattern.search(line).groups()[0]
        # A new reference set starts at </refset>, or when a docid repeats
        # (the same documents appear once per reference translation).
        if ("</refset " in line or
            ("<doc " in line and cur_doc in map(lambda x: x[0], cur_ref_set))):
            ref_sets.append(cur_ref_set)
            cur_ref_set = []
        if ("<seg " in line):
            cur_seg = seg_pattern.search(line).groups()[0]
            # Strip all markup, keeping only the segment text.
            cur_txt = re.sub("<[^>]*>", "", line_tc)
            cur_ref_set.append((cur_doc, cur_seg, cur_txt))
    ref_files = []
    ref_count = len(ref_sets[0])
    for i, ref_set in enumerate(ref_sets):
        # All reference sets should contain the same number of segments;
        # mismatches are reported but processing continues.
        if (ref_count != len(ref_set)):
            print "[ERR] reference lengths do not match: " + str(ref_count) \
                + " vs. " + str(len(ref_set)) + " (ref " + str(i) + ")"
        ref_files.append(codecs.open(prefix + "_ref." + str(i), "w", "utf-8"))
    for j in range(ref_count):
        (cur_doc, cur_seg, cur_txt) = ref_sets[0][j]
        for i in range(len(ref_sets)):
            if (j >= len(ref_sets[i])):
                continue
            (doc, seg, txt) = ref_sets[i][j]
            # Sanity-check that segment j refers to the same (doc, seg) id
            # across all reference sets before writing it out.
            if (doc != cur_doc or seg != cur_seg):
                print "[ERR] document, segment ids don't match up: "
                print "\t" + doc + " vs. " + cur_doc
                print "\t" + seg + " vs. " + cur_seg
            ref_files[i].write(txt + "\n")
    for ref_file in ref_files:
        ref_file.close()
if __name__ == "__main__":
main()
| gwenniger/joshua | scripts/toolkit/extract_references.py | Python | lgpl-2.1 | 1,922 |
"""Clock/event scheduler.
This is a Pygame implementation of a scheduler inspired by the clock
classes in Pyglet.
"""
import heapq
from weakref import ref
from functools import total_ordering
from types import MethodType
__all__ = [
'Clock', 'schedule', 'schedule_interval', 'unschedule'
]
def weak_method(method):
    """Create a weak reference to a bound method.

    Kept for compatibility with Python versions predating
    ``weakref.WeakMethod`` (added in 3.4).  The returned callable yields a
    freshly re-bound method, or ``None`` once the instance or the function
    has been garbage collected.
    """
    instance_ref = ref(method.__self__)
    function_ref = ref(method.__func__)

    def resolve():
        instance = instance_ref()
        function = function_ref()
        if instance is not None and function is not None:
            # Re-bind the function to the (still alive) instance.
            return function.__get__(instance)
        return None

    return resolve
def mkref(o):
    """Weakly reference *o*, treating bound methods specially.

    Plain ``weakref.ref`` cannot track a bound method (each access creates
    a fresh bound-method object), so those go through ``weak_method``.
    """
    return weak_method(o) if isinstance(o, MethodType) else ref(o)
@total_ordering
class Event:
    """An event scheduled for a future time.

    Events are ordered by their scheduled execution time so they can live
    in a heap.  The callback is held only weakly (via ``mkref``) so that
    scheduling something never keeps it alive on its own.
    """

    def __init__(self, time, cb, repeat=None):
        self.time = time
        # BUG FIX: 'self.repeat = repeat' was assigned twice in the original
        # __init__; the duplicate statement has been removed.
        self.repeat = repeat  # repeat interval in seconds, or None (one-shot)
        self.cb = mkref(cb)
        self.name = str(cb)

    def __lt__(self, ano):
        return self.time < ano.time

    def __eq__(self, ano):
        return self.time == ano.time

    @property
    def callback(self):
        # Dereference the weak ref: None once the callback was collected.
        return self.cb()
class Clock:
    """A clock used for event scheduling.

    When tick() is called, all events scheduled for before now will be called
    in order.

    tick() would typically be called from the game loop for the default clock.

    Additional clocks could be created - for example, a game clock that could
    be suspended in pause screens. Your code must take care of calling tick()
    or not. You could also run the clock at a different rate if desired, by
    scaling dt before passing it to tick().
    """

    def __init__(self):
        self.t = 0            # current clock time, in seconds
        self.fired = False    # True if any callback ran during the last tick
        self.events = []      # heap of Event, ordered by scheduled time
        self._each_tick = []  # weak refs to per-tick callbacks

    def schedule(self, callback, delay):
        """Schedule callback to be called once, at `delay` seconds from now.

        :param callback: A parameterless callable to be called.
        :param delay: The delay before the call (in clock time / seconds).
        """
        heapq.heappush(self.events, Event(self.t + delay, callback, None))

    def schedule_unique(self, callback, delay):
        """Schedule callback to be called once, at `delay` seconds from now.

        If it was already scheduled, postpone its firing.

        :param callback: A parameterless callable to be called.
        :param delay: The delay before the call (in clock time / seconds).
        """
        self.unschedule(callback)
        self.schedule(callback, delay)

    def schedule_interval(self, callback, delay):
        """Schedule callback to be called every `delay` seconds.

        The first occurrence will be after `delay` seconds.

        :param callback: A parameterless callable to be called.
        :param delay: The interval in seconds.
        """
        heapq.heappush(self.events, Event(self.t + delay, callback, delay))

    def unschedule(self, callback):
        """Unschedule the given callback.

        If scheduled multiple times all instances will be unscheduled.
        """
        # Also drop events whose weakly-referenced callback has already died
        # (callback is None), then restore the heap invariant.
        self.events = [e for e in self.events if e.callback != callback and e.callback is not None]
        heapq.heapify(self.events)
        self._each_tick = [e for e in self._each_tick if e() != callback]

    def each_tick(self, callback):
        """Schedule a callback to be called every tick.

        Unlike the standard scheduler functions, the callable is passed the
        elapsed clock time since the last call (the same value passed to tick).
        """
        self._each_tick.append(mkref(callback))

    def _fire_each_tick(self, dt):
        # Fire all per-tick callbacks; prune those that raised.  Seeding the
        # dead list with None also prunes entries whose weak ref has been
        # collected (r() is None) in the final filter.
        dead = [None]
        for r in self._each_tick:
            cb = r()
            if cb is not None:
                self.fired = True
                try:
                    cb(dt)
                except Exception:
                    import traceback
                    traceback.print_exc()
                    # A callback that raised is dropped so it cannot keep
                    # spamming the console every tick.
                    dead.append(cb)
        self._each_tick = [e for e in self._each_tick if e() not in dead]

    def tick(self, dt):
        """Update the clock time and fire all scheduled events.

        :param dt: The elapsed time in seconds.
        """
        self.fired = False
        self.t += float(dt)
        self._fire_each_tick(dt)
        while self.events and self.events[0].time <= self.t:
            ev = heapq.heappop(self.events)
            cb = ev.callback
            if not cb:
                # Weakly-referenced callback was garbage collected; skip.
                continue
            if ev.repeat is not None:
                # Re-queue a repeating event before invoking the callback.
                self.schedule_interval(cb, ev.repeat)
            self.fired = True
            try:
                cb()
            except Exception:
                import traceback
                traceback.print_exc()
                # On failure, remove all remaining occurrences of this
                # callback (including the repeat queued just above).
                self.unschedule(cb)
# One instance of a clock is available by default, to simplify the API
clock = Clock()

# Module-level convenience aliases that delegate to the default clock.
tick = clock.tick
schedule = clock.schedule
schedule_interval = clock.schedule_interval
schedule_unique = clock.schedule_unique
unschedule = clock.unschedule
each_tick = clock.each_tick
| yrobla/pyjuegos | pgzero/clock.py | Python | lgpl-3.0 | 5,190 |
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import urllib
from urllib import unquote
from ConfigParser import ConfigParser, NoSectionError, NoOptionError
from swift.common import utils, exceptions
from swift.common.swob import HTTPBadRequest, HTTPLengthRequired, \
HTTPRequestEntityTooLarge, HTTPPreconditionFailed
# Default limits; each may be overridden via the [swift-constraints] section
# of SWIFT_CONF_FILE (see reload_constraints below).
MAX_FILE_SIZE = 5368709122  # 5 GiB + 2 bytes
MAX_META_NAME_LENGTH = 128
MAX_META_VALUE_LENGTH = 256
MAX_META_COUNT = 90
MAX_META_OVERALL_SIZE = 4096
MAX_HEADER_SIZE = 8192
MAX_OBJECT_NAME_LENGTH = 1024
CONTAINER_LISTING_LIMIT = 10000
ACCOUNT_LISTING_LIMIT = 10000
MAX_ACCOUNT_NAME_LENGTH = 256
MAX_CONTAINER_NAME_LENGTH = 256

# If adding an entry to DEFAULT_CONSTRAINTS, note that
# these constraints are automatically published by the
# proxy server in responses to /info requests, with values
# updated by reload_constraints()
DEFAULT_CONSTRAINTS = {
    'max_file_size': MAX_FILE_SIZE,
    'max_meta_name_length': MAX_META_NAME_LENGTH,
    'max_meta_value_length': MAX_META_VALUE_LENGTH,
    'max_meta_count': MAX_META_COUNT,
    'max_meta_overall_size': MAX_META_OVERALL_SIZE,
    'max_header_size': MAX_HEADER_SIZE,
    'max_object_name_length': MAX_OBJECT_NAME_LENGTH,
    'container_listing_limit': CONTAINER_LISTING_LIMIT,
    'account_listing_limit': ACCOUNT_LISTING_LIMIT,
    'max_account_name_length': MAX_ACCOUNT_NAME_LENGTH,
    'max_container_name_length': MAX_CONTAINER_NAME_LENGTH,
}

SWIFT_CONSTRAINTS_LOADED = False
OVERRIDE_CONSTRAINTS = {}  # any constraints overridden by SWIFT_CONF_FILE
EFFECTIVE_CONSTRAINTS = {}  # populated by reload_constraints
def reload_constraints():
    """
    Parse SWIFT_CONF_FILE and reset module level global contraint attrs,
    populating OVERRIDE_CONSTRAINTS AND EFFECTIVE_CONSTRAINTS along the way.
    """
    global SWIFT_CONSTRAINTS_LOADED, OVERRIDE_CONSTRAINTS
    SWIFT_CONSTRAINTS_LOADED = False
    OVERRIDE_CONSTRAINTS = {}
    constraints_conf = ConfigParser()
    if constraints_conf.read(utils.SWIFT_CONF_FILE):
        SWIFT_CONSTRAINTS_LOADED = True
        for name in DEFAULT_CONSTRAINTS:
            try:
                value = int(constraints_conf.get('swift-constraints', name))
            except NoOptionError:
                # Option absent: keep the default for this constraint.
                pass
            except NoSectionError:
                # We are never going to find the section for another option
                break
            else:
                OVERRIDE_CONSTRAINTS[name] = value
    for name, default in DEFAULT_CONSTRAINTS.items():
        value = OVERRIDE_CONSTRAINTS.get(name, default)
        EFFECTIVE_CONSTRAINTS[name] = value
        # "globals" in this context is module level globals, always.
        globals()[name.upper()] = value
# Apply any overrides from SWIFT_CONF_FILE at import time so the module-level
# constants above reflect the deployed configuration.
reload_constraints()

# Maximum slo segments in buffer
MAX_BUFFERED_SLO_SEGMENTS = 10000

#: Query string format= values to their corresponding content-type values
FORMAT2CONTENT_TYPE = {'plain': 'text/plain', 'json': 'application/json',
                       'xml': 'application/xml'}
def check_metadata(req, target_type):
    """
    Check metadata sent in the request headers.

    :param req: request object
    :param target_type: str: one of: object, container, or account: indicates
                        which type the target storage for the metadata is
    :returns: HTTPBadRequest with bad metadata otherwise None
    """
    prefix = 'x-%s-meta-' % target_type.lower()
    meta_count = 0
    meta_size = 0
    # Python 2: iteritems/basestring.
    for key, value in req.headers.iteritems():
        # Header-size check applies to ALL headers, not only metadata ones.
        if isinstance(value, basestring) and len(value) > MAX_HEADER_SIZE:
            return HTTPBadRequest(body='Header value too long: %s' %
                                  key[:MAX_META_NAME_LENGTH],
                                  request=req, content_type='text/plain')
        if not key.lower().startswith(prefix):
            continue
        key = key[len(prefix):]
        if not key:
            return HTTPBadRequest(body='Metadata name cannot be empty',
                                  request=req, content_type='text/plain')
        # Running totals across all metadata headers on this request.
        meta_count += 1
        meta_size += len(key) + len(value)
        if len(key) > MAX_META_NAME_LENGTH:
            return HTTPBadRequest(
                body='Metadata name too long: %s%s' % (prefix, key),
                request=req, content_type='text/plain')
        elif len(value) > MAX_META_VALUE_LENGTH:
            return HTTPBadRequest(
                body='Metadata value longer than %d: %s%s' % (
                    MAX_META_VALUE_LENGTH, prefix, key),
                request=req, content_type='text/plain')
        elif meta_count > MAX_META_COUNT:
            return HTTPBadRequest(
                body='Too many metadata items; max %d' % MAX_META_COUNT,
                request=req, content_type='text/plain')
        elif meta_size > MAX_META_OVERALL_SIZE:
            return HTTPBadRequest(
                body='Total metadata too large; max %d'
                % MAX_META_OVERALL_SIZE,
                request=req, content_type='text/plain')
    return None
def check_object_creation(req, object_name):
    """
    Check to ensure that everything is alright about an object to be created.

    :param req: HTTP request object
    :param object_name: name of object to be created
    :returns HTTPRequestEntityTooLarge: the object is too large
    :returns HTTPLengthRequired: missing content-length header and not
                                 a chunked request
    :returns HTTPBadRequest: missing or bad content-type header, or
                             bad metadata
    """
    if req.content_length and req.content_length > MAX_FILE_SIZE:
        return HTTPRequestEntityTooLarge(body='Your request is too large.',
                                         request=req,
                                         content_type='text/plain')
    # A body-less request must either declare a length or be chunked.
    if req.content_length is None and \
            req.headers.get('transfer-encoding') != 'chunked':
        return HTTPLengthRequired(request=req)
    # Server-side copy: the new object's data comes from the source object,
    # so the request itself must carry no body.
    if 'X-Copy-From' in req.headers and req.content_length:
        return HTTPBadRequest(body='Copy requests require a zero byte body',
                              request=req, content_type='text/plain')
    if len(object_name) > MAX_OBJECT_NAME_LENGTH:
        return HTTPBadRequest(body='Object name length of %d longer than %d' %
                              (len(object_name), MAX_OBJECT_NAME_LENGTH),
                              request=req, content_type='text/plain')
    if 'Content-Type' not in req.headers:
        return HTTPBadRequest(request=req, content_type='text/plain',
                              body='No content type')
    if not check_utf8(req.headers['Content-Type']):
        return HTTPBadRequest(request=req, body='Invalid Content-Type',
                              content_type='text/plain')
    # Finally validate the object metadata headers.
    return check_metadata(req, 'object')
def check_mount(root, drive):
    """
    Verify that the path to the device is a mount point and mounted.  This
    allows us to fast fail on drives that have been unmounted because of
    issues, and also prevents us for accidentally filling up the root
    partition.

    :param root: base path where the devices are mounted
    :param drive: drive name to be checked
    :returns: True if it is a valid mounted device, False otherwise
    """
    # A drive name needing URL quoting (spaces, slashes, ...) can never be
    # a valid device directory name.
    if urllib.quote_plus(drive) != drive:
        return False
    return utils.ismount(os.path.join(root, drive))
def check_float(string):
    """
    Helper function for checking if a string can be converted to a float.

    :param string: string to be verified as a float
    :returns: True if the string can be converted to a float, False otherwise
    """
    try:
        float(string)
    except ValueError:
        return False
    return True
def valid_timestamp(request):
    """
    Helper function to extract a timestamp from requests that require one.

    :param request: the swob request object
    :returns: a valid Timestamp instance
    :raises: HTTPBadRequest on missing or invalid X-Timestamp
    """
    try:
        # request.timestamp parses the X-Timestamp header and raises
        # InvalidTimestamp when it is absent or malformed; we translate
        # that into a client-facing 400.
        return request.timestamp
    except exceptions.InvalidTimestamp as e:
        raise HTTPBadRequest(body=str(e), request=request,
                             content_type='text/plain')
def check_utf8(string):
    """
    Validate if a string is valid UTF-8 str or unicode and that it
    does not contain any null character.

    :param string: string to be validated
    :returns: True if the string is valid utf-8 str or unicode and
              contains no null characters, False otherwise
    """
    # Empty/None input is rejected outright.
    if not string:
        return False
    try:
        # Python 2 semantics: encode unicode to prove it is representable
        # as UTF-8, or decode a byte str to prove it IS valid UTF-8.
        if isinstance(string, unicode):
            string.encode('utf-8')
        else:
            string.decode('UTF-8')
        return '\x00' not in string
    # If string is unicode, decode() will raise UnicodeEncodeError
    # So, we should catch both UnicodeDecodeError & UnicodeEncodeError
    except UnicodeError:
        return False
def check_copy_from_header(req):
    """
    Validate that the value from x-copy-from header is
    well formatted. We assume the caller ensures that
    x-copy-from header is present in req.headers.

    :param req: HTTP request object
    :returns: A tuple with container name and object name
    :raise: HTTPPreconditionFailed if x-copy-from value
            is not well formatted.
    """
    src_header = unquote(req.headers.get('X-Copy-From'))
    if not src_header.startswith('/'):
        src_header = '/' + src_header
    try:
        # Expect exactly /<container>/<object>; the object part may itself
        # contain slashes (rest_with_last=True).
        return utils.split_path(src_header, 2, 2, True)
    except ValueError:
        # BUG FIX: the two adjacent string literals previously concatenated
        # without a separating space, producing the garbled error body
        # "...must be of the form<container name>/<object name>".
        raise HTTPPreconditionFailed(
            request=req,
            body='X-Copy-From header must be of the form '
                 '<container name>/<object name>')
| kalrey/swift | swift/common/constraints.py | Python | apache-2.0 | 10,266 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# (c) Copyright 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os.path
import string
from cinder.brick.initiator import linuxscsi
from cinder.openstack.common import log as logging
from cinder import test
from cinder import utils
LOG = logging.getLogger(__name__)
class LinuxSCSITestCase(test.TestCase):
    """Unit tests for brick's LinuxSCSI helper.

    All shell execution is stubbed: fake_execute records each command line
    into self.cmds so tests can assert on the exact commands issued without
    touching real devices.
    """

    def setUp(self):
        super(LinuxSCSITestCase, self).setUp()
        self.cmds = []
        # Any path resolves to /dev/sdc unless a test overrides realpath.
        self.stubs.Set(os.path, 'realpath', lambda x: '/dev/sdc')
        self.linuxscsi = linuxscsi.LinuxSCSI(None, execute=self.fake_execute)

    def fake_execute(self, *cmd, **kwargs):
        # Record the joined command for later assertions; return empty output.
        # (Python 2: string.join)
        self.cmds.append(string.join(cmd))
        return "", None

    def test_echo_scsi_command(self):
        """echo_scsi_command appends via 'tee -a'."""
        self.linuxscsi.echo_scsi_command("/some/path", "1")
        expected_commands = ['tee -a /some/path']
        self.assertEqual(expected_commands, self.cmds)

    def test_get_name_from_path(self):
        """get_name_from_path returns the realpath'd device, or None when
        the resolved path is not a /dev device."""
        device_name = "/dev/sdc"
        self.stubs.Set(os.path, 'realpath', lambda x: device_name)
        disk_path = ("/dev/disk/by-path/ip-10.10.220.253:3260-"
                     "iscsi-iqn.2000-05.com.3pardata:21810002ac00383d-lun-0")
        name = self.linuxscsi.get_name_from_path(disk_path)
        self.assertEqual(name, device_name)
        self.stubs.Set(os.path, 'realpath', lambda x: "bogus")
        name = self.linuxscsi.get_name_from_path(disk_path)
        self.assertIsNone(name)

    def test_remove_scsi_device(self):
        """Device is only deleted via sysfs when its node exists."""
        self.stubs.Set(os.path, "exists", lambda x: False)
        self.linuxscsi.remove_scsi_device("sdc")
        expected_commands = []
        self.assertEqual(expected_commands, self.cmds)
        self.stubs.Set(os.path, "exists", lambda x: True)
        self.linuxscsi.remove_scsi_device("sdc")
        expected_commands = [('tee -a /sys/block/sdc/device/delete')]
        self.assertEqual(expected_commands, self.cmds)

    def test_flush_multipath_device(self):
        self.linuxscsi.flush_multipath_device('/dev/dm-9')
        expected_commands = [('multipath -f /dev/dm-9')]
        self.assertEqual(expected_commands, self.cmds)

    def test_flush_multipath_devices(self):
        self.linuxscsi.flush_multipath_devices()
        expected_commands = [('multipath -F')]
        self.assertEqual(expected_commands, self.cmds)

    def test_remove_multipath_device(self):
        """Removing a multipath device deletes each underlying SCSI device
        then flushes the multipath map by its wwid."""
        def fake_find_multipath_device(device):
            devices = [{'device': '/dev/sde', 'host': 0,
                        'channel': 0, 'id': 0, 'lun': 1},
                       {'device': '/dev/sdf', 'host': 2,
                        'channel': 0, 'id': 0, 'lun': 1}, ]
            info = {"device": "dm-3",
                    "id": "350002ac20398383d",
                    "devices": devices}
            return info
        self.stubs.Set(os.path, "exists", lambda x: True)
        self.stubs.Set(self.linuxscsi, 'find_multipath_device',
                       fake_find_multipath_device)
        self.linuxscsi.remove_multipath_device('/dev/dm-3')
        expected_commands = [('tee -a /sys/block/sde/device/delete'),
                             ('tee -a /sys/block/sdf/device/delete'),
                             ('multipath -f 350002ac20398383d'), ]
        self.assertEqual(expected_commands, self.cmds)

    def test_find_multipath_device_3par(self):
        """Parse `multipath -l` output in the 3PAR (named mpath) format."""
        def fake_execute(*cmd, **kwargs):
            out = ("mpath6 (350002ac20398383d) dm-3 3PARdata,VV\n"
                   "size=2.0G features='0' hwhandler='0' wp=rw\n"
                   "`-+- policy='round-robin 0' prio=-1 status=active\n"
                   " |- 0:0:0:1 sde 8:64 active undef running\n"
                   " `- 2:0:0:1 sdf 8:80 active undef running\n"
                   )
            return out, None

        # NOTE(review): fake_execute2 (wwid-first output format) is defined
        # but never used below -- presumably intended for a second parsing
        # assertion; confirm and either use or remove it.
        def fake_execute2(*cmd, **kwargs):
            out = ("350002ac20398383d dm-3 3PARdata,VV\n"
                   "size=2.0G features='0' hwhandler='0' wp=rw\n"
                   "`-+- policy='round-robin 0' prio=-1 status=active\n"
                   " |- 0:0:0:1 sde 8:64 active undef running\n"
                   " `- 2:0:0:1 sdf 8:80 active undef running\n"
                   )
            return out, None

        self.stubs.Set(self.linuxscsi, '_execute', fake_execute)
        info = self.linuxscsi.find_multipath_device('/dev/sde')
        LOG.error("info = %s" % info)
        self.assertEqual("/dev/dm-3", info["device"])
        self.assertEqual("/dev/sde", info['devices'][0]['device'])
        self.assertEqual("0", info['devices'][0]['host'])
        self.assertEqual("0", info['devices'][0]['id'])
        self.assertEqual("0", info['devices'][0]['channel'])
        self.assertEqual("1", info['devices'][0]['lun'])
        self.assertEqual("/dev/sdf", info['devices'][1]['device'])
        self.assertEqual("2", info['devices'][1]['host'])
        self.assertEqual("0", info['devices'][1]['id'])
        self.assertEqual("0", info['devices'][1]['channel'])
        self.assertEqual("1", info['devices'][1]['lun'])

    def test_find_multipath_device_svc(self):
        """Parse IBM SVC (2145) multipath output with two priority groups."""
        def fake_execute(*cmd, **kwargs):
            out = ("36005076da00638089c000000000004d5 dm-2 IBM,2145\n"
                   "size=954M features='1 queue_if_no_path' hwhandler='0'"
                   " wp=rw\n"
                   "|-+- policy='round-robin 0' prio=-1 status=active\n"
                   "| |- 6:0:2:0 sde 8:64 active undef running\n"
                   "| `- 6:0:4:0 sdg 8:96 active undef running\n"
                   "`-+- policy='round-robin 0' prio=-1 status=enabled\n"
                   " |- 6:0:3:0 sdf 8:80 active undef running\n"
                   " `- 6:0:5:0 sdh 8:112 active undef running\n"
                   )
            return out, None

        self.stubs.Set(self.linuxscsi, '_execute', fake_execute)
        info = self.linuxscsi.find_multipath_device('/dev/sde')
        LOG.error("info = %s" % info)
        self.assertEqual("/dev/dm-2", info["device"])
        self.assertEqual("/dev/sde", info['devices'][0]['device'])
        self.assertEqual("6", info['devices'][0]['host'])
        self.assertEqual("0", info['devices'][0]['channel'])
        self.assertEqual("2", info['devices'][0]['id'])
        self.assertEqual("0", info['devices'][0]['lun'])
        self.assertEqual("/dev/sdf", info['devices'][2]['device'])
        self.assertEqual("6", info['devices'][2]['host'])
        self.assertEqual("0", info['devices'][2]['channel'])
        self.assertEqual("3", info['devices'][2]['id'])
        self.assertEqual("0", info['devices'][2]['lun'])

    def test_find_multipath_device_ds8000(self):
        """Parse IBM DS8000 (2107900) multipath output."""
        def fake_execute(*cmd, **kwargs):
            out = ("36005076303ffc48e0000000000000101 dm-2 IBM,2107900\n"
                   "size=1.0G features='1 queue_if_no_path' hwhandler='0'"
                   " wp=rw\n"
                   "`-+- policy='round-robin 0' prio=-1 status=active\n"
                   " |- 6:0:2:0 sdd 8:64 active undef running\n"
                   " `- 6:1:0:3 sdc 8:32 active undef running\n"
                   )
            return out, None

        self.stubs.Set(self.linuxscsi, '_execute', fake_execute)
        info = self.linuxscsi.find_multipath_device('/dev/sdd')
        LOG.error("info = %s" % info)
        self.assertEqual("/dev/dm-2", info["device"])
        self.assertEqual("/dev/sdd", info['devices'][0]['device'])
        self.assertEqual("6", info['devices'][0]['host'])
        self.assertEqual("0", info['devices'][0]['channel'])
        self.assertEqual("2", info['devices'][0]['id'])
        self.assertEqual("0", info['devices'][0]['lun'])
        self.assertEqual("/dev/sdc", info['devices'][1]['device'])
        self.assertEqual("6", info['devices'][1]['host'])
        self.assertEqual("1", info['devices'][1]['channel'])
        self.assertEqual("0", info['devices'][1]['id'])
        self.assertEqual("3", info['devices'][1]['lun'])
| ntt-sic/cinder | cinder/tests/brick/test_brick_linuxscsi.py | Python | apache-2.0 | 8,511 |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
import uuid
from airflow.providers.amazon.aws.hooks.kinesis import AwsFirehoseHook
try:
from moto import mock_kinesis
except ImportError:
mock_kinesis = None
class TestAwsFirehoseHook(unittest.TestCase):
    """Tests for AwsFirehoseHook against a moto-mocked Kinesis Firehose.

    Both tests are skipped when the optional ``moto`` package (and its
    ``mock_kinesis`` decorator) is not installed.
    """

    @unittest.skipIf(mock_kinesis is None, 'mock_kinesis package not present')
    @mock_kinesis
    def test_get_conn_returns_a_boto3_connection(self):
        """The hook exposes a usable (non-None) boto3 client."""
        hook = AwsFirehoseHook(
            aws_conn_id='aws_default', delivery_stream="test_airflow", region_name="us-east-1"
        )
        self.assertIsNotNone(hook.get_conn())

    @unittest.skipIf(mock_kinesis is None, 'mock_kinesis package not present')
    @mock_kinesis
    def test_insert_batch_records_kinesis_firehose(self):
        """put_records delivers a 100-record batch with zero failures.

        A delivery stream is first created through the mocked client so that
        the hook's stream name resolves.
        """
        hook = AwsFirehoseHook(
            aws_conn_id='aws_default', delivery_stream="test_airflow", region_name="us-east-1"
        )
        response = hook.get_conn().create_delivery_stream(
            DeliveryStreamName="test_airflow",
            S3DestinationConfiguration={
                'RoleARN': 'arn:aws:iam::123456789012:role/firehose_delivery_role',
                'BucketARN': 'arn:aws:s3:::kinesis-test',
                'Prefix': 'airflow/',
                'BufferingHints': {'SizeInMBs': 123, 'IntervalInSeconds': 124},
                'CompressionFormat': 'UNCOMPRESSED',
            },
        )
        stream_arn = response['DeliveryStreamARN']
        # moto's fake account id is 123456789012, so the ARN is deterministic.
        self.assertEqual(stream_arn, "arn:aws:firehose:us-east-1:123456789012:deliverystream/test_airflow")
        records = [{"Data": str(uuid.uuid4())} for _ in range(100)]
        response = hook.put_records(records)
        self.assertEqual(response['FailedPutCount'], 0)
        self.assertEqual(response['ResponseMetadata']['HTTPStatusCode'], 200)
| DinoCow/airflow | tests/providers/amazon/aws/hooks/test_kinesis.py | Python | apache-2.0 | 2,545 |
# Copyright 2017 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
import argparse
import base64
import fnmatch
import glob
import hashlib
import os
import re
import zipfile
from pants.util.contextutil import open_zip, temporary_dir
from pants.util.dirutil import read_file, safe_file_dump
def replace_in_file(workspace, src_file_path, from_str, to_str):
    """Replace from_str with to_str in both the name and the content of the given file.

    Returns the (possibly unchanged) destination filename when any edit was
    needed, or None when neither the content nor the filename contains
    from_str. When the filename itself changed, the original file is removed.
    """
    src_abs_path = os.path.join(workspace, src_file_path)
    old_bytes = from_str.encode("ascii")
    new_bytes = to_str.encode("ascii")
    content = read_file(src_abs_path, binary_mode=True)

    # Nothing to do if the marker appears in neither the bytes nor the path.
    if old_bytes not in content and from_str not in src_file_path:
        return None

    dst_file_path = src_file_path.replace(from_str, to_str)
    dst_abs_path = os.path.join(workspace, dst_file_path)
    safe_file_dump(dst_abs_path, content.replace(old_bytes, new_bytes), mode="wb")
    if dst_file_path != src_file_path:
        # The file was renamed: drop the stale original.
        os.unlink(src_abs_path)
    return dst_file_path
def any_match(globs, filename):
    """Return True if *filename* matches at least one fnmatch pattern in *globs*."""
    for pattern in globs:
        if fnmatch.fnmatch(filename, pattern):
            return True
    return False
def locate_dist_info_dir(workspace):
    """Find the unique `*.dist-info` directory directly under *workspace*.

    Returns its path relative to the workspace; raises if there is no such
    directory or more than one.
    """
    dir_suffix = "*.dist-info"
    candidates = glob.glob(os.path.join(workspace, dir_suffix))
    if len(candidates) == 1:
        return os.path.relpath(candidates[0], workspace)
    if not candidates:
        raise Exception("Unable to locate `{}` directory in input whl.".format(dir_suffix))
    raise Exception("Too many `{}` directories in input whl: {}".format(dir_suffix, candidates))
def fingerprint_file(workspace, filename):
    """Given a relative filename located in a workspace, fingerprint the file.

    Returns a tuple of fingerprint string and size string, suitable for a
    wheel RECORD row.
    """
    content = read_file(os.path.join(workspace, filename), binary_mode=True)
    digest = hashlib.sha256(content).digest()
    # Wheel RECORD hashes must be urlsafe base64 with the trailing '='
    # padding stripped (PEP 376 / the wheel spec). Standard padded base64
    # (previously used here) produces '+', '/' and '=' characters that do
    # not match what installers compute when verifying the RECORD.
    b64_encoded = base64.urlsafe_b64encode(digest).decode().rstrip("=")
    return f"sha256={b64_encoded}", str(len(content))
def rewrite_record_file(workspace, src_record_file, mutated_file_tuples):
    """Given a RECORD file and list of mutated file tuples, update the RECORD file in place.

    The RECORD file should always be a member of the mutated files, due to both
    containing versions, and having a version in its filename.

    Raises if the RECORD file itself was not among the mutated files.
    """
    mutated_files = set()
    dst_record_file = None
    for src, dst in mutated_file_tuples:
        if src == src_record_file:
            dst_record_file = dst
        else:
            mutated_files.add(dst)
    if not dst_record_file:
        raise Exception(
            "Malformed whl or bad globs: `{}` was not rewritten.".format(src_record_file)
        )
    output_records = []
    file_name = os.path.join(workspace, dst_record_file)
    for line in read_file(file_name).splitlines():
        # A RECORD row is `path,hash,size`: exactly three fields, of which only
        # the path can contain commas. Split from the right with maxsplit=2 so
        # a comma-bearing path stays intact in `filename`; the previous
        # maxsplit of 3 could produce four pieces and crash the unpacking.
        filename, fingerprint_str, size_str = line.rsplit(",", 2)
        if filename in mutated_files:
            # Content changed: recompute hash and size for the new bytes.
            fingerprint_str, size_str = fingerprint_file(workspace, filename)
            output_line = ",".join((filename, fingerprint_str, size_str))
        else:
            output_line = line
        output_records.append(output_line)
    # RECORD conventionally uses CRLF line endings; preserve that.
    safe_file_dump(file_name, "\r\n".join(output_records) + "\r\n")
# The wheel METADATA file will contain a line like: `Version: 1.11.0.dev3+7951ec01`.
# We don't parse the entire file because it's large (it contains the entire release notes history).
_version_re = re.compile(r"Version: (?P<version>\S+)")
def reversion(
    *, whl_file: str, dest_dir: str, target_version: str, extra_globs: list[str] | None = None
) -> None:
    """Create a copy of *whl_file* in *dest_dir* with its version changed to *target_version*.

    The input wheel's current version is read from the `Version:` line of its
    METADATA file; every occurrence of that version string is then replaced
    (in both file names and file contents) within files matching the
    dist-info/nspkg globs plus any *extra_globs*, the RECORD file is
    re-fingerprinted, and a new wheel is zipped up under the renamed filename.

    Raises if no `Version:` line can be found in METADATA.
    """
    all_globs = ["*.dist-info/*", "*-nspkg.pth", *(extra_globs or ())]
    with temporary_dir() as workspace:
        # Extract the input.
        with open_zip(whl_file, "r") as whl:
            src_filenames = whl.namelist()
            whl.extractall(workspace)
        # Determine the location of the `dist-info` directory.
        dist_info_dir = locate_dist_info_dir(workspace)
        record_file = os.path.join(dist_info_dir, "RECORD")
        # Get version from the input whl's metadata.
        input_version = None
        metadata_file = os.path.join(workspace, dist_info_dir, "METADATA")
        with open(metadata_file, "r") as info:
            for line in info:
                mo = _version_re.match(line)
                if mo:
                    input_version = mo.group("version")
                    break
        if not input_version:
            raise Exception("Could not find `Version:` line in {}".format(metadata_file))
        # Rewrite and move all files (including the RECORD file), recording which files need to be
        # re-fingerprinted due to content changes.
        dst_filenames = []
        refingerprint = []
        for src_filename in src_filenames:
            # Directory entries carry no content; skip them.
            if os.path.isdir(os.path.join(workspace, src_filename)):
                continue
            dst_filename = src_filename
            if any_match(all_globs, src_filename):
                rewritten = replace_in_file(workspace, src_filename, input_version, target_version)
                if rewritten is not None:
                    dst_filename = rewritten
                    refingerprint.append((src_filename, dst_filename))
            dst_filenames.append(dst_filename)
        # Refingerprint relevant entries in the RECORD file under their new names.
        rewrite_record_file(workspace, record_file, refingerprint)
        # Create a new output whl in the destination.
        dst_whl_filename = os.path.basename(whl_file).replace(input_version, target_version)
        dst_whl_file = os.path.join(dest_dir, dst_whl_filename)
        with open_zip(dst_whl_file, "w", zipfile.ZIP_DEFLATED) as whl:
            for dst_filename in dst_filenames:
                whl.write(os.path.join(workspace, dst_filename), dst_filename)
        print("Wrote whl with version {} to {}.\n".format(target_version, dst_whl_file))
def create_parser() -> argparse.ArgumentParser:
    """Build the command-line parser for the wheel reversion tool."""
    parser = argparse.ArgumentParser()
    positional_args = (
        ("whl_file", "The input whl file."),
        ("dest_dir", "The destination directory for the output whl."),
        ("target_version", "The target version of the output whl."),
    )
    for arg_name, arg_help in positional_args:
        parser.add_argument(arg_name, help=arg_help)
    parser.add_argument(
        "--extra-globs",
        action="append",
        default=[],
        help="Extra globs (fnmatch) to rewrite within the whl: may be specified multiple times.",
    )
    return parser
def main():
    """CLI entry point: copy the input whl under a new version.

    The replacement is done via string substitution in files matching a list
    of globs; pass `--extra-globs` (repeatable) to rewrite additional files,
    e.g. `--extra-globs='thing-to-match*.txt'`.
    """
    options = create_parser().parse_args()
    reversion(
        whl_file=options.whl_file,
        dest_dir=options.dest_dir,
        target_version=options.target_version,
        extra_globs=options.extra_globs,
    )
if __name__ == "__main__":
    main()
| patricklaw/pants | build-support/bin/reversion.py | Python | apache-2.0 | 7,174 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import mxnet as mx
from mxnet import gluon
from mxnet.gluon import nn
from mxnet.test_utils import assert_almost_equal
import numpy as np
from nose.tools import raises
from copy import deepcopy
import warnings
def test_parameter():
    """Parameter initialized on multiple contexts exposes per-context data/grad
    and can be moved to new contexts with reset_ctx."""
    p = gluon.Parameter('weight', shape=(10, 10))
    p.initialize(init='xavier', ctx=[mx.cpu(0), mx.cpu(1)])
    # One data/grad array per context.
    assert len(p.list_data()) == 2
    assert len(p.list_grad()) == 2
    assert p.data(mx.cpu(1)).context == mx.cpu(1)
    assert p.data(mx.cpu(0)).shape == (10, 10)
    assert p.var().name == 'weight'
    p.reset_ctx(ctx=[mx.cpu(1), mx.cpu(2)])
    assert p.list_ctx() == [mx.cpu(1), mx.cpu(2)]
def test_paramdict():
    """ParameterDict prefixes entries with its name and round-trips via save/load."""
    params = gluon.ParameterDict('net_')
    params.get('weight', shape=(10, 10))
    assert list(params.keys()) == ['net_weight']
    params.initialize(ctx=mx.cpu())
    params.save('test.params')
    params.load('test.params', mx.cpu())
def test_parameter_sharing():
    """A Block constructed with another block's params shares its parameters,
    and saved params can be loaded into a block with a different prefix."""
    class Net(gluon.Block):
        def __init__(self, **kwargs):
            super(Net, self).__init__(**kwargs)
            with self.name_scope():
                self.dense0 = nn.Dense(5, in_units=5)
                self.dense1 = nn.Dense(5, in_units=5)
        def forward(self, x):
            return self.dense1(self.dense0(x))
    net1 = Net(prefix='net1_')
    # net2 borrows net1's parameters, so only net1 needs initialization.
    net2 = Net(prefix='net2_', params=net1.collect_params())
    net1.collect_params().initialize()
    net2(mx.nd.zeros((3, 5)))
    net1.save_params('net1.params')
    net3 = Net(prefix='net3_')
    net3.load_params('net1.params', mx.cpu())
def test_basic():
    """Sequential supports symbolic and NDArray invocation and grad_req toggling."""
    model = nn.Sequential()
    model.add(nn.Dense(128, activation='tanh', in_units=10, flatten=False))
    model.add(nn.Dropout(0.5))
    model.add(nn.Dense(64, activation='tanh', in_units=256),
              nn.Dense(32, in_units=64))
    model.add(nn.Activation('relu'))
    # symbol
    x = mx.sym.var('data')
    y = model(x)
    assert len(y.list_arguments()) == 7
    # ndarray
    model.collect_params().initialize(mx.init.Xavier(magnitude=2.24))
    x = model(mx.nd.zeros((32, 2, 10)))
    assert x.shape == (32, 32)
    x.wait_to_read()
    # grad_req='null' drops the gradient buffers; 'write' re-creates them.
    model.collect_params().setattr('grad_req', 'null')
    assert list(model.collect_params().values())[0]._grad is None
    model.collect_params().setattr('grad_req', 'write')
    assert list(model.collect_params().values())[0]._grad is not None
def test_dense():
    """Dense honors flatten=False (per-position output) vs flatten=True (2D output)."""
    model = nn.Dense(128, activation='tanh', in_units=10, flatten=False, prefix='test_')
    inputs = mx.sym.Variable('data')
    outputs = model(inputs)
    assert set(model.collect_params().keys()) == set(['test_weight', 'test_bias'])
    assert outputs.list_outputs() == ['test_tanh_fwd_output']
    args, outs, auxs = outputs.infer_shape(data=(2, 3, 10))
    # flatten=False keeps the leading axes: (2, 3, 128).
    assert outs == [(2, 3, 128)]
    model = nn.Dense(128, activation='relu', in_units=30, flatten=True, prefix='test2_')
    inputs = mx.sym.Variable('data')
    outputs = model(inputs)
    assert set(model.collect_params().keys()) == set(['test2_weight', 'test2_bias'])
    assert outputs.list_outputs() == ['test2_relu_fwd_output']
    args, outs, auxs = outputs.infer_shape(data=(17, 2, 5, 3))
    # flatten=True collapses everything but the batch axis.
    assert outs == [(17, 128)]
def test_symbol_block():
    """SymbolBlock wraps a symbol's internals and works inside a hybridized block."""
    model = nn.HybridSequential()
    model.add(nn.Dense(128, activation='tanh'))
    model.add(nn.Dropout(0.5))
    model.add(nn.Dense(64, activation='tanh'),
              nn.Dense(32, in_units=64))
    model.add(nn.Activation('relu'))
    model.initialize()
    inputs = mx.sym.var('data')
    outputs = model(inputs).get_internals()
    smodel = gluon.SymbolBlock(outputs, inputs, params=model.collect_params())
    assert len(smodel(mx.nd.zeros((16, 10)))) == 14
    out = smodel(mx.sym.var('in'))
    assert len(out) == len(outputs.list_outputs())
    class Net(nn.HybridBlock):
        def __init__(self, model):
            super(Net, self).__init__()
            self.model = model
        def hybrid_forward(self, F, x):
            out = self.model(x)
            return F.add_n(*[i.sum() for i in out])
    net = Net(smodel)
    net.hybridize()
    assert isinstance(net(mx.nd.zeros((16, 10))), mx.nd.NDArray)
    # Same wrapping, but with a single-output SymbolBlock.
    inputs = mx.sym.var('data')
    outputs = model(inputs)
    smodel = gluon.SymbolBlock(outputs, inputs, params=model.collect_params())
    net = Net(smodel)
    net.hybridize()
    assert isinstance(net(mx.nd.zeros((16, 10))), mx.nd.NDArray)
def check_layer_forward(layer, dshape):
    """Run *layer* forward/backward imperatively and again after hybridize(),
    asserting outputs and input gradients match between the two modes."""
    layer.collect_params().initialize()
    x = mx.nd.ones(shape=dshape)
    x.attach_grad()
    with mx.autograd.record():
        out = layer(x)
    out.backward()
    np_out = out.asnumpy()
    np_dx = x.grad.asnumpy()
    # Repeat the same computation in hybridized (symbolic) mode.
    layer.hybridize()
    x = mx.nd.ones(shape=dshape)
    x.attach_grad()
    with mx.autograd.record():
        out = layer(x)
    out.backward()
    mx.test_utils.assert_almost_equal(np_out, out.asnumpy(), rtol=1e-5, atol=1e-6)
    mx.test_utils.assert_almost_equal(np_dx, x.grad.asnumpy(), rtol=1e-5, atol=1e-6)
def test_conv():
    """Forward/backward parity for 1D/2D/3D convolutions across kernel,
    stride, dilation, padding and group configurations."""
    layers1d = [
        nn.Conv1D(16, 3, in_channels=4),
        nn.Conv1D(16, 3, groups=2, in_channels=4),
        nn.Conv1D(16, 3, strides=3, groups=2, in_channels=4),
        ]
    for layer in layers1d:
        check_layer_forward(layer, (1, 4, 10))
    layers2d = [
        nn.Conv2D(16, (3, 4), in_channels=4),
        nn.Conv2D(16, (5, 4), in_channels=4),
        nn.Conv2D(16, (3, 4), groups=2, in_channels=4),
        nn.Conv2D(16, (3, 4), strides=4, in_channels=4),
        nn.Conv2D(16, (3, 4), dilation=4, in_channels=4),
        nn.Conv2D(16, (3, 4), padding=4, in_channels=4),
        ]
    for layer in layers2d:
        check_layer_forward(layer, (1, 4, 20, 20))
    layers3d = [
        nn.Conv3D(16, (1, 8, 4), in_channels=4, activation='relu'),
        nn.Conv3D(16, (5, 4, 3), in_channels=4),
        nn.Conv3D(16, (3, 3, 3), groups=2, in_channels=4),
        nn.Conv3D(16, 4, strides=4, in_channels=4),
        nn.Conv3D(16, (3, 3, 3), padding=4, in_channels=4),
        ]
    for layer in layers3d:
        check_layer_forward(layer, (1, 4, 10, 10, 10))
    # NHWC/NDHWC layouts are constructed but not exercised (checks commented out),
    # presumably unsupported on this backend — left as in the original.
    layer = nn.Conv2D(16, (3, 3), layout='NHWC', in_channels=4)
    # check_layer_forward(layer, (1, 10, 10, 4))
    layer = nn.Conv3D(16, (3, 3, 3), layout='NDHWC', in_channels=4)
    # check_layer_forward(layer, (1, 10, 10, 10, 4))
def test_deconv():
    """Forward/backward parity for 2D transposed convolutions; the 1D/3D and
    alternate-layout variants are intentionally disabled (commented out)."""
    # layers1d = [
    #     nn.Conv1DTranspose(16, 3, in_channels=4),
    #     nn.Conv1DTranspose(16, 3, groups=2, in_channels=4),
    #     nn.Conv1DTranspose(16, 3, strides=3, groups=2, in_channels=4),
    #     ]
    # for layer in layers1d:
    #     check_layer_forward(layer, (1, 4, 10))
    layers2d = [
        nn.Conv2DTranspose(16, (3, 4), in_channels=4),
        nn.Conv2DTranspose(16, (5, 4), in_channels=4),
        nn.Conv2DTranspose(16, (3, 4), groups=2, in_channels=4),
        nn.Conv2DTranspose(16, (3, 4), strides=4, in_channels=4),
        nn.Conv2DTranspose(16, (3, 4), dilation=4, in_channels=4),
        nn.Conv2DTranspose(16, (3, 4), padding=4, in_channels=4),
        nn.Conv2DTranspose(16, (3, 4), strides=4, output_padding=3, in_channels=4),
        ]
    for layer in layers2d:
        check_layer_forward(layer, (1, 4, 20, 20))
    # layers3d = [
    #     nn.Conv3DTranspose(16, (1, 8, 4), in_channels=4),
    #     nn.Conv3DTranspose(16, (5, 4, 3), in_channels=4),
    #     nn.Conv3DTranspose(16, (3, 3, 3), groups=2, in_channels=4),
    #     nn.Conv3DTranspose(16, 4, strides=4, in_channels=4),
    #     nn.Conv3DTranspose(16, (3, 3, 3), padding=4, in_channels=4),
    #     ]
    # for layer in layers3d:
    #     check_layer_forward(layer, (1, 4, 10, 10, 10))
    #
    #
    # layer = nn.Conv2DTranspose(16, (3, 3), layout='NHWC', in_channels=4)
    # # check_layer_forward(layer, (1, 10, 10, 4))
    #
    # layer = nn.Conv3DTranspose(16, (3, 3, 3), layout='NDHWC', in_channels=4)
    # # check_layer_forward(layer, (1, 10, 10, 10, 4))
def test_pool():
    """Forward/backward parity for 1D/2D/3D max/avg/global pooling, plus
    ceil_mode output-shape behavior for MaxPool2D."""
    layers1d = [
        nn.MaxPool1D(),
        nn.MaxPool1D(3),
        nn.MaxPool1D(3, 2),
        nn.AvgPool1D(),
        nn.GlobalAvgPool1D(),
        ]
    for layer in layers1d:
        check_layer_forward(layer, (1, 2, 10))
    layers2d = [
        nn.MaxPool2D(),
        nn.MaxPool2D((3, 3)),
        nn.MaxPool2D(3, 2),
        nn.AvgPool2D(),
        nn.GlobalAvgPool2D(),
        ]
    for layer in layers2d:
        check_layer_forward(layer, (1, 2, 10, 10))
    layers3d = [
        nn.MaxPool3D(),
        nn.MaxPool3D((3, 3, 3)),
        nn.MaxPool3D(3, 2),
        nn.AvgPool3D(),
        nn.GlobalAvgPool3D(),
        ]
    for layer in layers3d:
        check_layer_forward(layer, (1, 2, 10, 10, 10))
    # test ceil_mode: 10/3 floors to 3 without ceil_mode, ceils to 4 with it.
    x = mx.nd.zeros((2, 2, 10, 10))
    layer = nn.MaxPool2D(3, ceil_mode=False)
    layer.collect_params().initialize()
    assert (layer(x).shape==(2, 2, 3, 3))
    layer = nn.MaxPool2D(3, ceil_mode=True)
    layer.collect_params().initialize()
    assert (layer(x).shape==(2, 2, 4, 4))
def test_batchnorm():
    """Forward/backward parity for BatchNorm."""
    layer = nn.BatchNorm(in_channels=10)
    check_layer_forward(layer, (2, 10, 10, 10))
def test_reshape():
    """Backward works through a reshape of a conv output."""
    x = mx.nd.ones((2, 4, 10, 10))
    layer = nn.Conv2D(10, 2, in_channels=4)
    layer.collect_params().initialize()
    with mx.autograd.record():
        x = layer(x)
        x = x.reshape((-1,))
        x = x + 10
    x.backward()
def test_slice():
    """Backward works through a batch slice of a conv output."""
    x = mx.nd.ones((5, 4, 10, 10))
    layer = nn.Conv2D(10, 2, in_channels=4)
    layer.collect_params().initialize()
    with mx.autograd.record():
        x = layer(x)
        x = x[1:3]
        x = x + 10
    x.backward()
def test_at():
    """Backward works through single-element indexing of a conv output."""
    x = mx.nd.ones((5, 4, 10, 10))
    layer = nn.Conv2D(10, 2, in_channels=4)
    layer.collect_params().initialize()
    with mx.autograd.record():
        x = layer(x)
        x = x[1]
        x = x + 10
    x.backward()
def test_deferred_init():
    """A conv with unspecified in_channels initializes lazily on first call."""
    x = mx.nd.ones((5, 4, 10, 10))
    layer = nn.Conv2D(10, 2)
    layer.collect_params().initialize()
    layer(x)
def check_split_data(x, num_slice, batch_axis, **kwargs):
    """split_data yields num_slice pieces that concatenate back to the input."""
    res = gluon.utils.split_data(x, num_slice, batch_axis, **kwargs)
    assert len(res) == num_slice
    mx.test_utils.assert_almost_equal(mx.nd.concat(*res, dim=batch_axis).asnumpy(),
                                      x.asnumpy())
def test_split_data():
    """split_data round-trips along either axis; uneven splits require
    even_split=False, otherwise a ValueError is raised."""
    x = mx.nd.random.uniform(shape=(128, 33, 64))
    check_split_data(x, 8, 0)
    check_split_data(x, 3, 1)
    check_split_data(x, 4, 1, even_split=False)
    check_split_data(x, 15, 1, even_split=False)
    try:
        # 33 is not divisible by 4, so the default even_split must fail.
        check_split_data(x, 4, 1)
    except ValueError:
        return
    assert False, "Should have failed"
def test_flatten():
    """Flatten collapses all non-batch axes; 1D input gains a trailing axis."""
    flatten = nn.Flatten()
    x = mx.nd.zeros((3,4,5,6))
    assert flatten(x).shape == (3, 4*5*6)
    x = mx.nd.zeros((3,6))
    assert flatten(x).shape == (3, 6)
    x = mx.nd.zeros((3,))
    assert flatten(x).shape == (3, 1)
def test_trainer():
    """Trainer applies SGD-with-momentum updates, honors lr_mult, and
    round-trips optimizer state via save_states/load_states."""
    def dict_equ(a, b):
        # Helper: two state dicts have identical keys and array contents.
        assert set(a) == set(b)
        for k in a:
            assert (a[k].asnumpy() == b[k].asnumpy()).all()
    x = gluon.Parameter('x', shape=(10,))
    x.initialize(ctx=[mx.cpu(0), mx.cpu(1)], init='zeros')
    trainer = gluon.Trainer([x], 'sgd', {'learning_rate': 1.0, 'momentum': 0.5})
    with mx.autograd.record():
        for w in x.list_data():
            y = w + 1
            y.backward()
    trainer.step(1)
    # grad is 1 on each of 2 contexts; lr=1.0 -> x = 0 - 2 = -2.
    assert (x.data(mx.cpu(1)).asnumpy() == -2).all()
    x.lr_mult = 0.5
    with mx.autograd.record():
        for w in x.list_data():
            y = w + 1
            y.backward()
    trainer.step(1)
    assert (x.data(mx.cpu(1)).asnumpy() == -4).all()
    trainer.save_states('test.states')
    states = deepcopy(trainer._kvstore._updater.states) if trainer._update_on_kvstore \
             else deepcopy(trainer._updaters[0].states)
    trainer.load_states('test.states')
    if trainer._update_on_kvstore:
        dict_equ(trainer._kvstore._updater.states, states)
        assert trainer._optimizer == trainer._kvstore._updater.optimizer
    else:
        for updater in trainer._updaters:
            dict_equ(updater.states, states)
        assert trainer._optimizer == trainer._updaters[0].optimizer
def test_block_attr_hidden():
    """Plain (non-Block, non-Parameter) attributes may freely change type."""
    b = gluon.Block()
    # regular attributes can change types
    b.a = None
    b.a = 1
@raises(TypeError)
def test_block_attr_block():
    """Reassigning a Block-typed attribute to a non-Block raises TypeError."""
    b = gluon.Block()
    # regular variables can't change types
    b.b = gluon.Block()
    b.b = (2,)
@raises(TypeError)
def test_block_attr_param():
    """Reassigning a Parameter-typed attribute to a non-Parameter raises TypeError."""
    b = gluon.Block()
    # regular variables can't change types
    b.b = gluon.Parameter()
    b.b = (2,)
def test_block_attr_regular():
    """Assigning a child Block registers it in _children, and reassignment
    replaces the registered child."""
    b = gluon.Block()
    # set block attribute also sets _children
    b.c = gluon.Block()
    c2 = gluon.Block()
    b.c = c2
    assert b.c is c2 and b._children[0] is c2
def test_sequential_warning():
    """Hybridizing a plain (non-Hybrid) Sequential emits exactly one warning."""
    with warnings.catch_warnings(record=True) as w:
        b = gluon.nn.Sequential()
        b.add(gluon.nn.Dense(20))
        b.hybridize()
        assert len(w) == 1
def test_global_norm_clip():
    """clip_global_norm returns the global norm (sqrt(9+16)=5), rescales the
    arrays in place, and warns once when a NaN is present."""
    x1 = mx.nd.ones((3,3))
    x2 = mx.nd.ones((4,4))
    norm = gluon.utils.clip_global_norm([x1, x2], 1.0)
    assert norm == 5.0
    assert_almost_equal(x1.asnumpy(), np.ones((3,3))/5)
    assert_almost_equal(x2.asnumpy(), np.ones((4,4))/5)
    x3 = mx.nd.array([1.0, 2.0, float('nan')])
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter("always")
        gluon.utils.clip_global_norm([x1, x3], 2.0)
        assert len(w) == 1
def test_embedding():
    """Embedding backward writes gradient 1 to looked-up rows, 0 elsewhere."""
    layer = gluon.nn.Embedding(10, 100)
    layer.initialize()
    x = mx.nd.array([3,4,2,0,1])
    with mx.autograd.record():
        y = layer(x)
        y.backward()
    # Indices 0-4 were each looked up once; 5-9 never.
    assert (layer.weight.grad()[:5] == 1).asnumpy().all()
    assert (layer.weight.grad()[5:] == 0).asnumpy().all()
def test_export():
    """An exported hybridized gluon model produces identical outputs when
    reloaded as a Module and when its params are loaded into a fresh net."""
    ctx = mx.context.current_context()
    model = gluon.model_zoo.vision.resnet18_v1(
        prefix='resnet', ctx=ctx, pretrained=True)
    model.hybridize()
    data = mx.nd.random.normal(shape=(1, 3, 224, 224))
    out = model(data)
    model.export('gluon')
    # Reload through the symbolic Module API and compare outputs.
    module = mx.mod.Module.load('gluon', 0, label_names=None, context=ctx)
    module.bind(data_shapes=[('data', data.shape)])
    module.forward(mx.io.DataBatch([data], None), is_train=False)
    mod_out, = module.get_outputs()
    assert_almost_equal(out.asnumpy(), mod_out.asnumpy())
    # Reload the exported params into a fresh gluon net and compare again.
    model2 = gluon.model_zoo.vision.resnet18_v1(prefix='resnet', ctx=ctx)
    model2.collect_params().load('gluon-0000.params', ctx)
    out2 = model2(data)
    assert_almost_equal(out.asnumpy(), out2.asnumpy())
def test_hybrid_stale_cache():
    """Mutating a hybridized net (adding or replacing children) invalidates the
    cached graph so the next call reflects the new structure."""
    net = mx.gluon.nn.HybridSequential()
    with net.name_scope():
        net.add(mx.gluon.nn.Dense(10, weight_initializer='zeros', bias_initializer='ones', flatten=False))
    net.hybridize()
    net.initialize()
    net(mx.nd.ones((2,3,5)))
    # Appending after the first hybridized call must not reuse the stale graph.
    net.add(mx.gluon.nn.Flatten())
    assert net(mx.nd.ones((2,3,5))).shape == (2, 30)
    net = mx.gluon.nn.HybridSequential()
    with net.name_scope():
        net.fc1 = mx.gluon.nn.Dense(10, weight_initializer='zeros',
                                    bias_initializer='ones', flatten=False)
        net.fc2 = mx.gluon.nn.Dense(10, weight_initializer='zeros',
                                    bias_initializer='ones', flatten=False)
    net.hybridize()
    net.initialize()
    net(mx.nd.ones((2,3,5)))
    # Replacing a child must likewise invalidate the cache.
    net.fc2 = mx.gluon.nn.Dense(10, weight_initializer='zeros',
                                bias_initializer='ones', flatten=True)
    net.initialize()
    assert net(mx.nd.ones((2,3,5))).shape == (2, 10)
if __name__ == '__main__':
    import nose
    nose.runmodule()
| jermainewang/mxnet | tests/python/unittest/test_gluon.py | Python | apache-2.0 | 16,438 |
"""
Django settings for the admin project.
"""
import os
from urlparse import urlparse
from website import settings as osf_settings
from django.contrib import messages
from api.base.settings import * # noqa
# TODO ALL SETTINGS FROM API WILL BE IMPORTED AND WILL NEED TO BE OVERRRIDEN
# TODO THIS IS A STEP TOWARD INTEGRATING ADMIN & API INTO ONE PROJECT
# import local # Build own local.py (used with postgres)
# TODO - remove duplicated items, as this is now using settings from the API
# Repo root: three levels up from this settings module.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# from the OSF settings
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = osf_settings.SECRET_KEY
# Don't allow migrations
DATABASE_ROUTERS = ['admin.base.db.router.NoMigrationRouter']
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = osf_settings.DEBUG_MODE
DEBUG_PROPAGATE_EXCEPTIONS = True
# session: cookie settings mirror the OSF deployment's secure-mode flags.
SESSION_COOKIE_NAME = 'admin'
SESSION_COOKIE_SECURE = osf_settings.SECURE_MODE
SESSION_COOKIE_HTTPONLY = osf_settings.SESSION_COOKIE_HTTPONLY
# csrf:
CSRF_COOKIE_NAME = 'admin-csrf'
CSRF_COOKIE_SECURE = osf_settings.SECURE_MODE
# set to False: prereg uses a SPA and ajax and grab the token to use it in the requests
CSRF_COOKIE_HTTPONLY = False
ALLOWED_HOSTS = [
    '.osf.io'
]
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
        'OPTIONS': {
            'min_length': 5,
        }
    },
]
USE_L10N = False
# Email settings. Account created for testing. Password shouldn't be hardcoded
# [DEVOPS] this should be set to 'django.core.mail.backends.smtp.EmailBackend' in the > dev local.py.
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
# Sendgrid Email Settings - Using OSF credentials.
# Add settings references to local.py
EMAIL_HOST = osf_settings.MAIL_SERVER
EMAIL_HOST_USER = osf_settings.MAIL_USERNAME
EMAIL_HOST_PASSWORD = osf_settings.MAIL_PASSWORD
EMAIL_PORT = 587
EMAIL_USE_TLS = True
# Application definition
INSTALLED_APPS = (
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.messages',
    'django.contrib.sessions',
    'django.contrib.staticfiles',
    # 3rd party
    'raven.contrib.django.raven_compat',
    'webpack_loader',
    'django_nose',
    'password_reset',
    # OSF
    'osf',
    # Addons
    'addons.osfstorage',
    'addons.wiki',
    'addons.twofactor',
    # Internal apps
    'admin.common_auth',
    'admin.base',
    'admin.pre_reg',
    'admin.spam',
    'admin.metrics',
    'admin.nodes',
    'admin.users',
    'admin.desk',
    'admin.meetings',
)
# Disable migrations for the shared OSF/addon apps (see DATABASE_ROUTERS above).
MIGRATION_MODULES = {
    'osf': None,
    'addons_osfstorage': None,
    'addons_wiki': None,
    'addons_twofactor': None,
}
USE_TZ = True
# local development using https
if osf_settings.SECURE_MODE and osf_settings.DEBUG_MODE:
    INSTALLED_APPS += ('sslserver',)
# Custom user model (extends AbstractBaseUser)
AUTH_USER_MODEL = 'osf.OSFUser'
# TODO: Are there more granular ways to configure reporting specifically related to the API?
RAVEN_CONFIG = {
    'tags': {'App': 'admin'},
    'dsn': osf_settings.SENTRY_DSN,
    'release': osf_settings.VERSION,
}
# Settings related to CORS Headers addon: allow API to receive authenticated requests from OSF
# CORS plugin only matches based on "netloc" part of URL, so as workaround we add that to the list
CORS_ORIGIN_ALLOW_ALL = False
CORS_ORIGIN_WHITELIST = (urlparse(osf_settings.DOMAIN).netloc,
                         osf_settings.DOMAIN,
                         )
CORS_ALLOW_CREDENTIALS = True
MIDDLEWARE_CLASSES = (
    # TokuMX transaction support
    # Needs to go before CommonMiddleware, so that transactions are always started,
    # even in the event of a redirect. CommonMiddleware may cause other middlewares'
    # process_request to be skipped, e.g. when a trailing slash is omitted
    'api.base.middleware.DjangoGlobalMiddleware',
    'api.base.middleware.CeleryTaskMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
    'django.middleware.security.SecurityMiddleware',
)
# Map Django message levels to Bootstrap text classes for templates.
MESSAGE_TAGS = {
    messages.SUCCESS: 'text-success',
    messages.ERROR: 'text-danger',
    messages.WARNING: 'text-warning',
}
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [os.path.join(BASE_DIR, 'templates')],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        }
    }]
ROOT_URLCONF = 'admin.base.urls'
WSGI_APPLICATION = 'admin.base.wsgi.application'
ADMIN_BASE = ''
STATIC_URL = '/static/'
LOGIN_URL = 'account/login/'
LOGIN_REDIRECT_URL = ADMIN_BASE
STATIC_ROOT = os.path.join(os.path.dirname(BASE_DIR), 'static_root')
# NOTE: this assignment previously appeared twice in a row; the redundant
# duplicate was removed.
STATICFILES_DIRS = (
    os.path.join(BASE_DIR, 'static'),
)
LANGUAGE_CODE = 'en-us'
WEBPACK_LOADER = {
    'DEFAULT': {
        'BUNDLE_DIR_NAME': 'public/js/',
        'STATS_FILE': os.path.join(BASE_DIR, 'webpack-stats.json'),
    }
}
TEST_RUNNER = 'django_nose.NoseTestSuiteRunner'
NOSE_ARGS = ['--verbosity=2']
# Keen.io settings in local.py
KEEN_PROJECT_ID = osf_settings.KEEN['private']['project_id']
KEEN_READ_KEY = osf_settings.KEEN['private']['read_key']
KEEN_WRITE_KEY = osf_settings.KEEN['private']['write_key']
# keen_ready is flipped on in local.py when real credentials are provided.
KEEN_CREDENTIALS = {
    'keen_ready': False
}
if KEEN_CREDENTIALS['keen_ready']:
    KEEN_CREDENTIALS.update({
        'keen_project_id': KEEN_PROJECT_ID,
        'keen_read_key': KEEN_READ_KEY,
        'keen_write_key': KEEN_WRITE_KEY
    })
ENTRY_POINTS = {'osf4m': 'osf4m', 'prereg_challenge_campaign': 'prereg',
                'institution_campaign': 'institution'}
# Set in local.py
DESK_KEY = ''
DESK_KEY_SECRET = ''
import collections
import datetime
import mock
import pytz
from babel import dates, Locale
from schema import Schema, And, Use, Or
from modularodm import Q
from modularodm.exceptions import NoResultsFound
from nose.tools import * # noqa PEP8 asserts
from framework.auth import Auth
from framework.auth.core import User
from framework.auth.signals import contributor_removed
from framework.auth.signals import node_deleted
from framework.guid.model import Guid
from website.notifications.tasks import get_users_emails, send_users_email, group_by_node, remove_notifications
from website.notifications import constants
from website.notifications.model import NotificationDigest
from website.notifications.model import NotificationSubscription
from website.notifications import emails
from website.notifications import utils
from website.project.model import Node, Comment
from website import mails
from website.util import api_url_for
from website.util import web_url_for
from tests import factories
from tests.base import capture_signals
from tests.base import OsfTestCase
class TestNotificationsModels(OsfTestCase):
    """Tests for Node.has_permission_on_children across nested components."""

    def setUp(self):
        super(TestNotificationsModels, self).setUp()
        # Create project with component
        self.user = factories.UserFactory()
        self.consolidate_auth = Auth(user=self.user)
        self.parent = factories.ProjectFactory(creator=self.user)
        self.node = factories.NodeFactory(creator=self.user, parent=self.parent)

    def test_has_permission_on_children(self):
        """True when the user can read at least one descendant component."""
        non_admin_user = factories.UserFactory()
        parent = factories.ProjectFactory()
        parent.add_contributor(contributor=non_admin_user, permissions=['read'])
        parent.save()
        node = factories.NodeFactory(parent=parent, category='project')
        sub_component = factories.NodeFactory(parent=node)
        sub_component.add_contributor(contributor=non_admin_user)
        sub_component.save()
        # A sibling component the user has NO access to — presence of one
        # readable child is enough for a True result.
        sub_component2 = factories.NodeFactory(parent=node)
        assert_true(
            node.has_permission_on_children(non_admin_user, 'read')
        )

    def test_check_user_has_permission_excludes_deleted_components(self):
        """A deleted child the user could otherwise read does not count."""
        non_admin_user = factories.UserFactory()
        parent = factories.ProjectFactory()
        parent.add_contributor(contributor=non_admin_user, permissions=['read'])
        parent.save()
        node = factories.NodeFactory(parent=parent, category='project')
        sub_component = factories.NodeFactory(parent=node)
        sub_component.add_contributor(contributor=non_admin_user)
        sub_component.is_deleted = True
        sub_component.save()
        sub_component2 = factories.NodeFactory(parent=node)
        assert_false(
            node.has_permission_on_children(non_admin_user,'read')
        )

    def test_check_user_does_not_have_permission_on_private_node_child(self):
        """False when the only child is private to the user."""
        non_admin_user = factories.UserFactory()
        parent = factories.ProjectFactory()
        parent.add_contributor(contributor=non_admin_user, permissions=['read'])
        parent.save()
        node = factories.NodeFactory(parent=parent, category='project')
        sub_component = factories.NodeFactory(parent=node)
        assert_false(
            node.has_permission_on_children(non_admin_user,'read')
        )

    def test_check_user_child_node_permissions_false_if_no_children(self):
        """False when the node has no children at all."""
        non_admin_user = factories.UserFactory()
        parent = factories.ProjectFactory()
        parent.add_contributor(contributor=non_admin_user, permissions=['read'])
        parent.save()
        node = factories.NodeFactory(parent=parent, category='project')
        assert_false(
            node.has_permission_on_children(non_admin_user,'read')
        )

    def test_check_admin_has_permissions_on_private_component(self):
        """The project creator can read private descendants."""
        parent = factories.ProjectFactory()
        node = factories.NodeFactory(parent=parent, category='project')
        sub_component = factories.NodeFactory(parent=node)
        assert_true(
            node.has_permission_on_children(parent.creator,'read')
        )

    def test_check_user_private_node_child_permissions_excludes_pointers(self):
        """Pointers (links) to nodes the user can read do not grant permission."""
        user = factories.UserFactory()
        parent = factories.ProjectFactory()
        pointed = factories.ProjectFactory(contributor=user)
        parent.add_pointer(pointed, Auth(parent.creator))
        parent.save()
        assert_false(
            parent.has_permission_on_children(user,'read')
        )
class TestSubscriptionView(OsfTestCase):
    """View-level tests for the 'configure_subscription' endpoint:
    creating, changing, and removing notification subscriptions."""
    def setUp(self):
        super(TestSubscriptionView, self).setUp()
        self.node = factories.NodeFactory()
        self.user = self.node.creator
    def test_create_new_subscription(self):
        """POSTing a notification_type creates a subscription; POSTing a
        different type moves the user between notification lists."""
        payload = {
            'id': self.node._id,
            'event': 'comments',
            'notification_type': 'email_transactional'
        }
        url = api_url_for('configure_subscription')
        self.app.post_json(url, payload, auth=self.node.creator.auth)
        # check that subscription was created
        event_id = self.node._id + '_' + 'comments'
        s = NotificationSubscription.find_one(Q('_id', 'eq', event_id))
        # check that user was added to notification_type field
        assert_equal(payload['id'], s.owner._id)
        assert_equal(payload['event'], s.event_name)
        assert_in(self.node.creator, getattr(s, payload['notification_type']))
        # change subscription
        new_payload = {
            'id': self.node._id,
            'event': 'comments',
            'notification_type': 'email_digest'
        }
        url = api_url_for('configure_subscription')
        self.app.post_json(url, new_payload, auth=self.node.creator.auth)
        s.reload()
        # user must have moved from the old notification list to the new one
        assert_false(self.node.creator in getattr(s, payload['notification_type']))
        assert_in(self.node.creator, getattr(s, new_payload['notification_type']))
    def test_adopt_parent_subscription_default(self):
        """'adopt_parent' must not create a standalone subscription record."""
        payload = {
            'id': self.node._id,
            'event': 'comments',
            'notification_type': 'adopt_parent'
        }
        url = api_url_for('configure_subscription')
        self.app.post_json(url, payload, auth=self.node.creator.auth)
        event_id = self.node._id + '_' + 'comments'
        # confirm subscription was not created
        with assert_raises(NoResultsFound):
            NotificationSubscription.find_one(Q('_id', 'eq', event_id))
    def test_change_subscription_to_adopt_parent_subscription_removes_user(self):
        """Switching an existing subscription to 'adopt_parent' removes the
        user from every notification list on that subscription."""
        payload = {
            'id': self.node._id,
            'event': 'comments',
            'notification_type': 'email_transactional'
        }
        url = api_url_for('configure_subscription')
        self.app.post_json(url, payload, auth=self.node.creator.auth)
        # check that subscription was created
        event_id = self.node._id + '_' + 'comments'
        s = NotificationSubscription.find_one(Q('_id', 'eq', event_id))
        # change subscription to adopt_parent
        new_payload = {
            'id': self.node._id,
            'event': 'comments',
            'notification_type': 'adopt_parent'
        }
        url = api_url_for('configure_subscription')
        self.app.post_json(url, new_payload, auth=self.node.creator.auth)
        s.reload()
        # assert that user is removed from the subscription entirely
        for n in constants.NOTIFICATION_TYPES:
            assert_false(self.node.creator in getattr(s, n))
class TestRemoveContributor(OsfTestCase):
    """Removing a contributor also prunes their notification subscriptions,
    except where they retain admin-derived (read-only) access."""
    def setUp(self):
        # BUG FIX: original called super(OsfTestCase, self).setUp(), which
        # resolves to OsfTestCase's *parent* and silently skips
        # OsfTestCase.setUp. Name this class, as every sibling test does.
        super(TestRemoveContributor, self).setUp()
        self.project = factories.ProjectFactory()
        self.contributor = factories.UserFactory()
        self.project.add_contributor(contributor=self.contributor, permissions=['read'])
        self.project.save()
        self.subscription = factories.NotificationSubscriptionFactory(
            _id=self.project._id + '_comments',
            owner=self.project
        )
        self.subscription.save()
        self.subscription.email_transactional.append(self.contributor)
        self.subscription.email_transactional.append(self.project.creator)
        self.subscription.save()
        self.node = factories.NodeFactory(parent=self.project)
        self.node.add_contributor(contributor=self.project.creator, permissions=['read', 'write', 'admin'])
        self.node.save()
        self.node_subscription = factories.NotificationSubscriptionFactory(
            _id=self.node._id + '_comments',
            owner=self.node
        )
        self.node_subscription.save()
        self.node_subscription.email_transactional.append(self.project.creator)
        self.node_subscription.email_transactional.append(self.node.creator)
        self.node_subscription.save()
    def test_removed_non_admin_contributor_is_removed_from_subscriptions(self):
        """Removing a plain contributor drops them from the subscription."""
        assert_in(self.contributor, self.subscription.email_transactional)
        self.project.remove_contributor(self.contributor, auth=Auth(self.project.creator))
        assert_not_in(self.contributor, self.project.contributors)
        assert_not_in(self.contributor, self.subscription.email_transactional)
    def test_removed_non_parent_admin_contributor_is_removed_from_subscriptions(self):
        """A node creator removed from the node loses its subscription."""
        assert_in(self.node.creator, self.node_subscription.email_transactional)
        self.node.remove_contributor(self.node.creator, auth=Auth(self.node.creator))
        assert_not_in(self.node.creator, self.node.contributors)
        assert_not_in(self.node.creator, self.node_subscription.email_transactional)
    def test_removed_contributor_admin_on_parent_not_removed_from_node_subscription(self):
        # Admin on parent project is removed as a contributor on a component. Check
        # that admin is not removed from component subscriptions, as the admin
        # now has read-only access.
        assert_in(self.project.creator, self.node_subscription.email_transactional)
        self.node.remove_contributor(self.project.creator, auth=Auth(self.project.creator))
        assert_not_in(self.project.creator, self.node.contributors)
        assert_in(self.project.creator, self.node_subscription.email_transactional)
    def test_remove_contributor_signal_called_when_contributor_is_removed(self):
        """remove_contributor must emit the contributor_removed signal."""
        with capture_signals() as mock_signals:
            self.project.remove_contributor(self.contributor, auth=Auth(self.project.creator))
        assert_equal(mock_signals.signals_sent(), set([contributor_removed]))
class TestRemoveNodeSignal(OsfTestCase):
    """Deleting a node must emit node_deleted and clear its subscriptions."""
    def test_node_subscriptions_and_backrefs_removed_when_node_is_deleted(self):
        project = factories.ProjectFactory()
        subscription = factories.NotificationSubscriptionFactory(
            _id=project._id + '_comments',
            owner=project
        )
        subscription.save()
        subscription.email_transactional.append(project.creator)
        subscription.save()
        # NOTE(review): 'email_transactional' is read off the *user* here —
        # presumably a backref list populated when the subscription saved;
        # confirm against the NotificationSubscription model.
        s = getattr(project.creator, 'email_transactional', [])
        assert_equal(len(s), 1)
        with capture_signals() as mock_signals:
            project.remove_node(auth=Auth(project.creator))
        assert_true(project.is_deleted)
        assert_equal(mock_signals.signals_sent(), set([node_deleted]))
        # backref list should be empty once the node is deleted
        s = getattr(project.creator, 'email_transactional', [])
        assert_equal(len(s), 0)
        with assert_raises(NoResultsFound):
            NotificationSubscription.find_one(Q('owner', 'eq', project))
def list_or_dict(data):
    """Yield each value in *data* that is itself a list or a dict.

    Looks one level deep into either a dict (its values) or a list (its
    items); any other input type yields nothing.
    """
    if isinstance(data, dict):
        for value in data.values():
            if isinstance(value, (dict, list)):
                yield value
    elif isinstance(data, list):
        for element in data:
            if isinstance(element, (dict, list)):
                yield element
def has(data, sub_data):
    """Recursively check whether *sub_data* appears anywhere inside *data*.

    WARNING: Don't use on huge structures.

    :param data: data structure to search (list or dict; dicts are searched
        by key at the top level, then by nested list/dict values)
    :param sub_data: the item being looked for
    :return: True or False
    """
    try:
        # FIX: use the next() builtin instead of the Python-2-only
        # generator .next() method; behavior (including the StopIteration
        # on exhaustion) is identical, and the code now also runs on Py3.
        next(item for item in data if item == sub_data)
        return True
    except StopIteration:
        # Not at this level: recurse into every nested list/dict.
        lists_and_dicts = list_or_dict(data)
        for item in lists_and_dicts:
            if has(item, sub_data):
                return True
    return False
def subscription_schema(project, structure, level=0):
    """Recursively build a validation schema for serialized subscription data.

    :param project: node whose attribute types seed the validators
    :param structure: nested lists of nodes (sub-lists) and event markers
    :param level: recursion depth, embedded into validator error messages
    :return: a ``Schema`` at the top level, otherwise a plain schema dict
    """
    children = [
        subscription_schema(project, substructure, level=level + 1)
        for substructure in list_or_dict(structure)
    ]
    children.append(event_schema(level))
    node_schema = {
        'node': {
            'id': Use(type(project._id), error="node_id{}".format(level)),
            'title': Use(type(project.title), error="node_title{}".format(level)),
            'url': Use(type(project.url), error="node_{}".format(level))
        },
        'kind': And(str, Use(lambda s: s in ('node', 'folder'),
                             error="kind didn't match node or folder {}".format(level))),
        'nodeType': Use(lambda s: s in ('project', 'component'), error='nodeType not project or component'),
        'category': Use(lambda s: s in Node.CATEGORY_MAP, error='category not in Node.CATEGORY_MAP'),
        'permissions': {
            'view': Use(lambda s: s in (True, False), error='view permissions is not True/False')
        },
        'children': children
    }
    return Schema([node_schema]) if level == 0 else node_schema
def event_schema(level=None):
    """Return a schema fragment that validates one serialized event entry.

    :param level: recursion depth from subscription_schema; used only to make
        validation error messages traceable to the failing depth.
    """
    return {
        'event': {
            'title': And(Use(str, error="event_title{} not a string".format(level)),
                         Use(lambda s: s in constants.NOTIFICATION_TYPES,
                             error="event_title{} not in list".format(level))),
            'description': And(Use(str, error="event_desc{} not a string".format(level)),
                               Use(lambda s: s in constants.NODE_SUBSCRIPTIONS_AVAILABLE,
                                   error="event_desc{} not in list".format(level))),
            'notificationType': And(str, Or('adopt_parent', lambda s: s in constants.NOTIFICATION_TYPES)),
            'parent_notification_type': Or(None, 'adopt_parent', lambda s: s in constants.NOTIFICATION_TYPES)
        },
        'kind': 'event',
        # events are leaves: the children list must be empty
        'children': And(list, lambda l: len(l) == 0)
    }
class TestNotificationUtils(OsfTestCase):
    """Tests for website.notifications.utils: subscription keys, lookup of
    user/node subscriptions, configured-project discovery, and the
    serialized data formats consumed by the settings UI."""
    def setUp(self):
        super(TestNotificationUtils, self).setUp()
        # One user with transactional subscriptions on: a project they
        # created, a child node of it, and their own comment_replies event.
        self.user = factories.UserFactory()
        self.project = factories.ProjectFactory(creator=self.user)
        self.project_subscription = factories.NotificationSubscriptionFactory(
            _id=self.project._id + '_' + 'comments',
            owner=self.project,
            event_name='comments'
        )
        self.project_subscription.save()
        self.project_subscription.email_transactional.append(self.user)
        self.project_subscription.save()
        self.node = factories.NodeFactory(parent=self.project, creator=self.user)
        self.node_subscription = factories.NotificationSubscriptionFactory(
            _id=self.node._id + '_' + 'comments',
            owner=self.node,
            event_name='comments'
        )
        self.node_subscription.save()
        self.node_subscription.email_transactional.append(self.user)
        self.node_subscription.save()
        self.user_subscription = factories.NotificationSubscriptionFactory(
            _id=self.user._id + '_' + 'comment_replies',
            owner=self.user,
            event_name='comment_replies'
        )
        self.user_subscription.save()
        self.user_subscription.email_transactional.append(self.user)
        self.user_subscription.save()
    def test_to_subscription_key(self):
        """Key format is '<uid>_<event>'."""
        key = utils.to_subscription_key('xyz', 'comments')
        assert_equal(key, 'xyz_comments')
    def test_from_subscription_key(self):
        """Parsing splits on the first '_' only; event may contain '_'."""
        parsed_key = utils.from_subscription_key('xyz_comment_replies')
        assert_equal(parsed_key, {
            'uid': 'xyz',
            'event': 'comment_replies'
        })
    def test_get_all_user_subscriptions(self):
        user_subscriptions = [x for x in utils.get_all_user_subscriptions(self.user)]
        assert_in(self.project_subscription, user_subscriptions)
        assert_in(self.node_subscription, user_subscriptions)
        assert_in(self.user_subscription, user_subscriptions)
        assert_equal(len(user_subscriptions), 3)
    def test_get_all_node_subscriptions_given_user_subscriptions(self):
        user_subscriptions = utils.get_all_user_subscriptions(self.user)
        node_subscriptions = [x for x in utils.get_all_node_subscriptions(self.user, self.node,
                                                                          user_subscriptions=user_subscriptions)]
        assert_equal(node_subscriptions, [self.node_subscription])
    def test_get_all_node_subscriptions_given_user_and_node(self):
        node_subscriptions = [x for x in utils.get_all_node_subscriptions(self.user, self.node)]
        assert_equal(node_subscriptions, [self.node_subscription])
    def test_get_configured_project_ids_does_not_return_user_or_node_ids(self):
        configured_ids = utils.get_configured_projects(self.user)
        # No duplicates!
        assert_equal(len(configured_ids), 1)
        assert_in(self.project._id, configured_ids)
        assert_not_in(self.node._id, configured_ids)
        assert_not_in(self.user._id, configured_ids)
    def test_get_configured_project_ids_excludes_deleted_projects(self):
        project = factories.ProjectFactory()
        subscription = factories.NotificationSubscriptionFactory(
            _id=project._id + '_' + 'comments',
            owner=project
        )
        subscription.save()
        subscription.email_transactional.append(self.user)
        subscription.save()
        project.is_deleted = True
        project.save()
        assert_not_in(project._id, utils.get_configured_projects(self.user))
    def test_get_configured_project_ids_excludes_node_with_project_category(self):
        """Only top-level projects count; a child with category='project'
        is still a node and must be excluded."""
        node = factories.NodeFactory(parent=self.project, category='project')
        node_subscription = factories.NotificationSubscriptionFactory(
            _id=node._id + '_' + 'comments',
            owner=node,
            event_name='comments'
        )
        node_subscription.save()
        node_subscription.email_transactional.append(self.user)
        node_subscription.save()
        assert_not_in(node._id, utils.get_configured_projects(self.user))
    def test_get_configured_project_ids_includes_top_level_private_projects_if_subscriptions_on_node(self):
        private_project = factories.ProjectFactory()
        node = factories.NodeFactory(parent=private_project)
        node_subscription = factories.NotificationSubscriptionFactory(
            _id=node._id + '_comments',
            owner=node,
            event_name='comments'
        )
        node_subscription.email_transactional.append(node.creator)
        node_subscription.save()
        configured_project_ids = utils.get_configured_projects(node.creator)
        assert_in(private_project._id, configured_project_ids)
    def test_get_configured_project_ids_excludes_private_projects_if_no_subscriptions_on_node(self):
        private_project = factories.ProjectFactory()
        node = factories.NodeFactory(parent=private_project)
        configured_project_ids = utils.get_configured_projects(node.creator)
        assert_not_in(private_project._id, configured_project_ids)
    def test_get_parent_notification_type(self):
        nt = utils.get_parent_notification_type(self.node, 'comments', self.user)
        assert_equal(nt, 'email_transactional')
    def test_get_parent_notification_type_no_parent_subscriptions(self):
        node = factories.NodeFactory()
        nt = utils.get_parent_notification_type(node._id, 'comments', self.user)
        assert_equal(nt, None)
    def test_get_parent_notification_type_no_parent(self):
        project = factories.ProjectFactory()
        nt = utils.get_parent_notification_type(project._id, 'comments', self.user)
        assert_equal(nt, None)
    def test_get_parent_notification_type_handles_user_id(self):
        # A user id has no parent node; must not raise, just return None.
        nt = utils.get_parent_notification_type(self.user._id, 'comments', self.user)
        assert_equal(nt, None)
    def test_format_data_project_settings(self):
        """Parent event has no parent_notification_type; child event reports
        the parent's 'email_transactional'."""
        data = utils.format_data(self.user, [self.project._id])
        parent_event = {
            'event': {
                'title': 'comments',
                'description': constants.NODE_SUBSCRIPTIONS_AVAILABLE['comments'],
                'notificationType': 'email_transactional',
                'parent_notification_type': None
            },
            'kind': 'event',
            'children': []
        }
        child_event = {
            'event': {
                'title': 'comments',
                'description': constants.NODE_SUBSCRIPTIONS_AVAILABLE['comments'],
                'notificationType': 'email_transactional',
                'parent_notification_type': 'email_transactional'
            },
            'kind': 'event',
            'children': []
        }
        expected_new = [['event'], 'event']
        schema = subscription_schema(self.project, expected_new)
        assert schema.validate(data)
        assert has(data, parent_event)
        assert has(data, child_event)
    def test_format_data_node_settings(self):
        data = utils.format_data(self.user, [self.node._id])
        event = {
            'event': {
                'title': 'comments',
                'description': constants.NODE_SUBSCRIPTIONS_AVAILABLE['comments'],
                'notificationType': 'email_transactional',
                'parent_notification_type': 'email_transactional'
            },
            'kind': 'event',
            'children': []
        }
        schema = subscription_schema(self.project, ['event'])
        assert schema.validate(data)
        assert has(data, event)
    def test_format_includes_admin_view_only_component_subscriptions(self):
        # Test private components in which parent project admins are not contributors still appear in their
        # notifications settings.
        node = factories.NodeFactory(parent=self.project)
        data = utils.format_data(self.user, [self.project._id])
        event = {
            'event': {
                'title': 'comments',
                'description': constants.NODE_SUBSCRIPTIONS_AVAILABLE['comments'],
                'notificationType': 'adopt_parent',
                'parent_notification_type': 'email_transactional'
            },
            'kind': 'event',
            'children': [],
        }
        schema = subscription_schema(self.project, ['event', ['event'], ['event']])
        assert schema.validate(data)
        assert has(data, event)
    def test_format_data_excludes_pointers(self):
        project = factories.ProjectFactory()
        subscription = factories.NotificationSubscriptionFactory(
            _id=project._id + '_comments',
            owner=project,
            event_name='comments'
        )
        subscription.email_transactional.append(project.creator)
        subscription.save()
        pointed = factories.ProjectFactory()
        project.add_pointer(pointed, Auth(project.creator))
        project.save()
        configured_project_ids = utils.get_configured_projects(project.creator)
        data = utils.format_data(project.creator, configured_project_ids)
        event = {
            'event': {
                'title': 'comments',
                'description': constants.NODE_SUBSCRIPTIONS_AVAILABLE['comments'],
                'notificationType': 'email_transactional',
                'parent_notification_type': None
            },
            'kind': 'event',
            'children': [],
        }
        # Schema with a single event: pointed project must not add children.
        schema = subscription_schema(self.project, ['event'])
        assert schema.validate(data)
        assert has(data, event)
    def test_format_data_user_subscriptions_includes_private_parent_if_configured_children(self):
        private_project = factories.ProjectFactory()
        node = factories.NodeFactory(parent=private_project)
        node_subscription = factories.NotificationSubscriptionFactory(
            _id=node._id + '_comments',
            owner=node,
            event_name='comments'
        )
        node_subscription.email_transactional.append(node.creator)
        node_subscription.save()
        configured_project_ids = utils.get_configured_projects(node.creator)
        data = utils.format_data(node.creator, configured_project_ids)
        event = {
            'event': {
                'title': 'comments',
                'description': constants.NODE_SUBSCRIPTIONS_AVAILABLE['comments'],
                'notificationType': 'email_transactional',
                'parent_notification_type': None
            },
            'kind': 'event',
            'children': [],
        }
        schema = subscription_schema(self.project, ['event', ['event']])
        assert schema.validate(data)
        assert has(data, event)
    def test_format_user_subscriptions(self):
        data = utils.format_user_subscriptions(self.user)
        expected = [{
            'event': {
                'title': 'comment_replies',
                'description': constants.USER_SUBSCRIPTIONS_AVAILABLE['comment_replies'],
                'notificationType': 'email_transactional',
                'parent_notification_type': None
            },
            'kind': 'event',
            'children': [],
        }]
        assert_equal(data, expected)
    def test_format_data_user_settings(self):
        """Combined settings payload: a user heading then a projects heading."""
        data = utils.format_user_and_project_subscriptions(self.user)
        expected = [
            {
                'node': {
                    'id': self.user._id,
                    'title': 'User Notifications'
                },
                'kind': 'heading',
                'children': utils.format_user_subscriptions(self.user)
            },
            {
                'node': {
                    'id': '',
                    'title': 'Project Notifications'
                },
                'kind': 'heading',
                'children': utils.format_data(self.user, utils.get_configured_projects(self.user))
            }]
        assert_equal(data, expected)
    def test_serialize_user_level_event(self):
        user_subscriptions = [x for x in utils.get_all_user_subscriptions(self.user)]
        user_subscription = None
        for subscription in user_subscriptions:
            if 'comment_replies' in getattr(subscription, 'event_name'):
                user_subscription = subscription
        data = utils.serialize_event(self.user, event_description='comment_replies',
                                     subscription=user_subscription)
        expected = {
            'event': {
                'title': 'comment_replies',
                'description': constants.USER_SUBSCRIPTIONS_AVAILABLE['comment_replies'],
                'notificationType': 'email_transactional',
                'parent_notification_type': None
            },
            'kind': 'event',
            'children': []
        }
        assert_equal(data, expected)
    def test_serialize_node_level_event(self):
        node_subscriptions = [x for x in utils.get_all_node_subscriptions(self.user, self.node)]
        data = utils.serialize_event(user=self.user, event_description='comments',
                                     subscription=node_subscriptions[0], node=self.node)
        expected = {
            'event': {
                'title': 'comments',
                'description': constants.NODE_SUBSCRIPTIONS_AVAILABLE['comments'],
                'notificationType': 'email_transactional',
                'parent_notification_type': 'email_transactional'
            },
            'kind': 'event',
            'children': [],
        }
        assert_equal(data, expected)
    def test_serialize_node_level_event_that_adopts_parent_settings(self):
        user = factories.UserFactory()
        self.project.add_contributor(contributor=user, permissions=['read'])
        self.project.save()
        self.project_subscription.email_transactional.append(user)
        self.project_subscription.save()
        self.node.add_contributor(contributor=user, permissions=['read'])
        self.node.save()
        # NOTE(review): the whole (empty) list is passed as `subscription`
        # here, unlike node_subscriptions[0] above — presumably deliberate
        # to exercise the no-direct-subscription path; confirm.
        node_subscriptions = [x for x in utils.get_all_node_subscriptions(user, self.node)]
        data = utils.serialize_event(user=user, event_description='comments',
                                     subscription=node_subscriptions, node=self.node)
        expected = {
            'event': {
                'title': 'comments',
                'description': constants.NODE_SUBSCRIPTIONS_AVAILABLE['comments'],
                'notificationType': 'adopt_parent',
                'parent_notification_type': 'email_transactional'
            },
            'kind': 'event',
            'children': [],
        }
        assert_equal(data, expected)
class TestNotificationsDict(OsfTestCase):
    """Behavioral check of utils.NotificationsDict message nesting."""
    def test_notifications_dict_add_message_returns_proper_format(self):
        """add_message stores each message at the depth given by its key path."""
        notifications = utils.NotificationsDict()
        first_message = {
            'message': 'Freddie commented on your project',
            'timestamp': datetime.datetime.utcnow().replace(tzinfo=pytz.utc)
        }
        second_message = {
            'message': 'Mercury commented on your component',
            'timestamp': datetime.datetime.utcnow().replace(tzinfo=pytz.utc)
        }
        notifications.add_message(['project'], first_message)
        notifications.add_message(['project', 'node'], second_message)
        expected = {
            'messages': [],
            'children': collections.defaultdict(
                utils.NotificationsDict, {
                    'project': {
                        'messages': [first_message],
                        'children': collections.defaultdict(utils.NotificationsDict, {
                            'node': {
                                'messages': [second_message],
                                'children': collections.defaultdict(utils.NotificationsDict, {})
                            }
                        })
                    }
                }
            )}
        assert_equal(notifications, expected)
class TestCompileSubscriptions(OsfTestCase):
    """Tests for emails.compile_subscriptions: which users end up in which
    notification bucket for an event, walking up the node tree and
    respecting per-node permissions and child-overrides-parent precedence."""
    def setUp(self):
        super(TestCompileSubscriptions, self).setUp()
        self.user_1 = factories.UserFactory()
        self.user_2 = factories.UserFactory()
        self.user_3 = factories.UserFactory()
        self.user_4 = factories.UserFactory()
        # Base project + 1 project shared with 3 + 1 project shared with 2
        self.base_project = factories.ProjectFactory(is_public=False, creator=self.user_1)
        self.shared_node = factories.NodeFactory(parent=self.base_project, is_public=False, creator=self.user_1)
        self.private_node = factories.NodeFactory(parent=self.base_project, is_public=False, creator=self.user_1)
        # Adding contributors
        for node in [self.base_project, self.shared_node, self.private_node]:
            node.add_contributor(self.user_2, permissions='admin')
        self.base_project.add_contributor(self.user_3, permissions='write')
        self.shared_node.add_contributor(self.user_3, permissions='write')
        # Setting basic subscriptions: one 'file_updated' sub per node,
        # with no subscribers attached yet (tests add them as needed).
        self.base_sub = factories.NotificationSubscriptionFactory(
            _id=self.base_project._id + '_file_updated',
            owner=self.base_project,
            event_name='file_updated'
        )
        self.base_sub.save()
        self.shared_sub = factories.NotificationSubscriptionFactory(
            _id=self.shared_node._id + '_file_updated',
            owner=self.shared_node,
            event_name='file_updated'
        )
        self.shared_sub.save()
        self.private_sub = factories.NotificationSubscriptionFactory(
            _id=self.private_node._id + '_file_updated',
            owner=self.private_node,
            event_name='file_updated'
        )
        self.private_sub.save()
    def test_no_subscription(self):
        """No subscription at all yields three empty buckets."""
        node = factories.NodeFactory()
        result = emails.compile_subscriptions(node, 'file_updated')
        assert_equal({'email_transactional': [], 'none': [], 'email_digest': []}, result)
    def test_no_subscribers(self):
        """A subscription with no subscribers also yields empty buckets."""
        node = factories.NodeFactory()
        node_sub = factories.NotificationSubscriptionFactory(
            _id=node._id + '_file_updated',
            owner=node,
            event_name='file_updated'
        )
        node_sub.save()
        result = emails.compile_subscriptions(node, 'file_updated')
        assert_equal({'email_transactional': [], 'none': [], 'email_digest': []}, result)
    def test_creator_subbed_parent(self):
        # Basic sub check
        self.base_sub.email_transactional.append(self.user_1)
        self.base_sub.save()
        result = emails.compile_subscriptions(self.base_project, 'file_updated')
        assert_equal({'email_transactional': [self.user_1._id], 'none': [], 'email_digest': []}, result)
    def test_creator_subbed_to_parent_from_child(self):
        # checks the parent sub is the one to appear without a child sub
        self.base_sub.email_transactional.append(self.user_1)
        self.base_sub.save()
        result = emails.compile_subscriptions(self.shared_node, 'file_updated')
        assert_equal({'email_transactional': [self.user_1._id], 'none': [], 'email_digest': []}, result)
    def test_creator_subbed_to_both_from_child(self):
        # checks that only one sub is in the list.
        self.base_sub.email_transactional.append(self.user_1)
        self.base_sub.save()
        self.shared_sub.email_transactional.append(self.user_1)
        self.shared_sub.save()
        result = emails.compile_subscriptions(self.shared_node, 'file_updated')
        assert_equal({'email_transactional': [self.user_1._id], 'none': [], 'email_digest': []}, result)
    def test_creator_diff_subs_to_both_from_child(self):
        # Check that the child node sub overrides the parent node sub
        self.base_sub.email_transactional.append(self.user_1)
        self.base_sub.save()
        self.shared_sub.none.append(self.user_1)
        self.shared_sub.save()
        result = emails.compile_subscriptions(self.shared_node, 'file_updated')
        assert_equal({'email_transactional': [], 'none': [self.user_1._id], 'email_digest': []}, result)
    def test_user_wo_permission_on_child_node_not_listed(self):
        # Tests to see if a user without permission gets an Email about a node they cannot see.
        self.base_sub.email_transactional.append(self.user_3)
        self.base_sub.save()
        result = emails.compile_subscriptions(self.private_node, 'file_updated')
        assert_equal({'email_transactional': [], 'none': [], 'email_digest': []}, result)
    def test_several_nodes_deep(self):
        """The walk up the tree spans arbitrarily many unsubscribed levels."""
        self.base_sub.email_transactional.append(self.user_1)
        self.base_sub.save()
        node2 = factories.NodeFactory(parent=self.shared_node)
        node3 = factories.NodeFactory(parent=node2)
        node4 = factories.NodeFactory(parent=node3)
        node5 = factories.NodeFactory(parent=node4)
        subs = emails.compile_subscriptions(node5, 'file_updated')
        assert_equal(subs, {'email_transactional': [self.user_1._id], 'email_digest': [], 'none': []})
    def test_several_nodes_deep_precedence(self):
        """The nearest ancestor's subscription wins over more distant ones."""
        self.base_sub.email_transactional.append(self.user_1)
        self.base_sub.save()
        node2 = factories.NodeFactory(parent=self.shared_node)
        node3 = factories.NodeFactory(parent=node2)
        node4 = factories.NodeFactory(parent=node3)
        node4_subscription = factories.NotificationSubscriptionFactory(
            _id=node4._id + '_file_updated',
            owner=node4,
            event_name='file_updated'
        )
        node4_subscription.save()
        node4_subscription.email_digest.append(self.user_1)
        node4_subscription.save()
        node5 = factories.NodeFactory(parent=node4)
        subs = emails.compile_subscriptions(node5, 'file_updated')
        assert_equal(subs, {'email_transactional': [], 'email_digest': [self.user_1._id], 'none': []})
class TestMoveSubscription(OsfTestCase):
    """Tests for moving a file subscription between nodes: separating users
    who keep access from those who lose it (users_to_remove) and rewriting
    the subscription record itself (move_subscription)."""
    def setUp(self):
        super(TestMoveSubscription, self).setUp()
        self.blank = {key: [] for key in constants.NOTIFICATION_TYPES}  # For use where it is blank.
        self.user_1 = factories.AuthUserFactory()
        self.auth = Auth(user=self.user_1)
        self.user_2 = factories.AuthUserFactory()
        self.user_3 = factories.AuthUserFactory()
        self.user_4 = factories.AuthUserFactory()
        self.project = factories.ProjectFactory(creator=self.user_1)
        self.private_node = factories.NodeFactory(parent=self.project, is_public=False, creator=self.user_1)
        # Project-level file_updated subscription with the creator subscribed.
        self.sub = factories.NotificationSubscriptionFactory(
            _id=self.project._id + '_file_updated',
            owner=self.project,
            event_name='file_updated'
        )
        self.sub.email_transactional.extend([self.user_1])
        self.sub.save()
        # Per-file subscription ('xyz42' stands in for a file id).
        self.file_sub = factories.NotificationSubscriptionFactory(
            _id=self.project._id + '_xyz42_file_updated',
            owner=self.project,
            event_name='xyz42_file_updated'
        )
        self.file_sub.save()
    def test_separate_users(self):
        """Users with access to the target node are 'subbed'; others removed."""
        self.private_node.add_contributor(self.user_2, permissions=['admin', 'write', 'read'], auth=self.auth)
        self.private_node.add_contributor(self.user_3, permissions=['write', 'read'], auth=self.auth)
        self.private_node.save()
        subbed, removed = utils.separate_users(
            self.private_node, [self.user_2._id, self.user_3._id, self.user_4._id]
        )
        assert_equal([self.user_2._id, self.user_3._id], subbed)
        assert_equal([self.user_4._id], removed)
    def test_event_subs_same(self):
        self.file_sub.email_transactional.extend([self.user_2, self.user_3, self.user_4])
        self.file_sub.save()
        self.private_node.add_contributor(self.user_2, permissions=['admin', 'write', 'read'], auth=self.auth)
        self.private_node.add_contributor(self.user_3, permissions=['write', 'read'], auth=self.auth)
        self.private_node.save()
        results = utils.users_to_remove('xyz42_file_updated', self.project, self.private_node)
        assert_equal({'email_transactional': [self.user_4._id], 'email_digest': [], 'none': []}, results)
    def test_event_nodes_same(self):
        """Moving within the same node removes nobody."""
        self.file_sub.email_transactional.extend([self.user_2, self.user_3, self.user_4])
        self.file_sub.save()
        self.private_node.add_contributor(self.user_2, permissions=['admin', 'write', 'read'], auth=self.auth)
        self.private_node.add_contributor(self.user_3, permissions=['write', 'read'], auth=self.auth)
        self.private_node.save()
        results = utils.users_to_remove('xyz42_file_updated', self.project, self.project)
        assert_equal({'email_transactional': [], 'email_digest': [], 'none': []}, results)
    def test_move_sub(self):
        # Tests old sub is replaced with new sub.
        utils.move_subscription(self.blank, 'xyz42_file_updated', self.project, 'abc42_file_updated', self.private_node)
        assert_equal('abc42_file_updated', self.file_sub.event_name)
        assert_equal(self.private_node, self.file_sub.owner)
        assert_equal(self.private_node._id + '_abc42_file_updated', self.file_sub._id)
    def test_move_sub_with_none(self):
        # Attempt to reproduce an error that is seen when moving files
        self.project.add_contributor(self.user_2, permissions=['write', 'read'], auth=self.auth)
        self.project.save()
        self.file_sub.none.append(self.user_2)
        self.file_sub.save()
        results = utils.users_to_remove('xyz42_file_updated', self.project, self.private_node)
        assert_equal({'email_transactional': [], 'email_digest': [], 'none': [self.user_2._id]}, results)
    def test_remove_one_user(self):
        # One user doesn't have permissions on the node the sub is moved to. Should be listed.
        self.file_sub.email_transactional.extend([self.user_2, self.user_3, self.user_4])
        self.file_sub.save()
        self.private_node.add_contributor(self.user_2, permissions=['admin', 'write', 'read'], auth=self.auth)
        self.private_node.add_contributor(self.user_3, permissions=['write', 'read'], auth=self.auth)
        self.private_node.save()
        results = utils.users_to_remove('xyz42_file_updated', self.project, self.private_node)
        assert_equal({'email_transactional': [self.user_4._id], 'email_digest': [], 'none': []}, results)
    def test_remove_one_user_warn_another(self):
        # Two users do not have permissions on new node, but one has a project sub. Both should be listed.
        self.private_node.add_contributor(self.user_2, permissions=['admin', 'write', 'read'], auth=self.auth)
        self.private_node.save()
        self.project.add_contributor(self.user_3, permissions=['write', 'read'], auth=self.auth)
        self.project.save()
        self.sub.email_digest.append(self.user_3)
        self.sub.save()
        # NOTE(review): no .save() after this extend, unlike sibling tests —
        # presumably users_to_remove reads the in-memory list; confirm.
        self.file_sub.email_transactional.extend([self.user_2, self.user_4])
        results = utils.users_to_remove('xyz42_file_updated', self.project, self.private_node)
        utils.move_subscription(results, 'xyz42_file_updated', self.project, 'abc42_file_updated', self.private_node)
        assert_equal({'email_transactional': [self.user_4._id], 'email_digest': [self.user_3._id], 'none': []}, results)
        assert_in(self.user_3, self.sub.email_digest)  # Is not removed from the project subscription.
    def test_warn_user(self):
        # One user with a project sub does not have permission on new node. User should be listed.
        self.private_node.add_contributor(self.user_2, permissions=['admin', 'write', 'read'], auth=self.auth)
        self.private_node.save()
        self.project.add_contributor(self.user_3, permissions=['write', 'read'], auth=self.auth)
        self.project.save()
        self.sub.email_digest.append(self.user_3)
        self.sub.save()
        self.file_sub.email_transactional.extend([self.user_2])
        results = utils.users_to_remove('xyz42_file_updated', self.project, self.private_node)
        utils.move_subscription(results, 'xyz42_file_updated', self.project, 'abc42_file_updated', self.private_node)
        assert_equal({'email_transactional': [], 'email_digest': [self.user_3._id], 'none': []}, results)
        assert_in(self.user_3, self.sub.email_digest)  # Is not removed from the project subscription.
    def test_user_node_subbed_and_not_removed(self):
        """A user subscribed at project level with access to the target node
        must not be added to the moved file subscription."""
        self.project.add_contributor(self.user_3, permissions=['write', 'read'], auth=self.auth)
        self.project.save()
        self.private_node.add_contributor(self.user_3, permissions=['write', 'read'], auth=self.auth)
        self.private_node.save()
        self.sub.email_digest.append(self.user_3)
        self.sub.save()
        utils.move_subscription(self.blank, 'xyz42_file_updated', self.project, 'abc42_file_updated', self.private_node)
        assert_equal([], self.file_sub.email_digest)
class TestSendEmails(OsfTestCase):
    """Tests for ``emails.notify`` dispatch and the email helper functions."""
    def setUp(self):
        super(TestSendEmails, self).setUp()
        self.user = factories.AuthUserFactory()
        self.project = factories.ProjectFactory()
        # The project creator gets transactional emails for 'comments'.
        self.project_subscription = factories.NotificationSubscriptionFactory(
            _id=self.project._id + '_' + 'comments',
            owner=self.project,
            event_name='comments'
        )
        self.project_subscription.save()
        self.project_subscription.email_transactional.append(self.project.creator)
        self.project_subscription.save()
        # Child component with an empty 'comments' subscription of its own.
        self.node = factories.NodeFactory(parent=self.project)
        self.node_subscription = factories.NotificationSubscriptionFactory(
            _id=self.node._id + '_comments',
            owner=self.node,
            event_name='comments'
        )
        self.node_subscription.save()
        # User-level subscription to direct replies to their own comments.
        self.user_subscription = factories.NotificationSubscriptionFactory(
            _id=self.user._id + '_' + 'comment_replies',
            owner=self.user,
            event_name='comment_replies',
            email_transactional=[self.user._id]
        )
    @mock.patch('website.notifications.emails.store_emails')
    def test_notify_no_subscription(self, mock_store):
        # A node with no subscription at all produces no stored emails.
        node = factories.NodeFactory()
        emails.notify('comments', user=self.user, node=node, timestamp=datetime.datetime.utcnow())
        assert_false(mock_store.called)
    @mock.patch('website.notifications.emails.store_emails')
    def test_notify_no_subscribers(self, mock_store):
        # A subscription with no subscribers also produces no stored emails.
        node = factories.NodeFactory()
        node_subscription = factories.NotificationSubscriptionFactory(
            _id=node._id + '_comments',
            owner=node,
            event_name='comments'
        )
        node_subscription.save()
        emails.notify('comments', user=self.user, node=node, timestamp=datetime.datetime.utcnow())
        assert_false(mock_store.called)
    @mock.patch('website.notifications.emails.store_emails')
    def test_notify_sends_with_correct_args(self, mock_store):
        # The project creator's transactional subscription is honored.
        time_now = datetime.datetime.utcnow()
        emails.notify('comments', user=self.user, node=self.node, timestamp=time_now)
        assert_true(mock_store.called)
        mock_store.assert_called_with([self.project.creator._id], 'email_transactional', 'comments', self.user,
                                      self.node, time_now)
    @mock.patch('website.notifications.emails.store_emails')
    def test_notify_does_not_send_to_users_subscribed_to_none(self, mock_store):
        # Users on the 'none' list are excluded and notify returns an empty list.
        node = factories.NodeFactory()
        user = factories.UserFactory()
        node_subscription = factories.NotificationSubscriptionFactory(
            _id=node._id + '_comments',
            owner=node,
            event_name='comments'
        )
        node_subscription.save()
        node_subscription.none.append(user)
        node_subscription.save()
        sent = emails.notify('comments', user=user, node=node, timestamp=datetime.datetime.utcnow())
        assert_false(mock_store.called)
        assert_equal(sent, [])
    @mock.patch('website.notifications.emails.store_emails')
    def test_notify_sends_comment_reply_event_if_comment_is_direct_reply(self, mock_store):
        # A comment targeting another user is delivered as 'comment_replies'.
        time_now = datetime.datetime.utcnow()
        emails.notify('comments', user=self.user, node=self.node, timestamp=time_now, target_user=self.project.creator)
        mock_store.assert_called_with([self.project.creator._id], 'email_transactional', 'comment_replies',
                                      self.user, self.node, time_now, target_user=self.project.creator)
    @mock.patch('website.notifications.emails.store_emails')
    def test_notify_sends_comment_reply_when_target_user_is_subscribed_via_user_settings(self, mock_store):
        # The user-level 'comment_replies' subscription set up in setUp applies.
        time_now = datetime.datetime.utcnow()
        emails.notify('comment_replies', user=self.project.creator, node=self.node, timestamp=time_now, target_user=self.user)
        mock_store.assert_called_with([self.user._id], 'email_transactional', 'comment_replies',
                                      self.project.creator, self.node, time_now, target_user=self.user)
    @mock.patch('website.notifications.emails.store_emails')
    def test_notify_sends_comment_event_if_comment_reply_is_not_direct_reply(self, mock_store):
        # target_user == commenting user, so the plain 'comments' event is used.
        user = factories.UserFactory()
        time_now = datetime.datetime.utcnow()
        emails.notify('comments', user=user, node=self.node, timestamp=time_now, target_user=user)
        mock_store.assert_called_with([self.project.creator._id], 'email_transactional', 'comments', user,
                                      self.node, time_now, target_user=user)
    @mock.patch('website.mails.send_mail')
    @mock.patch('website.notifications.emails.store_emails')
    def test_notify_does_not_send_comment_if_they_reply_to_their_own_comment(self, mock_store, mock_send_mail):
        # Self-replies never generate an email.
        time_now = datetime.datetime.utcnow()
        emails.notify('comments', user=self.project.creator, node=self.project, timestamp=time_now,
                      target_user=self.project.creator)
        assert_false(mock_store.called)
        assert_false(mock_send_mail.called)
    @mock.patch('website.notifications.emails.store_emails')
    def test_notify_sends_comment_event_if_comment_reply_is_not_direct_reply_on_component(self, mock_store):
        # Test that comment replies on components that are not direct replies to the subscriber use the
        # "comments" email template.
        user = factories.UserFactory()
        time_now = datetime.datetime.utcnow()
        emails.notify('comments', user, self.node, time_now, target_user=user)
        mock_store.assert_called_with([self.project.creator._id], 'email_transactional', 'comments', user,
                                      self.node, time_now, target_user=user)
    def test_check_node_node_none(self):
        # A missing node yields an empty subscriber mapping, not an error.
        subs = emails.check_node(None, 'comments')
        assert_equal(subs, {'email_transactional': [], 'email_digest': [], 'none': []})
    def test_check_node_one(self):
        subs = emails.check_node(self.project, 'comments')
        assert_equal(subs, {'email_transactional': [self.project.creator._id], 'email_digest': [], 'none': []})
    @mock.patch('website.project.views.comment.notify')
    def test_check_user_comment_reply_subscription_if_email_not_sent_to_target_user(self, mock_notify):
        # user subscribed to comment replies
        # NOTE(review): the subscription _id suffix is '_comments' while
        # event_name is 'comment_replies' — confirm this mismatch is intended.
        user = factories.UserFactory()
        user_subscription = factories.NotificationSubscriptionFactory(
            _id=user._id + '_comments',
            owner=user,
            event_name='comment_replies'
        )
        user_subscription.email_transactional.append(user)
        user_subscription.save()
        # user is not subscribed to project comment notifications
        project = factories.ProjectFactory()
        # user comments on project
        target = factories.CommentFactory(node=project, user=user)
        content = 'hammer to fall'
        # reply to user (note: notify is called from Comment.create)
        reply = Comment.create(
            auth=Auth(project.creator),
            user=project.creator,
            node=project,
            content=content,
            target=Guid.load(target._id),
            is_public=True,
        )
        assert_true(mock_notify.called)
        assert_equal(mock_notify.call_count, 2)
    def test_get_settings_url_for_node(self):
        url = emails.get_settings_url(self.project._id, self.user)
        assert_equal(url, self.project.absolute_url + 'settings/')
    def test_get_settings_url_for_user(self):
        url = emails.get_settings_url(self.user._id, self.user)
        assert_equal(url, web_url_for('user_notifications', _absolute=True))
    def test_get_node_lineage(self):
        # Lineage is ordered root-first: [project, child node].
        node_lineage = emails.get_node_lineage(self.node)
        assert_equal(node_lineage, [self.project._id, self.node._id])
    def test_localize_timestamp(self):
        # Expected string is rebuilt with babel using the user's own tz/locale.
        timestamp = datetime.datetime.utcnow().replace(tzinfo=pytz.utc)
        self.user.timezone = 'America/New_York'
        self.user.locale = 'en_US'
        self.user.save()
        tz = dates.get_timezone(self.user.timezone)
        locale = Locale(self.user.locale)
        formatted_date = dates.format_date(timestamp, format='full', locale=locale)
        formatted_time = dates.format_time(timestamp, format='short', tzinfo=tz, locale=locale)
        formatted_datetime = u'{time} on {date}'.format(time=formatted_time, date=formatted_date)
        assert_equal(emails.localize_timestamp(timestamp, self.user), formatted_datetime)
    def test_localize_timestamp_empty_timezone(self):
        # An empty timezone falls back to Etc/UTC.
        timestamp = datetime.datetime.utcnow().replace(tzinfo=pytz.utc)
        self.user.timezone = ''
        self.user.locale = 'en_US'
        self.user.save()
        tz = dates.get_timezone('Etc/UTC')
        locale = Locale(self.user.locale)
        formatted_date = dates.format_date(timestamp, format='full', locale=locale)
        formatted_time = dates.format_time(timestamp, format='short', tzinfo=tz, locale=locale)
        formatted_datetime = u'{time} on {date}'.format(time=formatted_time, date=formatted_date)
        assert_equal(emails.localize_timestamp(timestamp, self.user), formatted_datetime)
    def test_localize_timestamp_empty_locale(self):
        # An empty locale falls back to 'en'.
        timestamp = datetime.datetime.utcnow().replace(tzinfo=pytz.utc)
        self.user.timezone = 'America/New_York'
        self.user.locale = ''
        self.user.save()
        tz = dates.get_timezone(self.user.timezone)
        locale = Locale('en')
        formatted_date = dates.format_date(timestamp, format='full', locale=locale)
        formatted_time = dates.format_time(timestamp, format='short', tzinfo=tz, locale=locale)
        formatted_datetime = u'{time} on {date}'.format(time=formatted_time, date=formatted_date)
        assert_equal(emails.localize_timestamp(timestamp, self.user), formatted_datetime)
    def test_localize_timestamp_handles_unicode(self):
        # Non-ASCII locale output (ru_RU) must round-trip unchanged.
        timestamp = datetime.datetime.utcnow().replace(tzinfo=pytz.utc)
        self.user.timezone = 'Europe/Moscow'
        self.user.locale = 'ru_RU'
        self.user.save()
        tz = dates.get_timezone(self.user.timezone)
        locale = Locale(self.user.locale)
        formatted_date = dates.format_date(timestamp, format='full', locale=locale)
        formatted_time = dates.format_time(timestamp, format='short', tzinfo=tz, locale=locale)
        formatted_datetime = u'{time} on {date}'.format(time=formatted_time, date=formatted_date)
        assert_equal(emails.localize_timestamp(timestamp, self.user), formatted_datetime)
class TestSendDigest(OsfTestCase):
    """Tests for grouping, sending and purging NotificationDigest records."""
    def setUp(self):
        super(TestSendDigest, self).setUp()
        self.user_1 = factories.UserFactory()
        self.user_2 = factories.UserFactory()
        self.project = factories.ProjectFactory()
        self.timestamp = datetime.datetime.utcnow()
    def test_group_notifications_by_user_transactional(self):
        # Only 'email_transactional' digests are grouped; the 'email_digest'
        # record (d3) must be filtered out.
        send_type = 'email_transactional'
        d = factories.NotificationDigestFactory(
            user_id=self.user_1._id,
            send_type=send_type,
            timestamp=self.timestamp,
            message='Hello',
            node_lineage=[self.project._id]
        )
        d.save()
        d2 = factories.NotificationDigestFactory(
            user_id=self.user_2._id,
            send_type=send_type,
            timestamp=self.timestamp,
            message='Hello',
            node_lineage=[self.project._id]
        )
        d2.save()
        d3 = factories.NotificationDigestFactory(
            user_id=self.user_2._id,
            send_type='email_digest',
            timestamp=self.timestamp,
            message='Hello, but this should not appear (this is a digest)',
            node_lineage=[self.project._id]
        )
        d3.save()
        user_groups = get_users_emails(send_type)
        expected = [
            {
                u'user_id': self.user_1._id,
                u'info': [{
                    u'message': u'Hello',
                    u'node_lineage': [unicode(self.project._id)],
                    u'_id': d._id
                }]
            },
            {
                u'user_id': self.user_2._id,
                u'info': [{
                    u'message': u'Hello',
                    u'node_lineage': [unicode(self.project._id)],
                    u'_id': d2._id
                }]
            }
        ]
        assert_equal(len(user_groups), 2)
        assert_equal(user_groups, expected)
        # Clean up so digests do not leak into other tests.
        digest_ids = [d._id, d2._id, d3._id]
        remove_notifications(email_notification_ids=digest_ids)
    def test_group_notifications_by_user_digest(self):
        # Mirror of the transactional test: the transactional record (d3)
        # must be filtered out of the 'email_digest' grouping.
        send_type = 'email_digest'
        d = factories.NotificationDigestFactory(
            user_id=self.user_1._id,
            send_type=send_type,
            timestamp=self.timestamp,
            message='Hello',
            node_lineage=[self.project._id]
        )
        d.save()
        d2 = factories.NotificationDigestFactory(
            user_id=self.user_2._id,
            send_type=send_type,
            timestamp=self.timestamp,
            message='Hello',
            node_lineage=[self.project._id]
        )
        d2.save()
        d3 = factories.NotificationDigestFactory(
            user_id=self.user_2._id,
            send_type='email_transactional',
            timestamp=self.timestamp,
            message='Hello, but this should not appear (this is transactional)',
            node_lineage=[self.project._id]
        )
        d3.save()
        user_groups = get_users_emails(send_type)
        expected = [
            {
                u'user_id': self.user_1._id,
                u'info': [{
                    u'message': u'Hello',
                    u'node_lineage': [unicode(self.project._id)],
                    u'_id': d._id
                }]
            },
            {
                u'user_id': self.user_2._id,
                u'info': [{
                    u'message': u'Hello',
                    u'node_lineage': [unicode(self.project._id)],
                    u'_id': d2._id
                }]
            }
        ]
        assert_equal(len(user_groups), 2)
        assert_equal(user_groups, expected)
        digest_ids = [d._id, d2._id, d3._id]
        remove_notifications(email_notification_ids=digest_ids)
    @mock.patch('website.mails.send_mail')
    def test_send_users_email_called_with_correct_args(self, mock_send_mail):
        send_type = 'email_transactional'
        d = factories.NotificationDigestFactory(
            user_id=factories.UserFactory()._id,
            send_type=send_type,
            timestamp=datetime.datetime.utcnow(),
            message='Hello',
            node_lineage=[factories.ProjectFactory()._id]
        )
        d.save()
        user_groups = get_users_emails(send_type)
        send_users_email(send_type)
        assert_true(mock_send_mail.called)
        assert_equals(mock_send_mail.call_count, len(user_groups))
        last_user_index = len(user_groups) - 1
        user = User.load(user_groups[last_user_index]['user_id'])
        email_notification_ids = [message['_id'] for message in user_groups[last_user_index]['info']]
        args, kwargs = mock_send_mail.call_args
        assert_equal(kwargs['to_addr'], user.username)
        assert_equal(kwargs['mimetype'], 'html')
        assert_equal(kwargs['mail'], mails.DIGEST)
        assert_equal(kwargs['name'], user.fullname)
        message = group_by_node(user_groups[last_user_index]['info'])
        assert_equal(kwargs['message'], message)
        # NOTE(review): this compares kwargs['callback'] against the RETURN
        # VALUE of remove_notifications(...) (likely None), not against a
        # callable — and calling it here also deletes the digests as a side
        # effect. Confirm this is the intended assertion.
        assert_equal(kwargs['callback'], remove_notifications(email_notification_ids=email_notification_ids))
    def test_remove_sent_digest_notifications(self):
        d = factories.NotificationDigestFactory(
            user_id=factories.UserFactory()._id,
            timestamp=datetime.datetime.utcnow(),
            message='Hello',
            node_lineage=[factories.ProjectFactory()._id]
        )
        digest_id = d._id
        remove_notifications(email_notification_ids=[digest_id])
        # The digest must no longer be findable after removal.
        with assert_raises(NoResultsFound):
            NotificationDigest.find_one(Q('_id', 'eq', digest_id))
| brandonPurvis/osf.io | tests/test_notifications.py | Python | apache-2.0 | 59,970 |
#!/usr/bin/env python
import logging
from tornado.ioloop import IOLoop
from stormed import Connection, Message
def fib(n):
    """Return the n-th Fibonacci number (fib(0) == 0, fib(1) == 1).

    The original naive double recursion is O(phi**n), which makes this RPC
    worker pathologically slow for even moderate ``n``; the iterative loop
    below is O(n) time and O(1) space and returns the same values.

    :param n: non-negative index into the Fibonacci sequence.
    :returns: the n-th Fibonacci number as an int.
    """
    a, b = 0, 1
    for _ in range(n):
        a, b = b, a + b
    return a
def on_connect():
    """Broker-connected callback: open a channel and start consuming.

    Stores the channel in the module-global ``ch`` so on_request() can
    publish replies on it.
    """
    global ch
    ch = conn.channel()
    # Durable queue survives broker restarts.
    ch.queue_declare(queue='rpc_queue', durable=True)
    # Hand this worker at most one unacknowledged message at a time.
    ch.qos(prefetch_count=1)
    ch.consume('rpc_queue', on_request)
def on_request(msg):
    """Handle one RPC request: compute fib(body) and publish the reply.

    The reply goes to the queue named in msg.reply_to, carrying the
    request's correlation_id so the client can match it up.
    """
    n = int(msg.body)
    print " [.] fib(%s)" % n
    response = str(fib(n))
    # delivery_mode=2 marks the reply persistent.
    response_msg = Message(response, delivery_mode=2,
                           correlation_id=msg.correlation_id)
    ch.publish(response_msg, exchange='', routing_key=msg.reply_to)
    # Ack only after the reply is published, so a crash re-queues the request.
    msg.ack()
# Script entry: connect to the local broker and run the Tornado IO loop
# until interrupted; Ctrl+C closes the connection and stops the loop.
logging.basicConfig()
ch = None  # channel, populated by on_connect()
conn = Connection(host='localhost')
conn.connect(on_connect)
io_loop = IOLoop.instance()
print ' [*] Waiting for messages. To exit press CTRL+C'
try:
    io_loop.start()
except KeyboardInterrupt:
    conn.close(io_loop.stop)
| SimonWang2014/DockerConsoleApp | libs/stormed-amqp/examples/tutorial6/rpc_server.py | Python | apache-2.0 | 972 |
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Common functionalities shared between different DRAC modules.
"""
from oslo.utils import importutils
from ironic.common import exception
from ironic.common.i18n import _
from ironic.drivers.modules.drac import client as drac_client
pywsman = importutils.try_import('pywsman')
# driver_info entries that must be present on a node (see parse_driver_info).
REQUIRED_PROPERTIES = {
    'drac_host': _('IP address or hostname of the DRAC card. Required.'),
    'drac_username': _('username used for authentication. Required.'),
    'drac_password': _('password used for authentication. Required.')
}
# driver_info entries that fall back to defaults when absent.
OPTIONAL_PROPERTIES = {
    'drac_port': _('port used for WS-Man endpoint; default is 443. Optional.'),
    'drac_path': _('path used for WS-Man endpoint; default is "/wsman". '
                   'Optional.'),
    'drac_protocol': _('protocol used for WS-Man endpoint; one of http, https;'
                       ' default is "https". Optional.'),
}
# Full set of recognized properties (required + optional).
COMMON_PROPERTIES = REQUIRED_PROPERTIES.copy()
COMMON_PROPERTIES.update(OPTIONAL_PROPERTIES)
# ReturnValue constants reported in WS-Man responses.
RET_SUCCESS = '0'
RET_ERROR = '2'
RET_CREATED = '4096'
def parse_driver_info(node):
    """Validate and normalize the DRAC credentials stored on a node.

    Reads the node's ``driver_info`` dict, applies defaults for the
    optional entries and returns the merged result. All problems are
    collected first so the exception reports every error at once.

    :param node: an ironic node object.
    :returns: a dict containing information from driver_info
              and default values.
    :raises: InvalidParameterValue if some mandatory information
             is missing on the node or on invalid inputs.
    """
    info = node.driver_info
    parsed = {}
    errors = []

    # Required entries: present and ASCII-encodable.
    for name in REQUIRED_PROPERTIES:
        try:
            parsed[name] = str(info[name])
        except KeyError:
            errors.append(_("'%s' not supplied to DracDriver.") % name)
        except UnicodeEncodeError:
            errors.append(_("'%s' contains non-ASCII symbol.") % name)

    # Optional entries with defaults.
    parsed['drac_port'] = info.get('drac_port', 443)
    try:
        parsed['drac_path'] = str(info.get('drac_path', '/wsman'))
    except UnicodeEncodeError:
        errors.append(_("'drac_path' contains non-ASCII symbol."))
    try:
        parsed['drac_protocol'] = str(info.get('drac_protocol', 'https'))
    except UnicodeEncodeError:
        errors.append(_("'drac_protocol' contains non-ASCII symbol."))

    # The port may arrive as a string; normalize it to int.
    try:
        parsed['drac_port'] = int(parsed['drac_port'])
    except ValueError:
        errors.append(_("'drac_port' is not an integer value."))

    if errors:
        raise exception.InvalidParameterValue(
            _('The following errors were encountered while parsing '
              'driver_info:\n%s') % '\n'.join(errors))

    return parsed
def get_wsman_client(node):
    """Create a WS-Man client (a wrapper around pywsman.Client) for a node.

    :param node: an ironic node object.
    :returns: a drac_client.Client instance built from the node's
              parsed driver_info.
    :raises: InvalidParameterValue if mandatory driver_info entries are
             missing from the node or hold invalid values.
    """
    return drac_client.Client(**parse_driver_info(node))
def find_xml(doc, item, namespace, find_all=False):
    """Locate element(s) named *item* within *namespace* inside *doc*.

    :param doc: an ElementTree element to search under.
    :param item: the tag name to look for.
    :param namespace: the XML namespace URI of the tag.
    :param find_all: when True return every match, when False only the
        first one. Defaults to False.
    :returns: with find_all=False, the first matching element or None;
        with find_all=True, a list of matching elements (possibly empty).
    """
    # ElementTree's namespace-qualified path syntax: .//{uri}tag
    query = './/{%s}%s' % (namespace, item)
    return doc.findall(query) if find_all else doc.find(query)
| froyobin/ironic | ironic/drivers/modules/drac/common.py | Python | apache-2.0 | 4,782 |
# Copyright (c) 2016 Claudiu Popa <pcmanticore@gmail.com>
# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html
# For details: https://github.com/PyCQA/astroid/blob/master/COPYING.LESSER
import sys
import textwrap
import six
import astroid
PY33 = sys.version_info >= (3, 3)
PY36 = sys.version_info >= (3, 6)
def _subprocess_transform():
    """Build a fake ``subprocess`` module for astroid's inference.

    Assembles Python source for a stub Popen class whose signatures match
    the running interpreter (py2 vs py3, 3.3+ timeout, 3.6 encoding/errors)
    and parses it into an astroid module.
    """
    if six.PY3:
        # py3: communicate() returns bytes and accepts a timeout.
        communicate = (bytes('string', 'ascii'), bytes('string', 'ascii'))
        communicate_signature = 'def communicate(self, input=None, timeout=None)'
        if PY36:
            # 3.6 added the keyword-only encoding/errors parameters.
            init = """
        def __init__(self, args, bufsize=0, executable=None,
                     stdin=None, stdout=None, stderr=None,
                     preexec_fn=None, close_fds=False, shell=False,
                     cwd=None, env=None, universal_newlines=False,
                     startupinfo=None, creationflags=0, restore_signals=True,
                     start_new_session=False, pass_fds=(), *,
                     encoding=None, errors=None):
            pass
        """
        else:
            init = """
        def __init__(self, args, bufsize=0, executable=None,
                     stdin=None, stdout=None, stderr=None,
                     preexec_fn=None, close_fds=False, shell=False,
                     cwd=None, env=None, universal_newlines=False,
                     startupinfo=None, creationflags=0, restore_signals=True,
                     start_new_session=False, pass_fds=()):
            pass
        """
    else:
        # py2: communicate() returns str and has no timeout parameter.
        communicate = ('string', 'string')
        communicate_signature = 'def communicate(self, input=None)'
        init = """
        def __init__(self, args, bufsize=0, executable=None,
                     stdin=None, stdout=None, stderr=None,
                     preexec_fn=None, close_fds=False, shell=False,
                     cwd=None, env=None, universal_newlines=False,
                     startupinfo=None, creationflags=0):
            pass
        """
    # wait() grew a timeout parameter in 3.3.
    if PY33:
        wait_signature = 'def wait(self, timeout=None)'
    else:
        wait_signature = 'def wait(self)'
    # Popen is a context manager only on py3.
    if six.PY3:
        ctx_manager = '''
        def __enter__(self): return self
        def __exit__(self, *args): pass
        '''
    else:
        ctx_manager = ''
    code = textwrap.dedent('''
    class Popen(object):
        returncode = pid = 0
        stdin = stdout = stderr = file()
        %(communicate_signature)s:
            return %(communicate)r
        %(wait_signature)s:
            return self.returncode
        def poll(self):
            return self.returncode
        def send_signal(self, signal):
            pass
        def terminate(self):
            pass
        def kill(self):
            pass
        %(ctx_manager)s
    ''' % {'communicate': communicate,
           'communicate_signature': communicate_signature,
           'wait_signature': wait_signature,
           'ctx_manager': ctx_manager})
    # Splice the version-appropriate __init__ into the class body at the
    # right indentation, then parse the whole thing into an astroid module.
    init_lines = textwrap.dedent(init).splitlines()
    indented_init = '\n'.join([' ' * 4 + line for line in init_lines])
    code += indented_init
    return astroid.parse(code)
astroid.register_module_extender(astroid.MANAGER, 'subprocess', _subprocess_transform)
| arju88nair/projectCulminate | venv/lib/python3.5/site-packages/astroid/brain/brain_subprocess.py | Python | apache-2.0 | 3,314 |
"""Tests for 1-Wire devices connected on OWServer."""
import copy
from unittest.mock import patch
import pytest
from homeassistant.components.onewire.switch import DEVICE_SWITCHES
from homeassistant.components.switch import DOMAIN as SWITCH_DOMAIN
from homeassistant.const import ATTR_ENTITY_ID, SERVICE_TOGGLE, STATE_OFF, STATE_ON
from homeassistant.setup import async_setup_component
from . import setup_onewire_patched_owserver_integration, setup_owproxy_mock_devices
from .const import MOCK_OWPROXY_DEVICES
from tests.common import mock_registry
# Subset of the mocked 1-Wire devices that expose switch entities.
MOCK_SWITCHES = {
    key: value
    for (key, value) in MOCK_OWPROXY_DEVICES.items()
    if SWITCH_DOMAIN in value
}
@pytest.mark.parametrize("device_id", MOCK_SWITCHES.keys())
@patch("homeassistant.components.onewire.onewirehub.protocol.proxy")
async def test_owserver_switch(owproxy, hass, device_id):
    """Test for 1-Wire switch.
    This test forces all entities to be enabled.

    For each mocked device: set up the integration, check the registry and
    initial state, then toggle every switch and verify the state flips.
    """
    await async_setup_component(hass, "persistent_notification", {})
    entity_registry = mock_registry(hass)
    setup_owproxy_mock_devices(owproxy, SWITCH_DOMAIN, [device_id])
    mock_device = MOCK_SWITCHES[device_id]
    expected_entities = mock_device[SWITCH_DOMAIN]
    # Force enable switches
    patch_device_switches = copy.deepcopy(DEVICE_SWITCHES)
    # device_id[0:2] is the device-family prefix used to key DEVICE_SWITCHES.
    for item in patch_device_switches[device_id[0:2]]:
        item.entity_registry_enabled_default = True
    with patch(
        "homeassistant.components.onewire.PLATFORMS", [SWITCH_DOMAIN]
    ), patch.dict(
        "homeassistant.components.onewire.switch.DEVICE_SWITCHES", patch_device_switches
    ):
        await setup_onewire_patched_owserver_integration(hass)
        await hass.async_block_till_done()
    assert len(entity_registry.entities) == len(expected_entities)
    for expected_entity in expected_entities:
        entity_id = expected_entity["entity_id"]
        registry_entry = entity_registry.entities.get(entity_id)
        assert registry_entry is not None
        state = hass.states.get(entity_id)
        assert state.state == expected_entity["result"]
        # Arrange the owproxy read that the toggle will trigger, and flip
        # the expectation accordingly.
        if state.state == STATE_ON:
            owproxy.return_value.read.side_effect = [b" 0"]
            expected_entity["result"] = STATE_OFF
        elif state.state == STATE_OFF:
            owproxy.return_value.read.side_effect = [b" 1"]
            expected_entity["result"] = STATE_ON
        await hass.services.async_call(
            SWITCH_DOMAIN,
            SERVICE_TOGGLE,
            {ATTR_ENTITY_ID: entity_id},
            blocking=True,
        )
        await hass.async_block_till_done()
        state = hass.states.get(entity_id)
        assert state.state == expected_entity["result"]
        assert state.attributes["device_file"] == expected_entity.get(
            "device_file", registry_entry.unique_id
        )
| sander76/home-assistant | tests/components/onewire/test_switch.py | Python | apache-2.0 | 2,860 |
# coding=utf-8
# Copyright 2018 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for t2t_trainer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensor2tensor.bin import t2t_trainer
from tensor2tensor.utils import trainer_lib_test
import tensorflow as tf
FLAGS = tf.flags.FLAGS
class TrainerTest(tf.test.TestCase):
  """Smoke test: run t2t_trainer end-to-end for one train and eval step."""

  @classmethod
  def setUpClass(cls):
    # Reuse trainer_lib_test's class setup (generates the tiny_algo data).
    trainer_lib_test.TrainerLibTest.setUpClass()

  def testTrain(self):
    # Configure a minimal run entirely via FLAGS, as t2t_trainer.main expects.
    FLAGS.problem = "tiny_algo"
    FLAGS.model = "transformer"
    FLAGS.hparams_set = "transformer_tiny"
    FLAGS.train_steps = 1
    FLAGS.eval_steps = 1
    FLAGS.output_dir = tf.test.get_temp_dir()
    FLAGS.data_dir = tf.test.get_temp_dir()
    t2t_trainer.main(None)
if __name__ == "__main__":
tf.test.main()
| mlperf/training_results_v0.5 | v0.5.0/google/research_v3.32/gnmt-tpuv3-32/code/gnmt/model/t2t/tensor2tensor/bin/t2t_trainer_test.py | Python | apache-2.0 | 1,357 |
#!Measurement
# all of this is configuration info that can be used in the script.
# you refer to these values using mx.<group>.<attribute>
# e.g
# mx.baseline.counts is 180
# mx.multicollect.detector is H1
'''
baseline:
after: true
before: false
counts: 180
detector: H1
mass: 34.2
settling_time: 15
default_fits: nominal
equilibration:
eqtime: 1.0
inlet: R
inlet_delay: 3
outlet: O
use_extraction_eqtime: true
multicollect:
counts: 400
detector: H1
isotope: Ar40
peakcenter:
after: true
before: false
detector: H1
detectors:
- H1
- AX
- CDD
isotope: Ar40
peakhop:
hops_name: ''
use_peak_hop: false
'''
# entry point for the script
def main():
    """Measurement sequence: equilibrate, multicollect, optional baseline
    and peak center, driven by the ``mx`` configuration block above.

    The DSL functions (info, activate_detectors, position_magnet, ...) are
    injected by the pychron script runtime.
    """
    # print a message to the user
    info('unknown measurement script')
    # activate the following detectors. measurements will be plotted and save for these detectors
    activate_detectors('H2', 'H1', 'AX', 'L1', 'L2', 'CDD')
    # position the magnet with Ar40 on H1
    position_magnet(mx.multicollect.isotope, detector=mx.multicollect.detector)
    # choose where to get the equilibration duration from
    # sniff the gas during equilibration
    if mx.equilibration.use_extraction_eqtime:
        # eqtime is supplied by the extraction script's runtime context.
        eqt = eqtime
    else:
        eqt = mx.equilibration.eqtime
    '''
    Equilibrate is non-blocking so use a sniff or sleep as a placeholder
    e.g sniff(<equilibration_time>) or sleep(<equilibration_time>)
    '''
    # start the equilibration thread
    equilibrate(eqtime=eqt, inlet=mx.equilibration.inlet, outlet=mx.equilibration.outlet,
                delay=mx.equilibration.inlet_delay)
    # set time zero after equilibrate returns i.e after the ion pump valve closes
    set_time_zero()
    # record/plot the equilibration
    sniff(eqt)
    # set the default fits
    set_fits()
    set_baseline_fits()
    # multicollect on active detectors for 400
    multicollect(ncounts=mx.multicollect.counts)
    if mx.baseline.after:
        # do a baseline measurement
        baselines(ncounts=mx.baseline.counts, mass=mx.baseline.mass, detector=mx.baseline.detector,
                  settling_time=mx.baseline.settling_time)
    if mx.peakcenter.after:
        # do a peak center scan and update the mftable with new peak centers
        activate_detectors(*mx.peakcenter.detectors, **{'peak_center': True})
        peak_center(detector=mx.peakcenter.detector, isotope=mx.peakcenter.isotope)
    # print a message to the user
    info('finished measure script')
"""Constants for the Toon integration."""
from datetime import timedelta
from homeassistant.components.binary_sensor import (
DEVICE_CLASS_CONNECTIVITY,
DEVICE_CLASS_PROBLEM,
)
from homeassistant.components.sensor import DEVICE_CLASS_POWER
from homeassistant.const import (
ATTR_DEVICE_CLASS,
ATTR_ICON,
ATTR_NAME,
ATTR_UNIT_OF_MEASUREMENT,
ENERGY_KILO_WATT_HOUR,
POWER_WATT,
UNIT_PERCENTAGE,
)
# Integration domain used for config entries and platform setup.
DOMAIN = "toon"

# Config entry keys.
CONF_AGREEMENT = "agreement"
CONF_AGREEMENT_ID = "agreement_id"
CONF_CLOUDHOOK_URL = "cloudhook_url"
CONF_MIGRATE = "migrate"

# Polling cadence and thermostat temperature bounds.
DEFAULT_SCAN_INTERVAL = timedelta(seconds=300)
DEFAULT_MAX_TEMP = 30.0
DEFAULT_MIN_TEMP = 6.0

# Units not provided by homeassistant.const.
CURRENCY_EUR = "EUR"
VOLUME_CM3 = "CM3"
VOLUME_M3 = "M3"

# Extra per-entity attribute keys used by the entity description dicts below.
ATTR_DEFAULT_ENABLED = "default_enabled"
ATTR_INVERTED = "inverted"
ATTR_MEASUREMENT = "measurement"
ATTR_SECTION = "section"
# Binary sensor descriptions, keyed by identifier strings that appear to encode
# "<section>_<measurement>_<value>" (presumably matching Toon API update keys --
# verify against the binary_sensor platform).  ATTR_SECTION/ATTR_MEASUREMENT
# select the attribute read from the coordinator data; ATTR_INVERTED flips the
# reported state.
BINARY_SENSOR_ENTITIES = {
    "thermostat_info_boiler_connected_None": {
        ATTR_NAME: "Boiler Module Connection",
        ATTR_SECTION: "thermostat",
        ATTR_MEASUREMENT: "boiler_module_connected",
        ATTR_INVERTED: False,
        ATTR_DEVICE_CLASS: DEVICE_CLASS_CONNECTIVITY,
        ATTR_ICON: "mdi:check-network-outline",
        ATTR_DEFAULT_ENABLED: False,
    },
    "thermostat_info_burner_info_1": {
        ATTR_NAME: "Boiler Heating",
        ATTR_SECTION: "thermostat",
        ATTR_MEASUREMENT: "heating",
        ATTR_INVERTED: False,
        ATTR_DEVICE_CLASS: None,
        ATTR_ICON: "mdi:fire",
        ATTR_DEFAULT_ENABLED: False,
    },
    "thermostat_info_burner_info_2": {
        ATTR_NAME: "Hot Tap Water",
        ATTR_SECTION: "thermostat",
        ATTR_MEASUREMENT: "hot_tapwater",
        ATTR_INVERTED: False,
        ATTR_DEVICE_CLASS: None,
        ATTR_ICON: "mdi:water-pump",
        ATTR_DEFAULT_ENABLED: True,
    },
    "thermostat_info_burner_info_3": {
        ATTR_NAME: "Boiler Preheating",
        ATTR_SECTION: "thermostat",
        ATTR_MEASUREMENT: "pre_heating",
        ATTR_INVERTED: False,
        ATTR_DEVICE_CLASS: None,
        ATTR_ICON: "mdi:fire",
        ATTR_DEFAULT_ENABLED: False,
    },
    "thermostat_info_burner_info_None": {
        ATTR_NAME: "Boiler Burner",
        ATTR_SECTION: "thermostat",
        ATTR_MEASUREMENT: "burner",
        ATTR_INVERTED: False,
        ATTR_DEVICE_CLASS: None,
        ATTR_ICON: "mdi:fire",
        ATTR_DEFAULT_ENABLED: True,
    },
    "thermostat_info_error_found_255": {
        ATTR_NAME: "Boiler Status",
        ATTR_SECTION: "thermostat",
        ATTR_MEASUREMENT: "error_found",
        ATTR_INVERTED: False,
        ATTR_DEVICE_CLASS: DEVICE_CLASS_PROBLEM,
        ATTR_ICON: "mdi:alert",
        ATTR_DEFAULT_ENABLED: True,
    },
    "thermostat_info_ot_communication_error_0": {
        ATTR_NAME: "OpenTherm Connection",
        ATTR_SECTION: "thermostat",
        ATTR_MEASUREMENT: "opentherm_communication_error",
        ATTR_INVERTED: False,
        ATTR_DEVICE_CLASS: DEVICE_CLASS_PROBLEM,
        ATTR_ICON: "mdi:check-network-outline",
        ATTR_DEFAULT_ENABLED: False,
    },
    "thermostat_program_overridden": {
        ATTR_NAME: "Thermostat Program Override",
        ATTR_SECTION: "thermostat",
        ATTR_MEASUREMENT: "program_overridden",
        ATTR_INVERTED: False,
        ATTR_DEVICE_CLASS: None,
        ATTR_ICON: "mdi:gesture-tap",
        ATTR_DEFAULT_ENABLED: True,
    },
}
# Sensor descriptions.  ATTR_SECTION/ATTR_MEASUREMENT select the attribute read
# from the coordinator data; the remaining keys describe the Home Assistant
# entity (name, unit, device class, icon, enabled-by-default flag).
SENSOR_ENTITIES = {
    "gas_average": {
        ATTR_NAME: "Average Gas Usage",
        ATTR_SECTION: "gas_usage",
        ATTR_MEASUREMENT: "average",
        ATTR_UNIT_OF_MEASUREMENT: VOLUME_CM3,
        ATTR_DEVICE_CLASS: None,
        ATTR_ICON: "mdi:gas-cylinder",
        ATTR_DEFAULT_ENABLED: True,
    },
    "gas_average_daily": {
        ATTR_NAME: "Average Daily Gas Usage",
        ATTR_SECTION: "gas_usage",
        ATTR_MEASUREMENT: "day_average",
        ATTR_UNIT_OF_MEASUREMENT: VOLUME_M3,
        ATTR_DEVICE_CLASS: None,
        ATTR_ICON: "mdi:gas-cylinder",
        ATTR_DEFAULT_ENABLED: False,
    },
    "gas_daily_usage": {
        ATTR_NAME: "Gas Usage Today",
        ATTR_SECTION: "gas_usage",
        ATTR_MEASUREMENT: "day_usage",
        ATTR_UNIT_OF_MEASUREMENT: VOLUME_M3,
        ATTR_DEVICE_CLASS: None,
        ATTR_ICON: "mdi:gas-cylinder",
        ATTR_DEFAULT_ENABLED: True,
    },
    "gas_daily_cost": {
        ATTR_NAME: "Gas Cost Today",
        ATTR_SECTION: "gas_usage",
        ATTR_MEASUREMENT: "day_cost",
        ATTR_UNIT_OF_MEASUREMENT: CURRENCY_EUR,
        ATTR_DEVICE_CLASS: None,
        ATTR_ICON: "mdi:gas-cylinder",
        ATTR_DEFAULT_ENABLED: True,
    },
    "gas_meter_reading": {
        ATTR_NAME: "Gas Meter",
        ATTR_SECTION: "gas_usage",
        ATTR_MEASUREMENT: "meter",
        ATTR_UNIT_OF_MEASUREMENT: VOLUME_M3,
        ATTR_DEVICE_CLASS: None,
        ATTR_ICON: "mdi:gas-cylinder",
        ATTR_DEFAULT_ENABLED: False,
    },
    "gas_value": {
        ATTR_NAME: "Current Gas Usage",
        ATTR_SECTION: "gas_usage",
        ATTR_MEASUREMENT: "current",
        ATTR_UNIT_OF_MEASUREMENT: VOLUME_CM3,
        ATTR_DEVICE_CLASS: None,
        ATTR_ICON: "mdi:gas-cylinder",
        ATTR_DEFAULT_ENABLED: True,
    },
    "power_average": {
        ATTR_NAME: "Average Power Usage",
        ATTR_SECTION: "power_usage",
        ATTR_MEASUREMENT: "average",
        ATTR_UNIT_OF_MEASUREMENT: POWER_WATT,
        ATTR_DEVICE_CLASS: DEVICE_CLASS_POWER,
        ATTR_ICON: "mdi:power-plug",
        ATTR_DEFAULT_ENABLED: False,
    },
    "power_average_daily": {
        ATTR_NAME: "Average Daily Energy Usage",
        ATTR_SECTION: "power_usage",
        ATTR_MEASUREMENT: "day_average",
        ATTR_UNIT_OF_MEASUREMENT: ENERGY_KILO_WATT_HOUR,
        ATTR_DEVICE_CLASS: None,
        ATTR_ICON: "mdi:power-plug",
        ATTR_DEFAULT_ENABLED: False,
    },
    "power_daily_cost": {
        ATTR_NAME: "Energy Cost Today",
        ATTR_SECTION: "power_usage",
        ATTR_MEASUREMENT: "day_cost",
        ATTR_UNIT_OF_MEASUREMENT: CURRENCY_EUR,
        ATTR_DEVICE_CLASS: None,
        ATTR_ICON: "mdi:power-plug",
        ATTR_DEFAULT_ENABLED: True,
    },
    "power_daily_value": {
        ATTR_NAME: "Energy Usage Today",
        ATTR_SECTION: "power_usage",
        ATTR_MEASUREMENT: "day_usage",
        ATTR_UNIT_OF_MEASUREMENT: ENERGY_KILO_WATT_HOUR,
        ATTR_DEVICE_CLASS: None,
        ATTR_ICON: "mdi:power-plug",
        ATTR_DEFAULT_ENABLED: True,
    },
    "power_meter_reading": {
        ATTR_NAME: "Electricity Meter Feed IN Tariff 1",
        ATTR_SECTION: "power_usage",
        ATTR_MEASUREMENT: "meter_high",
        ATTR_UNIT_OF_MEASUREMENT: ENERGY_KILO_WATT_HOUR,
        ATTR_DEVICE_CLASS: None,
        ATTR_ICON: "mdi:power-plug",
        ATTR_DEFAULT_ENABLED: False,
    },
    "power_meter_reading_low": {
        ATTR_NAME: "Electricity Meter Feed IN Tariff 2",
        ATTR_SECTION: "power_usage",
        # Fixed: the low-tariff entity must read "meter_low"; "meter_high" was
        # a copy-paste of the Tariff 1 entry above, which made both entities
        # report the same register.
        ATTR_MEASUREMENT: "meter_low",
        ATTR_UNIT_OF_MEASUREMENT: ENERGY_KILO_WATT_HOUR,
        ATTR_DEVICE_CLASS: None,
        ATTR_ICON: "mdi:power-plug",
        ATTR_DEFAULT_ENABLED: False,
    },
    "power_value": {
        ATTR_NAME: "Current Power Usage",
        ATTR_SECTION: "power_usage",
        ATTR_MEASUREMENT: "current",
        ATTR_UNIT_OF_MEASUREMENT: POWER_WATT,
        ATTR_DEVICE_CLASS: DEVICE_CLASS_POWER,
        ATTR_ICON: "mdi:power-plug",
        ATTR_DEFAULT_ENABLED: True,
    },
    "solar_meter_reading_produced": {
        ATTR_NAME: "Electricity Meter Feed OUT Tariff 1",
        ATTR_SECTION: "power_usage",
        ATTR_MEASUREMENT: "meter_produced_high",
        ATTR_UNIT_OF_MEASUREMENT: ENERGY_KILO_WATT_HOUR,
        ATTR_DEVICE_CLASS: None,
        ATTR_ICON: "mdi:power-plug",
        ATTR_DEFAULT_ENABLED: False,
    },
    "solar_meter_reading_low_produced": {
        ATTR_NAME: "Electricity Meter Feed OUT Tariff 2",
        ATTR_SECTION: "power_usage",
        ATTR_MEASUREMENT: "meter_produced_low",
        ATTR_UNIT_OF_MEASUREMENT: ENERGY_KILO_WATT_HOUR,
        ATTR_DEVICE_CLASS: None,
        ATTR_ICON: "mdi:power-plug",
        ATTR_DEFAULT_ENABLED: False,
    },
    "solar_value": {
        ATTR_NAME: "Current Solar Power Production",
        ATTR_SECTION: "power_usage",
        ATTR_MEASUREMENT: "current_solar",
        ATTR_UNIT_OF_MEASUREMENT: POWER_WATT,
        ATTR_DEVICE_CLASS: DEVICE_CLASS_POWER,
        ATTR_ICON: "mdi:solar-power",
        ATTR_DEFAULT_ENABLED: True,
    },
    "solar_maximum": {
        ATTR_NAME: "Max Solar Power Production Today",
        ATTR_SECTION: "power_usage",
        ATTR_MEASUREMENT: "day_max_solar",
        ATTR_UNIT_OF_MEASUREMENT: POWER_WATT,
        ATTR_DEVICE_CLASS: None,
        ATTR_ICON: "mdi:solar-power",
        ATTR_DEFAULT_ENABLED: True,
    },
    "solar_produced": {
        ATTR_NAME: "Solar Power Production to Grid",
        ATTR_SECTION: "power_usage",
        ATTR_MEASUREMENT: "current_produced",
        ATTR_UNIT_OF_MEASUREMENT: POWER_WATT,
        ATTR_DEVICE_CLASS: DEVICE_CLASS_POWER,
        ATTR_ICON: "mdi:solar-power",
        ATTR_DEFAULT_ENABLED: True,
    },
    "power_usage_day_produced_solar": {
        ATTR_NAME: "Solar Energy Produced Today",
        ATTR_SECTION: "power_usage",
        ATTR_MEASUREMENT: "day_produced_solar",
        ATTR_UNIT_OF_MEASUREMENT: ENERGY_KILO_WATT_HOUR,
        ATTR_DEVICE_CLASS: None,
        ATTR_ICON: "mdi:solar-power",
        ATTR_DEFAULT_ENABLED: True,
    },
    "power_usage_day_to_grid_usage": {
        ATTR_NAME: "Energy Produced To Grid Today",
        ATTR_SECTION: "power_usage",
        ATTR_MEASUREMENT: "day_to_grid_usage",
        ATTR_UNIT_OF_MEASUREMENT: ENERGY_KILO_WATT_HOUR,
        ATTR_DEVICE_CLASS: None,
        ATTR_ICON: "mdi:solar-power",
        ATTR_DEFAULT_ENABLED: False,
    },
    "power_usage_day_from_grid_usage": {
        ATTR_NAME: "Energy Usage From Grid Today",
        ATTR_SECTION: "power_usage",
        ATTR_MEASUREMENT: "day_from_grid_usage",
        ATTR_UNIT_OF_MEASUREMENT: ENERGY_KILO_WATT_HOUR,
        ATTR_DEVICE_CLASS: None,
        ATTR_ICON: "mdi:power-plug",
        ATTR_DEFAULT_ENABLED: False,
    },
    "solar_average_produced": {
        ATTR_NAME: "Average Solar Power Production to Grid",
        ATTR_SECTION: "power_usage",
        ATTR_MEASUREMENT: "average_produced",
        ATTR_UNIT_OF_MEASUREMENT: POWER_WATT,
        ATTR_DEVICE_CLASS: DEVICE_CLASS_POWER,
        ATTR_ICON: "mdi:solar-power",
        ATTR_DEFAULT_ENABLED: False,
    },
    "thermostat_info_current_modulation_level": {
        ATTR_NAME: "Boiler Modulation Level",
        ATTR_SECTION: "thermostat",
        ATTR_MEASUREMENT: "current_modulation_level",
        ATTR_UNIT_OF_MEASUREMENT: UNIT_PERCENTAGE,
        ATTR_DEVICE_CLASS: None,
        ATTR_ICON: "mdi:percent",
        ATTR_DEFAULT_ENABLED: False,
    },
    "power_usage_current_covered_by_solar": {
        ATTR_NAME: "Current Power Usage Covered By Solar",
        ATTR_SECTION: "power_usage",
        ATTR_MEASUREMENT: "current_covered_by_solar",
        ATTR_UNIT_OF_MEASUREMENT: UNIT_PERCENTAGE,
        ATTR_DEVICE_CLASS: None,
        ATTR_ICON: "mdi:solar-power",
        ATTR_DEFAULT_ENABLED: True,
    },
}
# Switch descriptions (same structure as the sensor dicts above); these toggle
# thermostat features rather than report values.
SWITCH_ENTITIES = {
    "thermostat_holiday_mode": {
        ATTR_NAME: "Holiday Mode",
        ATTR_SECTION: "thermostat",
        ATTR_MEASUREMENT: "holiday_mode",
        ATTR_INVERTED: False,
        ATTR_ICON: "mdi:airport",
        ATTR_DEFAULT_ENABLED: True,
    },
    "thermostat_program": {
        ATTR_NAME: "Thermostat Program",
        ATTR_SECTION: "thermostat",
        ATTR_MEASUREMENT: "program",
        ATTR_INVERTED: False,
        ATTR_ICON: "mdi:calendar-clock",
        ATTR_DEFAULT_ENABLED: True,
    },
}
| nkgilley/home-assistant | homeassistant/components/toon/const.py | Python | apache-2.0 | 11,757 |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ======================================
"""Operations for handling session logging and shutdown notifications."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import threading
import time
from google.protobuf import text_format
from tensorflow.contrib.tpu.python.ops import tpu_ops
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.util import event_pb2
from tensorflow.python.client import session as session_lib
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import session_run_hook
from tensorflow.python.training import training_util
# Module-level singleton WatchdogManager, installed by `start_worker_watchdog`.
_WATCHDOG = None
class CoordinatorShutdownException(Exception):
  """Signals that the coordinator must shut down and stop training."""
def _clone_session(session, graph=None):
  """Return a new Session with the same target and config as `session`.

  If `graph` is None the original session's graph is reused; otherwise the
  clone is bound to the supplied graph.
  """
  return session_lib.Session(
      target=session.sess_str,
      config=session._config,  # pylint: disable=protected-access
      graph=graph if graph else session.graph)
def _make_heartbeat_op(session, device, request_ph):
  """Return a heartbeat op or None if heartbeats are not supported by device."""
  try:
    # Test if we can connect in a isolated graph + session, so that a probe
    # failure does not pollute the caller's graph.
    with ops.Graph().as_default():
      with _clone_session(session) as temp_session:
        with ops.device(device):
          # Empty request: we only care whether the op runs at all.
          heartbeat_op = tpu_ops.worker_heartbeat('')
          options = config_pb2.RunOptions(timeout_in_ms=5000)
          temp_session.run(heartbeat_op, options=options)
  except errors.InvalidArgumentError as _:
    logging.warning('Error running heartbeat on %s', device)
    return None
  except errors.DeadlineExceededError as _:
    logging.warning('Timeout connecting to %s when testing heartbeat', device)
    return None
  # If we successfully connected and pinged the worker, go ahead and construct
  # the operation.
  with ops.device(device):
    return tpu_ops.worker_heartbeat(request_ph)
class WorkerHeartbeatManager(object):
  """Manages the status/heartbeat monitor for a set of workers."""

  def __init__(self, session, devices, heartbeat_ops, request_placeholder):
    """Construct a new WorkerHeartbeatManager.

    (Prefer using `WorkerHeartbeatManager.from_devices` when possible.)

    Args:
      session: `tf.Session`, session to use for heartbeat operations.
      devices: `list[string]` Set of devices to connect to.
      heartbeat_ops: `list[tf.Operation]` Heartbeat operations.
      request_placeholder: `tf.Placeholder[String]` Placeholder used to specify
        the WorkerHeartbeatRequest protocol buffer.
    """
    self._session = session
    self._devices = devices
    self._ops = heartbeat_ops
    self._request_placeholder = request_placeholder

  @staticmethod
  def from_devices(session, devices):
    """Construct a heartbeat manager for the given devices."""
    if not devices:
      logging.error('Trying to create heartbeat manager with no devices?')
    logging.info('Creating heartbeat manager for %s', devices)
    request_placeholder = array_ops.placeholder(
        name='worker_heartbeat_request', dtype=dtypes.string)
    heartbeat_ops = []
    kept_devices = []
    # Probe each device; devices without heartbeat support are dropped.
    for device in devices:
      heartbeat_op = _make_heartbeat_op(session, device, request_placeholder)
      if heartbeat_op is not None:
        kept_devices.append(device)
        heartbeat_ops.append(heartbeat_op)
      else:
        logging.warning('Heartbeat support not available for %s', device)
    return WorkerHeartbeatManager(session, kept_devices, heartbeat_ops,
                                  request_placeholder)

  def num_workers(self):
    # Number of devices that passed the heartbeat probe.
    return len(self._devices)

  def configure(self, message):
    """Configure heartbeat manager for all devices.

    Args:
      message: `event_pb2.WorkerHeartbeatRequest`
    Returns: `None`
    """
    logging.info('Configuring worker heartbeat: %s',
                 text_format.MessageToString(message))
    self._session.run(self._ops,
                      {self._request_placeholder: message.SerializeToString()})

  def ping(self, request=None, timeout_in_ms=5000):
    """Ping all workers, returning the parsed status results."""
    if request is None:
      request = event_pb2.WorkerHeartbeatRequest()
    options = config_pb2.RunOptions(timeout_in_ms=timeout_in_ms)
    results = self._session.run(
        self._ops,
        feed_dict={self._request_placeholder: request.SerializeToString()},
        options=options)
    # Each result is a serialized WorkerHeartbeatResponse proto.
    parsed_results = [
        event_pb2.WorkerHeartbeatResponse.FromString(res_pb)
        for res_pb in results
    ]
    logging.debug('Ping results: %s', parsed_results)
    return parsed_results

  def lame_workers(self):
    """Ping all workers, returning manager containing lame workers (or None)."""
    ping_results = self.ping()
    lame_workers = []
    for ping_response, device, op in zip(ping_results, self._devices,
                                         self._ops):
      if ping_response.health_status != event_pb2.OK:
        lame_workers.append((device, op))
    if not lame_workers:
      return None
    bad_devices, bad_ops = zip(*lame_workers)
    # Return a sub-manager restricted to the unhealthy devices.
    return WorkerHeartbeatManager(self._session, bad_devices, bad_ops,
                                  self._request_placeholder)

  def __repr__(self):
    return 'HeartbeatManager(%s)' % ','.join(self._devices)

  def shutdown(self, timeout_ms=10000):
    """Shutdown all workers after `shutdown_timeout_secs`."""
    logging.info('Shutting down %s.', self)
    req = event_pb2.WorkerHeartbeatRequest(
        watchdog_config=event_pb2.WatchdogConfig(timeout_ms=timeout_ms))
    self.configure(req)
    # Wait for workers to shutdown. This isn't strictly required
    # but it avoids triggering multiple checkpoints with the same lame worker.
    logging.info('Waiting %dms for worker shutdown.', timeout_ms)
    time.sleep(timeout_ms / 1000)
def all_worker_devices(session):
  """Return the CPU device name of every non-coordinator worker."""
  worker_devices = []
  for device in session.list_devices():
    device_name = device.name
    # Keep only worker CPU devices; the coordinator job is excluded.
    if ':CPU:' in device_name and 'coordinator' not in device_name:
      worker_devices.append(device_name)
  return worker_devices
class WatchdogManager(threading.Thread):
  """Configures worker watchdog timer and handles periodic pings.

  Usage:
    # Ping workers every minute, shutting down workers if they haven't received
    # a ping after 1 hour.
    watchdog_manager = WatchdogManager(
        ping_interval=60, shutdown_timeout=3600
    )
    # Use as a context manager, resetting watchdog on context exit:
    with watchdog_manager:
      session.run(...)
    # Or setup globally; watchdog will remain active until program exit.
    watchdog_manager.configure_and_run()
  """

  def __init__(self,
               session,
               devices=None,
               ping_interval=60,
               shutdown_timeout=3600):
    """Initialize a watchdog manager.

    Args:
      session: Session connected to worker devices. A cloned session and graph
        will be created for managing worker pings.
      devices: Set of devices to monitor. If none, all workers will be
        monitored.
      ping_interval: Time, in seconds, between watchdog pings.
      shutdown_timeout: Time, in seconds, before watchdog timeout.
    """
    threading.Thread.__init__(self)
    self.ping_interval = ping_interval
    self.shutdown_timeout = shutdown_timeout
    # Daemon thread: does not block interpreter exit.
    self.daemon = True
    self._config = session._config  # pylint: disable=protected-access
    self._target = session.sess_str
    self._running = False
    self._devices = devices
    # Graph/session/manager are (re)built lazily by _reset_manager.
    self._graph = None
    self._session = None
    self._worker_manager = None

  def _reset_manager(self):
    """Reset the graph, session and worker manager."""
    self._graph = ops.Graph()
    self._session = session_lib.Session(
        target=self._target,
        graph=self._graph,
        config=self._config,
    )
    if self._devices is None:
      self._devices = all_worker_devices(self._session)
    with self._graph.as_default():
      self._worker_manager = WorkerHeartbeatManager.from_devices(
          self._session, self._devices)
    # Arm the worker-side watchdog: workers shut down if no ping arrives
    # within `shutdown_timeout`.
    self._worker_manager.configure(
        event_pb2.WorkerHeartbeatRequest(
            watchdog_config=event_pb2.WatchdogConfig(
                timeout_ms=self.shutdown_timeout * 1000,),
            shutdown_mode=event_pb2.WAIT_FOR_COORDINATOR))

  def configure_and_run(self):
    logging.info(
        'Enabling watchdog timer with %d second timeout '
        'and %d second ping interval.', self.shutdown_timeout,
        self.ping_interval)
    self._reset_manager()
    self._running = True
    self.start()

  def stop(self):
    logging.info('Stopping worker watchdog.')
    # Disarm the worker-side watchdog before stopping the ping thread.
    self._worker_manager.configure(
        event_pb2.WorkerHeartbeatRequest(
            watchdog_config=event_pb2.WatchdogConfig(timeout_ms=-1,),
            shutdown_mode=event_pb2.NOT_CONFIGURED))
    self._running = False
    self.join()

  def __enter__(self):
    self.configure_and_run()

  def __exit__(self, exc_type, exc_val, exc_tb):
    self.stop()

  def run(self):
    # Don't fetch logs or adjust timing: just ping the watchdog.
    #
    # If we hit an exception, reset our session as it is likely broken.
    while self._running:
      try:
        self._worker_manager.ping(request=None)
        time.sleep(self.ping_interval)
      except errors.OpError as e:
        # Catch any TF errors that occur so we don't stop sending heartbeats
        logging.debug('Caught error while sending heartbeat: %s', e)
        self._reset_manager()
def start_worker_watchdog(session,
                          devices=None,
                          ping_interval=60,
                          shutdown_timeout=3600):
  """Start global worker watchdog to shutdown workers on coordinator exit."""
  global _WATCHDOG
  # Idempotent: only the first call installs the singleton watchdog.
  if _WATCHDOG is None:
    # Ensure we can send a few pings before we timeout!
    ping_interval = min(shutdown_timeout / 10., ping_interval)
    _WATCHDOG = WatchdogManager(session, devices, ping_interval,
                                shutdown_timeout)
    _WATCHDOG.configure_and_run()
class GracefulShutdownHook(session_run_hook.SessionRunHook):
  """Session hook that watches for shutdown events.

  If a shutdown is indicated, `saver.save(checkpoint_prefix)` is executed, and a
  SystemShutdown exception is raised to terminate the main session. If `saver`
  is None the `SAVERS` collection will be read to find a saver.

  `on_shutdown_hooks` is an optional list of functions that should be called
  after checkpointing. The function is called with (`run_context`,
  `all_workers`, `lame_workers`).

  If `heartbeat_group` is not specified, it will default to all CPU workers
  in the system.
  """

  def __init__(self, checkpoint_prefix, saver=None, on_shutdown_hooks=None):
    self._saver = saver
    self._checkpoint_prefix = checkpoint_prefix
    self._on_shutdown_hooks = on_shutdown_hooks if on_shutdown_hooks else []
    # Worker heartbeats are managed independently of the main training graph.
    self._graph = ops.Graph()
    self._workers = None
    self._session = None
    self._heartbeat_supported = False

  def after_create_session(self, training_session, coord):  # pylint: disable=unused-argument
    # N.B. We have to pull the global step here to avoid it being unavailable
    # at checkpoint time; the graph has been frozen at that point.
    if training_util.get_global_step() is None and self.saver() is not None:
      raise ValueError(
          'Saver defined but no global step. Run `get_or_create_global_step()`'
          ' in your model definition to allow checkpointing.')
    with self._graph.as_default():
      logging.info('Installing graceful shutdown hook.')
      self._session = _clone_session(training_session, self._graph)
      self._workers = WorkerHeartbeatManager.from_devices(
          self._session, all_worker_devices(self._session))
      self._heartbeat_supported = self._workers.num_workers() > 0
      if self._heartbeat_supported:
        self._workers.configure(
            event_pb2.WorkerHeartbeatRequest(
                shutdown_mode=event_pb2.WAIT_FOR_COORDINATOR))
      else:
        # Fixed typo in the log message: 'hearbeats' -> 'heartbeats'.
        logging.warn(
            'No workers support heartbeats. Failure handling will be disabled.')

  def saver(self):
    """Return the saver to use: explicit one, or the sole SAVERS entry."""
    if self._saver:
      return self._saver
    savers = ops.get_collection(ops.GraphKeys.SAVERS)
    if not savers:
      return None
    if not isinstance(savers, list):
      return savers
    if len(savers) > 1:
      logging.error(
          'Multiple savers in the SAVERS collection. On-demand checkpointing '
          'will be disabled. Pass an explicit `saver` to the constructor to '
          'override this behavior.')
      return None
    return savers[0]

  def after_run(self, run_context, run_values):
    del run_values
    if not self._heartbeat_supported:
      return
    lame_workers = self._workers.lame_workers()
    if lame_workers:
      logging.info('ShutdownHook: lame workers found: %s', lame_workers)
      # Checkpoint before invoking the shutdown hooks so progress is not lost.
      if self.saver():
        logging.info('ShutdownHook: saving checkpoint to %s',
                     self._checkpoint_prefix)
        self.saver().save(
            run_context.session,
            self._checkpoint_prefix,
            global_step=training_util.get_global_step(),
            write_state=True,
        )
      else:
        logging.info('ShutdownHook: no Saver defined.')
      for fn in self._on_shutdown_hooks:
        fn(run_context, self._workers, lame_workers)
class RestartComputation(object):
  """Restart the entire computation.

  This hook shuts down all workers and returns control to the top-level by
  throwing a CoordinatorShutdownException.
  """

  def __init__(self, timeout_ms=10000):
    # Time allowed for workers to shut down before the coordinator exits.
    self.timeout_ms = timeout_ms

  def __call__(self, run_context, all_workers, lame_workers):
    del run_context, lame_workers
    all_workers.shutdown(timeout_ms=self.timeout_ms)
    logging.info('Terminating coordinator.')
    raise CoordinatorShutdownException()
class ShutdownLameWorkers(object):
  """Shutdown lamed workers.

  Processing will continue normally (typically by waiting for the down
  workers to be restarted).
  """

  def __init__(self, timeout_ms=10000):
    # NOTE(review): attribute is named `timeout_in_ms` here but `timeout_ms`
    # in RestartComputation; kept as-is for backward compatibility.
    self.timeout_in_ms = timeout_ms

  def __call__(self, run_context, all_workers, lame_workers):
    # Only the unhealthy workers are shut down; the rest keep running.
    lame_workers.shutdown(timeout_ms=self.timeout_in_ms)
| hfp/tensorflow-xsmm | tensorflow/contrib/tpu/python/tpu/session_support.py | Python | apache-2.0 | 15,206 |
import unittest
import sys
import inspect
from robot.running.handlers import _PythonHandler, _JavaHandler, DynamicHandler
from robot import utils
from robot.utils.asserts import *
from robot.running.testlibraries import TestLibrary
from robot.running.dynamicmethods import (
GetKeywordArguments, GetKeywordDocumentation, RunKeyword)
from robot.errors import DataError
from classes import NameLibrary, DocLibrary, ArgInfoLibrary
from ArgumentsPython import ArgumentsPython
if utils.JYTHON:
import ArgumentsJava
def _get_handler_methods(lib):
attrs = [getattr(lib, a) for a in dir(lib) if not a.startswith('_')]
return [a for a in attrs if inspect.ismethod(a)]
def _get_java_handler_methods(lib):
    # This hack relies on the convention that all Java handler methods used in
    # these tests are named 'a_*' -- simpler than excluding inherited members
    # such as 'equals' one by one.
    java_handlers = []
    for method in _get_handler_methods(lib):
        if method.__name__.startswith('a_'):
            java_handlers.append(method)
    return java_handlers
class LibraryMock:
    """Minimal stand-in for a library object as used by handler tests."""

    def __init__(self, name='MyLibrary', scope='GLOBAL'):
        self.name = name
        self.orig_name = name
        self.scope = scope
class TestPythonHandler(unittest.TestCase):
    """Tests for _PythonHandler using fixture libraries from `classes`.

    The fixture libraries encode their expected values in method docstrings
    or attributes (e.g. `expected_doc`), which these tests read back.
    """

    def test_name(self):
        for method in _get_handler_methods(NameLibrary()):
            # Fixture convention: the docstring holds the expected keyword name.
            handler = _PythonHandler(LibraryMock('mylib'), method.__name__, method)
            assert_equals(handler.name, method.__doc__)
            assert_equals(handler.longname, 'mylib.'+method.__doc__)

    def test_docs(self):
        for method in _get_handler_methods(DocLibrary()):
            handler = _PythonHandler(LibraryMock(), method.__name__, method)
            assert_equals(handler.doc, method.expected_doc)
            assert_equals(handler.shortdoc, method.expected_shortdoc)

    def test_arguments(self):
        for method in _get_handler_methods(ArgInfoLibrary()):
            handler = _PythonHandler(LibraryMock(), method.__name__, method)
            args = handler.arguments
            argspec = (args.positional, args.defaults, args.varargs, args.kwargs)
            # Fixture convention: the docstring is a literal of the expected spec.
            expected = eval(method.__doc__)
            assert_equals(argspec, expected, method.__name__)

    def test_arg_limits(self):
        for method in _get_handler_methods(ArgumentsPython()):
            handler = _PythonHandler(LibraryMock(), method.__name__, method)
            exp_mina, exp_maxa = eval(method.__doc__)
            assert_equals(handler.arguments.minargs, exp_mina)
            assert_equals(handler.arguments.maxargs, exp_maxa)

    def test_getarginfo_getattr(self):
        handlers = TestLibrary('classes.GetattrLibrary').handlers
        assert_equals(len(handlers), 3)
        for handler in handlers:
            assert_true(handler.name in ['Foo','Bar','Zap'])
            assert_equals(handler.arguments.minargs, 0)
            assert_equals(handler.arguments.maxargs, sys.maxint)
class TestDynamicHandlerCreation(unittest.TestCase):
    """Tests for DynamicHandler doc and argspec handling."""

    def test_none_doc(self):
        self._assert_doc(None, '')

    def test_empty_doc(self):
        self._assert_doc('')

    def test_non_empty_doc(self):
        self._assert_doc('This is some documentation')

    def test_non_ascii_doc(self):
        self._assert_doc(u'P\xe4iv\xe4\xe4')

    if not utils.IRONPYTHON:
        # IronPython is excluded because of its different bytes/str handling.
        def test_with_utf8_doc(self):
            doc = u'P\xe4iv\xe4\xe4'
            self._assert_doc(doc.encode('UTF-8'), doc)

    def test_invalid_doc_type(self):
        self._assert_fails('Return value must be string.', doc=True)

    def test_none_argspec(self):
        # No argspec at all means "accept anything" via varargs.
        self._assert_spec(None, maxargs=sys.maxint, vararg='varargs', kwarg=False)

    def test_none_argspec_when_kwargs_supported(self):
        self._assert_spec(None, maxargs=sys.maxint, vararg='varargs', kwarg='kwargs')

    def test_empty_argspec(self):
        self._assert_spec([])

    def test_mandatory_args(self):
        for argspec in [['arg'], ['arg1', 'arg2', 'arg3']]:
            self._assert_spec(argspec, len(argspec), len(argspec), argspec)

    def test_only_default_args(self):
        self._assert_spec(['defarg1=value', 'defarg2=defvalue'], 0, 2,
                          ['defarg1', 'defarg2'], ['value', 'defvalue'])

    def test_default_value_may_contain_equal_sign(self):
        self._assert_spec(['d=foo=bar'], 0, 1, ['d'], ['foo=bar'])

    def test_varargs(self):
        self._assert_spec(['*vararg'], 0, sys.maxint, vararg='vararg')

    def test_kwargs(self):
        self._assert_spec(['**kwarg'], 0, 0, kwarg='kwarg')

    def test_varargs_and_kwargs(self):
        self._assert_spec(['*vararg', '**kwarg'],
                          0, sys.maxint, vararg='vararg', kwarg='kwarg')

    def test_integration(self):
        self._assert_spec(['arg', 'default=value'], 1, 2,
                          ['arg', 'default'], ['value'])
        self._assert_spec(['arg', 'default=value', '*var'], 1, sys.maxint,
                          ['arg', 'default'], ['value'], 'var')
        self._assert_spec(['arg', 'default=value', '**kw'], 1, 2,
                          ['arg', 'default'], ['value'], None, 'kw')
        self._assert_spec(['arg', 'default=value', '*var', '**kw'], 1, sys.maxint,
                          ['arg', 'default'], ['value'], 'var', 'kw')

    def test_invalid_argspec_type(self):
        for argspec in [True, [1, 2]]:
            self._assert_fails("Return value must be list of strings.", argspec)

    def test_mandatory_arg_after_default_arg(self):
        for argspec in [['d=v', 'arg'], ['a', 'b', 'c=v', 'd']]:
            self._assert_fails('Non-default argument after default arguments.',
                               argspec)

    def test_positional_after_vararg(self):
        for argspec in [['*foo', 'arg'], ['arg', '*var', 'arg'],
                        ['a', 'b=d', '*var', 'c'], ['*var', '*vararg']]:
            self._assert_fails('Positional argument after varargs.', argspec)

    def test_kwarg_not_last(self):
        for argspec in [['**foo', 'arg'], ['arg', '**kw', 'arg'],
                        ['a', 'b=d', '**kw', 'c'], ['**kw', '*vararg'],
                        ['**kw', '**kwarg']]:
            self._assert_fails('Only last argument can be kwargs.', argspec)

    def test_missing_kwargs_support(self):
        self._assert_fails("Too few 'run_keyword' method parameters"
                           " for **kwargs support.",
                           ['**kwargs'])

    def _assert_doc(self, doc, expected=None):
        # Helper: create a handler with `doc` and check the resulting doc.
        expected = doc if expected is None else expected
        assert_equals(self._create_handler(doc=doc).doc, expected)

    def _assert_spec(self, argspec, minargs=0, maxargs=0, positional=[],
                     defaults=[], vararg=None, kwarg=None):
        # Helper: verify the parsed argument spec.  kwarg=None exercises both
        # kwargs-supporting and non-supporting run_keyword signatures;
        # kwarg=False forces the non-supporting one only.
        if kwarg is None:
            kwargs_support_modes = [True, False]
        elif kwarg is False:
            kwargs_support_modes = [False]
            kwarg = None
        else:
            kwargs_support_modes = [True]
        for kwargs_support in kwargs_support_modes:
            arguments = self._create_handler(argspec,
                                             kwargs_support=kwargs_support
                                             ).arguments
            assert_equals(arguments.minargs, minargs)
            assert_equals(arguments.maxargs, maxargs)
            assert_equals(arguments.positional, positional)
            assert_equals(arguments.defaults, defaults)
            assert_equals(arguments.varargs, vararg)
            assert_equals(arguments.kwargs, kwarg)

    def _assert_fails(self, error, argspec=None, doc=None):
        assert_raises_with_msg(DataError, error,
                               self._create_handler, argspec, doc)

    def _create_handler(self, argspec=None, doc=None, kwargs_support=False):
        # Build a DynamicHandler around a mock library whose run_keyword
        # signature determines whether **kwargs are supported.
        lib = LibraryMock('TEST CASE')
        if kwargs_support:
            lib.run_keyword = lambda name, args, kwargs: None
        else:
            lib.run_keyword = lambda name, args: None
        lib.run_keyword.__name__ = 'run_keyword'
        doc = GetKeywordDocumentation(lib)._handle_return_value(doc)
        argspec = GetKeywordArguments(lib)._handle_return_value(argspec)
        return DynamicHandler(lib, 'mock', RunKeyword(lib), doc, argspec)
# Java handler tests only run under Jython, where the Java fixture library
# is importable.
if utils.JYTHON:

    # Map handler name -> bound method for the 'a_*' fixture methods.
    handlers = dict((method.__name__, method) for method in
                    _get_java_handler_methods(ArgumentsJava('Arg', ['varargs'])))

    class TestJavaHandler(unittest.TestCase):
        """Tests for _JavaHandler argument limits.

        Fixture naming convention: 'a_<min>' for fixed arity,
        'a_<min>_n' for varargs, 'a_<min>_<max>' for overloaded signatures.
        """

        def test_arg_limits_no_defaults_or_varargs(self):
            for count in [0, 1, 3]:
                method = handlers['a_%d' % count]
                handler = _JavaHandler(LibraryMock(), method.__name__, method)
                assert_equals(handler.arguments.minargs, count)
                assert_equals(handler.arguments.maxargs, count)

        def test_arg_limits_with_varargs(self):
            for count in [0, 1]:
                method = handlers['a_%d_n' % count]
                handler = _JavaHandler(LibraryMock(), method.__name__, method)
                assert_equals(handler.arguments.minargs, count)
                assert_equals(handler.arguments.maxargs, sys.maxint)

        def test_arg_limits_with_defaults(self):
            # defaults i.e. multiple signatures
            for mina, maxa in [(0, 1), (1, 3)]:
                method = handlers['a_%d_%d' % (mina, maxa)]
                handler = _JavaHandler(LibraryMock(), method.__name__, method)
                assert_equals(handler.arguments.minargs, mina)
                assert_equals(handler.arguments.maxargs, maxa)
class TestArgumentCoercer(unittest.TestCase):
    """Tests for string-to-Java-type argument coercion."""

    def setUp(self):
        # Constructor args are strings that should be coerced to int/boolean.
        self.lib = TestLibrary('ArgTypeCoercion', ['42', 'true'])

    def test_coercion_in_constructor(self):
        instance = self.lib.get_instance()
        assert_equals(instance.myInt, 42)
        assert_equals(instance.myBool, True)

    def test_coercing_to_integer(self):
        self._test_coercion(self._handler_named('intArgument'),
                            ['1'], [1])

    def test_coercing_to_boolean(self):
        # Boolean coercion is case-insensitive.
        handler = self._handler_named('booleanArgument')
        self._test_coercion(handler, ['True'], [True])
        self._test_coercion(handler, ['FALSE'], [ False])

    def test_coercing_to_real_number(self):
        self._test_coercion(self._handler_named('doubleArgument'),
                            ['1.42'], [1.42])
        self._test_coercion(self._handler_named('floatArgument'),
                            ['-9991.098'], [-9991.098])

    def test_coercion_with_compatible_types(self):
        self._test_coercion(self._handler_named('coercableKeywordWithCompatibleTypes'),
                            ['9999', '-42', 'FaLsE', '31.31'],
                            [9999, -42, False, 31.31])

    def test_arguments_that_are_not_strings_are_not_coerced(self):
        # Coercion applies only to string arguments; other objects pass through.
        self._test_coercion(self._handler_named('intArgument'),
                            [self.lib], [self.lib])
        self._test_coercion(self._handler_named('booleanArgument'),
                            [42], [42])

    def test_coercion_fails_with_reasonable_message(self):
        exp_msg = 'Argument at position 1 cannot be coerced to %s.'
        self._test_coercion_fails(self._handler_named('intArgument'),
                                  exp_msg % 'integer')
        self._test_coercion_fails(self._handler_named('booleanArgument'),
                                  exp_msg % 'boolean')
        self._test_coercion_fails(self._handler_named('floatArgument'),
                                  exp_msg % 'floating point number')

    def test_no_arg_no_coercion(self):
        self._test_coercion(self._handler_named('noArgument'), [], [])

    def test_coercing_multiple_arguments(self):
        self._test_coercion(self._handler_named('coercableKeyword'),
                            ['10.0', '42', 'tRUe'], [10.0, 42, True])

    def test_coercion_is_not_done_with_conflicting_signatures(self):
        # Ambiguous overloads disable coercion entirely.
        self._test_coercion(self._handler_named('unCoercableKeyword'),
                            ['True', '42'], ['True', '42'])

    def test_coercable_and_uncoercable_args_in_same_kw(self):
        self._test_coercion(self._handler_named('coercableAndUnCoercableArgs'),
                            ['1', 'False', '-23', '0'], ['1', False, -23, '0'])

    def _handler_named(self, name):
        return self.lib.handlers[name]

    def _test_coercion(self, handler, args, expected):
        assert_equals(handler._arg_coercer.coerce(args, {}), expected)

    def _test_coercion_fails(self, handler, expected_message):
        assert_raises_with_msg(ValueError, expected_message,
                               handler._arg_coercer.coerce, ['invalid'], {})
# Allow running this module's tests directly from the command line.
if __name__ == '__main__':
    unittest.main()
| yahman72/robotframework | utest/running/test_handlers.py | Python | apache-2.0 | 12,868 |
#!/usr/bin/env python
# Cancels the current arm trajectory goal when the cancel/stop button on the
# Denso teach pendant is pressed (see pendantCB below).
import rospy
import os
import roslib
roslib.load_manifest("denso_pendant_publisher")
roslib.load_manifest("actionlib_msgs")
import denso_pendant_publisher.msg
import std_msgs.msg
import actionlib_msgs.msg
rospy.init_node("moveit_canceler")
# Module-level state shared with the subscriber callback.
g_runnable = True    # flipped to False once a cancel/stop press is observed
g_prev_status = None  # previous PendantStatus message, used for edge detection
def pendantCB(msg):
    """Pendant status callback.

    Publishes a cancel message when the cancel or stop button transitions
    from released to pressed between two consecutive status messages.
    """
    global g_runnable, g_prev_status
    if g_prev_status:
        if (not g_prev_status.button_cancel and msg.button_cancel) or (not g_prev_status.button_stop and msg.button_stop): # canceled or stopped
            g_runnable = False
            # here we should send cancel
            # NOTE(review): an empty goal id appears intended to cancel all
            # active goals -- confirm against the actionlib cancel semantics.
            cancel = actionlib_msgs.msg.GoalID()
            cancel.id = ""
            # cancel_pub is created at module scope after this definition; it
            # exists by the time the first message arrives.
            cancel_pub.publish(cancel)
            rospy.loginfo("cancel")
    g_prev_status = msg
# Wire the pendant status stream to the callback and keep a publisher for
# cancelling the active follow_joint_trajectory goal.
# (Fix: removed stray trailing semicolons, which are not idiomatic Python.)
sub = rospy.Subscriber("/denso_pendant_publisher/status",
                       denso_pendant_publisher.msg.PendantStatus, pendantCB)
cancel_pub = rospy.Publisher("/arm_controller/follow_joint_trajectory/cancel",
                             actionlib_msgs.msg.GoalID)
# cancel_pub = rospy.Publisher("/move_group/cancel", actionlib_msgs.msg.GoalID)
rospy.spin()
| mikewrock/phd_backup_full | src/wrock/vs060/scripts/moveit_canceler.py | Python | apache-2.0 | 1,094 |
import sys
# Make the repository-local h2o-py package importable when run from this dir.
sys.path.insert(1, "../../../")
import h2o
def iris_nfolds(ip, port):
    """Train a 5-fold cross-validated random forest on the iris data.

    ``ip`` and ``port`` are supplied by the h2o test runner (``h2o.run_test``)
    and are unused here.
    """
    iris = h2o.import_file(path=h2o.locate("smalldata/iris/iris.csv"))
    model = h2o.random_forest(y=iris[4], x=iris[0:4], ntrees=50, nfolds=5)
    model.show()
    # Specifying both nfolds >= 2 and a validation frame at once is legal, so
    # this call must succeed.  (Fixes: removed the no-op `assert True` and
    # corrected the failure message, which fires when an error DID occur.)
    try:
        h2o.random_forest(y=iris[4], x=iris[0:4], validation_y=iris[4],
                          validation_x=iris[0:4], ntrees=50, nfolds=5)
    except EnvironmentError:
        assert False, "did not expect an error"
if __name__ == "__main__":
    # h2o.run_test establishes the cluster connection and invokes the test.
    h2o.run_test(sys.argv, iris_nfolds)
| weaver-viii/h2o-3 | h2o-py/tests/testdir_algos/rf/pyunit_iris_nfoldsRF.py | Python | apache-2.0 | 616 |
#!/usr/bin/env python
from grr.lib import access_control
from grr.lib import aff4
from grr.lib import rdfvalue
from grr.lib import test_lib
from grr.lib.aff4_objects import user_managers
class GRRUserTest(test_lib.AFF4ObjectTest):
  """Tests for the GRRUser AFF4 object."""

  def testUserPasswords(self):
    """A stored password verifies only against the string it was set with."""
    with aff4.FACTORY.Create("aff4:/users/test", "GRRUser",
                             token=self.token) as user:
      user.SetPassword("hello")
    user = aff4.FACTORY.Open(user.urn, token=self.token)
    self.assertFalse(user.CheckPassword("goodbye"))
    self.assertTrue(user.CheckPassword("hello"))

  def testLabels(self):
    """Labels set on a user are readable back after reopening the object."""
    with aff4.FACTORY.Create("aff4:/users/test", "GRRUser",
                             token=self.token) as user:
      user.SetLabels("hello", "world", owner="GRR")
    user = aff4.FACTORY.Open(user.urn, token=self.token)
    self.assertListEqual(["hello", "world"], user.GetLabelsNames())
class CheckAccessHelperTest(test_lib.AFF4ObjectTest):
  """Tests for CheckAccessHelper and the full access control manager."""

  def setUp(self):
    super(CheckAccessHelperTest, self).setUp()
    self.helper = user_managers.CheckAccessHelper("test")
    self.subject = rdfvalue.RDFURN("aff4:/some/path")

  def testReturnsFalseByDefault(self):
    # With no Allow() rules registered, every check must be denied.
    self.assertRaises(access_control.UnauthorizedAccess,
                      self.helper.CheckAccess, self.subject, self.token)

  def testReturnsFalseOnFailedMatch(self):
    self.helper.Allow("aff4:/some/otherpath")
    self.assertRaises(access_control.UnauthorizedAccess,
                      self.helper.CheckAccess, self.subject, self.token)

  def testReturnsTrueOnMatch(self):
    self.helper.Allow("aff4:/some/path")
    self.assertTrue(self.helper.CheckAccess(self.subject, self.token))

  def testReturnsTrueIfOneMatchFails1(self):
    # A non-matching rule before a matching one must not block access.
    self.helper.Allow("aff4:/some/otherpath")
    self.helper.Allow("aff4:/some/path")
    self.assertTrue(self.helper.CheckAccess(self.subject, self.token))

  def testReturnsTrueIfOneMatchFails2(self):
    # Rule order reversed relative to the test above.
    self.helper.Allow("aff4:/some/path")
    self.helper.Allow("aff4:/some/otherpath")
    self.assertTrue(self.helper.CheckAccess(self.subject, self.token))

  def testFnmatchFormatIsUsedByDefault1(self):
    self.helper.Allow("aff4:/some/*")
    self.assertTrue(self.helper.CheckAccess(self.subject, self.token))

  def testFnmatchFormatIsUsedByDefault2(self):
    self.helper.Allow("aff4:/some*")
    self.assertTrue(self.helper.CheckAccess(self.subject, self.token))

  def testFnmatchPatternCorrectlyMatchesFilesBelowDirectory(self):
    # "aff4:/some/*" matches children only, not the directory itself.
    self.helper.Allow("aff4:/some/*")
    self.assertTrue(self.helper.CheckAccess(self.subject, self.token))
    self.assertRaises(access_control.UnauthorizedAccess,
                      self.helper.CheckAccess,
                      rdfvalue.RDFURN("aff4:/some"), self.token)

  def testCustomCheckWorksCorrectly(self):
    def CustomCheck(unused_subject, unused_token):
      return True

    self.helper.Allow("aff4:/some/path", CustomCheck)
    self.assertTrue(self.helper.CheckAccess(self.subject, self.token))

  def testCustomCheckFailsCorrectly(self):
    def CustomCheck(unused_subject, unused_token):
      raise access_control.UnauthorizedAccess("Problem")

    self.helper.Allow("aff4:/some/path", CustomCheck)
    self.assertRaises(access_control.UnauthorizedAccess,
                      self.helper.CheckAccess, self.subject, self.token)

  def testCustomCheckAcceptsAdditionalArguments(self):
    # Extra positional arguments passed to Allow() are forwarded to the
    # custom check function.
    def CustomCheck(subject, unused_token, another_subject):
      if subject == another_subject:
        return True
      else:
        raise access_control.UnauthorizedAccess("Problem")

    self.helper.Allow("aff4:/*", CustomCheck, self.subject)
    self.assertRaises(access_control.UnauthorizedAccess,
                      self.helper.CheckAccess,
                      rdfvalue.RDFURN("aff4:/some/other/path"),
                      self.token)
    self.assertTrue(self.helper.CheckAccess(self.subject, self.token))

  def Ok(self, subject, access="r"):
    # Helper: assert that access is granted.
    self.assertTrue(
        self.access_manager.CheckDataStoreAccess(self.token, [subject], access))

  def NotOk(self, subject, access="r"):
    # Helper: assert that access is denied.
    self.assertRaises(
        access_control.UnauthorizedAccess,
        self.access_manager.CheckDataStoreAccess,
        self.token, [subject], access)

  def testReadSomePaths(self):
    """Tests some real world paths."""
    self.access_manager = user_managers.FullAccessControlManager()
    access = "r"
    self.Ok("aff4:/", access)
    self.Ok("aff4:/users", access)
    self.NotOk("aff4:/users/randomuser", access)
    self.Ok("aff4:/blobs", access)
    self.Ok("aff4:/blobs/12345678", access)
    self.Ok("aff4:/FP", access)
    self.Ok("aff4:/FP/12345678", access)
    self.Ok("aff4:/files", access)
    self.Ok("aff4:/files/12345678", access)
    self.Ok("aff4:/ACL", access)
    self.Ok("aff4:/ACL/randomuser", access)
    self.Ok("aff4:/stats", access)
    self.Ok("aff4:/stats/FileStoreStats", access)
    self.Ok("aff4:/config", access)
    self.Ok("aff4:/config/drivers", access)
    self.Ok("aff4:/config/drivers/windows/memory/winpmem.amd64.sys", access)
    self.Ok("aff4:/flows", access)
    self.Ok("aff4:/flows/W:12345678", access)
    self.Ok("aff4:/hunts", access)
    self.Ok("aff4:/hunts/W:12345678/C.1234567890123456", access)
    self.Ok("aff4:/hunts/W:12345678/C.1234567890123456/W:AAAAAAAA", access)
    self.Ok("aff4:/cron", access)
    self.Ok("aff4:/cron/OSBreakDown", access)
    self.Ok("aff4:/crashes", access)
    self.Ok("aff4:/crashes/Stream", access)
    self.Ok("aff4:/audit", access)
    self.Ok("aff4:/audit/log", access)
    self.Ok("aff4:/C.0000000000000001", access)
    self.NotOk("aff4:/C.0000000000000001/fs/os", access)
    self.NotOk("aff4:/C.0000000000000001/flows/W:12345678", access)
    self.Ok("aff4:/tmp", access)
    self.Ok("aff4:/tmp/C8FAFC0F", access)

  def testQuerySomePaths(self):
    """Tests some real world paths."""
    self.access_manager = user_managers.FullAccessControlManager()
    access = "rq"
    self.NotOk("aff4:/", access)
    self.NotOk("aff4:/users", access)
    self.NotOk("aff4:/users/randomuser", access)
    self.NotOk("aff4:/blobs", access)
    self.NotOk("aff4:/FP", access)
    self.NotOk("aff4:/files", access)
    self.Ok("aff4:/files/hash/generic/sha256/" + "a" * 64, access)
    self.Ok("aff4:/ACL", access)
    self.Ok("aff4:/ACL/randomuser", access)
    self.NotOk("aff4:/stats", access)
    self.Ok("aff4:/config", access)
    self.Ok("aff4:/config/drivers", access)
    self.Ok("aff4:/config/drivers/windows/memory/winpmem.amd64.sys", access)
    self.NotOk("aff4:/flows", access)
    self.Ok("aff4:/flows/W:12345678", access)
    self.Ok("aff4:/hunts", access)
    self.Ok("aff4:/hunts/W:12345678/C.1234567890123456", access)
    self.Ok("aff4:/hunts/W:12345678/C.1234567890123456/W:AAAAAAAA", access)
    self.Ok("aff4:/cron", access)
    self.Ok("aff4:/cron/OSBreakDown", access)
    self.NotOk("aff4:/crashes", access)
    self.NotOk("aff4:/audit", access)
    self.Ok("aff4:/C.0000000000000001", access)
    self.NotOk("aff4:/C.0000000000000001/fs/os", access)
    self.NotOk("aff4:/C.0000000000000001/flows", access)
    self.NotOk("aff4:/tmp", access)
| ForensicTools/GRREAT-475_2141-Chaigon-Failey-Siebert | lib/aff4_objects/user_managers_test.py | Python | apache-2.0 | 7,156 |
"""Add same_tool and tool_id
Revision ID: 16f121110a0f
Revises: 2c58f1b857f1
Create Date: 2015-11-09 12:28:43.019410
"""
# revision identifiers, used by Alembic.
revision = '16f121110a0f'
down_revision = '2c58f1b857f1'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Apply: add the ``same_tool`` and ``tool_id`` columns and their indexes."""
    table = 'ActiveTranslationMessages'
    op.add_column(table, sa.Column('same_tool', sa.Boolean(), nullable=True))
    op.add_column(table, sa.Column('tool_id', sa.Unicode(length=255), nullable=True))
    op.create_index(u'ix_ActiveTranslationMessages_same_tool', table, ['same_tool'], unique=False)
    op.create_index(u'ix_ActiveTranslationMessages_tool_id', table, ['tool_id'], unique=False)
def downgrade():
    """Revert: drop the two indexes, then the two columns."""
    table = 'ActiveTranslationMessages'
    op.drop_index(u'ix_ActiveTranslationMessages_tool_id', table_name=table)
    op.drop_index(u'ix_ActiveTranslationMessages_same_tool', table_name=table)
    op.drop_column(table, 'tool_id')
    op.drop_column(table, 'same_tool')
| go-lab/appcomposer | alembic/versions/16f121110a0f_add_same_tool_and_tool_id.py | Python | bsd-2-clause | 1,266 |
# -*- coding: utf-8 -*-
from itertools import product
from itertools import permutations
from numba import njit
from numba.core import types, utils
import unittest
from numba.tests.support import (TestCase, no_pyobj_flags, MemoryLeakMixin)
from numba.core.errors import TypingError, UnsupportedError
from numba.cpython.unicode import _MAX_UNICODE
from numba.core.types.functions import _header_lead
from numba.extending import overload
# True when running on Python 3.7 or newer; used to gate version-specific tests.
_py37_or_later = utils.PYVERSION >= (3, 7)
def isascii(s):
    """Return True if every character of *s* is a 7-bit ASCII code point."""
    for ch in s:
        if ord(ch) >= 128:
            return False
    return True
# ---------------------------------------------------------------------------
# Use-case functions: each wraps a single str operation so the tests below
# can jit-compile it and compare the result against the CPython behaviour.
# ---------------------------------------------------------------------------
def literal_usecase():
    return '大处着眼,小处着手。'


def passthrough_usecase(x):
    return x


def eq_usecase(x, y):
    return x == y


def len_usecase(x):
    return len(x)


def bool_usecase(x):
    return bool(x)


def getitem_usecase(x, i):
    return x[i]


def getitem_check_kind_usecase(x, i):
    return hash(x[i])


def zfill_usecase(x, y):
    return x.zfill(y)


def concat_usecase(x, y):
    return x + y


def repeat_usecase(x, y):
    return x * y


def inplace_concat_usecase(x, y):
    x += y
    return x


def in_usecase(x, y):
    return x in y


def lt_usecase(x, y):
    return x < y


def le_usecase(x, y):
    return x <= y


def gt_usecase(x, y):
    return x > y


def ge_usecase(x, y):
    return x >= y


def partition_usecase(s, sep):
    return s.partition(sep)


def find_usecase(x, y):
    return x.find(y)


def find_with_start_only_usecase(x, y, start):
    return x.find(y, start)


def find_with_start_end_usecase(x, y, start, end):
    return x.find(y, start, end)


def rpartition_usecase(s, sep):
    return s.rpartition(sep)


def count_usecase(x, y):
    return x.count(y)


def count_with_start_usecase(x, y, start):
    return x.count(y, start)


def count_with_start_end_usecase(x, y, start, end):
    return x.count(y, start, end)


def rfind_usecase(x, y):
    return x.rfind(y)


def rfind_with_start_only_usecase(x, y, start):
    return x.rfind(y, start)


def rfind_with_start_end_usecase(x, y, start, end):
    return x.rfind(y, start, end)


def replace_usecase(s, x, y):
    return s.replace(x, y)


def replace_with_count_usecase(s, x, y, count):
    return s.replace(x, y, count)


def rindex_usecase(x, y):
    return x.rindex(y)


def rindex_with_start_only_usecase(x, y, start):
    return x.rindex(y, start)


def rindex_with_start_end_usecase(x, y, start, end):
    return x.rindex(y, start, end)


def index_usecase(x, y):
    return x.index(y)


def index_with_start_only_usecase(x, y, start):
    return x.index(y, start)


def index_with_start_end_usecase(x, y, start, end):
    return x.index(y, start, end)


def startswith_usecase(x, y):
    return x.startswith(y)


def endswith_usecase(x, y):
    return x.endswith(y)


def expandtabs_usecase(s):
    return s.expandtabs()


def expandtabs_with_tabsize_usecase(s, tabsize):
    return s.expandtabs(tabsize)


def expandtabs_with_tabsize_kwarg_usecase(s, tabsize):
    return s.expandtabs(tabsize=tabsize)


def endswith_with_start_only_usecase(x, y, start):
    return x.endswith(y, start)


def endswith_with_start_end_usecase(x, y, start, end):
    return x.endswith(y, start, end)


def split_usecase(x, y):
    return x.split(y)


def split_with_maxsplit_usecase(x, y, maxsplit):
    return x.split(y, maxsplit)


def split_with_maxsplit_kwarg_usecase(x, y, maxsplit):
    return x.split(y, maxsplit=maxsplit)


def split_whitespace_usecase(x):
    return x.split()


def splitlines_usecase(s):
    return s.splitlines()


def splitlines_with_keepends_usecase(s, keepends):
    return s.splitlines(keepends)


def splitlines_with_keepends_kwarg_usecase(s, keepends):
    return s.splitlines(keepends=keepends)


def rsplit_usecase(s, sep):
    return s.rsplit(sep)


def rsplit_with_maxsplit_usecase(s, sep, maxsplit):
    return s.rsplit(sep, maxsplit)


def rsplit_with_maxsplit_kwarg_usecase(s, sep, maxsplit):
    return s.rsplit(sep, maxsplit=maxsplit)


def rsplit_whitespace_usecase(s):
    return s.rsplit()


def lstrip_usecase(x):
    return x.lstrip()


def lstrip_usecase_chars(x, chars):
    return x.lstrip(chars)


def rstrip_usecase(x):
    return x.rstrip()


def rstrip_usecase_chars(x, chars):
    return x.rstrip(chars)


def strip_usecase(x):
    return x.strip()


def strip_usecase_chars(x, chars):
    return x.strip(chars)


def join_usecase(x, y):
    return x.join(y)


def join_empty_usecase(x):
    # hack to make empty typed list
    l = ['']
    l.pop()
    return x.join(l)


def center_usecase(x, y):
    return x.center(y)


def center_usecase_fillchar(x, y, fillchar):
    return x.center(y, fillchar)


def ljust_usecase(x, y):
    return x.ljust(y)


def ljust_usecase_fillchar(x, y, fillchar):
    return x.ljust(y, fillchar)


def rjust_usecase(x, y):
    return x.rjust(y)


def rjust_usecase_fillchar(x, y, fillchar):
    return x.rjust(y, fillchar)


def istitle_usecase(x):
    return x.istitle()
def iter_usecase(x):
    """Iterate over *x* and return its items collected into a list."""
    collected = []
    for item in x:
        collected.append(item)
    return collected
def title(x):
    return x.title()


def literal_iter_usecase():
    # Iterate over a unicode literal and collect its characters.
    l = []
    for i in '大处着眼,小处着手。':
        l.append(i)
    return l
def enumerated_iter_usecase(x):
    """Concatenate the items of *x* and count them using enumerate()."""
    accumulated = ""
    total = 0
    for index, item in enumerate(x):
        accumulated += item
        total += 1
    return accumulated, total
def iter_stopiteration_usecase(x):
    # Advance the iterator one step past the end to trigger StopIteration.
    n = len(x)
    i = iter(x)
    for _ in range(n + 1):
        next(i)


def literal_iter_stopiteration_usecase():
    # Same as above, but iterating over a string literal.
    s = '大处着眼,小处着手。'
    i = iter(s)
    n = len(s)
    for _ in range(n + 1):
        next(i)


def islower_usecase(x):
    return x.islower()


def lower_usecase(x):
    return x.lower()


def ord_usecase(x):
    return ord(x)


def chr_usecase(x):
    return chr(x)
class BaseTest(MemoryLeakMixin, TestCase):
    """Common base for the unicode tests; adds per-test memory leak checks."""

    def setUp(self):
        super(BaseTest, self).setUp()
# Representative unicode inputs: empty, ASCII, digits, Latin-1 accents,
# astral-plane emoji and CJK text.
UNICODE_EXAMPLES = [
    '',
    'ascii',
    '12345',
    '1234567890',
    '¡Y tú quién te crees?',
    '🐍⚡',
    '大处着眼,小处着手。',
]
# Strings for exercising the ordering operators (<, <=, >, >=), kept roughly
# in increasing lexicographic order.
UNICODE_ORDERING_EXAMPLES = [
    '',
    # BUG FIX: a comma was missing after 'a', so implicit string literal
    # concatenation merged 'a' and 'aa' into a single duplicate 'aaa' entry
    # and the short strings were never exercised.
    'a',
    'aa',
    'aaa',
    'b',
    'aab',
    'ab',
    'asc',
    'ascih',
    'ascii',
    'ascij',
    '大处着眼,小处着手',
    '大处着眼,小处着手。',
    '大处着眼,小处着手。🐍⚡',
]
# (string, substring) pairs exercising str.count: empty operands, multi-byte
# code points, and overlapping candidate matches.
UNICODE_COUNT_EXAMPLES = [
    ('', ''),
    ('', 'ascii'),
    ('ascii', ''),
    ('asc ii', ' '),
    ('ascii', 'ci'),
    ('ascii', 'ascii'),
    ('ascii', 'Ă'),
    ('ascii', '大处'),
    ('ascii', 'étú?'),
    ('', '大处 着眼,小处着手。大大大处'),
    ('大处 着眼,小处着手。大大大处', ''),
    ('大处 着眼,小处着手。大大大处', ' '),
    ('大处 着眼,小处着手。大大大处', 'ci'),
    ('大处 着眼,小处着手。大大大处', '大处大处'),
    ('大处 着眼,小处着手。大大大处', '大处 着眼,小处着手。大大大处'),
    ('大处 着眼,小处着手。大大大处', 'Ă'),
    ('大处 着眼,小处着手。大大大处', '大处'),
    ('大处 着眼,小处着手。大大大处', 'étú?'),
    ('', 'tú quién te crees?'),
    ('tú quién te crees?', ''),
    ('tú quién te crees?', ' '),
    ('tú quién te crees?', 'ci'),
    ('tú quién te crees?', 'tú quién te crees?'),
    ('tú quién te crees?', 'Ă'),
    ('tú quién te crees?', '大处'),
    ('tú quién te crees?', 'étú?'),
    ('abababab', 'a'),
    ('abababab', 'ab'),
    ('abababab', 'aba'),
    ('aaaaaaaaaa', 'aaa'),
    ('aaaaaaaaaa', 'aĂ'),
    ('aabbaaaabbaa', 'aa')
]
class TestUnicode(BaseTest):
def test_literal(self, flags=no_pyobj_flags):
pyfunc = literal_usecase
self.run_nullary_func(pyfunc, flags=flags)
def test_passthrough(self, flags=no_pyobj_flags):
pyfunc = passthrough_usecase
cfunc = njit(pyfunc)
for s in UNICODE_EXAMPLES:
self.assertEqual(pyfunc(s), cfunc(s))
def test_eq(self, flags=no_pyobj_flags):
pyfunc = eq_usecase
cfunc = njit(pyfunc)
for a in UNICODE_EXAMPLES:
for b in reversed(UNICODE_EXAMPLES):
self.assertEqual(pyfunc(a, b),
cfunc(a, b), '%s, %s' % (a, b))
# comparing against something that's not unicode
self.assertEqual(pyfunc(a, 1),
cfunc(a, 1), '%s, %s' % (a, 1))
self.assertEqual(pyfunc(1, b),
cfunc(1, b), '%s, %s' % (1, b))
def _check_ordering_op(self, usecase):
pyfunc = usecase
cfunc = njit(pyfunc)
# Check comparison to self
for a in UNICODE_ORDERING_EXAMPLES:
self.assertEqual(
pyfunc(a, a),
cfunc(a, a),
'%s: "%s", "%s"' % (usecase.__name__, a, a),
)
# Check comparison to adjacent
for a, b in permutations(UNICODE_ORDERING_EXAMPLES, r=2):
self.assertEqual(
pyfunc(a, b),
cfunc(a, b),
'%s: "%s", "%s"' % (usecase.__name__, a, b),
)
# and reversed
self.assertEqual(
pyfunc(b, a),
cfunc(b, a),
'%s: "%s", "%s"' % (usecase.__name__, b, a),
)
def test_lt(self, flags=no_pyobj_flags):
self._check_ordering_op(lt_usecase)
def test_le(self, flags=no_pyobj_flags):
self._check_ordering_op(le_usecase)
def test_gt(self, flags=no_pyobj_flags):
self._check_ordering_op(gt_usecase)
def test_ge(self, flags=no_pyobj_flags):
self._check_ordering_op(ge_usecase)
def test_len(self, flags=no_pyobj_flags):
pyfunc = len_usecase
cfunc = njit(pyfunc)
for s in UNICODE_EXAMPLES:
self.assertEqual(pyfunc(s), cfunc(s))
def test_bool(self, flags=no_pyobj_flags):
pyfunc = bool_usecase
cfunc = njit(pyfunc)
for s in UNICODE_EXAMPLES:
self.assertEqual(pyfunc(s), cfunc(s))
def test_expandtabs(self):
pyfunc = expandtabs_usecase
cfunc = njit(pyfunc)
cases = ['', '\t', 't\tt\t', 'a\t', '\t⚡', 'a\tbc\nab\tc',
'🐍\t⚡', '🐍⚡\n\t\t🐍\t', 'ab\rab\t\t\tab\r\n\ta']
msg = 'Results of "{}".expandtabs() must be equal'
for s in cases:
self.assertEqual(pyfunc(s), cfunc(s), msg=msg.format(s))
def test_expandtabs_with_tabsize(self):
fns = [njit(expandtabs_with_tabsize_usecase),
njit(expandtabs_with_tabsize_kwarg_usecase)]
messages = ['Results of "{}".expandtabs({}) must be equal',
'Results of "{}".expandtabs(tabsize={}) must be equal']
cases = ['', '\t', 't\tt\t', 'a\t', '\t⚡', 'a\tbc\nab\tc',
'🐍\t⚡', '🐍⚡\n\t\t🐍\t', 'ab\rab\t\t\tab\r\n\ta']
for s in cases:
for tabsize in range(-1, 10):
for fn, msg in zip(fns, messages):
self.assertEqual(fn.py_func(s, tabsize), fn(s, tabsize),
msg=msg.format(s, tabsize))
def test_expandtabs_exception_noninteger_tabsize(self):
pyfunc = expandtabs_with_tabsize_usecase
cfunc = njit(pyfunc)
accepted_types = (types.Integer, int)
with self.assertRaises(TypingError) as raises:
cfunc('\t', 2.4)
msg = '"tabsize" must be {}, not float'.format(accepted_types)
self.assertIn(msg, str(raises.exception))
def test_startswith(self, flags=no_pyobj_flags):
pyfunc = startswith_usecase
cfunc = njit(pyfunc)
for a in UNICODE_EXAMPLES:
for b in ['', 'x', a[:-2], a[3:], a, a + a]:
self.assertEqual(pyfunc(a, b),
cfunc(a, b),
'%s, %s' % (a, b))
def test_endswith(self, flags=no_pyobj_flags):
pyfunc = endswith_usecase
cfunc = njit(pyfunc)
for a in UNICODE_EXAMPLES:
for b in ['', 'x', a[:-2], a[3:], a, a + a]:
self.assertEqual(pyfunc(a, b),
cfunc(a, b),
'%s, %s' % (a, b))
def test_endswith_default(self):
pyfunc = endswith_usecase
cfunc = njit(pyfunc)
# Samples taken from CPython testing:
# https://github.com/python/cpython/blob/865c3b257fe38154a4320c7ee6afb416f665b9c2/Lib/test/string_tests.py#L1049-L1099 # noqa: E501
cpython_str = ['hello', 'helloworld', '']
cpython_subs = [
'he', 'hello', 'helloworld', 'ello',
'', 'lowo', 'lo', 'he', 'lo', 'o',
]
extra_subs = ['hellohellohello', ' ']
for s in cpython_str + UNICODE_EXAMPLES:
default_subs = ['', 'x', s[:-2], s[3:], s, s + s]
for sub_str in cpython_subs + default_subs + extra_subs:
msg = 'Results "{}".endswith("{}") must be equal'
self.assertEqual(pyfunc(s, sub_str), cfunc(s, sub_str),
msg=msg.format(s, sub_str))
def test_endswith_with_start(self):
pyfunc = endswith_with_start_only_usecase
cfunc = njit(pyfunc)
# Samples taken from CPython testing:
# https://github.com/python/cpython/blob/865c3b257fe38154a4320c7ee6afb416f665b9c2/Lib/test/string_tests.py#L1049-L1099 # noqa: E501
cpython_str = ['hello', 'helloworld', '']
cpython_subs = [
'he', 'hello', 'helloworld', 'ello',
'', 'lowo', 'lo', 'he', 'lo', 'o',
]
extra_subs = ['hellohellohello', ' ']
for s in cpython_str + UNICODE_EXAMPLES:
default_subs = ['', 'x', s[:-2], s[3:], s, s + s]
for sub_str in cpython_subs + default_subs + extra_subs:
for start in list(range(-20, 20)) + [None]:
msg = 'Results "{}".endswith("{}", {}) must be equal'
self.assertEqual(pyfunc(s, sub_str, start),
cfunc(s, sub_str, start),
msg=msg.format(s, sub_str, start))
def test_endswith_with_start_end(self):
pyfunc = endswith_with_start_end_usecase
cfunc = njit(pyfunc)
# Samples taken from CPython testing:
# https://github.com/python/cpython/blob/865c3b257fe38154a4320c7ee6afb416f665b9c2/Lib/test/string_tests.py#LL1049-L1099 # noqa: E501
cpython_str = ['hello', 'helloworld', '']
cpython_subs = [
'he', 'hello', 'helloworld', 'ello',
'', 'lowo', 'lo', 'he', 'lo', 'o',
]
extra_subs = ['hellohellohello', ' ']
for s in cpython_str + UNICODE_EXAMPLES:
default_subs = ['', 'x', s[:-2], s[3:], s, s + s]
for sub_str in cpython_subs + default_subs + extra_subs:
for start in list(range(-20, 20)) + [None]:
for end in list(range(-20, 20)) + [None]:
msg = 'Results "{}".endswith("{}", {}, {})\
must be equal'
self.assertEqual(pyfunc(s, sub_str, start, end),
cfunc(s, sub_str, start, end),
msg=msg.format(s, sub_str, start, end))
def test_endswith_tuple(self):
pyfunc = endswith_usecase
cfunc = njit(pyfunc)
# Samples taken from CPython testing:
# https://github.com/python/cpython/blob/865c3b257fe38154a4320c7ee6afb416f665b9c2/Lib/test/string_tests.py#L1049-L1099 # noqa: E501
cpython_str = ['hello', 'helloworld', '']
cpython_subs = [
'he', 'hello', 'helloworld', 'ello',
'', 'lowo', 'lo', 'he', 'lo', 'o',
]
extra_subs = ['hellohellohello', ' ']
for s in cpython_str + UNICODE_EXAMPLES:
default_subs = ['', 'x', s[:-2], s[3:], s, s + s]
for sub_str in cpython_subs + default_subs + extra_subs:
msg = 'Results "{}".endswith({}) must be equal'
tuple_subs = (sub_str, 'lo')
self.assertEqual(pyfunc(s, tuple_subs),
cfunc(s, tuple_subs),
msg=msg.format(s, tuple_subs))
def test_endswith_tuple_args(self):
pyfunc = endswith_with_start_end_usecase
cfunc = njit(pyfunc)
# Samples taken from CPython testing:
# https://github.com/python/cpython/blob/865c3b257fe38154a4320c7ee6afb416f665b9c2/Lib/test/string_tests.py#L1049-L1099 # noqa: E501
cpython_str = ['hello', 'helloworld', '']
cpython_subs = [
'he', 'hello', 'helloworld', 'ello',
'', 'lowo', 'lo', 'he', 'lo', 'o',
]
extra_subs = ['hellohellohello', ' ']
for s in cpython_str + UNICODE_EXAMPLES:
default_subs = ['', 'x', s[:-2], s[3:], s, s + s]
for sub_str in cpython_subs + default_subs + extra_subs:
for start in list(range(-20, 20)) + [None]:
for end in list(range(-20, 20)) + [None]:
msg = 'Results "{}".endswith("{}", {}, {})\
must be equal'
tuple_subs = (sub_str, 'lo')
self.assertEqual(pyfunc(s, tuple_subs, start, end),
cfunc(s, tuple_subs, start, end),
msg=msg.format(s, tuple_subs,
start, end))
def test_in(self, flags=no_pyobj_flags):
pyfunc = in_usecase
cfunc = njit(pyfunc)
for a in UNICODE_EXAMPLES:
extras = ['', 'xx', a[::-1], a[:-2], a[3:], a, a + a]
for substr in extras:
self.assertEqual(pyfunc(substr, a),
cfunc(substr, a),
"'%s' in '%s'?" % (substr, a))
def test_partition_exception_invalid_sep(self):
self.disable_leak_check()
pyfunc = partition_usecase
cfunc = njit(pyfunc)
# Handle empty separator exception
for func in [pyfunc, cfunc]:
with self.assertRaises(ValueError) as raises:
func('a', '')
self.assertIn('empty separator', str(raises.exception))
accepted_types = (types.UnicodeType, types.UnicodeCharSeq)
with self.assertRaises(TypingError) as raises:
cfunc('a', None)
msg = '"sep" must be {}, not none'.format(accepted_types)
self.assertIn(msg, str(raises.exception))
def test_partition(self):
pyfunc = partition_usecase
cfunc = njit(pyfunc)
CASES = [
('', '⚡'),
('abcabc', '⚡'),
('🐍⚡', '⚡'),
('🐍⚡🐍', '⚡'),
('abababa', 'a'),
('abababa', 'b'),
('abababa', 'c'),
('abababa', 'ab'),
('abababa', 'aba'),
]
msg = 'Results of "{}".partition("{}") must be equal'
for s, sep in CASES:
self.assertEqual(pyfunc(s, sep), cfunc(s, sep),
msg=msg.format(s, sep))
def test_find(self, flags=no_pyobj_flags):
pyfunc = find_usecase
cfunc = njit(pyfunc)
default_subs = [
(s, ['', 'xx', s[:-2], s[3:], s]) for s in UNICODE_EXAMPLES
]
# Samples taken from CPython testing:
# https://github.com/python/cpython/blob/201c8f79450628241574fba940e08107178dc3a5/Lib/test/test_unicode.py#L202-L231 # noqa: E501
cpython_subs = [
('a' * 100 + '\u0102', ['\u0102', '\u0201', '\u0120', '\u0220']),
('a' * 100 + '\U00100304', ['\U00100304', '\U00100204',
'\U00102004']),
('\u0102' * 100 + 'a', ['a']),
('\U00100304' * 100 + 'a', ['a']),
('\U00100304' * 100 + '\u0102', ['\u0102']),
('a' * 100, ['\u0102', '\U00100304', 'a\u0102', 'a\U00100304']),
('\u0102' * 100, ['\U00100304', '\u0102\U00100304']),
('\u0102' * 100 + 'a_', ['a_']),
('\U00100304' * 100 + 'a_', ['a_']),
('\U00100304' * 100 + '\u0102_', ['\u0102_']),
]
for s, subs in default_subs + cpython_subs:
for sub_str in subs:
msg = 'Results "{}".find("{}") must be equal'
self.assertEqual(pyfunc(s, sub_str), cfunc(s, sub_str),
msg=msg.format(s, sub_str))
def test_find_with_start_only(self):
pyfunc = find_with_start_only_usecase
cfunc = njit(pyfunc)
for s in UNICODE_EXAMPLES:
for sub_str in ['', 'xx', s[:-2], s[3:], s]:
for start in list(range(-20, 20)) + [None]:
msg = 'Results "{}".find("{}", {}) must be equal'
self.assertEqual(pyfunc(s, sub_str, start),
cfunc(s, sub_str, start),
msg=msg.format(s, sub_str, start))
def test_find_with_start_end(self):
pyfunc = find_with_start_end_usecase
cfunc = njit(pyfunc)
starts = ends = list(range(-20, 20)) + [None]
for s in UNICODE_EXAMPLES:
for sub_str in ['', 'xx', s[:-2], s[3:], s]:
for start, end in product(starts, ends):
msg = 'Results of "{}".find("{}", {}, {}) must be equal'
self.assertEqual(pyfunc(s, sub_str, start, end),
cfunc(s, sub_str, start, end),
msg=msg.format(s, sub_str, start, end))
def test_find_exception_noninteger_start_end(self):
pyfunc = find_with_start_end_usecase
cfunc = njit(pyfunc)
accepted = (types.Integer, types.NoneType)
for start, end, name in [(0.1, 5, 'start'), (0, 0.5, 'end')]:
with self.assertRaises(TypingError) as raises:
cfunc('ascii', 'sci', start, end)
msg = '"{}" must be {}, not float'.format(name, accepted)
self.assertIn(msg, str(raises.exception))
def test_rpartition_exception_invalid_sep(self):
self.disable_leak_check()
pyfunc = rpartition_usecase
cfunc = njit(pyfunc)
# Handle empty separator exception
for func in [pyfunc, cfunc]:
with self.assertRaises(ValueError) as raises:
func('a', '')
self.assertIn('empty separator', str(raises.exception))
accepted_types = (types.UnicodeType, types.UnicodeCharSeq)
with self.assertRaises(TypingError) as raises:
cfunc('a', None)
msg = '"sep" must be {}, not none'.format(accepted_types)
self.assertIn(msg, str(raises.exception))
def test_rpartition(self):
pyfunc = rpartition_usecase
cfunc = njit(pyfunc)
CASES = [
('', '⚡'),
('abcabc', '⚡'),
('🐍⚡', '⚡'),
('🐍⚡🐍', '⚡'),
('abababa', 'a'),
('abababa', 'b'),
('abababa', 'c'),
('abababa', 'ab'),
('abababa', 'aba'),
]
msg = 'Results of "{}".rpartition("{}") must be equal'
for s, sep in CASES:
self.assertEqual(pyfunc(s, sep), cfunc(s, sep),
msg=msg.format(s, sep))
def test_count(self):
pyfunc = count_usecase
cfunc = njit(pyfunc)
error_msg = "'{0}'.py_count('{1}') = {2}\n'{0}'.c_count('{1}') = {3}"
for s, sub in UNICODE_COUNT_EXAMPLES:
py_result = pyfunc(s, sub)
c_result = cfunc(s, sub)
self.assertEqual(py_result, c_result,
error_msg.format(s, sub, py_result, c_result))
def test_count_with_start(self):
pyfunc = count_with_start_usecase
cfunc = njit(pyfunc)
error_msg = "%s\n%s" % ("'{0}'.py_count('{1}', {2}) = {3}",
"'{0}'.c_count('{1}', {2}) = {4}")
for s, sub in UNICODE_COUNT_EXAMPLES:
for i in range(-18, 18):
py_result = pyfunc(s, sub, i)
c_result = cfunc(s, sub, i)
self.assertEqual(py_result, c_result,
error_msg.format(s, sub, i, py_result,
c_result))
py_result = pyfunc(s, sub, None)
c_result = cfunc(s, sub, None)
self.assertEqual(py_result, c_result,
error_msg.format(s, sub, None, py_result,
c_result))
def test_count_with_start_end(self):
pyfunc = count_with_start_end_usecase
cfunc = njit(pyfunc)
error_msg = "%s\n%s" % ("'{0}'.py_count('{1}', {2}, {3}) = {4}",
"'{0}'.c_count('{1}', {2}, {3}) = {5}")
for s, sub in UNICODE_COUNT_EXAMPLES:
for i, j in product(range(-18, 18), (-18, 18)):
py_result = pyfunc(s, sub, i, j)
c_result = cfunc(s, sub, i, j)
self.assertEqual(py_result, c_result,
error_msg.format(s, sub, i, j, py_result,
c_result))
for j in range(-18, 18):
py_result = pyfunc(s, sub, None, j)
c_result = cfunc(s, sub, None, j)
self.assertEqual(py_result, c_result,
error_msg.format(s, sub, None, j, py_result,
c_result))
py_result = pyfunc(s, sub, None, None)
c_result = cfunc(s, sub, None, None)
self.assertEqual(py_result, c_result,
error_msg.format(s, sub, None, None, py_result,
c_result))
def test_count_arg_type_check(self):
cfunc = njit(count_with_start_end_usecase)
with self.assertRaises(TypingError) as raises:
cfunc('ascii', 'c', 1, 0.5)
self.assertIn('The slice indices must be an Integer or None',
str(raises.exception))
with self.assertRaises(TypingError) as raises:
cfunc('ascii', 'c', 1.2, 7)
self.assertIn('The slice indices must be an Integer or None',
str(raises.exception))
with self.assertRaises(TypingError) as raises:
cfunc('ascii', 12, 1, 7)
self.assertIn('The substring must be a UnicodeType, not',
str(raises.exception))
def test_count_optional_arg_type_check(self):
pyfunc = count_with_start_end_usecase
def try_compile_bad_optional(*args):
bad_sig = types.int64(types.unicode_type,
types.unicode_type,
types.Optional(types.float64),
types.Optional(types.float64))
njit([bad_sig])(pyfunc)
with self.assertRaises(TypingError) as raises:
try_compile_bad_optional('tú quis?', 'tú', 1.1, 1.1)
self.assertIn('The slice indices must be an Integer or None',
str(raises.exception))
error_msg = "%s\n%s" % ("'{0}'.py_count('{1}', {2}, {3}) = {4}",
"'{0}'.c_count_op('{1}', {2}, {3}) = {5}")
sig_optional = types.int64(types.unicode_type,
types.unicode_type,
types.Optional(types.int64),
types.Optional(types.int64))
cfunc_optional = njit([sig_optional])(pyfunc)
py_result = pyfunc('tú quis?', 'tú', 0, 8)
c_result = cfunc_optional('tú quis?', 'tú', 0, 8)
self.assertEqual(py_result, c_result,
error_msg.format('tú quis?', 'tú', 0, 8, py_result,
c_result))
    def test_rfind(self):
        """str.rfind(sub) with no start/end must match CPython, including
        mixed-kind (1/2/4-byte) needle/haystack combinations."""
        pyfunc = rfind_usecase
        cfunc = njit(pyfunc)
        default_subs = [
            (s, ['', 'xx', s[:-2], s[3:], s]) for s in UNICODE_EXAMPLES
        ]
        # Samples taken from CPython testing:
        # https://github.com/python/cpython/blob/201c8f79450628241574fba940e08107178dc3a5/Lib/test/test_unicode.py#L233-L259 # noqa: E501
        cpython_subs = [
            ('\u0102' + 'a' * 100, ['\u0102', '\u0201', '\u0120', '\u0220']),
            ('\U00100304' + 'a' * 100, ['\U00100304', '\U00100204',
                                        '\U00102004']),
            ('abcdefghiabc', ['abc', '']),
            ('a' + '\u0102' * 100, ['a']),
            ('a' + '\U00100304' * 100, ['a']),
            ('\u0102' + '\U00100304' * 100, ['\u0102']),
            ('a' * 100, ['\u0102', '\U00100304', '\u0102a', '\U00100304a']),
            ('\u0102' * 100, ['\U00100304', '\U00100304\u0102']),
            ('_a' + '\u0102' * 100, ['_a']),
            ('_a' + '\U00100304' * 100, ['_a']),
            ('_\u0102' + '\U00100304' * 100, ['_\u0102']),
        ]
        for s, subs in default_subs + cpython_subs:
            for sub_str in subs:
                msg = 'Results "{}".rfind("{}") must be equal'
                self.assertEqual(pyfunc(s, sub_str), cfunc(s, sub_str),
                                 msg=msg.format(s, sub_str))
def test_rfind_with_start_only(self):
pyfunc = rfind_with_start_only_usecase
cfunc = njit(pyfunc)
for s in UNICODE_EXAMPLES:
for sub_str in ['', 'xx', s[:-2], s[3:], s]:
for start in list(range(-20, 20)) + [None]:
msg = 'Results "{}".rfind("{}", {}) must be equal'
self.assertEqual(pyfunc(s, sub_str, start),
cfunc(s, sub_str, start),
msg=msg.format(s, sub_str, start))
def test_rfind_with_start_end(self):
pyfunc = rfind_with_start_end_usecase
cfunc = njit(pyfunc)
starts = list(range(-20, 20)) + [None]
ends = list(range(-20, 20)) + [None]
for s in UNICODE_EXAMPLES:
for sub_str in ['', 'xx', s[:-2], s[3:], s]:
for start, end in product(starts, ends):
msg = 'Results of "{}".rfind("{}", {}, {}) must be equal'
self.assertEqual(pyfunc(s, sub_str, start, end),
cfunc(s, sub_str, start, end),
msg=msg.format(s, sub_str, start, end))
def test_rfind_wrong_substr(self):
cfunc = njit(rfind_usecase)
for s in UNICODE_EXAMPLES:
for sub_str in [None, 1, False]:
with self.assertRaises(TypingError) as raises:
cfunc(s, sub_str)
msg = 'must be {}'.format(types.UnicodeType)
self.assertIn(msg, str(raises.exception))
    def test_rfind_wrong_start_end(self):
        """Non-integer start/end arguments to str.rfind must raise
        TypingError naming the offending parameter."""
        cfunc = njit(rfind_with_start_end_usecase)
        accepted_types = (types.Integer, types.NoneType)
        for s in UNICODE_EXAMPLES:
            for sub_str in ['', 'xx', s[:-2], s[3:], s]:
                # test wrong start
                for start, end in product([0.1, False], [-1, 1]):
                    with self.assertRaises(TypingError) as raises:
                        cfunc(s, sub_str, start, end)
                    msg = '"start" must be {}'.format(accepted_types)
                    self.assertIn(msg, str(raises.exception))
                # test wrong end
                for start, end in product([-1, 1], [-0.1, True]):
                    with self.assertRaises(TypingError) as raises:
                        cfunc(s, sub_str, start, end)
                    msg = '"end" must be {}'.format(accepted_types)
                    self.assertIn(msg, str(raises.exception))
    def test_rfind_wrong_start_end_optional(self):
        """Compiling str.rfind with Optional(float64) start or end in an
        explicit signature must raise TypingError."""
        s = UNICODE_EXAMPLES[0]
        sub_str = s[1:-1]
        accepted_types = (types.Integer, types.NoneType)
        msg = 'must be {}'.format(accepted_types)
        def try_compile_wrong_start_optional(*args):
            # Optional(float64) start is rejected at compile time.
            wrong_sig_optional = types.int64(types.unicode_type,
                                             types.unicode_type,
                                             types.Optional(types.float64),
                                             types.Optional(types.intp))
            njit([wrong_sig_optional])(rfind_with_start_end_usecase)
        with self.assertRaises(TypingError) as raises:
            try_compile_wrong_start_optional(s, sub_str, 0.1, 1)
        self.assertIn(msg, str(raises.exception))
        def try_compile_wrong_end_optional(*args):
            # Optional(float64) end is rejected at compile time.
            wrong_sig_optional = types.int64(types.unicode_type,
                                             types.unicode_type,
                                             types.Optional(types.intp),
                                             types.Optional(types.float64))
            njit([wrong_sig_optional])(rfind_with_start_end_usecase)
        with self.assertRaises(TypingError) as raises:
            try_compile_wrong_end_optional(s, sub_str, 1, 0.1)
        self.assertIn(msg, str(raises.exception))
    def test_rindex(self):
        """str.rindex(sub) must match CPython for found substrings,
        including mixed-kind needle/haystack combinations."""
        pyfunc = rindex_usecase
        cfunc = njit(pyfunc)
        default_subs = [
            (s, ['', s[:-2], s[3:], s]) for s in UNICODE_EXAMPLES
        ]
        # Samples taken from CPython testing:
        # https://github.com/python/cpython/blob/1d4b6ba19466aba0eb91c4ba01ba509acf18c723/Lib/test/test_unicode.py#L284-L308 # noqa: E501
        cpython_subs = [
            ('abcdefghiabc', ['', 'def', 'abc']),
            ('a' + '\u0102' * 100, ['a']),
            ('a' + '\U00100304' * 100, ['a']),
            ('\u0102' + '\U00100304' * 100, ['\u0102']),
            ('_a' + '\u0102' * 100, ['_a']),
            ('_a' + '\U00100304' * 100, ['_a']),
            ('_\u0102' + '\U00100304' * 100, ['_\u0102'])
        ]
        for s, subs in default_subs + cpython_subs:
            for sub_str in subs:
                msg = 'Results "{}".rindex("{}") must be equal'
                self.assertEqual(pyfunc(s, sub_str), cfunc(s, sub_str),
                                 msg=msg.format(s, sub_str))
    def test_index(self):
        """str.index(sub) must match CPython for found substrings,
        including mixed-kind needle/haystack combinations."""
        pyfunc = index_usecase
        cfunc = njit(pyfunc)
        default_subs = [
            (s, ['', s[:-2], s[3:], s]) for s in UNICODE_EXAMPLES
        ]
        # Samples taken from CPython testing:
        # https://github.com/python/cpython/blob/1d4b6ba19466aba0eb91c4ba01ba509acf18c723/Lib/test/test_unicode.py#L260-L282 # noqa: E501
        cpython_subs = [
            ('abcdefghiabc', ['', 'def', 'abc']),
            ('\u0102' * 100 + 'a', ['a']),
            ('\U00100304' * 100 + 'a', ['a']),
            ('\U00100304' * 100 + '\u0102', ['\u0102']),
            ('\u0102' * 100 + 'a_', ['a_']),
            ('\U00100304' * 100 + 'a_', ['a_']),
            ('\U00100304' * 100 + '\u0102_', ['\u0102_'])
        ]
        for s, subs in default_subs + cpython_subs:
            for sub_str in subs:
                msg = 'Results "{}".index("{}") must be equal'
                self.assertEqual(pyfunc(s, sub_str), cfunc(s, sub_str),
                                 msg=msg.format(s, sub_str))
    def test_index_rindex_with_start_only(self):
        """index()/rindex() with a start argument must match CPython for
        starts where the substring is still found (no ValueError)."""
        pyfuncs = [index_with_start_only_usecase,
                   rindex_with_start_only_usecase]
        messages = ['Results "{}".index("{}", {}) must be equal',
                    'Results "{}".rindex("{}", {}) must be equal']
        unicode_examples = [
            'ascii',
            '12345',
            '1234567890',
            '¡Y tú quién te crees?',
            '大处着眼,小处着手。',
        ]
        for pyfunc, msg in zip(pyfuncs, messages):
            cfunc = njit(pyfunc)
            for s in unicode_examples:
                l = len(s)
                # (substring, starts for which the substring is found)
                cases = [
                    ('', list(range(-10, l + 1))),
                    (s[:-2], [0] + list(range(-10, 1 - l))),
                    (s[3:], list(range(4)) + list(range(-10, 4 - l))),
                    (s, [0] + list(range(-10, 1 - l))),
                ]
                for sub_str, starts in cases:
                    for start in starts + [None]:
                        self.assertEqual(pyfunc(s, sub_str, start),
                                         cfunc(s, sub_str, start),
                                         msg=msg.format(s, sub_str, start))
    def test_index_rindex_with_start_end(self):
        """index()/rindex() with start and end must match CPython for
        bounds where the substring is still found (no ValueError)."""
        pyfuncs = [index_with_start_end_usecase, rindex_with_start_end_usecase]
        messages = ['Results of "{}".index("{}", {}, {}) must be equal',
                    'Results of "{}".rindex("{}", {}, {}) must be equal']
        unicode_examples = [
            'ascii',
            '12345',
            '1234567890',
            '¡Y tú quién te crees?',
            '大处着眼,小处着手。',
        ]
        for pyfunc, msg in zip(pyfuncs, messages):
            cfunc = njit(pyfunc)
            for s in unicode_examples:
                l = len(s)
                # (substring, valid starts, valid ends) — chosen so the
                # substring is always found within [start:end].
                cases = [
                    ('', list(range(-10, l + 1)), list(range(l, 10))),
                    (s[:-2], [0] + list(range(-10, 1 - l)),
                     [-2, -1] + list(range(l - 2, 10))),
                    (s[3:], list(range(4)) + list(range(-10, -1)),
                     list(range(l, 10))),
                    (s, [0] + list(range(-10, 1 - l)), list(range(l, 10))),
                ]
                for sub_str, starts, ends in cases:
                    for start, end in product(starts + [None], ends):
                        self.assertEqual(pyfunc(s, sub_str, start, end),
                                         cfunc(s, sub_str, start, end),
                                         msg=msg.format(s, sub_str, start, end))
    def test_index_rindex_exception_substring_not_found(self):
        """index()/rindex() must raise ValueError('substring not found')
        when the substring is absent from the [start:end] window, in both
        the interpreted and the compiled versions."""
        self.disable_leak_check()
        unicode_examples = [
            'ascii',
            '12345',
            '1234567890',
            '¡Y tú quién te crees?',
            '大处着眼,小处着手。',
        ]
        pyfuncs = [index_with_start_end_usecase, rindex_with_start_end_usecase]
        for pyfunc in pyfuncs:
            cfunc = njit(pyfunc)
            for s in unicode_examples:
                l = len(s)
                # (substring, starts, ends) — chosen so the substring is
                # never found within [start:end].
                cases = [
                    ('', list(range(l + 1, 10)), [l]),
                    (s[:-2], [0], list(range(l - 2))),
                    (s[3:], list(range(4, 10)), [l]),
                    (s, [None], list(range(l))),
                ]
                for sub_str, starts, ends in cases:
                    for start, end in product(starts, ends):
                        for func in [pyfunc, cfunc]:
                            with self.assertRaises(ValueError) as raises:
                                func(s, sub_str, start, end)
                            msg = 'substring not found'
                            self.assertIn(msg, str(raises.exception))
def test_index_rindex_exception_noninteger_start_end(self):
accepted = (types.Integer, types.NoneType)
pyfuncs = [index_with_start_end_usecase, rindex_with_start_end_usecase]
for pyfunc in pyfuncs:
cfunc = njit(pyfunc)
for start, end, name in [(0.1, 5, 'start'), (0, 0.5, 'end')]:
with self.assertRaises(TypingError) as raises:
cfunc('ascii', 'sci', start, end)
msg = '"{}" must be {}, not float'.format(name, accepted)
self.assertIn(msg, str(raises.exception))
def test_getitem(self):
pyfunc = getitem_usecase
cfunc = njit(pyfunc)
for s in UNICODE_EXAMPLES:
for i in range(-len(s), len(s)):
self.assertEqual(pyfunc(s, i),
cfunc(s, i),
"'%s'[%d]?" % (s, i))
    def test_getitem_scalar_kind(self):
        # See issue #6135, make sure that getitem returns a char of the minimal
        # kind required to represent the "got" item, this is done via the use
        # of `hash` in the test function as it is sensitive to kind.
        pyfunc = getitem_check_kind_usecase
        cfunc = njit(pyfunc)
        # Each sample mixes 1-byte with 2- or 4-byte characters so indexing
        # must narrow the result's kind.
        samples = ['a\u1234', '¡着']
        for s in samples:
            for i in range(-len(s), len(s)):
                self.assertEqual(pyfunc(s, i),
                                 cfunc(s, i),
                                 "'%s'[%d]?" % (s, i))
def test_getitem_error(self):
self.disable_leak_check()
pyfunc = getitem_usecase
cfunc = njit(pyfunc)
for s in UNICODE_EXAMPLES:
with self.assertRaises(IndexError) as raises:
pyfunc(s, len(s))
self.assertIn('string index out of range', str(raises.exception))
with self.assertRaises(IndexError) as raises:
cfunc(s, len(s))
self.assertIn('string index out of range', str(raises.exception))
def test_slice2(self):
pyfunc = getitem_usecase
cfunc = njit(pyfunc)
for s in UNICODE_EXAMPLES:
for i in list(range(-len(s), len(s))):
for j in list(range(-len(s), len(s))):
sl = slice(i, j)
self.assertEqual(pyfunc(s, sl),
cfunc(s, sl),
"'%s'[%d:%d]?" % (s, i, j))
def test_slice2_error(self):
pyfunc = getitem_usecase
cfunc = njit(pyfunc)
for s in UNICODE_EXAMPLES:
for i in [-2, -1, len(s), len(s) + 1]:
for j in [-2, -1, len(s), len(s) + 1]:
sl = slice(i, j)
self.assertEqual(pyfunc(s, sl),
cfunc(s, sl),
"'%s'[%d:%d]?" % (s, i, j))
    def test_getitem_slice2_kind(self):
        # See issue #6135. Also see note in test_getitem_scalar_kind regarding
        # testing.
        pyfunc = getitem_check_kind_usecase
        cfunc = njit(pyfunc)
        # Mixed-kind samples: slicing may drop the wide characters, so the
        # result must be re-narrowed to the minimal kind.
        samples = ['abc\u1234\u1234', '¡¡¡着着着']
        for s in samples:
            for i in [-2, -1, 0, 1, 2, len(s), len(s) + 1]:
                for j in [-2, -1, 0, 1, 2, len(s), len(s) + 1]:
                    sl = slice(i, j)
                    self.assertEqual(pyfunc(s, sl),
                                     cfunc(s, sl),
                                     "'%s'[%d:%d]?" % (s, i, j))
def test_slice3(self):
pyfunc = getitem_usecase
cfunc = njit(pyfunc)
for s in UNICODE_EXAMPLES:
for i in range(-len(s), len(s)):
for j in range(-len(s), len(s)):
for k in [-2, -1, 1, 2]:
sl = slice(i, j, k)
self.assertEqual(pyfunc(s, sl),
cfunc(s, sl),
"'%s'[%d:%d:%d]?" % (s, i, j, k))
def test_getitem_slice3_kind(self):
# See issue #6135. Also see note in test_getitem_scalar_kind regarding
# testing.
pyfunc = getitem_check_kind_usecase
cfunc = njit(pyfunc)
samples = ['abc\u1234\u1234',
'a\u1234b\u1234c'
'¡¡¡着着着',
'¡着¡着¡着',
'着a着b着c',
'¡着a¡着b¡着c',
'¡着a着¡c',]
for s in samples:
for i in range(-len(s), len(s)):
for j in range(-len(s), len(s)):
for k in [-2, -1, 1, 2]:
sl = slice(i, j, k)
self.assertEqual(pyfunc(s, sl),
cfunc(s, sl),
"'%s'[%d:%d:%d]?" % (s, i, j, k))
def test_slice3_error(self):
pyfunc = getitem_usecase
cfunc = njit(pyfunc)
for s in UNICODE_EXAMPLES:
for i in [-2, -1, len(s), len(s) + 1]:
for j in [-2, -1, len(s), len(s) + 1]:
for k in [-2, -1, 1, 2]:
sl = slice(i, j, k)
self.assertEqual(pyfunc(s, sl),
cfunc(s, sl),
"'%s'[%d:%d:%d]?" % (s, i, j, k))
    def test_slice_ascii_flag(self):
        """
        Make sure ascii flag is False when ascii and non-ascii characters are
        mixed in output of Unicode slicing.
        """
        @njit
        def f(s):
            # s[::2] keeps the non-ascii chars, s[1::2] is all-ascii.
            return s[::2]._is_ascii, s[1::2]._is_ascii
        s = "¿abc¡Y tú, quién te cre\t\tes?"
        self.assertEqual(f(s), (0, 1))
    def test_zfill(self):
        """str.zfill must match CPython for signed/unsigned, ascii and
        non-ascii inputs; a non-integer width must fail typing."""
        pyfunc = zfill_usecase
        cfunc = njit(pyfunc)
        ZFILL_INPUTS = [
            'ascii',
            '+ascii',
            '-ascii',
            '-asc ii-',
            '12345',
            '-12345',
            '+12345',
            '',
            '¡Y tú crs?',
            '🐍⚡',
            '+🐍⚡',
            '-🐍⚡',
            '大眼,小手。',
            '+大眼,小手。',
            '-大眼,小手。',
        ]
        with self.assertRaises(TypingError) as raises:
            cfunc(ZFILL_INPUTS[0], 1.1)
        self.assertIn('<width> must be an Integer', str(raises.exception))
        for s in ZFILL_INPUTS:
            for width in range(-3, 20):
                self.assertEqual(pyfunc(s, width),
                                 cfunc(s, width))
def test_concat(self, flags=no_pyobj_flags):
pyfunc = concat_usecase
cfunc = njit(pyfunc)
for a in UNICODE_EXAMPLES:
for b in UNICODE_EXAMPLES[::-1]:
self.assertEqual(pyfunc(a, b),
cfunc(a, b),
"'%s' + '%s'?" % (a, b))
def test_repeat(self, flags=no_pyobj_flags):
pyfunc = repeat_usecase
cfunc = njit(pyfunc)
for a in UNICODE_EXAMPLES:
for b in (-1, 0, 1, 2, 3, 4, 5, 7, 8, 15, 70):
self.assertEqual(pyfunc(a, b),
cfunc(a, b))
self.assertEqual(pyfunc(b, a),
cfunc(b, a))
    def test_repeat_exception_float(self):
        """Multiplying a string by a float must fail typing (no overload of
        operator.mul accepts unicode * float)."""
        self.disable_leak_check()
        cfunc = njit(repeat_usecase)
        with self.assertRaises(TypingError) as raises:
            cfunc('hi', 2.5)
        self.assertIn(_header_lead + ' Function(<built-in function mul>)',
                      str(raises.exception))
def test_split_exception_empty_sep(self):
self.disable_leak_check()
pyfunc = split_usecase
cfunc = njit(pyfunc)
# Handle empty separator exception
for func in [pyfunc, cfunc]:
with self.assertRaises(ValueError) as raises:
func('a', '')
self.assertIn('empty separator', str(raises.exception))
def test_split_exception_noninteger_maxsplit(self):
pyfunc = split_with_maxsplit_usecase
cfunc = njit(pyfunc)
# Handle non-integer maxsplit exception
for sep in [' ', None]:
with self.assertRaises(TypingError) as raises:
cfunc('a', sep, 2.4)
self.assertIn('float64', str(raises.exception),
'non-integer maxsplit with sep = %s' % sep)
    def test_split(self):
        """str.split(sep) must match CPython, including sep=None
        (whitespace), absent separators and multi-char separators."""
        pyfunc = split_usecase
        cfunc = njit(pyfunc)
        CASES = [
            (' a ', None),
            ('', '⚡'),
            ('abcabc', '⚡'),
            ('🐍⚡', '⚡'),
            ('🐍⚡🐍', '⚡'),
            ('abababa', 'a'),
            ('abababa', 'b'),
            ('abababa', 'c'),
            ('abababa', 'ab'),
            ('abababa', 'aba'),
        ]
        for test_str, splitter in CASES:
            self.assertEqual(pyfunc(test_str, splitter),
                             cfunc(test_str, splitter),
                             "'%s'.split('%s')?" % (test_str, splitter))
    def test_split_with_maxsplit(self):
        """str.split with maxsplit, passed positionally and as a keyword,
        must match CPython."""
        CASES = [
            (' a ', None, 1),
            ('', '⚡', 1),
            ('abcabc', '⚡', 1),
            ('🐍⚡', '⚡', 1),
            ('🐍⚡🐍', '⚡', 1),
            ('abababa', 'a', 2),
            ('abababa', 'b', 1),
            ('abababa', 'c', 2),
            ('abababa', 'ab', 1),
            ('abababa', 'aba', 5),
        ]
        for pyfunc, fmt_str in [(split_with_maxsplit_usecase,
                                 "'%s'.split('%s', %d)?"),
                                (split_with_maxsplit_kwarg_usecase,
                                 "'%s'.split('%s', maxsplit=%d)?")]:
            cfunc = njit(pyfunc)
            for test_str, splitter, maxsplit in CASES:
                self.assertEqual(pyfunc(test_str, splitter, maxsplit),
                                 cfunc(test_str, splitter, maxsplit),
                                 fmt_str % (test_str, splitter, maxsplit))
    def test_split_whitespace(self):
        """str.split() with no arguments must split on every Unicode
        whitespace character exactly as CPython does."""
        # explicit sep=None cases covered in test_split and
        # test_split_with_maxsplit
        pyfunc = split_whitespace_usecase
        cfunc = njit(pyfunc)
        # list copied from
        # https://github.com/python/cpython/blob/master/Objects/unicodetype_db.h
        all_whitespace = ''.join(map(chr, [
            0x0009, 0x000A, 0x000B, 0x000C, 0x000D, 0x001C, 0x001D, 0x001E,
            0x001F, 0x0020, 0x0085, 0x00A0, 0x1680, 0x2000, 0x2001, 0x2002,
            0x2003, 0x2004, 0x2005, 0x2006, 0x2007, 0x2008, 0x2009, 0x200A,
            0x2028, 0x2029, 0x202F, 0x205F, 0x3000
        ]))
        CASES = [
            '',
            'abcabc',
            '🐍 ⚡',
            '🐍 ⚡ 🐍',
            '🐍 ⚡ 🐍 ',
            ' 🐍 ⚡ 🐍',
            ' 🐍' + all_whitespace + '⚡ 🐍 ',
        ]
        for test_str in CASES:
            self.assertEqual(pyfunc(test_str),
                             cfunc(test_str),
                             "'%s'.split()?" % (test_str,))
    def test_split_exception_invalid_keepends(self):
        """Non-integer/non-boolean 'keepends' for str.splitlines must raise
        TypingError naming the offending type.

        NOTE(review): despite the 'split' in the name, this exercises
        splitlines — renaming would change the test's public identifier.
        """
        pyfunc = splitlines_with_keepends_usecase
        cfunc = njit(pyfunc)
        accepted_types = (types.Integer, int, types.Boolean, bool)
        for ty, keepends in (('none', None), ('unicode_type', 'None')):
            with self.assertRaises(TypingError) as raises:
                cfunc('\n', keepends)
            msg = '"keepends" must be {}, not {}'.format(accepted_types, ty)
            self.assertIn(msg, str(raises.exception))
def test_splitlines(self):
pyfunc = splitlines_usecase
cfunc = njit(pyfunc)
cases = ['', '\n', 'abc\r\rabc\r\n', '🐍⚡\v', '\f🐍⚡\f\v\v🐍\x85',
'\u2028aba\u2029baba', '\n\r\na\v\fb\x0b\x0cc\x1c\x1d\x1e']
msg = 'Results of "{}".splitlines() must be equal'
for s in cases:
self.assertEqual(pyfunc(s), cfunc(s), msg=msg.format(s))
    def test_splitlines_with_keepends(self):
        """str.splitlines with keepends (positional and keyword, boolean
        and integer truthy/falsy values) must match CPython."""
        pyfuncs = [
            splitlines_with_keepends_usecase,
            splitlines_with_keepends_kwarg_usecase
        ]
        messages = [
            'Results of "{}".splitlines({}) must be equal',
            'Results of "{}".splitlines(keepends={}) must be equal'
        ]
        cases = ['', '\n', 'abc\r\rabc\r\n', '🐍⚡\v', '\f🐍⚡\f\v\v🐍\x85',
                 '\u2028aba\u2029baba', '\n\r\na\v\fb\x0b\x0cc\x1c\x1d\x1e']
        all_keepends = [True, False, 0, 1, -1, 100]
        for pyfunc, msg in zip(pyfuncs, messages):
            cfunc = njit(pyfunc)
            for s, keepends in product(cases, all_keepends):
                self.assertEqual(pyfunc(s, keepends), cfunc(s, keepends),
                                 msg=msg.format(s, keepends))
def test_rsplit_exception_empty_sep(self):
self.disable_leak_check()
pyfunc = rsplit_usecase
cfunc = njit(pyfunc)
# Handle empty separator exception
for func in [pyfunc, cfunc]:
with self.assertRaises(ValueError) as raises:
func('a', '')
self.assertIn('empty separator', str(raises.exception))
def test_rsplit_exception_noninteger_maxsplit(self):
pyfunc = rsplit_with_maxsplit_usecase
cfunc = njit(pyfunc)
accepted_types = (types.Integer, int)
for sep in [' ', None]:
with self.assertRaises(TypingError) as raises:
cfunc('a', sep, 2.4)
msg = '"maxsplit" must be {}, not float'.format(accepted_types)
self.assertIn(msg, str(raises.exception))
    def test_rsplit(self):
        """str.rsplit(sep) must match CPython, including sep=None,
        absent separators and multi-char separators."""
        pyfunc = rsplit_usecase
        cfunc = njit(pyfunc)
        CASES = [
            (' a ', None),
            ('', '⚡'),
            ('abcabc', '⚡'),
            ('🐍⚡', '⚡'),
            ('🐍⚡🐍', '⚡'),
            ('abababa', 'a'),
            ('abababa', 'b'),
            ('abababa', 'c'),
            ('abababa', 'ab'),
            ('abababa', 'aba'),
        ]
        msg = 'Results of "{}".rsplit("{}") must be equal'
        for s, sep in CASES:
            self.assertEqual(pyfunc(s, sep), cfunc(s, sep),
                             msg=msg.format(s, sep))
    def test_rsplit_with_maxsplit(self):
        """str.rsplit with maxsplit, passed positionally and as a keyword,
        must match CPython."""
        pyfuncs = [rsplit_with_maxsplit_usecase,
                   rsplit_with_maxsplit_kwarg_usecase]
        CASES = [
            (' a ', None, 1),
            ('', '⚡', 1),
            ('abcabc', '⚡', 1),
            ('🐍⚡', '⚡', 1),
            ('🐍⚡🐍', '⚡', 1),
            ('abababa', 'a', 2),
            ('abababa', 'b', 1),
            ('abababa', 'c', 2),
            ('abababa', 'ab', 1),
            ('abababa', 'aba', 5),
        ]
        messages = [
            'Results of "{}".rsplit("{}", {}) must be equal',
            'Results of "{}".rsplit("{}", maxsplit={}) must be equal'
        ]
        for pyfunc, msg in zip(pyfuncs, messages):
            cfunc = njit(pyfunc)
            for test_str, sep, maxsplit in CASES:
                self.assertEqual(pyfunc(test_str, sep, maxsplit),
                                 cfunc(test_str, sep, maxsplit),
                                 msg=msg.format(test_str, sep, maxsplit))
    def test_rsplit_whitespace(self):
        """str.rsplit() with no arguments must split on every Unicode
        whitespace character exactly as CPython does."""
        pyfunc = rsplit_whitespace_usecase
        cfunc = njit(pyfunc)
        # list copied from
        # https://github.com/python/cpython/blob/1d4b6ba19466aba0eb91c4ba01ba509acf18c723/Objects/unicodetype_db.h#L5996-L6031 # noqa: E501
        all_whitespace = ''.join(map(chr, [
            0x0009, 0x000A, 0x000B, 0x000C, 0x000D, 0x001C, 0x001D, 0x001E,
            0x001F, 0x0020, 0x0085, 0x00A0, 0x1680, 0x2000, 0x2001, 0x2002,
            0x2003, 0x2004, 0x2005, 0x2006, 0x2007, 0x2008, 0x2009, 0x200A,
            0x2028, 0x2029, 0x202F, 0x205F, 0x3000
        ]))
        CASES = [
            '',
            'abcabc',
            '🐍 ⚡',
            '🐍 ⚡ 🐍',
            '🐍 ⚡ 🐍 ',
            ' 🐍 ⚡ 🐍',
            ' 🐍' + all_whitespace + '⚡ 🐍 ',
        ]
        msg = 'Results of "{}".rsplit() must be equal'
        for s in CASES:
            self.assertEqual(pyfunc(s), cfunc(s), msg.format(s))
def test_join_empty(self):
# Can't pass empty list to nopython mode, so we have to make a
# separate test case
pyfunc = join_empty_usecase
cfunc = njit(pyfunc)
CASES = [
'',
'🐍🐍🐍',
]
for sep in CASES:
self.assertEqual(pyfunc(sep),
cfunc(sep),
"'%s'.join([])?" % (sep,))
    def test_join_non_string_exception(self):
        # Verify that join of list of integers raises typing exception
        pyfunc = join_usecase
        cfunc = njit(pyfunc)
        # Handle empty separator exception
        with self.assertRaises(TypingError) as raises:
            cfunc('', [1, 2, 3])
        # This error message is obscure, but indicates the error was trapped
        # in the typing of str.join()
        # Feel free to change this as we update error messages.
        exc_message = str(raises.exception)
        self.assertIn(
            "During: resolving callee type: BoundFunction",
            exc_message,
        )
        # could be int32 or int64
        self.assertIn("reflected list(int", exc_message)
    def test_join(self):
        """str.join over lists of strings must match CPython, including
        empty separators and empty parts."""
        pyfunc = join_usecase
        cfunc = njit(pyfunc)
        CASES = [
            ('', ['', '', '']),
            ('a', ['', '', '']),
            ('', ['a', 'bbbb', 'c']),
            ('🐍🐍🐍', ['⚡⚡'] * 5),
        ]
        for sep, parts in CASES:
            self.assertEqual(pyfunc(sep, parts),
                             cfunc(sep, parts),
                             "'%s'.join('%s')?" % (sep, parts))
def test_join_interleave_str(self):
# can pass a string as the parts iterable
pyfunc = join_usecase
cfunc = njit(pyfunc)
CASES = [
('abc', '123'),
('🐍🐍🐍', '⚡⚡'),
]
for sep, parts in CASES:
self.assertEqual(pyfunc(sep, parts),
cfunc(sep, parts),
"'%s'.join('%s')?" % (sep, parts))
    def test_justification(self):
        """center/ljust/rjust must match CPython for a range of widths;
        a non-integer width must fail typing."""
        for pyfunc, case_name in [(center_usecase, 'center'),
                                  (ljust_usecase, 'ljust'),
                                  (rjust_usecase, 'rjust')]:
            cfunc = njit(pyfunc)
            with self.assertRaises(TypingError) as raises:
                cfunc(UNICODE_EXAMPLES[0], 1.1)
            self.assertIn('The width must be an Integer', str(raises.exception))
            for s in UNICODE_EXAMPLES:
                for width in range(-3, 20):
                    self.assertEqual(pyfunc(s, width),
                                     cfunc(s, width),
                                     "'%s'.%s(%d)?" % (s, case_name, width))
    def test_justification_fillchar(self):
        """center/ljust/rjust with a single-character fill must match
        CPython; a non-integer width must still fail typing."""
        for pyfunc, case_name in [(center_usecase_fillchar, 'center'),
                                  (ljust_usecase_fillchar, 'ljust'),
                                  (rjust_usecase_fillchar, 'rjust')]:
            cfunc = njit(pyfunc)
            # allowed fillchar cases
            for fillchar in [' ', '+', 'ú', '处']:
                with self.assertRaises(TypingError) as raises:
                    cfunc(UNICODE_EXAMPLES[0], 1.1, fillchar)
                self.assertIn('The width must be an Integer',
                              str(raises.exception))
                for s in UNICODE_EXAMPLES:
                    for width in range(-3, 20):
                        self.assertEqual(pyfunc(s, width, fillchar),
                                         cfunc(s, width, fillchar),
                                         "'%s'.%s(%d, '%s')?" % (s, case_name,
                                                                 width,
                                                                 fillchar))
    def test_justification_fillchar_exception(self):
        """Multi-character fills raise ValueError at runtime; non-string
        fills raise TypingError at compile time."""
        self.disable_leak_check()
        for pyfunc in [center_usecase_fillchar,
                       ljust_usecase_fillchar,
                       rjust_usecase_fillchar]:
            cfunc = njit(pyfunc)
            # disallowed fillchar cases
            for fillchar in ['', '+0', 'quién', '处着']:
                with self.assertRaises(ValueError) as raises:
                    cfunc(UNICODE_EXAMPLES[0], 20, fillchar)
                self.assertIn('The fill character must be exactly one',
                              str(raises.exception))
            # forbid fillchar cases with different types
            for fillchar in [1, 1.1]:
                with self.assertRaises(TypingError) as raises:
                    cfunc(UNICODE_EXAMPLES[0], 20, fillchar)
                self.assertIn('The fillchar must be a UnicodeType',
                              str(raises.exception))
def test_inplace_concat(self, flags=no_pyobj_flags):
pyfunc = inplace_concat_usecase
cfunc = njit(pyfunc)
for a in UNICODE_EXAMPLES:
for b in UNICODE_EXAMPLES[::-1]:
self.assertEqual(pyfunc(a, b),
cfunc(a, b),
"'%s' + '%s'?" % (a, b))
    def test_isidentifier(self):
        """str.isidentifier() must match CPython, including lone/paired
        surrogate code points."""
        def pyfunc(s):
            return s.isidentifier()
        cfunc = njit(pyfunc)
        # https://github.com/python/cpython/blob/1d4b6ba19466aba0eb91c4ba01ba509acf18c723/Lib/test/test_unicode.py#L695-L708 # noqa: E501
        cpython = ['a', 'Z', '_', 'b0', 'bc', 'b_', 'µ',
                   '𝔘𝔫𝔦𝔠𝔬𝔡𝔢', ' ', '[', '©', '0']
        # https://github.com/python/cpython/blob/1d4b6ba19466aba0eb91c4ba01ba509acf18c723/Lib/test/test_unicode.py#L742-L749 # noqa: E501
        cpython_extras = ['\uD800', '\uDFFF', '\uD800\uD800', '\uDFFF\uDFFF',
                          'a\uD800b\uDFFF', 'a\uDFFFb\uD800',
                          'a\uD800b\uDFFFa', 'a\uDFFFb\uD800a']
        msg = 'Results of "{}".isidentifier() must be equal'
        for s in UNICODE_EXAMPLES + [''] + cpython + cpython_extras:
            self.assertEqual(pyfunc(s), cfunc(s), msg=msg.format(s))
    def test_strip(self):
        """strip/lstrip/rstrip must match CPython, with no argument, with
        explicit chars, and with Optional(unicode) chars; a non-string
        chars argument must fail typing."""
        STRIP_CASES = [
            ('ass cii', 'ai'),
            ('ass cii', None),
            ('asscii', 'ai '),
            ('asscii ', 'ai '),
            (' asscii  ', 'ai '),
            (' asscii  ', 'asci '),
            (' asscii  ', 's'),
            ('      ', ' '),
            ('', ' '),
            ('', ''),
            ('', None),
            ('  ', None),
            ('   asscii  ', 'ai '),
            ('   asscii  ', ''),
            ('   asscii  ', None),
            ('tú quién te crees?', 'étú? '),
            ('  tú quién te crees?   ', 'étú? '),
            ('  tú qrees?   ', ''),
            ('  tú quién te crees?   ', None),
            ('大处 着眼,小处着手。大大大处', '大处'),
            (' 大处大处  ', ''),
            ('\t\nabcd\t', '\ta'),
            (' 大处大处  ', None),
            ('\t abcd \t', None),
            ('\n abcd \n', None),
            ('\r abcd \r', None),
            ('\x0b abcd \x0b', None),
            ('\x0c abcd \x0c', None),
            ('\u2029abcd\u205F', None),
            ('\u0085abcd\u2009', None)
        ]
        # form with no parameter
        for pyfunc, case_name in [(strip_usecase, 'strip'),
                                  (lstrip_usecase, 'lstrip'),
                                  (rstrip_usecase, 'rstrip')]:
            cfunc = njit(pyfunc)
            for string, chars in STRIP_CASES:
                self.assertEqual(pyfunc(string),
                                 cfunc(string),
                                 "'%s'.%s()?" % (string, case_name))
        # parametrized form
        for pyfunc, case_name in [(strip_usecase_chars, 'strip'),
                                  (lstrip_usecase_chars, 'lstrip'),
                                  (rstrip_usecase_chars, 'rstrip')]:
            cfunc = njit(pyfunc)
            sig1 = types.unicode_type(types.unicode_type,
                                      types.Optional(types.unicode_type))
            cfunc_optional = njit([sig1])(pyfunc)
            def try_compile_bad_optional(*args):
                # Optional(float64) chars must be rejected at typing time.
                bad = types.unicode_type(types.unicode_type,
                                         types.Optional(types.float64))
                njit([bad])(pyfunc)
            for fn in cfunc, try_compile_bad_optional:
                with self.assertRaises(TypingError) as raises:
                    fn('tú quis?', 1.1)
                self.assertIn('The arg must be a UnicodeType or None',
                              str(raises.exception))
            for fn in cfunc, cfunc_optional:
                for string, chars in STRIP_CASES:
                    self.assertEqual(pyfunc(string, chars),
                                     fn(string, chars),
                                     "'%s'.%s('%s')?" % (string, case_name,
                                                         chars))
    def test_isspace(self):
        """str.isspace() must match CPython, including astral-plane and
        surrogate code points."""
        def pyfunc(s):
            return s.isspace()
        cfunc = njit(pyfunc)
        # https://github.com/python/cpython/blob/1d4b6ba19466aba0eb91c4ba01ba509acf18c723/Lib/test/test_unicode.py#L613-L621 # noqa: E501
        cpython = ['\u2000', '\u200a', '\u2014', '\U00010401', '\U00010427',
                   '\U00010429', '\U0001044E', '\U0001F40D', '\U0001F46F']
        # https://github.com/python/cpython/blob/1d4b6ba19466aba0eb91c4ba01ba509acf18c723/Lib/test/test_unicode.py#L742-L749 # noqa: E501
        cpython_extras = ['\uD800', '\uDFFF', '\uD800\uD800', '\uDFFF\uDFFF',
                          'a\uD800b\uDFFF', 'a\uDFFFb\uD800',
                          'a\uD800b\uDFFFa', 'a\uDFFFb\uD800a']
        msg = 'Results of "{}".isspace() must be equal'
        for s in UNICODE_EXAMPLES + [''] + cpython + cpython_extras:
            self.assertEqual(pyfunc(s), cfunc(s), msg=msg.format(s))
def test_istitle(self):
pyfunc = istitle_usecase
cfunc = njit(pyfunc)
error_msg = "'{0}'.py_istitle() = {1}\n'{0}'.c_istitle() = {2}"
unicode_title = [x.title() for x in UNICODE_EXAMPLES]
special = [
'',
' ',
' AA ',
' Ab ',
'1',
'A123',
'A12Bcd',
'+abA',
'12Abc',
'A12abc',
'%^Abc 5 $% Def'
'𐐁𐐩',
'𐐧𐑎',
'𐐩',
'𐑎',
'🐍 Is',
'🐍 NOT',
'👯Is',
'ῼ',
'Greek ῼitlecases ...'
]
ISTITLE_EXAMPLES = UNICODE_EXAMPLES + unicode_title + special
for s in ISTITLE_EXAMPLES:
py_result = pyfunc(s)
c_result = cfunc(s)
self.assertEqual(py_result, c_result,
error_msg.format(s, py_result, c_result))
def test_isprintable(self):
def pyfunc(s):
return s.isprintable()
cfunc = njit(pyfunc)
# https://github.com/python/cpython/blob/1d4b6ba19466aba0eb91c4ba01ba509acf18c723/Lib/test/test_unicode.py#L710-L723 # noqa: E501
cpython = ['', ' ', 'abcdefg', 'abcdefg\n', '\u0374', '\u0378',
'\ud800', '\U0001F46F', '\U000E0020']
msg = 'Results of "{}".isprintable() must be equal'
for s in UNICODE_EXAMPLES + cpython:
self.assertEqual(pyfunc(s), cfunc(s), msg=msg.format(s))
def test_pointless_slice(self, flags=no_pyobj_flags):
def pyfunc(a):
return a[:]
cfunc = njit(pyfunc)
args = ['a']
self.assertEqual(pyfunc(*args), cfunc(*args))
def test_walk_backwards(self, flags=no_pyobj_flags):
def pyfunc(a):
return a[::-1]
cfunc = njit(pyfunc)
args = ['a']
self.assertEqual(pyfunc(*args), cfunc(*args))
def test_stride_slice(self, flags=no_pyobj_flags):
def pyfunc(a):
return a[::2]
cfunc = njit(pyfunc)
args = ['a']
self.assertEqual(pyfunc(*args), cfunc(*args))
def test_basic_lt(self, flags=no_pyobj_flags):
def pyfunc(a, b):
return a < b
cfunc = njit(pyfunc)
args = ['ab', 'b']
self.assertEqual(pyfunc(*args), cfunc(*args))
def test_basic_gt(self, flags=no_pyobj_flags):
def pyfunc(a, b):
return a > b
cfunc = njit(pyfunc)
args = ['ab', 'b']
self.assertEqual(pyfunc(*args), cfunc(*args))
def test_comparison(self):
def pyfunc(option, x, y):
if option == '==':
return x == y
elif option == '!=':
return x != y
elif option == '<':
return x < y
elif option == '>':
return x > y
elif option == '<=':
return x <= y
elif option == '>=':
return x >= y
else:
return None
cfunc = njit(pyfunc)
for x, y in permutations(UNICODE_ORDERING_EXAMPLES, r=2):
for cmpop in ['==', '!=', '<', '>', '<=', '>=', '']:
args = [cmpop, x, y]
self.assertEqual(pyfunc(*args), cfunc(*args),
msg='failed on {}'.format(args))
    def test_literal_concat(self):
        """Concatenating string literals with runtime strings compiles and
        matches CPython on both branches (non-empty and empty input)."""
        def pyfunc(x):
            abc = 'abc'
            if len(x):
                return abc + 'b123' + x + 'IO'
            else:
                return x + abc + '123' + x
        cfunc = njit(pyfunc)
        args = ['x']
        self.assertEqual(pyfunc(*args), cfunc(*args))
        args = ['']
        self.assertEqual(pyfunc(*args), cfunc(*args))
    def test_literal_comparison(self):
        """Rich comparisons between two string literals compile and match
        CPython (the unknown option falls through to None)."""
        def pyfunc(option):
            x = 'a123'
            y = 'aa12'
            if option == '==':
                return x == y
            elif option == '!=':
                return x != y
            elif option == '<':
                return x < y
            elif option == '>':
                return x > y
            elif option == '<=':
                return x <= y
            elif option == '>=':
                return x >= y
            else:
                return None
        cfunc = njit(pyfunc)
        for cmpop in ['==', '!=', '<', '>', '<=', '>=', '']:
            args = [cmpop]
            self.assertEqual(pyfunc(*args), cfunc(*args),
                             msg='failed on {}'.format(args))
def test_literal_len(self):
def pyfunc():
return len('abc')
cfunc = njit(pyfunc)
self.assertEqual(pyfunc(), cfunc())
def test_literal_getitem(self):
def pyfunc(which):
return 'abc'[which]
cfunc = njit(pyfunc)
for a in [-1, 0, 1, slice(1, None), slice(None, -1)]:
args = [a]
self.assertEqual(pyfunc(*args), cfunc(*args),
msg='failed on {}'.format(args))
def test_literal_in(self):
def pyfunc(x):
return x in '9876zabiuh'
cfunc = njit(pyfunc)
for a in ['a', '9', '1', '', '8uha', '987']:
args = [a]
self.assertEqual(pyfunc(*args), cfunc(*args),
msg='failed on {}'.format(args))
def test_literal_xyzwith(self):
def pyfunc(x, y):
return 'abc'.startswith(x), 'cde'.endswith(y)
cfunc = njit(pyfunc)
for args in permutations('abcdefg', r=2):
self.assertEqual(pyfunc(*args), cfunc(*args),
msg='failed on {}'.format(args))
def test_literal_find(self):
def pyfunc(x):
return 'abc'.find(x), x.find('a')
cfunc = njit(pyfunc)
for a in ['ab']:
args = [a]
self.assertEqual(pyfunc(*args), cfunc(*args),
msg='failed on {}'.format(args))
def test_not(self):
def pyfunc(x):
return not x
cfunc = njit(pyfunc)
for a in UNICODE_EXAMPLES:
args = [a]
self.assertEqual(pyfunc(*args), cfunc(*args),
msg='failed on {}'.format(args))
    def test_capitalize(self):
        """str.capitalize() must match CPython, including astral-plane
        cased letters and special-casing samples."""
        def pyfunc(x):
            return x.capitalize()
        cfunc = njit(pyfunc)
        # Samples taken from CPython testing:
        # https://github.com/python/cpython/blob/1d4b6ba19466aba0eb91c4ba01ba509acf18c723/Lib/test/test_unicode.py#L800-L815 # noqa: E501
        cpython = ['\U0001044F', '\U0001044F\U0001044F', '\U00010427\U0001044F',
                   '\U0001044F\U00010427', 'X\U00010427x\U0001044F', 'h\u0130',
                   '\u1fd2\u0130', 'finnish', 'A\u0345\u03a3']
        # https://github.com/python/cpython/blob/1d4b6ba19466aba0eb91c4ba01ba509acf18c723/Lib/test/test_unicode.py#L926 # noqa: E501
        cpython_extras = ['\U00010000\U00100000']
        msg = 'Results of "{}".capitalize() must be equal'
        for s in UNICODE_EXAMPLES + [''] + cpython + cpython_extras:
            self.assertEqual(pyfunc(s), cfunc(s), msg=msg.format(s))
    def test_isupper(self):
        """str.isupper() must match CPython, including digraphs, CJK and
        astral-plane samples."""
        def pyfunc(x):
            return x.isupper()
        cfunc = njit(pyfunc)
        uppers = [x.upper() for x in UNICODE_EXAMPLES]
        extras = ["AA12A", "aa12a", "大AA12A", "大aa12a", "AAADŽA", "A 1 1 大"]
        # Samples taken from CPython testing:
        # https://github.com/python/cpython/blob/1d4b6ba19466aba0eb91c4ba01ba509acf18c723/Lib/test/test_unicode.py#L585-L599 # noqa: E501
        cpython = ['\u2167', '\u2177', '\U00010401', '\U00010427', '\U00010429',
                   '\U0001044E', '\U0001F40D', '\U0001F46F']
        fourxcpy = [x * 4 for x in cpython]
        for a in UNICODE_EXAMPLES + uppers + extras + cpython + fourxcpy:
            args = [a]
            self.assertEqual(pyfunc(*args), cfunc(*args),
                             msg='failed on {}'.format(args))
def test_upper(self):
def pyfunc(x):
return x.upper()
cfunc = njit(pyfunc)
for a in UNICODE_EXAMPLES:
args = [a]
self.assertEqual(pyfunc(*args), cfunc(*args),
msg='failed on {}'.format(args))
    def test_casefold(self):
        """str.casefold() must match CPython, including full case folding
        (e.g. 'ß' -> 'ss') and astral-plane samples."""
        def pyfunc(x):
            return x.casefold()
        cfunc = njit(pyfunc)
        # https://github.com/python/cpython/blob/1d4b6ba19466aba0eb91c4ba01ba509acf18c723/Lib/test/test_unicode.py#L774-L781 # noqa: E501
        cpython = ['hello', 'hELlo', 'ß', 'fi', '\u03a3',
                   'A\u0345\u03a3', '\u00b5']
        # https://github.com/python/cpython/blob/1d4b6ba19466aba0eb91c4ba01ba509acf18c723/Lib/test/test_unicode.py#L924 # noqa: E501
        cpython_extras = ['\U00010000\U00100000']
        msg = 'Results of "{}".casefold() must be equal'
        for s in UNICODE_EXAMPLES + [''] + cpython + cpython_extras:
            self.assertEqual(pyfunc(s), cfunc(s), msg=msg.format(s))
    def test_isalpha(self):
        """str.isalpha() must match CPython, including astral-plane and
        surrogate code points."""
        def pyfunc(x):
            return x.isalpha()
        cfunc = njit(pyfunc)
        # Samples taken from CPython testing:
        # https://github.com/python/cpython/blob/201c8f79450628241574fba940e08107178dc3a5/Lib/test/test_unicode.py#L630-L640 # noqa: E501
        cpython = ['\u1FFc', '\U00010401', '\U00010427', '\U00010429',
                   '\U0001044E', '\U0001F40D', '\U0001F46F']
        # https://github.com/python/cpython/blob/201c8f79450628241574fba940e08107178dc3a5/Lib/test/test_unicode.py#L738-L745 # noqa: E501
        extras = ['\uD800', '\uDFFF', '\uD800\uD800', '\uDFFF\uDFFF',
                  'a\uD800b\uDFFF', 'a\uDFFFb\uD800',
                  'a\uD800b\uDFFFa', 'a\uDFFFb\uD800a']
        msg = 'Results of "{}".isalpha() must be equal'
        for s in UNICODE_EXAMPLES + [''] + extras + cpython:
            self.assertEqual(pyfunc(s), cfunc(s), msg=msg.format(s))
@unittest.skipUnless(_py37_or_later,
'isascii method requires Python 3.7 or later')
def test_isascii(self):
def pyfunc(x):
return x.isascii()
cfunc = njit(pyfunc)
# Samples taken from CPython testing:
# https://github.com/python/cpython/blob/865c3b257fe38154a4320c7ee6afb416f665b9c2/Lib/test/string_tests.py#L913-L926 # noqa: E501
cpython = ['', '\x00', '\x7f', '\x00\x7f', '\x80', '\xe9', ' ']
msg = 'Results of "{}".isascii() must be equal'
for s in UNICODE_EXAMPLES + cpython:
self.assertEqual(pyfunc(s), cfunc(s), msg=msg.format(s))
def test_title(self):
pyfunc = title
cfunc = njit(pyfunc)
# Samples taken from CPython testing:
# https://github.com/python/cpython/blob/201c8f79450628241574fba940e08107178dc3a5/Lib/test/test_unicode.py#L813-L828 # noqa: E501
cpython = ['\U0001044F', '\U0001044F\U0001044F',
'\U0001044F\U0001044F \U0001044F\U0001044F',
'\U00010427\U0001044F \U00010427\U0001044F',
'\U0001044F\U00010427 \U0001044F\U00010427',
'X\U00010427x\U0001044F X\U00010427x\U0001044F',
'fiNNISH', 'A\u03a3 \u1fa1xy', 'A\u03a3A']
msg = 'Results of "{}".title() must be equal'
for s in UNICODE_EXAMPLES + [''] + cpython:
self.assertEqual(pyfunc(s), cfunc(s), msg=msg.format(s))
def test_swapcase(self):
def pyfunc(x):
return x.swapcase()
cfunc = njit(pyfunc)
# https://github.com/python/cpython/blob/1d4b6ba19466aba0eb91c4ba01ba509acf18c723/Lib/test/test_unicode.py#L834-L858 # noqa: E501
cpython = ['\U0001044F', '\U00010427', '\U0001044F\U0001044F',
'\U00010427\U0001044F', '\U0001044F\U00010427',
'X\U00010427x\U0001044F', 'fi', '\u0130', '\u03a3',
'\u0345\u03a3', 'A\u0345\u03a3', 'A\u0345\u03a3a',
'A\u0345\u03a3', 'A\u03a3\u0345', '\u03a3\u0345 ',
'\u03a3', 'ß', '\u1fd2']
# https://github.com/python/cpython/blob/1d4b6ba19466aba0eb91c4ba01ba509acf18c723/Lib/test/test_unicode.py#L928 # noqa: E501
cpython_extras = ['\U00010000\U00100000']
msg = 'Results of "{}".swapcase() must be equal'
for s in UNICODE_EXAMPLES + [''] + cpython + cpython_extras:
self.assertEqual(pyfunc(s), cfunc(s), msg=msg.format(s))
def test_islower(self):
pyfunc = islower_usecase
cfunc = njit(pyfunc)
lowers = [x.lower() for x in UNICODE_EXAMPLES]
extras = ['AA12A', 'aa12a', '大AA12A', '大aa12a', 'AAADŽA', 'A 1 1 大']
# Samples taken from CPython testing:
# https://github.com/python/cpython/blob/201c8f79450628241574fba940e08107178dc3a5/Lib/test/test_unicode.py#L586-L600 # noqa: E501
cpython = ['\u2167', '\u2177', '\U00010401', '\U00010427',
'\U00010429', '\U0001044E', '\U0001F40D', '\U0001F46F']
cpython += [x * 4 for x in cpython]
msg = 'Results of "{}".islower() must be equal'
for s in UNICODE_EXAMPLES + lowers + [''] + extras + cpython:
self.assertEqual(pyfunc(s), cfunc(s), msg=msg.format(s))
def test_isalnum(self):
def pyfunc(x):
return x.isalnum()
cfunc = njit(pyfunc)
# Samples taken from CPython testing:
# https://github.com/python/cpython/blob/201c8f79450628241574fba940e08107178dc3a5/Lib/test/test_unicode.py#L624-L628 # noqa: E501
cpython = ['\U00010401', '\U00010427', '\U00010429', '\U0001044E',
'\U0001D7F6', '\U00011066', '\U000104A0', '\U0001F107']
# https://github.com/python/cpython/blob/201c8f79450628241574fba940e08107178dc3a5/Lib/test/test_unicode.py#L738-L745 # noqa: E501
extras = ['\uD800', '\uDFFF', '\uD800\uD800', '\uDFFF\uDFFF',
'a\uD800b\uDFFF', 'a\uDFFFb\uD800',
'a\uD800b\uDFFFa', 'a\uDFFFb\uD800a']
msg = 'Results of "{}".isalnum() must be equal'
for s in UNICODE_EXAMPLES + [''] + extras + cpython:
self.assertEqual(pyfunc(s), cfunc(s), msg=msg.format(s))
def test_lower(self):
pyfunc = lower_usecase
cfunc = njit(pyfunc)
extras = ['AA12A', 'aa12a', '大AA12A', '大aa12a', 'AAADŽA', 'A 1 1 大']
# Samples taken from CPython testing:
# https://github.com/python/cpython/blob/201c8f79450628241574fba940e08107178dc3a5/Lib/test/test_unicode.py#L748-L758 # noqa: E501
cpython = ['\U00010401', '\U00010427', '\U0001044E', '\U0001F46F',
'\U00010427\U00010427', '\U00010427\U0001044F',
'X\U00010427x\U0001044F', '\u0130']
# special cases for sigma from CPython testing:
# https://github.com/python/cpython/blob/201c8f79450628241574fba940e08107178dc3a5/Lib/test/test_unicode.py#L759-L768 # noqa: E501
sigma = ['\u03a3', '\u0345\u03a3', 'A\u0345\u03a3', 'A\u0345\u03a3a',
'\u03a3\u0345 ', '\U0008fffe', '\u2177']
extra_sigma = 'A\u03a3\u03a2'
sigma.append(extra_sigma)
msg = 'Results of "{}".lower() must be equal'
for s in UNICODE_EXAMPLES + [''] + extras + cpython + sigma:
self.assertEqual(pyfunc(s), cfunc(s), msg=msg.format(s))
def test_isnumeric(self):
def pyfunc(x):
return x.isnumeric()
cfunc = njit(pyfunc)
# https://github.com/python/cpython/blob/1d4b6ba19466aba0eb91c4ba01ba509acf18c723/Lib/test/test_unicode.py#L676-L693 # noqa: E501
cpython = ['', 'a', '0', '\u2460', '\xbc', '\u0660', '0123456789',
'0123456789a', '\U00010401', '\U00010427', '\U00010429',
'\U0001044E', '\U0001F40D', '\U0001F46F', '\U00011065',
'\U0001D7F6', '\U00011066', '\U000104A0', '\U0001F107']
# https://github.com/python/cpython/blob/1d4b6ba19466aba0eb91c4ba01ba509acf18c723/Lib/test/test_unicode.py#L742-L749 # noqa: E501
cpython_extras = ['\uD800', '\uDFFF', '\uD800\uD800', '\uDFFF\uDFFF',
'a\uD800b\uDFFF', 'a\uDFFFb\uD800', 'a\uD800b\uDFFFa',
'a\uDFFFb\uD800a']
msg = 'Results of "{}".isnumeric() must be equal'
for s in UNICODE_EXAMPLES + [''] + cpython + cpython_extras:
self.assertEqual(pyfunc(s), cfunc(s), msg=msg.format(s))
def test_isdigit(self):
def pyfunc(x):
return x.isdigit()
cfunc = njit(pyfunc)
# https://github.com/python/cpython/blob/1d4b6ba19466aba0eb91c4ba01ba509acf18c723/Lib/test/test_unicode.py#L664-L674 # noqa: E501
cpython = ['\u2460', '\xbc', '\u0660', '\U00010401', '\U00010427',
'\U00010429', '\U0001044E', '\U0001F40D', '\U0001F46F',
'\U00011065', '\U0001D7F6', '\U00011066', '\U000104A0',
'\U0001F107']
# https://github.com/python/cpython/blob/1d4b6ba19466aba0eb91c4ba01ba509acf18c723/Lib/test/test_unicode.py#L742-L749 # noqa: E501
cpython_extras = ['\uD800', '\uDFFF', '\uD800\uD800', '\uDFFF\uDFFF',
'a\uD800b\uDFFF', 'a\uDFFFb\uD800',
'a\uD800b\uDFFFa', 'a\uDFFFb\uD800a']
msg = 'Results of "{}".isdigit() must be equal'
for s in UNICODE_EXAMPLES + [''] + cpython + cpython_extras:
self.assertEqual(pyfunc(s), cfunc(s), msg=msg.format(s))
def test_isdecimal(self):
def pyfunc(x):
return x.isdecimal()
cfunc = njit(pyfunc)
# https://github.com/python/cpython/blob/1d4b6ba19466aba0eb91c4ba01ba509acf18c723/Lib/test/test_unicode.py#L646-L662 # noqa: E501
cpython = ['', 'a', '0', '\u2460', '\xbc', '\u0660', '0123456789',
'0123456789a', '\U00010401', '\U00010427', '\U00010429',
'\U0001044E', '\U0001F40D', '\U0001F46F', '\U00011065',
'\U0001F107', '\U0001D7F6', '\U00011066', '\U000104A0']
# https://github.com/python/cpython/blob/1d4b6ba19466aba0eb91c4ba01ba509acf18c723/Lib/test/test_unicode.py#L742-L749 # noqa: E501
cpython_extras = ['\uD800', '\uDFFF', '\uD800\uD800', '\uDFFF\uDFFF',
'a\uD800b\uDFFF', 'a\uDFFFb\uD800', 'a\uD800b\uDFFFa',
'a\uDFFFb\uD800a']
msg = 'Results of "{}".isdecimal() must be equal'
for s in UNICODE_EXAMPLES + [''] + cpython + cpython_extras:
self.assertEqual(pyfunc(s), cfunc(s), msg=msg.format(s))
def test_replace(self):
pyfunc = replace_usecase
cfunc = njit(pyfunc)
CASES = [
('abc', '', 'A'),
('', '⚡', 'A'),
('abcabc', '⚡', 'A'),
('🐍⚡', '⚡', 'A'),
('🐍⚡🐍', '⚡', 'A'),
('abababa', 'a', 'A'),
('abababa', 'b', 'A'),
('abababa', 'c', 'A'),
('abababa', 'ab', 'A'),
('abababa', 'aba', 'A'),
]
for test_str, old_str, new_str in CASES:
self.assertEqual(pyfunc(test_str, old_str, new_str),
cfunc(test_str, old_str, new_str),
"'%s'.replace('%s', '%s')?" %
(test_str, old_str, new_str))
def test_replace_with_count(self):
pyfunc = replace_with_count_usecase
cfunc = njit(pyfunc)
CASES = [
('abc', '', 'A'),
('', '⚡', 'A'),
('abcabc', '⚡', 'A'),
('🐍⚡', '⚡', 'A'),
('🐍⚡🐍', '⚡', 'A'),
('abababa', 'a', 'A'),
('abababa', 'b', 'A'),
('abababa', 'c', 'A'),
('abababa', 'ab', 'A'),
('abababa', 'aba', 'A'),
]
count_test = [-1, 1, 0, 5]
for test_str, old_str, new_str in CASES:
for count in count_test:
self.assertEqual(pyfunc(test_str, old_str, new_str, count),
cfunc(test_str, old_str, new_str, count),
"'%s'.replace('%s', '%s', '%s')?" %
(test_str, old_str, new_str, count))
def test_replace_unsupported(self):
def pyfunc(s, x, y, count):
return s.replace(x, y, count)
cfunc = njit(pyfunc)
with self.assertRaises(TypingError) as raises:
cfunc('ababababab', 'ba', 'qqq', 3.5)
msg = 'Unsupported parameters. The parametrs must be Integer.'
self.assertIn(msg, str(raises.exception))
with self.assertRaises(TypingError) as raises:
cfunc('ababababab', 0, 'qqq', 3)
msg = 'The object must be a UnicodeType.'
self.assertIn(msg, str(raises.exception))
with self.assertRaises(TypingError) as raises:
cfunc('ababababab', 'ba', 0, 3)
msg = 'The object must be a UnicodeType.'
self.assertIn(msg, str(raises.exception))
class TestUnicodeInTuple(BaseTest):
    """Tests for unicode strings stored inside tuples (see issue 3673)."""

    def test_const_unicode_in_tuple(self):
        # Issue 3673
        @njit
        def first_smaller():
            return ('aa',) < ('bb',)

        self.assertEqual(first_smaller.py_func(), first_smaller())

        @njit
        def first_larger():
            return ('cc',) < ('bb',)

        self.assertEqual(first_larger.py_func(), first_larger())

    def test_const_unicode_in_hetero_tuple(self):
        @njit
        def str_differs():
            return ('aa', 1) < ('bb', 1)

        self.assertEqual(str_differs.py_func(), str_differs())

        @njit
        def int_differs():
            return ('aa', 1) < ('aa', 2)

        self.assertEqual(int_differs.py_func(), int_differs())

    def test_ascii_flag_unbox(self):
        # The _is_ascii flag set while unboxing must agree with a
        # Python-side ASCII check.
        @njit
        def get_flag(s):
            return s._is_ascii

        for example in UNICODE_EXAMPLES:
            self.assertEqual(get_flag(example), isascii(example))

    def test_ascii_flag_join(self):
        # str.join must only report ASCII when all inputs are ASCII.
        @njit
        def join_flags():
            s1 = 'abc'
            s2 = '123'
            s3 = '🐍⚡'
            s4 = '大处着眼,小处着手。'
            return (",".join([s1, s2])._is_ascii,
                    "🐍⚡".join([s1, s2])._is_ascii,
                    ",".join([s1, s3])._is_ascii,
                    ",".join([s3, s4])._is_ascii)

        self.assertEqual(join_flags(), (1, 0, 0, 0))

    def test_ascii_flag_getitem(self):
        # Indexing and slicing must preserve the ASCII flag.
        @njit
        def getitem_flags():
            s1 = 'abc123'
            s2 = '🐍⚡🐍⚡🐍⚡'
            return (s1[0]._is_ascii, s1[2:]._is_ascii, s2[0]._is_ascii,
                    s2[2:]._is_ascii)

        self.assertEqual(getitem_flags(), (1, 1, 0, 0))

    def test_ascii_flag_add_mul(self):
        # Concatenation and repetition must preserve the ASCII flag.
        @njit
        def add_mul_flags():
            s1 = 'abc'
            s2 = '123'
            s3 = '🐍⚡'
            s4 = '大处着眼,小处着手。'
            return ((s1 + s2)._is_ascii,
                    (s1 + s3)._is_ascii,
                    (s3 + s4)._is_ascii,
                    (s1 * 2)._is_ascii,
                    (s3 * 2)._is_ascii)

        self.assertEqual(add_mul_flags(), (1, 0, 0, 1, 0))
class TestUnicodeIteration(BaseTest):
    """Tests for iterating over unicode strings in nopython mode."""

    def test_unicode_iter(self):
        pyfunc = iter_usecase
        cfunc = njit(pyfunc)
        for example in UNICODE_EXAMPLES:
            self.assertPreciseEqual(pyfunc(example), cfunc(example))

    def test_unicode_literal_iter(self):
        pyfunc = literal_iter_usecase
        cfunc = njit(pyfunc)
        self.assertPreciseEqual(pyfunc(), cfunc())

    def test_unicode_enumerate_iter(self):
        pyfunc = enumerated_iter_usecase
        cfunc = njit(pyfunc)
        for example in UNICODE_EXAMPLES:
            self.assertPreciseEqual(pyfunc(example), cfunc(example))

    def test_unicode_stopiteration_iter(self):
        # Exhausting the iterator must raise StopIteration in both the
        # interpreted and the compiled version.
        self.disable_leak_check()
        pyfunc = iter_stopiteration_usecase
        cfunc = njit(pyfunc)
        for func in (pyfunc, cfunc):
            for example in UNICODE_EXAMPLES:
                with self.assertRaises(StopIteration):
                    func(example)

    def test_unicode_literal_stopiteration_iter(self):
        pyfunc = literal_iter_stopiteration_usecase
        cfunc = njit(pyfunc)
        for func in (pyfunc, cfunc):
            with self.assertRaises(StopIteration):
                func()
class TestUnicodeAuxillary(BaseTest):
    """Tests for ord()/chr() support, the unicode type's MRO and f-strings."""
    def test_ord(self):
        # ord() of every individual character of every example must match.
        pyfunc = ord_usecase
        cfunc = njit(pyfunc)
        for ex in UNICODE_EXAMPLES:
            for a in ex:
                self.assertPreciseEqual(pyfunc(a), cfunc(a))
    def test_ord_invalid(self):
        self.disable_leak_check()
        pyfunc = ord_usecase
        cfunc = njit(pyfunc)
        # wrong number of chars
        for func in (pyfunc, cfunc):
            for ch in ('', 'abc'):
                with self.assertRaises(TypeError) as raises:
                    func(ch)
                self.assertIn('ord() expected a character',
                              str(raises.exception))
        # wrong type
        with self.assertRaises(TypingError) as raises:
            cfunc(1.23)
        self.assertIn(_header_lead, str(raises.exception))
    def test_chr(self):
        # Round-trip: chr(ord(c)) via the compiled function must match CPython.
        pyfunc = chr_usecase
        cfunc = njit(pyfunc)
        for ex in UNICODE_EXAMPLES:
            for x in ex:
                a = ord(x)
                self.assertPreciseEqual(pyfunc(a), cfunc(a))
        # test upper/lower bounds
        for a in (0x0, _MAX_UNICODE):
            self.assertPreciseEqual(pyfunc(a), cfunc(a))
    def test_chr_invalid(self):
        pyfunc = chr_usecase
        cfunc = njit(pyfunc)
        # value negative/>_MAX_UNICODE
        for func in (pyfunc, cfunc):
            for v in (-2, _MAX_UNICODE + 1):
                with self.assertRaises(ValueError) as raises:
                    func(v)
                self.assertIn("chr() arg not in range", str(raises.exception))
        # wrong type
        with self.assertRaises(TypingError) as raises:
            cfunc('abc')
        self.assertIn(_header_lead, str(raises.exception))
    def test_unicode_type_mro(self):
        # see issue #5635
        # UnicodeType must be recognised as Hashable inside @overload typing.
        def bar(x):
            return True
        @overload(bar)
        def ol_bar(x):
            ok = False
            if isinstance(x, types.UnicodeType):
                if isinstance(x, types.Hashable):
                    ok = True
            return lambda x: ok
        @njit
        def foo(strinst):
            return bar(strinst)
        inst = "abc"
        self.assertEqual(foo.py_func(inst), foo(inst))
        self.assertIn(types.Hashable, types.unicode_type.__class__.__mro__)
    def test_f_strings(self):
        """test f-string support, which requires bytecode handling
        """
        # requires formatting (FORMAT_VALUE) and concatenation (BUILD_STRINGS)
        def impl1(a):
            return f"AA_{a+3}_B"
        # does not require concatenation
        def impl2(a):
            return f"{a+2}"
        # no expression
        def impl3(a):
            return f"ABC_{a}"
        # format spec not allowed
        def impl4(a):
            return f"ABC_{a:0}"
        # corner case: empty string
        def impl5():
            return f"" # noqa: F541
        self.assertEqual(impl1(3), njit(impl1)(3))
        self.assertEqual(impl2(2), njit(impl2)(2))
        # string input
        self.assertEqual(impl3("DE"), njit(impl3)("DE"))
        # check error when input type doesn't have str() implementation
        with self.assertRaises(TypingError) as raises:
            njit(impl3)(["A", "B"])
        msg = "No implementation of function Function(<class 'str'>)"
        self.assertIn(msg, str(raises.exception))
        # check error when format spec provided
        with self.assertRaises(UnsupportedError) as raises:
            njit(impl4)(["A", "B"])
        msg = "format spec in f-strings not supported yet"
        self.assertIn(msg, str(raises.exception))
        self.assertEqual(impl5(), njit(impl5)())
# Allow running this test file directly as a script.
if __name__ == '__main__':
    unittest.main()
| cpcloud/numba | numba/tests/test_unicode.py | Python | bsd-2-clause | 93,714 |
import datetime
import json
import time
import urllib
import urlparse
from django.contrib.humanize.templatetags import humanize
from django.contrib.staticfiles.storage import staticfiles_storage
from django.template import defaultfilters
from django.utils.encoding import smart_str
from django.utils.html import strip_tags
from django.utils.translation import ugettext_lazy as _lazy
import jinja2
from jingo import register
from product_details import product_details
from fjord.base.urlresolvers import reverse
# Yanking filters from Django.
# Re-register a handful of Django's built-in template filters with jingo
# so they stay available inside Jinja2 templates.
register.filter(strip_tags)
register.filter(defaultfilters.timesince)
register.filter(defaultfilters.truncatewords)
@register.function
def thisyear():
    """The current year."""
    current_year = datetime.date.today().year
    return jinja2.Markup(current_year)
@register.function
def url(viewname, *args, **kwargs):
    """Helper for Django's ``reverse`` in templates.

    Positional and keyword arguments are forwarded to ``reverse`` as the
    URL pattern's ``args``/``kwargs``.
    """
    return reverse(viewname, args=args, kwargs=kwargs)
@register.filter
def urlparams(url_, hash=None, **query):
    """Add a fragment and/or query parameters to a URL.

    New query params will be appended to existing parameters, except
    duplicate names, which will be replaced. Params whose value is None are
    dropped.

    NOTE: ``hash`` shadows the builtin, but it is part of this filter's
    public keyword interface so it cannot be renamed.
    """
    # Renamed from `url` to avoid shadowing the module-level url() helper.
    parsed = urlparse.urlparse(url_)
    fragment = hash if hash is not None else parsed.fragment

    # Use dict(parse_qsl) so we don't get lists of values.
    q = parsed.query
    query_dict = dict(urlparse.parse_qsl(smart_str(q))) if q else {}
    query_dict.update((k, v) for k, v in query.items())

    query_string = _urlencode([(k, v) for k, v in query_dict.items()
                               if v is not None])
    new = urlparse.ParseResult(parsed.scheme, parsed.netloc, parsed.path,
                               parsed.params, query_string, fragment)
    return new.geturl()
def _urlencode(items):
    """A Unicode-safe URLencoder."""
    try:
        return urllib.urlencode(items)
    except UnicodeEncodeError:
        # Python 2's urlencode chokes on non-ASCII unicode; retry with each
        # value re-encoded as a UTF-8 bytestring.
        return urllib.urlencode([(k, smart_str(v)) for k, v in items])
@register.filter
def urlencode(txt):
    """Url encode a path."""
    # quote_plus on Python 2 requires a bytestring, so encode unicode first.
    if isinstance(txt, unicode):
        txt = txt.encode('utf-8')
    return urllib.quote_plus(txt)
@register.function
def static(path):
    """Resolve a static asset path through Django's staticfiles storage."""
    return staticfiles_storage.url(path)
@register.filter
def naturaltime(*args):
    """Jinja-facing wrapper around Django's ``humanize.naturaltime``."""
    return humanize.naturaltime(*args)
def json_handle_datetime(obj):
    """Convert a datetime obj to a number of milliseconds since epoch.

    This uses milliseconds since this is probably going to be used to
    feed JS, and JS timestamps use milliseconds. Objects without a
    ``timetuple`` are returned unchanged.
    """
    try:
        seconds = time.mktime(obj.timetuple())
    except AttributeError:
        # Not a date/datetime; hand it back for json to deal with.
        return obj
    return seconds * 1000
@register.filter
def to_json(data):
    """Serialize to JSON; datetimes become epoch milliseconds."""
    return json.dumps(data, default=json_handle_datetime)
@register.filter
def locale_name(locale, native=False, default=_lazy(u'Unknown')):
    """Convert a locale code into a human readable locale name"""
    # Look the code up in product_details; fall back to `default` for
    # unknown locales.
    names = product_details.languages.get(locale)
    if names is None:
        return default
    return names['native' if native else 'English']
@register.function
def date_ago(days=0):
    """Return today's date shifted ``days`` into the past."""
    delta = datetime.timedelta(days=days)
    return (datetime.datetime.now() - delta).date()
@register.function
def to_datetime_string(dt):
    """Converts date/datetime to '%Y-%m-%dT%H:%M:%S'"""
    # A plain date renders with a 00:00:00 time component.
    return dt.strftime('%Y-%m-%dT%H:%M:%S')
@register.function
def to_date_string(dt):
    """Converts date/datetime to '%Y-%m-%d'"""
    # Any time component of a datetime is discarded by the format.
    return dt.strftime('%Y-%m-%d')
@register.function
def displayname(user):
    """Returns the best display name for the user"""
    # Fall back to the email address when no first name is set.
    return user.first_name or user.email
| staranjeet/fjord | fjord/base/helpers.py | Python | bsd-3-clause | 3,652 |
from .db_settings import get_model_indexes
from djangotoolbox.db.creation import NonrelDatabaseCreation
class StringType(object):
    """A db_type placeholder resolved per-field via the ``%`` operator."""

    def __init__(self, internal_type):
        self.internal_type = internal_type

    def __mod__(self, field):
        # Interpolation hook: choose the concrete column type from the
        # model's index configuration for this field.
        indexes = get_model_indexes(field['model'])
        field_name = field['name']
        if field_name in indexes['indexed']:
            return 'text'
        if field_name in indexes['unindexed']:
            return 'longtext'
        return self.internal_type
def get_data_types():
    """Return a copy of the backend's data_types mapping with string
    column types wrapped in StringType so they resolve per-field."""
    # TODO: Add GAEKeyField and a corresponding db_type
    string_types = ('text', 'longtext')
    data_types = NonrelDatabaseCreation.data_types.copy()
    # Iterate over a snapshot: mutating the dict while iterating a live
    # items() view raises RuntimeError on Python 3 (harmless on Python 2,
    # where items() already returns a list).
    for name, field_type in list(data_types.items()):
        if field_type in string_types:
            data_types[name] = StringType(field_type)
    return data_types
class DatabaseCreation(NonrelDatabaseCreation):
    # This dictionary maps Field objects to their associated GAE column
    # types, as strings. Column-type strings can contain format strings; they'll
    # be interpolated against the values of Field.__dict__ before being output.
    # If a column type is set to None, it won't be included in the output.
    data_types = get_data_types()
    def create_test_db(self, *args, **kw):
        """Destroys the test datastore. A new store will be recreated on demand"""
        self.destroy_test_db()
        self.connection.use_test_datastore = True
        # Flush so the connection starts using the test datastore paths.
        self.connection.flush()
    def destroy_test_db(self, *args, **kw):
        """Destroys the test datastore files."""
        # Local import — presumably avoids a circular import with .base;
        # confirm before moving it to module level.
        from .base import destroy_datastore, get_test_datastore_paths
        destroy_datastore(*get_test_datastore_paths())
| rimbalinux/MSISDNArea | djangoappengine/db/creation.py | Python | bsd-3-clause | 1,719 |
"""
pytest hooks and fixtures used for our unittests.
Please note that there should not be any Django/Olympia related imports
on module-level, they should instead be added to hooks or fixtures directly.
"""
import os
import uuid
import warnings
import pytest
import responses
import six
@pytest.fixture(autouse=True)
def unpin_db(request):
    """Unpin the database from master in the current DB.

    The `multidb` middleware pins the current thread to master for 15 seconds
    after any POST request, which can lead to unexpected results for tests
    of DB slave functionality."""
    from multidb import pinning
    # Run after every test so a leftover pin never leaks into the next one.
    request.addfinalizer(pinning.unpin_this_thread)
@pytest.fixture(autouse=True)
def mock_elasticsearch():
    """Mock ElasticSearch in tests by default.

    Tests that do need ES should inherit from ESTestCase, which will stop the
    mock at setup time."""
    from olympia.amo.tests import start_es_mocks, stop_es_mocks
    start_es_mocks()
    yield
    # Teardown: restore the real ES client after the test.
    stop_es_mocks()
@pytest.fixture(autouse=True)
def start_responses_mocking(request):
    """Enable ``responses``, enforcing us to explicitly mark tests
    that require internet usage.
    """
    # Tests marked `allow_external_http_requests` opt out of HTTP mocking.
    marker = request.node.get_closest_marker('allow_external_http_requests')
    if not marker:
        responses.start()
    yield
    try:
        if not marker:
            responses.stop()
            responses.reset()
    except RuntimeError:
        # responses patcher was already uninstalled
        pass
@pytest.fixture(autouse=True)
def mock_basket(settings):
    """Mock Basket in tests by default.

    Tests that do need basket to work should disable `responses`
    and add a passthrough.

    Registers canned responses for the lookup-user, subscribe and
    unsubscribe endpoints, all sharing one fixed user token.
    """
    USER_TOKEN = u'13f64f64-1de7-42f6-8c7f-a19e2fae5021'
    responses.add(
        responses.GET,
        settings.BASKET_URL + '/news/lookup-user/',
        json={'status': 'ok', 'newsletters': [], 'token': USER_TOKEN})
    responses.add(
        responses.POST,
        settings.BASKET_URL + '/news/subscribe/',
        json={'status': 'ok', 'token': USER_TOKEN})
    responses.add(
        responses.POST,
        settings.BASKET_URL + '/news/unsubscribe/{}/'.format(USER_TOKEN),
        json={'status': 'ok', 'token': USER_TOKEN})
def pytest_configure(config):
    """Pytest hook: runs once before test collection starts."""
    import django
    # Forcefully call `django.setup`, pytest-django tries to be very lazy
    # and doesn't call it if it has already been setup.
    # That is problematic for us since we overwrite our logging config
    # in settings_test and it can happen that django get's initialized
    # with the wrong configuration. So let's forcefully re-initialize
    # to setup the correct logging config since at this point
    # DJANGO_SETTINGS_MODULE should be `settings_test` every time.
    django.setup()
    from olympia.amo.tests import prefix_indexes
    # Give ES indexes a per-run prefix so parallel runs don't collide.
    prefix_indexes(config)
@pytest.fixture(autouse=True, scope='session')
def instrument_jinja():
    """Make sure the "templates" list in a response is properly updated, even
    though we're using Jinja2 and not the default django template engine."""
    import jinja2
    from django import test
    old_render = jinja2.Template.render
    def instrumented_render(self, *args, **kwargs):
        context = dict(*args, **kwargs)
        # Fire Django's template_rendered signal so the test client records
        # the template/context, just like the Django engine would.
        test.signals.template_rendered.send(
            sender=self, template=self, context=context)
        return old_render(self, *args, **kwargs)
    # Session-wide monkeypatch; intentionally never reverted.
    jinja2.Template.render = instrumented_render
def default_prefixer(settings):
    """Make sure each test starts with a default URL prefixer."""
    from django import http
    from olympia import amo
    # Build a minimal fake request so a Prefixer can be constructed
    # outside a real HTTP request cycle.
    request = http.HttpRequest()
    request.META['SCRIPT_NAME'] = ''
    prefixer = amo.urlresolvers.Prefixer(request)
    prefixer.app = settings.DEFAULT_APP
    prefixer.locale = settings.LANGUAGE_CODE
    amo.urlresolvers.set_url_prefix(prefixer)
@pytest.yield_fixture(autouse=True)
def test_pre_setup(request, tmpdir, settings):
    """Per-test environment isolation: randomized cache prefix, reset
    translations, per-test storage directories, default URL prefixer;
    reverts everything after the test."""
    from django.core.cache import caches
    from django.utils import translation
    from olympia import amo, core
    from olympia.translations.hold import clean_translations
    from waffle.utils import get_cache as waffle_get_cache
    from waffle import models as waffle_models
    # Ignore ResourceWarning for now. It's a Python 3 thing so it's done
    # dynamically here.
    if six.PY3:
        warnings.filterwarnings('ignore', category=ResourceWarning) # noqa
    # Clear all cache-instances. They'll be re-initialized by Django
    # This will make sure that our random `KEY_PREFIX` is applied
    # appropriately.
    # This is done by Django too whenever `settings` is changed
    # directly but because we're using the `settings` fixture
    # here this is not detected correctly.
    caches._caches.caches = {}
    # Randomize the cache key prefix to keep
    # tests isolated from each other.
    prefix = uuid.uuid4().hex
    settings.CACHES['default']['KEY_PREFIX'] = 'amo:{0}:'.format(prefix)
    # Reset global django-waffle cache instance to make sure it's properly
    # using our new key prefix
    waffle_models.cache = waffle_get_cache()
    translation.trans_real.deactivate()
    # Django fails to clear this cache.
    translation.trans_real._translations = {}
    translation.trans_real.activate(settings.LANGUAGE_CODE)
    # Helper: create the directory (and parents) if needed, return it as str.
    def _path(*args):
        path = str(os.path.join(*args))
        if not os.path.exists(path):
            os.makedirs(path)
        return path
    settings.STORAGE_ROOT = storage_root = _path(str(tmpdir.mkdir('storage')))
    settings.SHARED_STORAGE = shared_storage = _path(
        storage_root, 'shared_storage')
    settings.ADDONS_PATH = _path(storage_root, 'files')
    settings.GUARDED_ADDONS_PATH = _path(storage_root, 'guarded-addons')
    settings.GIT_FILE_STORAGE_PATH = _path(storage_root, 'git-storage')
    settings.MEDIA_ROOT = _path(shared_storage, 'uploads')
    settings.TMP_PATH = _path(shared_storage, 'tmp')
    # Reset the prefixer and urlconf after updating media root
    default_prefixer(settings)
    from django.urls import clear_url_caches, set_urlconf
    def _clear_urlconf():
        clear_url_caches()
        set_urlconf(None)
    _clear_urlconf()
    request.addfinalizer(_clear_urlconf)
    yield
    # Teardown: reset the request-global user and URL prefixers.
    core.set_user(None)
    clean_translations(None)  # Make sure queued translations are removed.
    # Make sure we revert everything we might have changed to prefixers.
    amo.urlresolvers.clean_url_prefixes()
@pytest.fixture
def admin_group(db):
    """Return a freshly-created 'Admins' group granting every permission."""
    from olympia.access.models import Group
    admins = Group.objects.create(name='Admins', rules='*:*')
    return admins
@pytest.fixture
def mozilla_user(admin_group, settings):
    """Create a "Mozilla User"."""
    from olympia.access.models import GroupUser
    from olympia.users.models import UserProfile
    # Uses the well-known TASK_USER_ID so code paths that look up the
    # task user find this account.
    user = UserProfile.objects.create(pk=settings.TASK_USER_ID,
                                      email='admin@mozilla.com',
                                      username='admin')
    # NOTE(review): save() right after create() looks redundant — confirm
    # whether a signal or denormalization depends on this second save.
    user.save()
    GroupUser.objects.create(user=user, group=admin_group)
    return user
| kumar303/addons-server | conftest.py | Python | bsd-3-clause | 7,121 |
# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from astropy.utils.tests.test_metadata import MetaBaseTest
import operator
import pytest
import numpy as np
from numpy.testing import assert_array_equal
from astropy.tests.helper import assert_follows_unicode_guidelines
from astropy import table
from astropy import time
from astropy import units as u
class TestColumn():
def test_subclass(self, Column):
c = Column(name='a')
assert isinstance(c, np.ndarray)
c2 = c * 2
assert isinstance(c2, Column)
assert isinstance(c2, np.ndarray)
    def test_numpy_ops(self, Column):
        """Show that basic numpy operations with Column behave sensibly"""
        arr = np.array([1, 2, 3])
        c = Column(arr, name='a')
        # Comparing identical data: eq-like operators must be all-True and
        # ne-like operators all-False, regardless of operand order.
        for op, test_equal in ((operator.eq, True),
                               (operator.ne, False),
                               (operator.ge, True),
                               (operator.gt, False),
                               (operator.le, True),
                               (operator.lt, False)):
            for eq in (op(c, arr), op(arr, c)):
                assert np.all(eq) if test_equal else not np.any(eq)
                assert len(eq) == 3
                # Comparison results drop down to plain (masked) arrays.
                if Column is table.Column:
                    assert type(eq) == np.ndarray
                else:
                    assert type(eq) == np.ma.core.MaskedArray
                assert eq.dtype.str == '|b1'
        lt = c - 1 < arr
        assert np.all(lt)
    def test_numpy_boolean_ufuncs(self, Column):
        """Show that basic numpy operations with Column behave sensibly"""
        arr = np.array([1, 2, 3])
        c = Column(arr, name='a')
        # For finite positive ints: isfinite/sign are truthy everywhere,
        # isinf/isnan/signbit are false everywhere.
        for ufunc, test_true in ((np.isfinite, True),
                                 (np.isinf, False),
                                 (np.isnan, False),
                                 (np.sign, True),
                                 (np.signbit, False)):
            result = ufunc(c)
            assert len(result) == len(c)
            assert np.all(result) if test_true else not np.any(result)
            # Ufunc results drop down to plain (masked) arrays.
            if Column is table.Column:
                assert type(result) == np.ndarray
            else:
                assert type(result) == np.ma.core.MaskedArray
            if ufunc is not np.sign:
                assert result.dtype.str == '|b1'
def test_view(self, Column):
c = np.array([1, 2, 3], dtype=np.int64).view(Column)
assert repr(c) == f"<{Column.__name__} dtype='int64' length=3>\n1\n2\n3"
    def test_format(self, Column):
        """Show that the formatted output from str() works"""
        from astropy import conf
        # Limit output so the 2000-row column is shown abbreviated with '...'.
        with conf.set_temp('max_lines', 8):
            c1 = Column(np.arange(2000), name='a', dtype=float,
                        format='%6.2f')
            assert str(c1).splitlines() == [' a ',
                                            '-------',
                                            '   0.00',
                                            '   1.00',
                                            '    ...',
                                            '1998.00',
                                            '1999.00',
                                            'Length = 2000 rows']
def test_convert_numpy_array(self, Column):
d = Column([1, 2, 3], name='a', dtype='i8')
np_data = np.array(d)
assert np.all(np_data == d)
np_data = np.array(d, copy=False)
assert np.all(np_data == d)
np_data = np.array(d, dtype='i4')
assert np.all(np_data == d)
def test_convert_unit(self, Column):
d = Column([1, 2, 3], name='a', dtype="f8", unit="m")
d.convert_unit_to("km")
assert np.all(d.data == [0.001, 0.002, 0.003])
    def test_array_wrap(self):
        """Test that the __array_wrap__ method converts a reduction ufunc
        output that has a different shape into an ndarray view. Without this a
        method call like c.mean() returns a Column array object with length=1."""
        # Mean and sum for a 1-d float column
        c = table.Column(name='a', data=[1., 2., 3.])
        assert np.allclose(c.mean(), 2.0)
        assert isinstance(c.mean(), (np.floating, float))
        assert np.allclose(c.sum(), 6.)
        assert isinstance(c.sum(), (np.floating, float))
        # Non-reduction ufunc preserves Column class
        assert isinstance(np.cos(c), table.Column)
        # Sum for a 1-d int column
        c = table.Column(name='a', data=[1, 2, 3])
        assert np.allclose(c.sum(), 6)
        assert isinstance(c.sum(), (np.integer, int))
        # Sum for a 2-d int column
        c = table.Column(name='a', data=[[1, 2, 3],
                                         [4, 5, 6]])
        assert c.sum() == 21
        assert isinstance(c.sum(), (np.integer, int))
        # Axis reductions keep array shape, so stay ndarray-like.
        assert np.all(c.sum(axis=0) == [5, 7, 9])
        assert c.sum(axis=0).shape == (3,)
        assert isinstance(c.sum(axis=0), np.ndarray)
        # Sum and mean for a 1-d masked column
        # (masked element is excluded from the statistics)
        c = table.MaskedColumn(name='a', data=[1., 2., 3.], mask=[0, 0, 1])
        assert np.allclose(c.mean(), 1.5)
        assert isinstance(c.mean(), (np.floating, float))
        assert np.allclose(c.sum(), 3.)
        assert isinstance(c.sum(), (np.floating, float))
def test_name_none(self, Column):
"""Can create a column without supplying name, which defaults to None"""
c = Column([1, 2])
assert c.name is None
assert np.all(c == np.array([1, 2]))
def test_quantity_init(self, Column):
c = Column(data=np.array([1, 2, 3]) * u.m)
assert np.all(c.data == np.array([1, 2, 3]))
assert np.all(c.unit == u.m)
c = Column(data=np.array([1, 2, 3]) * u.m, unit=u.cm)
assert np.all(c.data == np.array([100, 200, 300]))
assert np.all(c.unit == u.cm)
def test_quantity_comparison(self, Column):
# regression test for gh-6532
c = Column([1, 2100, 3], unit='Hz')
q = 2 * u.kHz
check = c < q
assert np.all(check == [True, False, True])
# This already worked, but just in case.
check = q >= c
assert np.all(check == [True, False, True])
    def test_attrs_survive_getitem_after_change(self, Column):
        """
        Test for issue #3023: when calling getitem with a MaskedArray subclass
        the original object attributes are not copied.
        """
        c1 = Column([1, 2, 3], name='a', unit='m', format='%i',
                    description='aa', meta={'a': 1})
        # Mutate every attribute after construction, then check the mutated
        # values survive all forms of array-returning getitem.
        c1.name = 'b'
        c1.unit = 'km'
        c1.format = '%d'
        c1.description = 'bb'
        c1.meta = {'bbb': 2}
        # slice, fancy index and boolean mask all return Columns.
        for item in (slice(None, None), slice(None, 1), np.array([0, 2]),
                     np.array([False, True, False])):
            c2 = c1[item]
            assert c2.name == 'b'
            assert c2.unit is u.km
            assert c2.format == '%d'
            assert c2.description == 'bb'
            assert c2.meta == {'bbb': 2}
        # Make sure that calling getitem resulting in a scalar does
        # not copy attributes.
        val = c1[1]
        for attr in ('name', 'unit', 'format', 'description', 'meta'):
            assert not hasattr(val, attr)
def test_to_quantity(self, Column):
    """``.quantity`` and ``.to()`` conversions of a unit-ful Column."""
    d = Column([1, 2, 3], name='a', dtype="f8", unit="m")
    assert np.all(d.quantity == ([1, 2, 3.] * u.m))
    assert np.all(d.quantity.value == ([1, 2, 3.] * u.m).value)
    assert np.all(d.quantity == d.to('m'))
    assert np.all(d.quantity.value == d.to('m').value)
    np.testing.assert_allclose(d.to(u.km).value, ([.001, .002, .003] * u.km).value)
    np.testing.assert_allclose(d.to('km').value, ([.001, .002, .003] * u.km).value)
    # Unit equivalencies (here spectral) are forwarded to the conversion.
    np.testing.assert_allclose(d.to(u.MHz, u.equivalencies.spectral()).value,
                               [299.792458, 149.896229, 99.93081933])
    # A unitless column can only convert to dimensionless.
    d_nounit = Column([1, 2, 3], name='a', dtype="f8", unit=None)
    with pytest.raises(u.UnitsError):
        d_nounit.to(u.km)
    assert np.all(d_nounit.to(u.dimensionless_unscaled) == np.array([1, 2, 3]))
    # make sure the correct copy/no copy behavior is happening
    q = [1, 3, 5] * u.km
    # to should always make a copy
    d.to(u.km)[:] = q
    np.testing.assert_allclose(d, [1, 2, 3])
    # explicit copying of the quantity should not change the column
    d.quantity.copy()[:] = q
    np.testing.assert_allclose(d, [1, 2, 3])
    # but quantity directly is a "view", accessing the underlying column
    d.quantity[:] = q
    np.testing.assert_allclose(d, [1000, 3000, 5000])
    # view should also work for integers
    d2 = Column([1, 2, 3], name='a', dtype=int, unit="m")
    d2.quantity[:] = q
    np.testing.assert_allclose(d2, [1000, 3000, 5000])
    # but it should fail for strings or other non-numeric tables
    d3 = Column(['arg', 'name', 'stuff'], name='a', unit="m")
    with pytest.raises(TypeError):
        d3.quantity
def test_to_funcunit_quantity(self, Column):
    """
    Regression test for #8424: a function unit (dex) must round-trip
    through ``.quantity`` and ``.to()``.
    """
    col = Column([1, 2, 3], name='a', dtype="f8", unit="dex(AA)")
    expected = [1, 2, 3] * u.dex(u.AA)
    assert np.all(col.quantity == expected)
    assert np.all(col.quantity.value == expected.value)
    assert np.all(col.quantity == col.to("dex(AA)"))
    assert np.all(col.quantity.value == col.to("dex(AA)").value)
    # Casting from the function unit to its physical (linear) unit works.
    np.testing.assert_allclose(col.to(u.AA), [10, 100, 1000] * u.AA)
def test_item_access_type(self, Column):
    """
    Tests for #3095, which forces integer item access to always return a plain
    ndarray or MaskedArray, even in the case of a multi-dim column.
    """
    integer_types = (int, np.int_)
    for int_type in integer_types:
        # Multi-dim column: scalar index returns a bare (masked) ndarray row.
        c = Column([[1, 2], [3, 4]])
        i0 = int_type(0)
        i1 = int_type(1)
        assert np.all(c[i0] == [1, 2])
        assert type(c[i0]) == (np.ma.MaskedArray if hasattr(Column, 'mask') else np.ndarray)
        assert c[i0].shape == (2,)
        # Slicing still returns a Column.
        c01 = c[i0:i1]
        assert np.all(c01 == [[1, 2]])
        assert isinstance(c01, Column)
        assert c01.shape == (1, 2)
        # 1-d column: scalar index returns a numpy scalar, not a Column.
        c = Column([1, 2])
        assert np.all(c[i0] == 1)
        assert isinstance(c[i0], np.integer)
        assert c[i0].shape == ()
        c01 = c[i0:i1]
        assert np.all(c01 == [1])
        assert isinstance(c01, Column)
        assert c01.shape == (1,)
def test_insert_basic(self, Column):
    """Scalar/array insert at positive and negative indices; attrs preserved."""
    c = Column([0, 1, 2], name='a', dtype=int, unit='mJy', format='%i',
               description='test column', meta={'c': 8, 'd': 12})
    # Basic insert
    c1 = c.insert(1, 100)
    assert np.all(c1 == [0, 100, 1, 2])
    assert c1.attrs_equal(c)
    assert type(c) is type(c1)
    if hasattr(c1, 'mask'):
        assert c1.data.shape == c1.mask.shape
    # Negative and end-of-column indices behave like np.insert.
    c1 = c.insert(-1, 100)
    assert np.all(c1 == [0, 1, 100, 2])
    c1 = c.insert(3, 100)
    assert np.all(c1 == [0, 1, 2, 100])
    c1 = c.insert(-3, 100)
    assert np.all(c1 == [100, 0, 1, 2])
    c1 = c.insert(1, [100, 200, 300])
    if hasattr(c1, 'mask'):
        assert c1.data.shape == c1.mask.shape
    # Out of bounds index
    with pytest.raises((ValueError, IndexError)):
        c1 = c.insert(-4, 100)
    with pytest.raises((ValueError, IndexError)):
        c1 = c.insert(4, 100)
def test_insert_axis(self, Column):
    """Check that ``insert`` honors a non-default ``axis`` keyword."""
    col = Column([[1, 2], [3, 4]])
    # axis=None flattens before inserting, like np.insert.
    flat = col.insert(1, [5, 6], axis=None)
    assert np.all(flat == [1, 5, 6, 2, 3, 4])
    # axis=1 inserts a new element into each row.
    wide = col.insert(1, [5, 6], axis=1)
    assert np.all(wide == [[1, 5, 2], [3, 6, 4]])
def test_insert_string_expand(self, Column):
    """Inserting a longer string widens the column's string dtype as needed."""
    col = Column(['a', 'b'])
    assert np.all(col.insert(0, 'abc') == ['abc', 'a', 'b'])
    col = Column(['a', 'b'])
    assert np.all(col.insert(0, ['c', 'def']) == ['c', 'def', 'a', 'b'])
def test_insert_string_masked_values(self):
    """Insert np.ma.masked (scalar or in an array) into a string MaskedColumn."""
    col = table.MaskedColumn(['a', 'b'])
    with_scalar = col.insert(0, np.ma.masked)
    assert np.all(with_scalar == ['', 'a', 'b'])
    assert np.all(with_scalar.mask == [True, False, False])
    assert with_scalar.dtype == 'U1'
    # Inserting a masked array widens the dtype to fit the widest value.
    with_array = col.insert(1, np.ma.MaskedArray(['ccc', 'dd'], mask=[True, False]))
    assert np.all(with_array == ['a', 'ccc', 'dd', 'b'])
    assert np.all(with_array.mask == [False, True, False, False])
    assert with_array.dtype == 'U3'
def test_insert_string_type_error(self, Column):
    """Type mismatches between column dtype and inserted value must raise."""
    int_col = Column([1, 2])
    with pytest.raises(ValueError, match='invalid literal for int'):
        int_col.insert(0, 'string')
    str_col = Column(['a', 'b'])
    with pytest.raises(TypeError, match='string operation on non-string array'):
        str_col.insert(0, 1)
def test_insert_multidim(self, Column):
    """Insert into a 2-d column: matching row, broadcast scalar, bad shape."""
    col = Column([[1, 2],
                  [3, 4]], name='a', dtype=int)
    # Row with matching shape.
    out = col.insert(1, [100, 200])
    assert np.all(out == [[1, 2], [100, 200], [3, 4]])
    # A scalar broadcasts across the inserted row.
    out = col.insert(1, 100)
    assert np.all(out == [[1, 2], [100, 100], [3, 4]])
    # A mismatched shape is rejected.
    with pytest.raises(ValueError):
        col.insert(1, [100, 200, 300])
def test_insert_object(self, Column):
    """Insert into an object-dtype column keeps the inserted value as one item."""
    col = Column(['a', 1, None], name='a', dtype=object)
    out = col.insert(1, [100, 200])
    assert np.all(out == np.array(['a', [100, 200], 1, None],
                                  dtype=object))
def test_insert_masked(self):
    """Insert into a MaskedColumn: data, fill_value and mask all updated."""
    c = table.MaskedColumn([0, 1, 2], name='a', fill_value=9999,
                           mask=[False, True, False])
    # Basic insert
    c1 = c.insert(1, 100)
    assert np.all(c1.data.data == [0, 100, 1, 2])
    assert c1.fill_value == 9999
    assert np.all(c1.data.mask == [False, False, True, False])
    assert type(c) is type(c1)
    # The explicit mask= keyword controls the inserted element's mask.
    for mask in (False, True):
        c1 = c.insert(1, 100, mask=mask)
        assert np.all(c1.data.data == [0, 100, 1, 2])
        assert np.all(c1.data.mask == [False, mask, True, False])
def test_masked_multidim_as_list(self):
    """A list of masked arrays builds a 2-d MaskedColumn carrying the mask."""
    row = np.ma.MaskedArray([1, 2], mask=[True, False])
    col = table.MaskedColumn([row])
    assert col.shape == (1, 2)
    assert np.all(col[0].mask == [True, False])
def test_insert_masked_multidim(self):
    """Insert a row into a 2-d MaskedColumn with scalar and per-element masks."""
    c = table.MaskedColumn([[1, 2],
                            [3, 4]], name='a', dtype=int)
    c1 = c.insert(1, [100, 200], mask=True)
    assert np.all(c1.data.data == [[1, 2], [100, 200], [3, 4]])
    assert np.all(c1.data.mask == [[False, False], [True, True], [False, False]])
    c1 = c.insert(1, [100, 200], mask=[True, False])
    assert np.all(c1.data.data == [[1, 2], [100, 200], [3, 4]])
    assert np.all(c1.data.mask == [[False, False], [True, False], [False, False]])
    # The mask shape must match the inserted values.
    with pytest.raises(ValueError):
        c1 = c.insert(1, [100, 200], mask=[True, False, True])
def test_mask_on_non_masked_table(self):
    """
    Setting the mask of a column in a non-masked table must raise
    AttributeError.
    """
    t = table.Table([[1, 2], [3, 4]], names=('a', 'b'), dtype=('i4', 'f8'))
    with pytest.raises(AttributeError):
        t['a'].mask = [True, False]
class TestAttrEqual():
    """Bunch of tests originally from ATpy that test the attrs_equal method."""

    def test_5(self, Column):
        # Identical minimal attributes -> equal.
        c1 = Column(name='a', dtype=int, unit='mJy')
        c2 = Column(name='a', dtype=int, unit='mJy')
        assert c1.attrs_equal(c2)

    def test_6(self, Column):
        # Identical full attribute sets -> equal.
        c1 = Column(name='a', dtype=int, unit='mJy', format='%i',
                    description='test column', meta={'c': 8, 'd': 12})
        c2 = Column(name='a', dtype=int, unit='mJy', format='%i',
                    description='test column', meta={'c': 8, 'd': 12})
        assert c1.attrs_equal(c2)

    def test_7(self, Column):
        # Differing name -> not equal.
        c1 = Column(name='a', dtype=int, unit='mJy', format='%i',
                    description='test column', meta={'c': 8, 'd': 12})
        c2 = Column(name='b', dtype=int, unit='mJy', format='%i',
                    description='test column', meta={'c': 8, 'd': 12})
        assert not c1.attrs_equal(c2)

    def test_8(self, Column):
        # Differing dtype -> not equal.
        c1 = Column(name='a', dtype=int, unit='mJy', format='%i',
                    description='test column', meta={'c': 8, 'd': 12})
        c2 = Column(name='a', dtype=float, unit='mJy', format='%i',
                    description='test column', meta={'c': 8, 'd': 12})
        assert not c1.attrs_equal(c2)

    def test_9(self, Column):
        # Differing unit -> not equal.
        c1 = Column(name='a', dtype=int, unit='mJy', format='%i',
                    description='test column', meta={'c': 8, 'd': 12})
        c2 = Column(name='a', dtype=int, unit='erg.cm-2.s-1.Hz-1', format='%i',
                    description='test column', meta={'c': 8, 'd': 12})
        assert not c1.attrs_equal(c2)

    def test_10(self, Column):
        # Differing format -> not equal.
        c1 = Column(name='a', dtype=int, unit='mJy', format='%i',
                    description='test column', meta={'c': 8, 'd': 12})
        c2 = Column(name='a', dtype=int, unit='mJy', format='%g',
                    description='test column', meta={'c': 8, 'd': 12})
        assert not c1.attrs_equal(c2)

    def test_11(self, Column):
        # Differing description -> not equal.
        c1 = Column(name='a', dtype=int, unit='mJy', format='%i',
                    description='test column', meta={'c': 8, 'd': 12})
        c2 = Column(name='a', dtype=int, unit='mJy', format='%i',
                    description='another test column', meta={'c': 8, 'd': 12})
        assert not c1.attrs_equal(c2)

    def test_12(self, Column):
        # Differing meta key -> not equal.
        c1 = Column(name='a', dtype=int, unit='mJy', format='%i',
                    description='test column', meta={'c': 8, 'd': 12})
        c2 = Column(name='a', dtype=int, unit='mJy', format='%i',
                    description='test column', meta={'e': 8, 'd': 12})
        assert not c1.attrs_equal(c2)

    def test_13(self, Column):
        # Differing meta value -> not equal.
        c1 = Column(name='a', dtype=int, unit='mJy', format='%i',
                    description='test column', meta={'c': 8, 'd': 12})
        c2 = Column(name='a', dtype=int, unit='mJy', format='%i',
                    description='test column', meta={'c': 9, 'd': 12})
        assert not c1.attrs_equal(c2)

    def test_col_and_masked_col(self):
        # attrs_equal must be symmetric across Column / MaskedColumn.
        c1 = table.Column(name='a', dtype=int, unit='mJy', format='%i',
                          description='test column', meta={'c': 8, 'd': 12})
        c2 = table.MaskedColumn(name='a', dtype=int, unit='mJy', format='%i',
                                description='test column', meta={'c': 8, 'd': 12})
        assert c1.attrs_equal(c2)
        assert c2.attrs_equal(c1)
# Check that the meta descriptor is working as expected. The MetaBaseTest class
# takes care of defining all the tests, and we simply have to define the class
# and any minimal set of args to pass.
class TestMetaColumn(MetaBaseTest):
    # Run the shared meta-descriptor test suite (MetaBaseTest) against Column.
    test_class = table.Column
    args = ()
class TestMetaMaskedColumn(MetaBaseTest):
    # Run the shared meta-descriptor test suite against MaskedColumn.
    test_class = table.MaskedColumn
    args = ()
def test_getitem_metadata_regression():
    """
    Regression test for #1471: MaskedArray does not call __array_finalize__ so
    the meta-data was not getting copied over. By overloading _update_from we
    are able to work around this bug.
    """
    # Make sure that meta-data gets propagated with __getitem__
    c = table.Column(data=[1, 2], name='a', description='b', unit='m', format="%i", meta={'c': 8})
    assert c[1:2].name == 'a'
    assert c[1:2].description == 'b'
    assert c[1:2].unit == 'm'
    assert c[1:2].format == '%i'
    assert c[1:2].meta['c'] == 8
    # Same checks for the masked variant.
    c = table.MaskedColumn(data=[1, 2], name='a', description='b',
                           unit='m', format="%i", meta={'c': 8})
    assert c[1:2].name == 'a'
    assert c[1:2].description == 'b'
    assert c[1:2].unit == 'm'
    assert c[1:2].format == '%i'
    assert c[1:2].meta['c'] == 8
    # As above, but with take() - check the method and the function
    c = table.Column(data=[1, 2, 3], name='a', description='b',
                     unit='m', format="%i", meta={'c': 8})
    for subset in [c.take([0, 1]), np.take(c, [0, 1])]:
        assert subset.name == 'a'
        assert subset.description == 'b'
        assert subset.unit == 'm'
        assert subset.format == '%i'
        assert subset.meta['c'] == 8
    # Metadata isn't copied for scalar values
    for subset in [c.take(0), np.take(c, 0)]:
        assert subset == 1
        assert subset.shape == ()
        assert not isinstance(subset, table.Column)
    c = table.MaskedColumn(data=[1, 2, 3], name='a', description='b',
                           unit='m', format="%i", meta={'c': 8})
    for subset in [c.take([0, 1]), np.take(c, [0, 1])]:
        assert subset.name == 'a'
        assert subset.description == 'b'
        assert subset.unit == 'm'
        assert subset.format == '%i'
        assert subset.meta['c'] == 8
    # Metadata isn't copied for scalar values
    for subset in [c.take(0), np.take(c, 0)]:
        assert subset == 1
        assert subset.shape == ()
        assert not isinstance(subset, table.MaskedColumn)
def test_unicode_guidelines():
    """A Column must satisfy the astropy unicode guidelines helper check."""
    col = table.Column(np.array([1, 2, 3]), name='a')
    assert_follows_unicode_guidelines(col)
def test_scalar_column():
    """
    Column is not designed to hold scalars, but with numpy 1.6 an operation
    like ``np.std(table.Column([1, 2]))`` could yield a scalar Column; its
    repr/str should then be the scalar's own.
    """
    scalar = table.Column(1.5)
    assert repr(scalar) == '1.5'
    assert str(scalar) == '1.5'
def test_qtable_column_conversion():
    """
    Ensures that a QTable that gets assigned a unit switches to be Quantity-y
    """
    qtab = table.QTable([[1, 2], [3, 4.2]], names=['i', 'f'])
    assert isinstance(qtab['i'], table.column.Column)
    assert isinstance(qtab['f'], table.column.Column)
    # Assigning a unit in a QTable converts the column to a Quantity.
    qtab['i'].unit = 'km/s'
    assert isinstance(qtab['i'], u.Quantity)
    assert isinstance(qtab['f'], table.column.Column)
    # should follow from the above, but good to make sure as a #4497 regression test
    assert isinstance(qtab['i'][0], u.Quantity)
    assert isinstance(qtab[0]['i'], u.Quantity)
    assert not isinstance(qtab['f'][0], u.Quantity)
    assert not isinstance(qtab[0]['f'], u.Quantity)
    # Regression test for #5342: if a function unit is assigned, the column
    # should become the appropriate FunctionQuantity subclass.
    qtab['f'].unit = u.dex(u.cm / u.s**2)
    assert isinstance(qtab['f'], u.Dex)
@pytest.mark.parametrize('masked', [True, False])
def test_string_truncation_warning(masked):
    """
    Test warnings associated with in-place assignment to a string
    column that results in truncation of the right hand side.
    """
    from inspect import currentframe, getframeinfo
    t = table.Table([['aa', 'bb']], names=['a'], masked=masked)
    # Assignments that fit within the U2 dtype must not warn.
    t['a'][1] = 'cc'
    t['a'][:] = 'dd'
    with pytest.warns(table.StringTruncateWarning, match=r'truncated right side '
                      r'string\(s\) longer than 2 character\(s\)') as w:
        frameinfo = getframeinfo(currentframe())
        t['a'][0] = 'eee'  # replace item with string that gets truncated
    assert t['a'][0] == 'ee'
    assert len(w) == 1
    # Make sure the warning points back to the user code line
    # (the assignment is one line below where frameinfo was captured).
    assert w[0].lineno == frameinfo.lineno + 1
    assert 'test_column' in w[0].filename
    with pytest.warns(table.StringTruncateWarning, match=r'truncated right side '
                      r'string\(s\) longer than 2 character\(s\)') as w:
        t['a'][:] = ['ff', 'ggg']  # replace item with string that gets truncated
    assert np.all(t['a'] == ['ff', 'gg'])
    assert len(w) == 1
    # Test the obscure case of assigning from an array that was originally
    # wider than any of the current elements (i.e. dtype is U4 but actual
    # elements are U1 at the time of assignment).
    val = np.array(['ffff', 'gggg'])
    val[:] = ['f', 'g']
    t['a'][:] = val
    assert np.all(t['a'] == ['f', 'g'])
def test_string_truncation_warning_masked():
    """
    Test warnings associated with in-place assignment to a string
    to a masked column, specifically where the right hand side
    contains np.ma.masked.
    """
    # Test for strings, but also cover assignment of np.ma.masked to
    # int and float masked column setting. This was previously only
    # covered in an unrelated io.ascii test (test_line_endings) which
    # showed an unexpected difference between handling of str and numeric
    # masked arrays.
    for values in (['a', 'b'], [1, 2], [1.0, 2.0]):
        mc = table.MaskedColumn(values)
        mc[1] = np.ma.masked
        assert np.all(mc.mask == [False, True])
        mc[:] = np.ma.masked
        assert np.all(mc.mask == [True, True])
    # np.ma.masked on the RHS must not itself trigger truncation warnings;
    # only the over-long string element does.
    mc = table.MaskedColumn(['aa', 'bb'])
    with pytest.warns(table.StringTruncateWarning, match=r'truncated right side '
                      r'string\(s\) longer than 2 character\(s\)') as w:
        mc[:] = [np.ma.masked, 'ggg']  # replace item with string that gets truncated
    assert mc[1] == 'gg'
    assert np.all(mc.mask == [True, False])
    assert len(w) == 1
@pytest.mark.parametrize('Column', (table.Column, table.MaskedColumn))
def test_col_unicode_sandwich_create_from_str(Column):
    """
    Create a bytestring Column from strings (including unicode) in Py3.
    """
    # 'ä' is a 2-byte character in utf-8; this stresses non-ASCII handling
    # (the test would fail with an ascii encoding).
    non_ascii = 'bä'
    col = Column([non_ascii, 'def'], dtype='S')
    assert col.dtype.char == 'S'
    assert col[0] == non_ascii
    assert isinstance(col[0], str)
    assert isinstance(col[:0], table.Column)
    assert np.all(col[:2] == np.array([non_ascii, 'def']))
@pytest.mark.parametrize('Column', (table.Column, table.MaskedColumn))
def test_col_unicode_sandwich_bytes_obj(Column):
    """
    An object-dtype Column holding bytes must keep them as bytes — no
    implicit decoding to str on access.
    """
    col = Column([None, b'def'])
    assert col.dtype.char == 'O'
    assert not col[0]
    assert col[1] == b'def'
    assert isinstance(col[1], bytes)
    assert not isinstance(col[1], str)
    assert isinstance(col[:0], table.Column)
    assert np.all(col[:2] == np.array([None, b'def']))
    assert not np.all(col[:2] == np.array([None, 'def']))
@pytest.mark.parametrize('Column', (table.Column, table.MaskedColumn))
def test_col_unicode_sandwich_bytes(Column):
    """
    Create a bytestring Column from bytes and ensure that it works in Python 3 in
    a convenient way like in Python 2.
    """
    # a-umlaut is a 2-byte character in utf-8, test fails with ascii encoding.
    # Stress the system by injecting non-ASCII characters.
    uba = 'bä'
    uba8 = uba.encode('utf-8')
    c = Column([uba8, b'def'])
    assert c.dtype.char == 'S'
    assert c[0] == uba
    assert isinstance(c[0], str)
    assert isinstance(c[:0], table.Column)
    assert np.all(c[:2] == np.array([uba, 'def']))
    assert isinstance(c[:], table.Column)
    assert c[:].dtype.char == 'S'
    # Array / list comparisons
    assert np.all(c == [uba, 'def'])
    # Comparison yields a boolean array of the same class as the data.
    ok = c == [uba8, b'def']
    assert type(ok) is type(c.data)  # noqa
    assert ok.dtype.char == '?'
    assert np.all(ok)
    assert np.all(c == np.array([uba, 'def']))
    assert np.all(c == np.array([uba8, b'def']))
    # Scalar compare
    cmps = (uba, uba8)
    for cmp in cmps:
        ok = c == cmp
        assert type(ok) is type(c.data)  # noqa
        assert np.all(ok == [True, False])
def test_col_unicode_sandwich_unicode():
    """
    Sanity check that Unicode Column behaves normally.
    """
    uba = 'bä'
    uba8 = uba.encode('utf-8')
    c = table.Column([uba, 'def'], dtype='U')
    assert c[0] == uba
    assert isinstance(c[:0], table.Column)
    assert isinstance(c[0], str)
    assert np.all(c[:2] == np.array([uba, 'def']))
    assert isinstance(c[:], table.Column)
    assert c[:].dtype.char == 'U'
    ok = c == [uba, 'def']
    assert type(ok) == np.ndarray
    assert ok.dtype.char == '?'
    assert np.all(ok)
    # A unicode column must NOT compare equal to the utf-8 encoded bytes.
    assert np.all(c != [uba8, b'def'])
def test_masked_col_unicode_sandwich():
    """
    Create a bytestring MaskedColumn and ensure that it works in Python 3 in
    a convenient way like in Python 2.
    """
    c = table.MaskedColumn([b'abc', b'def'])
    c[1] = np.ma.masked
    assert isinstance(c[:0], table.MaskedColumn)
    assert isinstance(c[0], str)
    assert c[0] == 'abc'
    assert c[1] is np.ma.masked
    assert isinstance(c[:], table.MaskedColumn)
    assert c[:].dtype.char == 'S'
    # Masked elements stay masked through comparisons.
    ok = c == ['abc', 'def']
    assert ok[0] == True  # noqa
    assert ok[1] is np.ma.masked
    assert np.all(c == [b'abc', b'def'])
    assert np.all(c == np.array(['abc', 'def']))
    assert np.all(c == np.array([b'abc', b'def']))
    # Scalar compare against str and bytes behaves the same.
    for cmp in ('abc', b'abc'):
        ok = c == cmp
        assert type(ok) is np.ma.MaskedArray
        assert ok[0] == True  # noqa
        assert ok[1] is np.ma.masked
@pytest.mark.parametrize('Column', (table.Column, table.MaskedColumn))
def test_unicode_sandwich_set(Column):
    """
    Test item and slice assignment of bytes and unicode values into a
    bytestring column.
    """
    uba = 'bä'
    c = Column([b'abc', b'def'])
    c[0] = b'aa'
    assert np.all(c == ['aa', 'def'])
    c[0] = uba  # a-umlaut is a 2-byte character in utf-8, test fails with ascii encoding
    assert np.all(c == [uba, 'def'])
    # 'None' in the header row is the (unset) column name — TODO confirm.
    assert c.pformat() == ['None', '----', ' ' + uba, ' def']
    c[:] = b'cc'
    assert np.all(c == ['cc', 'cc'])
    c[:] = uba
    assert np.all(c == [uba, uba])
    c[:] = ''
    c[:] = [uba, b'def']
    assert np.all(c == [uba, b'def'])
@pytest.mark.parametrize('class1', [table.MaskedColumn, table.Column])
@pytest.mark.parametrize('class2', [table.MaskedColumn, table.Column, str, list])
def test_unicode_sandwich_compare(class1, class2):
    """Test that comparing a bytestring Column/MaskedColumn with various
    str (unicode) object types gives the expected result. Tests #6838.
    """
    obj1 = class1([b'a', b'c'])
    if class2 is str:
        obj2 = 'a'
    elif class2 is list:
        obj2 = ['a', 'b']
    else:
        obj2 = class2(['a', 'b'])
    # Every rich comparison, in both operand orders, must agree with
    # element-wise unicode comparison.
    assert np.all((obj1 == obj2) == [True, False])
    assert np.all((obj2 == obj1) == [True, False])
    assert np.all((obj1 != obj2) == [False, True])
    assert np.all((obj2 != obj1) == [False, True])
    assert np.all((obj1 > obj2) == [False, True])
    assert np.all((obj2 > obj1) == [False, False])
    assert np.all((obj1 <= obj2) == [True, False])
    assert np.all((obj2 <= obj1) == [True, True])
    assert np.all((obj1 < obj2) == [False, False])
    assert np.all((obj2 < obj1) == [False, True])
    assert np.all((obj1 >= obj2) == [True, True])
    assert np.all((obj2 >= obj1) == [True, False])
def test_unicode_sandwich_masked_compare():
    """Test the fix for #6839 from #6899."""
    c1 = table.MaskedColumn(['a', 'b', 'c', 'd'],
                            mask=[True, False, True, False])
    c2 = table.MaskedColumn([b'a', b'b', b'c', b'd'],
                            mask=[True, True, False, False])
    # Positions masked in either operand are masked in the result.
    for cmp in ((c1 == c2), (c2 == c1)):
        assert cmp[0] is np.ma.masked
        assert cmp[1] is np.ma.masked
        assert cmp[2] is np.ma.masked
        assert cmp[3]
    for cmp in ((c1 != c2), (c2 != c1)):
        assert cmp[0] is np.ma.masked
        assert cmp[1] is np.ma.masked
        assert cmp[2] is np.ma.masked
        assert not cmp[3]
    # Note: comparisons <, >, >=, <= fail to return a masked array entirely,
    # see https://github.com/numpy/numpy/issues/10092.
def test_structured_masked_column_roundtrip():
    """A structured MaskedColumn survives re-wrapping in a new MaskedColumn."""
    original = table.MaskedColumn([(1., 2.), (3., 4.)],
                                  mask=[(False, False), (False, False)],
                                  dtype='f8,f8')
    assert len(original.dtype.fields) == 2
    duplicate = table.MaskedColumn(original)
    assert_array_equal(duplicate, original)
@pytest.mark.parametrize('dtype', ['i4,f4', 'f4,(2,)f8'])
def test_structured_empty_column_init(dtype):
    """length/shape initialization of a structured column gives the layout asked for."""
    structured_dtype = np.dtype(dtype)
    col = table.Column(length=5, shape=(2,), dtype=structured_dtype)
    assert col.shape == (5, 2)
    assert col.dtype == structured_dtype
def test_column_value_access():
    """Can a column's underlying data consistently be accessed via `.value`,
    whether it is a `Column`, `MaskedColumn`, `Quantity`, or `Time`?"""
    data = np.array([1, 2, 3])
    tbl = table.QTable({'a': table.Column(data),
                        'b': table.MaskedColumn(data),
                        'c': u.Quantity(data),
                        'd': time.Time(data, format='mjd')})
    expected_types = {'a': np.ndarray,
                      'b': np.ma.MaskedArray,
                      'c': np.ndarray,
                      'd': np.ndarray}
    for name, exp_type in expected_types.items():
        assert type(tbl[name].value) == exp_type
def test_masked_column_serialize_method_propagation():
    """serialize_method settings survive copy, re-init, view and slicing."""
    mc = table.MaskedColumn([1., 2., 3.], mask=[True, False, True])
    # Default for ECSV output is 'null_value'.
    assert mc.info.serialize_method['ecsv'] == 'null_value'
    mc.info.serialize_method['ecsv'] = 'data_mask'
    assert mc.info.serialize_method['ecsv'] == 'data_mask'
    mc2 = mc.copy()
    assert mc2.info.serialize_method['ecsv'] == 'data_mask'
    mc3 = table.MaskedColumn(mc)
    assert mc3.info.serialize_method['ecsv'] == 'data_mask'
    mc4 = mc.view(table.MaskedColumn)
    assert mc4.info.serialize_method['ecsv'] == 'data_mask'
    mc5 = mc[1:]
    assert mc5.info.serialize_method['ecsv'] == 'data_mask'
@pytest.mark.parametrize('dtype', ['S', 'U', 'i'])
def test_searchsorted(Column, dtype):
    """
    ``searchsorted`` on a Column/MaskedColumn must match ``np.searchsorted``
    on the underlying data, for bytes, unicode and integer dtypes.
    """
    c = Column([1, 2, 2, 3], dtype=dtype)
    # BUG FIX: ``Column`` here is a class supplied by the test fixture, so the
    # original ``isinstance(Column, table.MaskedColumn)`` was always False and
    # the masked branch never ran.  Compare the classes directly instead.
    if Column is table.MaskedColumn:
        # Searchsorted seems to ignore the mask
        c[2] = np.ma.masked
    if dtype == 'i':
        vs = (2, [2, 1])
    else:
        vs = ('2', ['2', '1'], b'2', [b'2', b'1'])
    for v in vs:
        v = np.array(v, dtype=dtype)
        exp = np.searchsorted(c.data, v, side='right')
        # Both the method and the numpy free function must agree.
        res = c.searchsorted(v, side='right')
        assert np.all(res == exp)
        res = np.searchsorted(c, v, side='right')
        assert np.all(res == exp)
| pllim/astropy | astropy/table/tests/test_column.py | Python | bsd-3-clause | 34,643 |
"""Tests for user-friendly public interface to polynomial functions. """
from sympy.polys.polytools import (
Poly, PurePoly, poly,
parallel_poly_from_expr,
degree, degree_list,
LC, LM, LT,
pdiv, prem, pquo, pexquo,
div, rem, quo, exquo,
half_gcdex, gcdex, invert,
subresultants,
resultant, discriminant,
terms_gcd, cofactors,
gcd, gcd_list,
lcm, lcm_list,
trunc,
monic, content, primitive,
compose, decompose,
sturm,
gff_list, gff,
sqf_norm, sqf_part, sqf_list, sqf,
factor_list, factor,
intervals, refine_root, count_roots,
real_roots, nroots, ground_roots,
nth_power_roots_poly,
cancel, reduced, groebner,
GroebnerBasis, is_zero_dimensional,
_torational_factor_list,
to_rational_coeffs)
from sympy.polys.polyerrors import (
MultivariatePolynomialError,
ExactQuotientFailed,
PolificationFailed,
ComputationFailed,
UnificationFailed,
RefinementFailed,
GeneratorsNeeded,
GeneratorsError,
PolynomialError,
CoercionFailed,
DomainError,
OptionError,
FlagError)
from sympy.polys.polyclasses import DMP
from sympy.polys.fields import field
from sympy.polys.domains import FF, ZZ, QQ, RR, EX
from sympy.polys.domains.realfield import RealField
from sympy.polys.orderings import lex, grlex, grevlex
from sympy import (
S, Integer, Rational, Float, Mul, Symbol, sqrt, Piecewise, Derivative,
exp, sin, tanh, expand, oo, I, pi, re, im, rootof, Eq, Tuple, Expr, diff)
from sympy.core.basic import _aresame
from sympy.core.compatibility import iterable
from sympy.core.mul import _keep_coeff
from sympy.utilities.pytest import raises, XFAIL
from sympy.simplify import simplify
from sympy.abc import a, b, c, d, p, q, t, w, x, y, z
from sympy import MatrixSymbol
def _epsilon_eq(a, b):
for x, y in zip(a, b):
if abs(x - y) > 1e-10:
return False
return True
def _strict_eq(a, b):
    """Recursive strict equality: same types at every level; at the leaves
    both must be Poly and compare equal with ``eq(..., strict=True)``."""
    if type(a) == type(b):
        if iterable(a):
            if len(a) == len(b):
                return all(_strict_eq(c, d) for c, d in zip(a, b))
            else:
                return False
        else:
            return isinstance(a, Poly) and a.eq(b, strict=True)
    else:
        return False
def test_Poly_from_dict():
    """Construct Poly from {monomial: coeff} dicts over FF/ZZ/QQ/EX domains."""
    K = FF(3)
    # Coefficients are reduced mod 3 in FF(3): 5 -> 2.
    assert Poly.from_dict(
        {0: 1, 1: 2}, gens=x, domain=K).rep == DMP([K(2), K(1)], K)
    assert Poly.from_dict(
        {0: 1, 1: 5}, gens=x, domain=K).rep == DMP([K(2), K(1)], K)
    assert Poly.from_dict(
        {(0,): 1, (1,): 2}, gens=x, domain=K).rep == DMP([K(2), K(1)], K)
    assert Poly.from_dict(
        {(0,): 1, (1,): 5}, gens=x, domain=K).rep == DMP([K(2), K(1)], K)
    assert Poly.from_dict({(0, 0): 1, (1, 1): 2}, gens=(
        x, y), domain=K).rep == DMP([[K(2), K(0)], [K(1)]], K)
    assert Poly.from_dict({0: 1, 1: 2}, gens=x).rep == DMP([ZZ(2), ZZ(1)], ZZ)
    assert Poly.from_dict(
        {0: 1, 1: 2}, gens=x, field=True).rep == DMP([QQ(2), QQ(1)], QQ)
    assert Poly.from_dict(
        {0: 1, 1: 2}, gens=x, domain=ZZ).rep == DMP([ZZ(2), ZZ(1)], ZZ)
    assert Poly.from_dict(
        {0: 1, 1: 2}, gens=x, domain=QQ).rep == DMP([QQ(2), QQ(1)], QQ)
    assert Poly.from_dict(
        {(0,): 1, (1,): 2}, gens=x).rep == DMP([ZZ(2), ZZ(1)], ZZ)
    assert Poly.from_dict(
        {(0,): 1, (1,): 2}, gens=x, field=True).rep == DMP([QQ(2), QQ(1)], QQ)
    assert Poly.from_dict(
        {(0,): 1, (1,): 2}, gens=x, domain=ZZ).rep == DMP([ZZ(2), ZZ(1)], ZZ)
    assert Poly.from_dict(
        {(0,): 1, (1,): 2}, gens=x, domain=QQ).rep == DMP([QQ(2), QQ(1)], QQ)
    # composite=False puts symbolic coefficients into the EX domain.
    assert Poly.from_dict({(1,): sin(y)}, gens=x, composite=False) == \
        Poly(sin(y)*x, x, domain='EX')
    assert Poly.from_dict({(1,): y}, gens=x, composite=False) == \
        Poly(y*x, x, domain='EX')
    assert Poly.from_dict({(1, 1): 1}, gens=(x, y), composite=False) == \
        Poly(x*y, x, y, domain='ZZ')
    assert Poly.from_dict({(1, 0): y}, gens=(x, z), composite=False) == \
        Poly(y*x, x, z, domain='EX')
def test_Poly_from_list():
    """Construct Poly from coefficient lists; nested (multivariate) lists fail."""
    K = FF(3)
    assert Poly.from_list([2, 1], gens=x, domain=K).rep == DMP([K(2), K(1)], K)
    assert Poly.from_list([5, 1], gens=x, domain=K).rep == DMP([K(2), K(1)], K)
    assert Poly.from_list([2, 1], gens=x).rep == DMP([ZZ(2), ZZ(1)], ZZ)
    assert Poly.from_list([2, 1], gens=x, field=True).rep == DMP([QQ(2), QQ(1)], QQ)
    assert Poly.from_list([2, 1], gens=x, domain=ZZ).rep == DMP([ZZ(2), ZZ(1)], ZZ)
    assert Poly.from_list([2, 1], gens=x, domain=QQ).rep == DMP([QQ(2), QQ(1)], QQ)
    # Float coefficients land in RR; leading zeros are stripped.
    assert Poly.from_list([0, 1.0], gens=x).rep == DMP([RR(1.0)], RR)
    assert Poly.from_list([1.0, 0], gens=x).rep == DMP([RR(1.0), RR(0.0)], RR)
    raises(MultivariatePolynomialError, lambda: Poly.from_list([[]], gens=(x, y)))
def test_Poly_from_poly():
    """Re-wrap existing Polys with new gens/domain/modulus options."""
    f = Poly(x + 7, x, domain=ZZ)
    g = Poly(x + 2, x, modulus=3)
    h = Poly(x + y, x, y, domain=ZZ)
    K = FF(3)
    # --- univariate f over ZZ ---
    assert Poly.from_poly(f) == f
    assert Poly.from_poly(f, domain=K).rep == DMP([K(1), K(1)], K)
    assert Poly.from_poly(f, domain=ZZ).rep == DMP([1, 7], ZZ)
    assert Poly.from_poly(f, domain=QQ).rep == DMP([1, 7], QQ)
    assert Poly.from_poly(f, gens=x) == f
    assert Poly.from_poly(f, gens=x, domain=K).rep == DMP([K(1), K(1)], K)
    assert Poly.from_poly(f, gens=x, domain=ZZ).rep == DMP([1, 7], ZZ)
    assert Poly.from_poly(f, gens=x, domain=QQ).rep == DMP([1, 7], QQ)
    # With gens=y, x becomes a coefficient symbol; numeric domains then fail.
    assert Poly.from_poly(f, gens=y) == Poly(x + 7, y, domain='ZZ[x]')
    raises(CoercionFailed, lambda: Poly.from_poly(f, gens=y, domain=K))
    raises(CoercionFailed, lambda: Poly.from_poly(f, gens=y, domain=ZZ))
    raises(CoercionFailed, lambda: Poly.from_poly(f, gens=y, domain=QQ))
    assert Poly.from_poly(f, gens=(x, y)) == Poly(x + 7, x, y, domain='ZZ')
    assert Poly.from_poly(
        f, gens=(x, y), domain=ZZ) == Poly(x + 7, x, y, domain='ZZ')
    assert Poly.from_poly(
        f, gens=(x, y), domain=QQ) == Poly(x + 7, x, y, domain='QQ')
    assert Poly.from_poly(
        f, gens=(x, y), modulus=3) == Poly(x + 7, x, y, domain='FF(3)')
    # --- univariate g over FF(3) ---
    K = FF(2)
    assert Poly.from_poly(g) == g
    assert Poly.from_poly(g, domain=ZZ).rep == DMP([1, -1], ZZ)
    raises(CoercionFailed, lambda: Poly.from_poly(g, domain=QQ))
    assert Poly.from_poly(g, domain=K).rep == DMP([K(1), K(0)], K)
    assert Poly.from_poly(g, gens=x) == g
    assert Poly.from_poly(g, gens=x, domain=ZZ).rep == DMP([1, -1], ZZ)
    raises(CoercionFailed, lambda: Poly.from_poly(g, gens=x, domain=QQ))
    assert Poly.from_poly(g, gens=x, domain=K).rep == DMP([K(1), K(0)], K)
    # --- bivariate h over ZZ ---
    K = FF(3)
    assert Poly.from_poly(h) == h
    assert Poly.from_poly(
        h, domain=ZZ).rep == DMP([[ZZ(1)], [ZZ(1), ZZ(0)]], ZZ)
    assert Poly.from_poly(
        h, domain=QQ).rep == DMP([[QQ(1)], [QQ(1), QQ(0)]], QQ)
    assert Poly.from_poly(h, domain=K).rep == DMP([[K(1)], [K(1), K(0)]], K)
    assert Poly.from_poly(h, gens=x) == Poly(x + y, x, domain=ZZ[y])
    raises(CoercionFailed, lambda: Poly.from_poly(h, gens=x, domain=ZZ))
    assert Poly.from_poly(
        h, gens=x, domain=ZZ[y]) == Poly(x + y, x, domain=ZZ[y])
    raises(CoercionFailed, lambda: Poly.from_poly(h, gens=x, domain=QQ))
    assert Poly.from_poly(
        h, gens=x, domain=QQ[y]) == Poly(x + y, x, domain=QQ[y])
    raises(CoercionFailed, lambda: Poly.from_poly(h, gens=x, modulus=3))
    assert Poly.from_poly(h, gens=y) == Poly(x + y, y, domain=ZZ[x])
    raises(CoercionFailed, lambda: Poly.from_poly(h, gens=y, domain=ZZ))
    assert Poly.from_poly(
        h, gens=y, domain=ZZ[x]) == Poly(x + y, y, domain=ZZ[x])
    raises(CoercionFailed, lambda: Poly.from_poly(h, gens=y, domain=QQ))
    assert Poly.from_poly(
        h, gens=y, domain=QQ[x]) == Poly(x + y, y, domain=QQ[x])
    raises(CoercionFailed, lambda: Poly.from_poly(h, gens=y, modulus=3))
    assert Poly.from_poly(h, gens=(x, y)) == h
    assert Poly.from_poly(
        h, gens=(x, y), domain=ZZ).rep == DMP([[ZZ(1)], [ZZ(1), ZZ(0)]], ZZ)
    assert Poly.from_poly(
        h, gens=(x, y), domain=QQ).rep == DMP([[QQ(1)], [QQ(1), QQ(0)]], QQ)
    assert Poly.from_poly(
        h, gens=(x, y), domain=K).rep == DMP([[K(1)], [K(1), K(0)]], K)
    # Reordered generators give the same rep for the symmetric x + y.
    assert Poly.from_poly(
        h, gens=(y, x)).rep == DMP([[ZZ(1)], [ZZ(1), ZZ(0)]], ZZ)
    assert Poly.from_poly(
        h, gens=(y, x), domain=ZZ).rep == DMP([[ZZ(1)], [ZZ(1), ZZ(0)]], ZZ)
    assert Poly.from_poly(
        h, gens=(y, x), domain=QQ).rep == DMP([[QQ(1)], [QQ(1), QQ(0)]], QQ)
    assert Poly.from_poly(
        h, gens=(y, x), domain=K).rep == DMP([[K(1)], [K(1), K(0)]], K)
    assert Poly.from_poly(
        h, gens=(x, y), field=True).rep == DMP([[QQ(1)], [QQ(1), QQ(0)]], QQ)
    assert Poly.from_poly(
        h, gens=(x, y), field=True).rep == DMP([[QQ(1)], [QQ(1), QQ(0)]], QQ)
def test_Poly_from_expr():
    """Construct Poly from an Expr; bare constants need explicit generators."""
    raises(GeneratorsNeeded, lambda: Poly.from_expr(S(0)))
    raises(GeneratorsNeeded, lambda: Poly.from_expr(S(7)))
    F3 = FF(3)
    # Coefficients reduce mod 3 in FF(3): 5 -> 2.
    assert Poly.from_expr(x + 5, domain=F3).rep == DMP([F3(1), F3(2)], F3)
    assert Poly.from_expr(y + 5, domain=F3).rep == DMP([F3(1), F3(2)], F3)
    assert Poly.from_expr(x + 5, x, domain=F3).rep == DMP([F3(1), F3(2)], F3)
    assert Poly.from_expr(y + 5, y, domain=F3).rep == DMP([F3(1), F3(2)], F3)
    assert Poly.from_expr(x + y, domain=F3).rep == DMP([[F3(1)], [F3(1), F3(0)]], F3)
    assert Poly.from_expr(x + y, x, y, domain=F3).rep == DMP([[F3(1)], [F3(1), F3(0)]], F3)
    assert Poly.from_expr(x + 5).rep == DMP([1, 5], ZZ)
    assert Poly.from_expr(y + 5).rep == DMP([1, 5], ZZ)
    assert Poly.from_expr(x + 5, x).rep == DMP([1, 5], ZZ)
    assert Poly.from_expr(y + 5, y).rep == DMP([1, 5], ZZ)
    assert Poly.from_expr(x + 5, domain=ZZ).rep == DMP([1, 5], ZZ)
    assert Poly.from_expr(y + 5, domain=ZZ).rep == DMP([1, 5], ZZ)
    assert Poly.from_expr(x + 5, x, domain=ZZ).rep == DMP([1, 5], ZZ)
    assert Poly.from_expr(y + 5, y, domain=ZZ).rep == DMP([1, 5], ZZ)
    assert Poly.from_expr(x + 5, x, y, domain=ZZ).rep == DMP([[1], [5]], ZZ)
    assert Poly.from_expr(y + 5, x, y, domain=ZZ).rep == DMP([[1, 5]], ZZ)
def test_Poly__new__():
    """Poly construction: option validation errors, dict/list/tuple/iterator
    coefficient inputs, domain coercion, and symmetric/asymmetric modulus."""
    raises(GeneratorsError, lambda: Poly(x + 1, x, x))

    # A generator may not also appear in the coefficient domain.
    raises(GeneratorsError, lambda: Poly(x + y, x, y, domain=ZZ[x]))
    raises(GeneratorsError, lambda: Poly(x + y, x, y, domain=ZZ[y]))

    raises(OptionError, lambda: Poly(x, x, symmetric=True))
    raises(OptionError, lambda: Poly(x + 2, x, modulus=3, domain=QQ))

    # Mutually exclusive construction options.
    raises(OptionError, lambda: Poly(x + 2, x, domain=ZZ, gaussian=True))
    raises(OptionError, lambda: Poly(x + 2, x, modulus=3, gaussian=True))

    raises(OptionError, lambda: Poly(x + 2, x, domain=ZZ, extension=[sqrt(3)]))
    raises(OptionError, lambda: Poly(x + 2, x, modulus=3, extension=[sqrt(3)]))

    raises(OptionError, lambda: Poly(x + 2, x, domain=ZZ, extension=True))
    raises(OptionError, lambda: Poly(x + 2, x, modulus=3, extension=True))

    raises(OptionError, lambda: Poly(x + 2, x, domain=ZZ, greedy=True))
    raises(OptionError, lambda: Poly(x + 2, x, domain=QQ, field=True))

    raises(OptionError, lambda: Poly(x + 2, x, domain=ZZ, greedy=False))
    raises(OptionError, lambda: Poly(x + 2, x, domain=QQ, field=False))

    raises(NotImplementedError, lambda: Poly(x + 1, x, modulus=3, order='grlex'))
    raises(NotImplementedError, lambda: Poly(x + 1, x, order='grlex'))

    raises(GeneratorsNeeded, lambda: Poly({1: 2, 0: 1}))
    raises(GeneratorsNeeded, lambda: Poly([2, 1]))
    raises(GeneratorsNeeded, lambda: Poly((2, 1)))

    raises(GeneratorsNeeded, lambda: Poly(1))

    # dict, iterator, list and tuple coefficient inputs are all accepted.
    f = a*x**2 + b*x + c

    assert Poly({2: a, 1: b, 0: c}, x) == f
    assert Poly(iter([a, b, c]), x) == f
    assert Poly([a, b, c], x) == f
    assert Poly((a, b, c), x) == f

    # Empty dict gives the zero polynomial in the given gens.
    f = Poly({}, x, y, z)

    assert f.gens == (x, y, z) and f.as_expr() == 0

    assert Poly(Poly(a*x + b*y, x, y), x) == Poly(a*x + b*y, x)

    assert Poly(3*x**2 + 2*x + 1, domain='ZZ').all_coeffs() == [3, 2, 1]
    assert Poly(3*x**2 + 2*x + 1, domain='QQ').all_coeffs() == [3, 2, 1]
    assert Poly(3*x**2 + 2*x + 1, domain='RR').all_coeffs() == [3.0, 2.0, 1.0]

    raises(CoercionFailed, lambda: Poly(3*x**2/5 + 2*x/5 + 1, domain='ZZ'))
    assert Poly(
        3*x**2/5 + 2*x/5 + 1, domain='QQ').all_coeffs() == [S(3)/5, S(2)/5, 1]
    # RR coefficients are inexact floats -> compare within epsilon.
    assert _epsilon_eq(
        Poly(3*x**2/5 + 2*x/5 + 1, domain='RR').all_coeffs(), [0.6, 0.4, 1.0])

    assert Poly(3.0*x**2 + 2.0*x + 1, domain='ZZ').all_coeffs() == [3, 2, 1]
    assert Poly(3.0*x**2 + 2.0*x + 1, domain='QQ').all_coeffs() == [3, 2, 1]
    assert Poly(
        3.0*x**2 + 2.0*x + 1, domain='RR').all_coeffs() == [3.0, 2.0, 1.0]

    raises(CoercionFailed, lambda: Poly(3.1*x**2 + 2.1*x + 1, domain='ZZ'))
    assert Poly(3.1*x**2 + 2.1*x + 1, domain='QQ').all_coeffs() == [S(31)/10, S(21)/10, 1]
    assert Poly(3.1*x**2 + 2.1*x + 1, domain='RR').all_coeffs() == [3.1, 2.1, 1.0]

    assert Poly({(2, 1): 1, (1, 2): 2, (1, 1): 3}, x, y) == \
        Poly(x**2*y + 2*x*y**2 + 3*x*y, x, y)

    assert Poly(x**2 + 1, extension=I).get_domain() == QQ.algebraic_field(I)

    # symmetric=True maps 65538 -> 1 (symmetric residues); symmetric=False
    # keeps residues in [0, modulus).
    f = 3*x**5 - x**4 + x**3 - x** 2 + 65538

    assert Poly(f, x, modulus=65537, symmetric=True) == \
        Poly(3*x**5 - x**4 + x**3 - x** 2 + 1, x, modulus=65537,
             symmetric=True)
    assert Poly(f, x, modulus=65537, symmetric=False) == \
        Poly(3*x**5 + 65536*x**4 + x**3 + 65536*x** 2 + 1, x,
             modulus=65537, symmetric=False)

    assert isinstance(Poly(x**2 + x + 1.0).get_domain(), RealField)
def test_Poly__args():
    """Poly.args exposes the original expression as the sole argument."""
    assert Poly(x**2 + 1).args == (x**2 + 1,)
def test_Poly__gens():
    """Generator selection and ordering via explicit gens, sort= and wrt=."""
    assert Poly((x - p)*(x - q), x).gens == (x,)
    assert Poly((x - p)*(x - q), p).gens == (p,)
    assert Poly((x - p)*(x - q), q).gens == (q,)

    assert Poly((x - p)*(x - q), x, p).gens == (x, p)
    assert Poly((x - p)*(x - q), x, q).gens == (x, q)

    assert Poly((x - p)*(x - q), x, p, q).gens == (x, p, q)
    assert Poly((x - p)*(x - q), p, x, q).gens == (p, x, q)
    assert Poly((x - p)*(x - q), p, q, x).gens == (p, q, x)

    assert Poly((x - p)*(x - q)).gens == (x, p, q)

    # sort= reorders automatically-detected gens ...
    assert Poly((x - p)*(x - q), sort='x > p > q').gens == (x, p, q)
    assert Poly((x - p)*(x - q), sort='p > x > q').gens == (p, x, q)
    assert Poly((x - p)*(x - q), sort='p > q > x').gens == (p, q, x)

    # ... but explicitly given gens are never reordered by sort=.
    assert Poly((x - p)*(x - q), x, p, q, sort='p > q > x').gens == (x, p, q)

    # wrt= pulls the named gen(s) to the front.
    assert Poly((x - p)*(x - q), wrt='x').gens == (x, p, q)
    assert Poly((x - p)*(x - q), wrt='p').gens == (p, x, q)
    assert Poly((x - p)*(x - q), wrt='q').gens == (q, x, p)

    assert Poly((x - p)*(x - q), wrt=x).gens == (x, p, q)
    assert Poly((x - p)*(x - q), wrt=p).gens == (p, x, q)
    assert Poly((x - p)*(x - q), wrt=q).gens == (q, x, p)

    assert Poly((x - p)*(x - q), x, p, q, wrt='p').gens == (x, p, q)

    assert Poly((x - p)*(x - q), wrt='p', sort='q > x').gens == (p, q, x)
    assert Poly((x - p)*(x - q), wrt='q', sort='p > x').gens == (q, p, x)
def test_Poly_zero():
    """Poly.zero is the zero polynomial over the same gens and domain."""
    assert Poly(x).zero == Poly(0, x, domain=ZZ)
    assert Poly(x/2).zero == Poly(0, x, domain=QQ)
def test_Poly_one():
    """Poly.one is the unit polynomial over the same gens and domain."""
    assert Poly(x).one == Poly(1, x, domain=ZZ)
    assert Poly(x/2).one == Poly(1, x, domain=QQ)
def test_Poly__unify():
    """Poly._unify: puts two polys over a common gens tuple and domain;
    checks the resulting DMP reps (elements [2:] of the returned tuple)."""
    raises(UnificationFailed, lambda: Poly(x)._unify(y))

    F3 = FF(3)
    F5 = FF(5)

    # Equal moduli unify to that field; differing moduli pick the larger.
    assert Poly(x, x, modulus=3)._unify(Poly(y, y, modulus=3))[2:] == (
        DMP([[F3(1)], []], F3), DMP([[F3(1), F3(0)]], F3))
    assert Poly(x, x, modulus=3)._unify(Poly(y, y, modulus=5))[2:] == (
        DMP([[F5(1)], []], F5), DMP([[F5(1), F5(0)]], F5))

    assert Poly(y, x, y)._unify(Poly(x, x, modulus=3))[2:] == (DMP([[F3(1), F3(0)]], F3), DMP([[F3(1)], []], F3))
    assert Poly(x, x, modulus=3)._unify(Poly(y, x, y))[2:] == (DMP([[F3(1)], []], F3), DMP([[F3(1), F3(0)]], F3))

    # Domain unification: QQ on either side promotes both to QQ.
    assert Poly(x + 1, x)._unify(Poly(x + 2, x))[2:] == (DMP([1, 1], ZZ), DMP([1, 2], ZZ))
    assert Poly(x + 1, x, domain='QQ')._unify(Poly(x + 2, x))[2:] == (DMP([1, 1], QQ), DMP([1, 2], QQ))
    assert Poly(x + 1, x)._unify(Poly(x + 2, x, domain='QQ'))[2:] == (DMP([1, 1], QQ), DMP([1, 2], QQ))

    # Gens unification: (x,) vs (x, y) in all combinations.
    assert Poly(x + 1, x)._unify(Poly(x + 2, x, y))[2:] == (DMP([[1], [1]], ZZ), DMP([[1], [2]], ZZ))
    assert Poly(x + 1, x, domain='QQ')._unify(Poly(x + 2, x, y))[2:] == (DMP([[1], [1]], QQ), DMP([[1], [2]], QQ))
    assert Poly(x + 1, x)._unify(Poly(x + 2, x, y, domain='QQ'))[2:] == (DMP([[1], [1]], QQ), DMP([[1], [2]], QQ))

    assert Poly(x + 1, x, y)._unify(Poly(x + 2, x))[2:] == (DMP([[1], [1]], ZZ), DMP([[1], [2]], ZZ))
    assert Poly(x + 1, x, y, domain='QQ')._unify(Poly(x + 2, x))[2:] == (DMP([[1], [1]], QQ), DMP([[1], [2]], QQ))
    assert Poly(x + 1, x, y)._unify(Poly(x + 2, x, domain='QQ'))[2:] == (DMP([[1], [1]], QQ), DMP([[1], [2]], QQ))

    assert Poly(x + 1, x, y)._unify(Poly(x + 2, x, y))[2:] == (DMP([[1], [1]], ZZ), DMP([[1], [2]], ZZ))
    assert Poly(x + 1, x, y, domain='QQ')._unify(Poly(x + 2, x, y))[2:] == (DMP([[1], [1]], QQ), DMP([[1], [2]], QQ))
    assert Poly(x + 1, x, y)._unify(Poly(x + 2, x, y, domain='QQ'))[2:] == (DMP([[1], [1]], QQ), DMP([[1], [2]], QQ))

    assert Poly(x + 1, x)._unify(Poly(x + 2, y, x))[2:] == (DMP([[1, 1]], ZZ), DMP([[1, 2]], ZZ))
    assert Poly(x + 1, x, domain='QQ')._unify(Poly(x + 2, y, x))[2:] == (DMP([[1, 1]], QQ), DMP([[1, 2]], QQ))
    assert Poly(x + 1, x)._unify(Poly(x + 2, y, x, domain='QQ'))[2:] == (DMP([[1, 1]], QQ), DMP([[1, 2]], QQ))

    assert Poly(x + 1, y, x)._unify(Poly(x + 2, x))[2:] == (DMP([[1, 1]], ZZ), DMP([[1, 2]], ZZ))
    assert Poly(x + 1, y, x, domain='QQ')._unify(Poly(x + 2, x))[2:] == (DMP([[1, 1]], QQ), DMP([[1, 2]], QQ))
    assert Poly(x + 1, y, x)._unify(Poly(x + 2, x, domain='QQ'))[2:] == (DMP([[1, 1]], QQ), DMP([[1, 2]], QQ))

    assert Poly(x + 1, x, y)._unify(Poly(x + 2, y, x))[2:] == (DMP([[1], [1]], ZZ), DMP([[1], [2]], ZZ))
    assert Poly(x + 1, x, y, domain='QQ')._unify(Poly(x + 2, y, x))[2:] == (DMP([[1], [1]], QQ), DMP([[1], [2]], QQ))
    assert Poly(x + 1, x, y)._unify(Poly(x + 2, y, x, domain='QQ'))[2:] == (DMP([[1], [1]], QQ), DMP([[1], [2]], QQ))

    assert Poly(x + 1, y, x)._unify(Poly(x + 2, x, y))[2:] == (DMP([[1, 1]], ZZ), DMP([[1, 2]], ZZ))
    assert Poly(x + 1, y, x, domain='QQ')._unify(Poly(x + 2, x, y))[2:] == (DMP([[1, 1]], QQ), DMP([[1, 2]], QQ))
    assert Poly(x + 1, y, x)._unify(Poly(x + 2, x, y, domain='QQ'))[2:] == (DMP([[1, 1]], QQ), DMP([[1, 2]], QQ))

    # Ring vs fraction-field coefficient domains unify to the field.
    F, A, B = field("a,b", ZZ)

    assert Poly(a*x, x, domain='ZZ[a]')._unify(Poly(a*b*x, x, domain='ZZ(a,b)'))[2:] == \
        (DMP([A, F(0)], F.to_domain()), DMP([A*B, F(0)], F.to_domain()))

    assert Poly(a*x, x, domain='ZZ(a)')._unify(Poly(a*b*x, x, domain='ZZ(a,b)'))[2:] == \
        (DMP([A, F(0)], F.to_domain()), DMP([A*B, F(0)], F.to_domain()))

    raises(CoercionFailed, lambda: Poly(Poly(x**2 + x**2*z, y, field=True), domain='ZZ(x)'))

    # QQ(x) vs QQ[x] over the same gen: unified reps are equal.
    f = Poly(t**2 + t/3 + x, t, domain='QQ(x)')
    g = Poly(t**2 + t/3 + x, t, domain='QQ[x]')

    assert f._unify(g)[2:] == (f.rep, f.rep)
def test_Poly_free_symbols():
    """Poly.free_symbols includes symbols hidden inside coefficients too."""
    assert Poly(x**2 + 1).free_symbols == {x}
    assert Poly(x**2 + y*z).free_symbols == {x, y, z}
    assert Poly(x**2 + y*z, x).free_symbols == {x, y, z}
    assert Poly(x**2 + sin(y*z)).free_symbols == {x, y, z}
    assert Poly(x**2 + sin(y*z), x).free_symbols == {x, y, z}
    assert Poly(x**2 + sin(y*z), x, domain=EX).free_symbols == {x, y, z}
def test_PurePoly_free_symbols():
    """PurePoly.free_symbols excludes the generators themselves: only
    symbols appearing in the coefficient domain are free."""
    # Idiom: ``set()`` is the canonical empty set (``set([])`` is the
    # dated spelling; equal in value, flagged by flake8-comprehensions).
    assert PurePoly(x**2 + 1).free_symbols == set()
    assert PurePoly(x**2 + y*z).free_symbols == set()
    assert PurePoly(x**2 + y*z, x).free_symbols == {y, z}
    assert PurePoly(x**2 + sin(y*z)).free_symbols == set()
    assert PurePoly(x**2 + sin(y*z), x).free_symbols == {y, z}
    assert PurePoly(x**2 + sin(y*z), x, domain=EX).free_symbols == {y, z}
def test_Poly__eq__():
    """Poly equality: domain-insensitive by default; eq/ne(strict=True)
    also compare domains."""
    assert (Poly(x, x) == Poly(x, x)) is True
    assert (Poly(x, x, domain=QQ) == Poly(x, x)) is True
    assert (Poly(x, x) == Poly(x, x, domain=QQ)) is True

    assert (Poly(x, x, domain=ZZ[a]) == Poly(x, x)) is True
    assert (Poly(x, x) == Poly(x, x, domain=ZZ[a])) is True

    # Differing gens make polys unequal.
    assert (Poly(x*y, x, y) == Poly(x, x)) is False

    assert (Poly(x, x, y) == Poly(x, x)) is False
    assert (Poly(x, x) == Poly(x, x, y)) is False

    assert (Poly(x**2 + 1, x) == Poly(y**2 + 1, y)) is False
    assert (Poly(y**2 + 1, y) == Poly(x**2 + 1, x)) is False

    f = Poly(x, x, domain=ZZ)
    g = Poly(x, x, domain=QQ)

    assert f.eq(g) is True
    assert f.ne(g) is False

    # strict=True distinguishes ZZ from QQ.
    assert f.eq(g, strict=True) is False
    assert f.ne(g, strict=True) is True

    t0 = Symbol('t0')

    f = Poly((t0/2 + x**2)*t**2 - x**2*t, t, domain='QQ[x,t0]')
    g = Poly((t0/2 + x**2)*t**2 - x**2*t, t, domain='ZZ(x,t0)')

    assert (f == g) is True
def test_PurePoly__eq__():
    """PurePoly equality: compares structure only, so polynomials in
    different symbols (x vs y) compare equal."""
    assert (PurePoly(x, x) == PurePoly(x, x)) is True
    assert (PurePoly(x, x, domain=QQ) == PurePoly(x, x)) is True
    assert (PurePoly(x, x) == PurePoly(x, x, domain=QQ)) is True

    assert (PurePoly(x, x, domain=ZZ[a]) == PurePoly(x, x)) is True
    assert (PurePoly(x, x) == PurePoly(x, x, domain=ZZ[a])) is True

    assert (PurePoly(x*y, x, y) == PurePoly(x, x)) is False

    assert (PurePoly(x, x, y) == PurePoly(x, x)) is False
    assert (PurePoly(x, x) == PurePoly(x, x, y)) is False

    # Unlike Poly, gen names don't matter for PurePoly.
    assert (PurePoly(x**2 + 1, x) == PurePoly(y**2 + 1, y)) is True
    assert (PurePoly(y**2 + 1, y) == PurePoly(x**2 + 1, x)) is True

    f = PurePoly(x, x, domain=ZZ)
    g = PurePoly(x, x, domain=QQ)

    assert f.eq(g) is True
    assert f.ne(g) is False

    assert f.eq(g, strict=True) is False
    assert f.ne(g, strict=True) is True

    # Equal even across both different domains and different symbols.
    f = PurePoly(x, x, domain=ZZ)
    g = PurePoly(y, y, domain=QQ)

    assert f.eq(g) is True
    assert f.ne(g) is False

    assert f.eq(g, strict=True) is False
    assert f.ne(g, strict=True) is True
def test_PurePoly_Poly():
    """Poly and PurePoly can each be constructed from the other."""
    assert isinstance(PurePoly(Poly(x**2 + 1)), PurePoly) is True
    assert isinstance(Poly(PurePoly(x**2 + 1)), Poly) is True
def test_Poly_get_domain():
    """Poly.get_domain: inferred vs explicit domains; floats give RR."""
    assert Poly(2*x).get_domain() == ZZ

    assert Poly(2*x, domain='ZZ').get_domain() == ZZ
    assert Poly(2*x, domain='QQ').get_domain() == QQ

    assert Poly(x/2).get_domain() == QQ

    # Rational coefficients cannot be coerced into ZZ.
    raises(CoercionFailed, lambda: Poly(x/2, domain='ZZ'))
    assert Poly(x/2, domain='QQ').get_domain() == QQ

    assert isinstance(Poly(0.2*x).get_domain(), RealField)
def test_Poly_set_domain():
    """Poly.set_domain: converts coefficients or raises when impossible."""
    assert Poly(2*x + 1).set_domain(ZZ) == Poly(2*x + 1)
    assert Poly(2*x + 1).set_domain('ZZ') == Poly(2*x + 1)

    assert Poly(2*x + 1).set_domain(QQ) == Poly(2*x + 1, domain='QQ')
    assert Poly(2*x + 1).set_domain('QQ') == Poly(2*x + 1, domain='QQ')

    # Exact rationals <-> inexact reals convert both ways.
    assert Poly(S(2)/10*x + S(1)/10).set_domain('RR') == Poly(0.2*x + 0.1)
    assert Poly(0.2*x + 0.1).set_domain('QQ') == Poly(S(2)/10*x + S(1)/10)

    raises(CoercionFailed, lambda: Poly(x/2 + 1).set_domain(ZZ))
    raises(CoercionFailed, lambda: Poly(x + 1, modulus=2).set_domain(QQ))

    # New domain must not contain one of the generators.
    raises(GeneratorsError, lambda: Poly(x*y, x, y).set_domain(ZZ[y]))
def test_Poly_get_modulus():
    """Poly.get_modulus returns the modulus, or raises over non-GF domains."""
    assert Poly(x**2 + 1, modulus=2).get_modulus() == 2
    raises(PolynomialError, lambda: Poly(x**2 + 1).get_modulus())
def test_Poly_set_modulus():
    """Poly.set_modulus re-reduces coefficients modulo the new modulus."""
    assert Poly(
        x**2 + 1, modulus=2).set_modulus(7) == Poly(x**2 + 1, modulus=7)
    assert Poly(
        x**2 + 5, modulus=7).set_modulus(2) == Poly(x**2 + 1, modulus=2)

    assert Poly(x**2 + 1).set_modulus(2) == Poly(x**2 + 1, modulus=2)

    # Non-integer coefficients cannot enter a finite field.
    raises(CoercionFailed, lambda: Poly(x/2 + 1).set_modulus(2))
def test_Poly_add_ground():
    """Poly.add_ground adds a ground-domain element to the polynomial."""
    assert Poly(x + 1).add_ground(2) == Poly(x + 3)
def test_Poly_sub_ground():
    """Poly.sub_ground subtracts a ground-domain element."""
    assert Poly(x + 1).sub_ground(2) == Poly(x - 1)
def test_Poly_mul_ground():
    """Poly.mul_ground multiplies all coefficients by a ground element."""
    assert Poly(x + 1).mul_ground(2) == Poly(2*x + 2)
def test_Poly_quo_ground():
    """Poly.quo_ground: floor-divides coefficients (3//2 == 1 over ZZ)."""
    assert Poly(2*x + 4).quo_ground(2) == Poly(x + 2)
    assert Poly(2*x + 3).quo_ground(2) == Poly(x + 1)
def test_Poly_exquo_ground():
    """Poly.exquo_ground: exact division only; raises on a remainder."""
    assert Poly(2*x + 4).exquo_ground(2) == Poly(x + 2)
    raises(ExactQuotientFailed, lambda: Poly(2*x + 3).exquo_ground(2))
def test_Poly_abs():
    """Poly.abs and abs() take absolute values of all coefficients."""
    assert Poly(-x + 1, x).abs() == abs(Poly(-x + 1, x)) == Poly(x + 1, x)
def test_Poly_neg():
    """Poly.neg and unary minus negate all coefficients."""
    assert Poly(-x + 1, x).neg() == -Poly(-x + 1, x) == Poly(x - 1, x)
def test_Poly_add():
    """Poly addition: Poly+Poly (gens are unified), Poly+Expr, Expr+Poly.
    Adding a non-polynomial expression falls back to a plain Expr."""
    assert Poly(0, x).add(Poly(0, x)) == Poly(0, x)
    assert Poly(0, x) + Poly(0, x) == Poly(0, x)

    assert Poly(1, x).add(Poly(0, x)) == Poly(1, x)
    assert Poly(1, x, y) + Poly(0, x) == Poly(1, x, y)
    assert Poly(0, x).add(Poly(1, x, y)) == Poly(1, x, y)
    assert Poly(0, x, y) + Poly(1, x, y) == Poly(1, x, y)

    assert Poly(1, x) + x == Poly(x + 1, x)
    assert Poly(1, x) + sin(x) == 1 + sin(x)

    assert Poly(x, x) + 1 == Poly(x + 1, x)
    assert 1 + Poly(x, x) == Poly(x + 1, x)
def test_Poly_sub():
    """Poly subtraction: Poly-Poly, Poly-Expr and Expr-Poly variants."""
    assert Poly(0, x).sub(Poly(0, x)) == Poly(0, x)
    assert Poly(0, x) - Poly(0, x) == Poly(0, x)

    assert Poly(1, x).sub(Poly(0, x)) == Poly(1, x)
    assert Poly(1, x, y) - Poly(0, x) == Poly(1, x, y)
    assert Poly(0, x).sub(Poly(1, x, y)) == Poly(-1, x, y)
    assert Poly(0, x, y) - Poly(1, x, y) == Poly(-1, x, y)

    assert Poly(1, x) - x == Poly(1 - x, x)
    # Non-polynomial operand -> result degrades to a plain Expr.
    assert Poly(1, x) - sin(x) == 1 - sin(x)

    assert Poly(x, x) - 1 == Poly(x - 1, x)
    assert 1 - Poly(x, x) == Poly(1 - x, x)
def test_Poly_mul():
    """Poly multiplication: Poly*Poly (gens unified), Poly*Expr, Expr*Poly."""
    assert Poly(0, x).mul(Poly(0, x)) == Poly(0, x)
    assert Poly(0, x) * Poly(0, x) == Poly(0, x)

    assert Poly(2, x).mul(Poly(4, x)) == Poly(8, x)
    assert Poly(2, x, y) * Poly(4, x) == Poly(8, x, y)
    assert Poly(4, x).mul(Poly(2, x, y)) == Poly(8, x, y)
    assert Poly(4, x, y) * Poly(2, x, y) == Poly(8, x, y)

    assert Poly(1, x) * x == Poly(x, x)
    # Non-polynomial operand -> result degrades to a plain Expr.
    assert Poly(1, x) * sin(x) == sin(x)

    assert Poly(x, x) * 2 == Poly(2*x, x)
    assert 2 * Poly(x, x) == Poly(2*x, x)
def test_Poly_sqr():
    """Poly.sqr squares the polynomial."""
    assert Poly(x*y, x, y).sqr() == Poly(x**2*y**2, x, y)
def test_Poly_pow():
    """Poly.pow and ** with non-negative integer exponents; other
    exponents (negative, symbolic) fall back to plain Expr powers."""
    assert Poly(x, x).pow(10) == Poly(x**10, x)
    assert Poly(x, x).pow(Integer(10)) == Poly(x**10, x)

    assert Poly(2*y, x, y).pow(4) == Poly(16*y**4, x, y)
    assert Poly(2*y, x, y).pow(Integer(4)) == Poly(16*y**4, x, y)

    assert Poly(7*x*y, x, y)**3 == Poly(343*x**3*y**3, x, y)

    assert Poly(x*y + 1, x, y)**(-1) == (x*y + 1)**(-1)
    assert Poly(x*y + 1, x, y)**x == (x*y + 1)**x
def test_Poly_divmod():
    """Poly division operators: divmod, //, % with Poly, Expr and int
    operands; true division / returns an Expr quotient."""
    f, g = Poly(x**2), Poly(x)
    q, r = g, Poly(0, x)

    assert divmod(f, g) == (q, r)
    assert f // g == q
    assert f % g == r

    assert divmod(f, x) == (q, r)
    assert f // x == q
    assert f % x == r

    # Integer dividend is coerced into a Poly over g's gens.
    q, r = Poly(0, x), Poly(2, x)

    assert divmod(2, g) == (q, r)
    assert 2 // g == q
    assert 2 % g == r

    assert Poly(x)/Poly(x) == 1
    assert Poly(x**2)/Poly(x) == x
    assert Poly(x)/Poly(x**2) == 1/x
def test_Poly_eq_ne():
    """== and != between Polys (gens must match) and between Poly and
    Expr (expression content is compared)."""
    assert (Poly(x + y, x, y) == Poly(x + y, x, y)) is True
    assert (Poly(x + y, x) == Poly(x + y, x, y)) is False
    assert (Poly(x + y, x, y) == Poly(x + y, x)) is False
    assert (Poly(x + y, x) == Poly(x + y, x)) is True
    assert (Poly(x + y, y) == Poly(x + y, y)) is True

    assert (Poly(x + y, x, y) == x + y) is True
    assert (Poly(x + y, x) == x + y) is True
    assert (Poly(x + y, x, y) == x + y) is True
    assert (Poly(x + y, x) == x + y) is True
    assert (Poly(x + y, y) == x + y) is True

    assert (Poly(x + y, x, y) != Poly(x + y, x, y)) is False
    assert (Poly(x + y, x) != Poly(x + y, x, y)) is True
    assert (Poly(x + y, x, y) != Poly(x + y, x)) is True
    assert (Poly(x + y, x) != Poly(x + y, x)) is False
    assert (Poly(x + y, y) != Poly(x + y, y)) is False

    assert (Poly(x + y, x, y) != x + y) is False
    assert (Poly(x + y, x) != x + y) is False
    assert (Poly(x + y, x, y) != x + y) is False
    assert (Poly(x + y, x) != x + y) is False
    assert (Poly(x + y, y) != x + y) is False

    # Comparing against a non-polynomial expression is never equal.
    assert (Poly(x, x) == sin(x)) is False
    assert (Poly(x, x) != sin(x)) is True
def test_Poly_nonzero():
    """Truthiness of Poly: the zero polynomial is falsy, others truthy.

    Written with explicit ``is False`` / ``is True`` rather than the
    original ``not bool(...) is True`` form, which relied on ``is``
    binding tighter than ``not`` — correct, but needlessly ambiguous to
    read. ``bool()`` always returns one of the two bool singletons, so
    identity comparison is exact.
    """
    assert bool(Poly(0, x)) is False
    assert bool(Poly(1, x)) is True
def test_Poly_properties():
    """Boolean structural properties of Poly: is_zero, is_one, is_sqf,
    is_monic, is_primitive, is_ground, is_linear, is_quadratic,
    is_monomial, is_homogeneous, is_univariate/multivariate, is_cyclotomic."""
    assert Poly(0, x).is_zero is True
    assert Poly(1, x).is_zero is False

    assert Poly(1, x).is_one is True
    assert Poly(2, x).is_one is False

    assert Poly(x - 1, x).is_sqf is True
    assert Poly((x - 1)**2, x).is_sqf is False

    assert Poly(x - 1, x).is_monic is True
    assert Poly(2*x - 1, x).is_monic is False

    # Primitive <=> content (gcd of coefficients) is 1.
    assert Poly(3*x + 2, x).is_primitive is True
    assert Poly(4*x + 2, x).is_primitive is False

    assert Poly(1, x).is_ground is True
    assert Poly(x, x).is_ground is False

    assert Poly(x + y + z + 1).is_linear is True
    assert Poly(x*y*z + 1).is_linear is False

    assert Poly(x*y + z + 1).is_quadratic is True
    assert Poly(x*y*z + 1).is_quadratic is False

    assert Poly(x*y).is_monomial is True
    assert Poly(x*y + 1).is_monomial is False

    assert Poly(x**2 + x*y).is_homogeneous is True
    assert Poly(x**3 + x*y).is_homogeneous is False

    assert Poly(x).is_univariate is True
    assert Poly(x*y).is_univariate is False

    assert Poly(x*y).is_multivariate is True
    assert Poly(x).is_multivariate is False

    # The two polynomials differ only in the sign of the x**8 term.
    assert Poly(
        x**16 + x**14 - x**10 + x**8 - x**6 + x**2 + 1).is_cyclotomic is False
    assert Poly(
        x**16 + x**14 - x**10 - x**8 - x**6 + x**2 + 1).is_cyclotomic is True
def test_Poly_is_irreducible():
    """Poly.is_irreducible over ZZ and over a finite field (modulus=11)."""
    assert Poly(x**2 + x + 1).is_irreducible is True
    assert Poly(x**2 + 2*x + 1).is_irreducible is False

    assert Poly(7*x + 3, modulus=11).is_irreducible is True
    assert Poly(7*x**2 + 3*x + 1, modulus=11).is_irreducible is False
def test_Poly_subs():
    """Poly.subs: substituting a gen keeps Poly form where possible,
    otherwise (gen collision) returns an Expr."""
    assert Poly(x + 1).subs(x, 0) == 1

    assert Poly(x + 1).subs(x, x) == Poly(x + 1)
    assert Poly(x + 1).subs(x, y) == Poly(y + 1)

    # y lives in the coefficient domain here; substitution collapses to Expr.
    assert Poly(x*y, x).subs(y, x) == x**2
    assert Poly(x*y, x).subs(x, y) == y**2
def test_Poly_replace():
    """Poly.replace swaps one generator for a fresh symbol; it refuses
    replacements that collide with an existing gen or name a non-gen."""
    assert Poly(x + 1).replace(x) == Poly(x + 1)
    assert Poly(x + 1).replace(y) == Poly(y + 1)

    raises(PolynomialError, lambda: Poly(x + y).replace(z))

    assert Poly(x + 1).replace(x, x) == Poly(x + 1)
    assert Poly(x + 1).replace(x, y) == Poly(y + 1)

    assert Poly(x + y).replace(x, x) == Poly(x + y)
    assert Poly(x + y).replace(x, z) == Poly(z + y, z, y)

    assert Poly(x + y).replace(y, y) == Poly(x + y)
    assert Poly(x + y).replace(y, z) == Poly(x + z, x, z)

    # New symbol may not already be one of the gens.
    raises(PolynomialError, lambda: Poly(x + y).replace(x, y))
    raises(PolynomialError, lambda: Poly(x + y).replace(z, t))

    assert Poly(x + y, x).replace(x, z) == Poly(z + y, z)
    assert Poly(x + y, y).replace(y, z) == Poly(x + z, z)

    raises(PolynomialError, lambda: Poly(x + y, x).replace(x, y))
    raises(PolynomialError, lambda: Poly(x + y, y).replace(y, x))
def test_Poly_reorder():
    """Poly.reorder permutes existing gens (explicitly or via wrt=);
    raises if the requested gens don't match the poly's gens."""
    raises(PolynomialError, lambda: Poly(x + y).reorder(x, z))

    assert Poly(x + y, x, y).reorder(x, y) == Poly(x + y, x, y)
    assert Poly(x + y, x, y).reorder(y, x) == Poly(x + y, y, x)

    assert Poly(x + y, y, x).reorder(x, y) == Poly(x + y, x, y)
    assert Poly(x + y, y, x).reorder(y, x) == Poly(x + y, y, x)

    # wrt= moves the named gen to the front.
    assert Poly(x + y, x, y).reorder(wrt=x) == Poly(x + y, x, y)
    assert Poly(x + y, x, y).reorder(wrt=y) == Poly(x + y, y, x)
def test_Poly_ltrim():
    """Poly.ltrim drops leading unused gens; raises if a dropped gen
    actually occurs in the polynomial."""
    f = Poly(y**2 + y*z**2, x, y, z).ltrim(y)
    assert f.as_expr() == y**2 + y*z**2 and f.gens == (y, z)

    raises(PolynomialError, lambda: Poly(x*y**2 + y**2, x, y).ltrim(y))
def test_Poly_has_only_gens():
    """Poly.has_only_gens: True iff the poly involves only the given
    subset of its gens; a non-gen argument raises."""
    assert Poly(x*y + 1, x, y, z).has_only_gens(x, y) is True
    assert Poly(x*y + z, x, y, z).has_only_gens(x, y) is False

    raises(GeneratorsError, lambda: Poly(x*y**2 + y**2, x, y).has_only_gens(t))
def test_Poly_to_ring():
    """Poly.to_ring converts QQ->ZZ when coefficients allow; raises
    CoercionFailed on fractional coefficients and DomainError over GF."""
    assert Poly(2*x + 1, domain='ZZ').to_ring() == Poly(2*x + 1, domain='ZZ')
    assert Poly(2*x + 1, domain='QQ').to_ring() == Poly(2*x + 1, domain='ZZ')

    raises(CoercionFailed, lambda: Poly(x/2 + 1).to_ring())
    raises(DomainError, lambda: Poly(2*x + 1, modulus=3).to_ring())
def test_Poly_to_field():
    """Poly.to_field converts to the fraction field of the domain; GF
    and RR domains (already fields) are unchanged."""
    assert Poly(2*x + 1, domain='ZZ').to_field() == Poly(2*x + 1, domain='QQ')
    assert Poly(2*x + 1, domain='QQ').to_field() == Poly(2*x + 1, domain='QQ')

    assert Poly(x/2 + 1, domain='QQ').to_field() == Poly(x/2 + 1, domain='QQ')
    assert Poly(2*x + 1, modulus=3).to_field() == Poly(2*x + 1, modulus=3)

    assert Poly(2.0*x + 1.0).to_field() == Poly(2.0*x + 1.0)
def test_Poly_to_exact():
    """Poly.to_exact turns inexact (float) coefficients into rationals."""
    assert Poly(2*x).to_exact() == Poly(2*x)
    assert Poly(x/2).to_exact() == Poly(x/2)

    assert Poly(0.1*x).to_exact() == Poly(x/10)
def test_Poly_retract():
    """Poly.retract shrinks the coefficient domain to the smallest one
    actually needed (optionally forcing a field with field=True)."""
    f = Poly(x**2 + 1, x, domain=QQ[y])

    assert f.retract() == Poly(x**2 + 1, x, domain='ZZ')
    assert f.retract(field=True) == Poly(x**2 + 1, x, domain='QQ')

    # Zero poly retracts without losing its gens.
    assert Poly(0, x, y).retract() == Poly(0, x, y)
def test_Poly_slice():
    """Poly.slice(m, n) (optionally with an explicit gen) keeps only the
    terms of degree in [m, n)."""
    f = Poly(x**3 + 2*x**2 + 3*x + 4)

    assert f.slice(0, 0) == Poly(0, x)
    assert f.slice(0, 1) == Poly(4, x)
    assert f.slice(0, 2) == Poly(3*x + 4, x)
    assert f.slice(0, 3) == Poly(2*x**2 + 3*x + 4, x)
    assert f.slice(0, 4) == Poly(x**3 + 2*x**2 + 3*x + 4, x)

    assert f.slice(x, 0, 0) == Poly(0, x)
    assert f.slice(x, 0, 1) == Poly(4, x)
    assert f.slice(x, 0, 2) == Poly(3*x + 4, x)
    assert f.slice(x, 0, 3) == Poly(2*x**2 + 3*x + 4, x)
    assert f.slice(x, 0, 4) == Poly(x**3 + 2*x**2 + 3*x + 4, x)
def test_Poly_coeffs():
    """Poly.coeffs lists nonzero coefficients in the given monomial order."""
    assert Poly(0, x).coeffs() == [0]
    assert Poly(1, x).coeffs() == [1]

    assert Poly(2*x + 1, x).coeffs() == [2, 1]

    assert Poly(7*x**2 + 2*x + 1, x).coeffs() == [7, 2, 1]
    assert Poly(7*x**4 + 2*x + 1, x).coeffs() == [7, 2, 1]

    # Ordering matters in the multivariate case: lex vs grlex.
    assert Poly(x*y**7 + 2*x**2*y**3).coeffs('lex') == [2, 1]
    assert Poly(x*y**7 + 2*x**2*y**3).coeffs('grlex') == [1, 2]
def test_Poly_monoms():
    """Poly.monoms lists exponent tuples of nonzero terms, ordered."""
    assert Poly(0, x).monoms() == [(0,)]
    assert Poly(1, x).monoms() == [(0,)]

    assert Poly(2*x + 1, x).monoms() == [(1,), (0,)]

    assert Poly(7*x**2 + 2*x + 1, x).monoms() == [(2,), (1,), (0,)]
    assert Poly(7*x**4 + 2*x + 1, x).monoms() == [(4,), (1,), (0,)]

    assert Poly(x*y**7 + 2*x**2*y**3).monoms('lex') == [(2, 3), (1, 7)]
    assert Poly(x*y**7 + 2*x**2*y**3).monoms('grlex') == [(1, 7), (2, 3)]
def test_Poly_terms():
    """Poly.terms lists (monom, coeff) pairs of nonzero terms, ordered."""
    assert Poly(0, x).terms() == [((0,), 0)]
    assert Poly(1, x).terms() == [((0,), 1)]

    assert Poly(2*x + 1, x).terms() == [((1,), 2), ((0,), 1)]

    assert Poly(7*x**2 + 2*x + 1, x).terms() == [((2,), 7), ((1,), 2), ((0,), 1)]
    assert Poly(7*x**4 + 2*x + 1, x).terms() == [((4,), 7), ((1,), 2), ((0,), 1)]

    assert Poly(
        x*y**7 + 2*x**2*y**3).terms('lex') == [((2, 3), 2), ((1, 7), 1)]
    assert Poly(
        x*y**7 + 2*x**2*y**3).terms('grlex') == [((1, 7), 1), ((2, 3), 2)]
def test_Poly_all_coeffs():
    """Poly.all_coeffs: dense coefficient list including zeros."""
    assert Poly(0, x).all_coeffs() == [0]
    assert Poly(1, x).all_coeffs() == [1]

    assert Poly(2*x + 1, x).all_coeffs() == [2, 1]

    assert Poly(7*x**2 + 2*x + 1, x).all_coeffs() == [7, 2, 1]
    assert Poly(7*x**4 + 2*x + 1, x).all_coeffs() == [7, 0, 0, 2, 1]
def test_Poly_all_monoms():
    """Poly.all_monoms: dense monomial list, including zero terms."""
    assert Poly(0, x).all_monoms() == [(0,)]
    assert Poly(1, x).all_monoms() == [(0,)]

    assert Poly(2*x + 1, x).all_monoms() == [(1,), (0,)]

    assert Poly(7*x**2 + 2*x + 1, x).all_monoms() == [(2,), (1,), (0,)]
    assert Poly(7*x**4 + 2*x + 1, x).all_monoms() == [(4,), (3,), (2,), (1,), (0,)]
def test_Poly_all_terms():
    """Poly.all_terms: dense (monom, coeff) list including zero terms."""
    assert Poly(0, x).all_terms() == [((0,), 0)]
    assert Poly(1, x).all_terms() == [((0,), 1)]

    assert Poly(2*x + 1, x).all_terms() == [((1,), 2), ((0,), 1)]

    assert Poly(7*x**2 + 2*x + 1, x).all_terms() == \
        [((2,), 7), ((1,), 2), ((0,), 1)]
    assert Poly(7*x**4 + 2*x + 1, x).all_terms() == \
        [((4,), 7), ((3,), 0), ((2,), 0), ((1,), 2), ((0,), 1)]
def test_Poly_termwise():
    """Poly.termwise maps a function over (monom, coeff) pairs; the
    function may return just a coefficient or a (monom, coeff) pair."""
    f = Poly(x**2 + 20*x + 400)
    g = Poly(x**2 + 2*x + 4)

    # Function returning only the new coefficient.
    def func(monom, coeff):
        (k,) = monom
        return coeff//10**(2 - k)

    assert f.termwise(func) == g

    # Function returning a full (monom, coeff) pair.
    def func(monom, coeff):
        (k,) = monom
        return (k,), coeff//10**(2 - k)

    assert f.termwise(func) == g
def test_Poly_length():
    """Poly.length counts nonzero terms."""
    assert Poly(0, x).length() == 0
    assert Poly(1, x).length() == 1
    assert Poly(x, x).length() == 1

    assert Poly(x + 1, x).length() == 2
    assert Poly(x**2 + 1, x).length() == 2
    assert Poly(x**2 + x + 1, x).length() == 3
def test_Poly_as_dict():
    """Poly.as_dict maps exponent tuples to coefficients; zero -> {}."""
    assert Poly(0, x).as_dict() == {}
    assert Poly(0, x, y, z).as_dict() == {}

    assert Poly(1, x).as_dict() == {(0,): 1}
    assert Poly(1, x, y, z).as_dict() == {(0, 0, 0): 1}

    assert Poly(x**2 + 3, x).as_dict() == {(2,): 1, (0,): 3}
    assert Poly(x**2 + 3, x, y, z).as_dict() == {(2, 0, 0): 1, (0, 0, 0): 3}

    assert Poly(3*x**2*y*z**3 + 4*x*y + 5*x*z).as_dict() == {(2, 1, 3): 3,
        (1, 1, 0): 4, (1, 0, 1): 5}
def test_Poly_as_expr():
    """Poly.as_expr reconstructs the expression; accepts per-gen values
    by dict or positionally, raising on symbols that aren't gens."""
    assert Poly(0, x).as_expr() == 0
    assert Poly(0, x, y, z).as_expr() == 0

    assert Poly(1, x).as_expr() == 1
    assert Poly(1, x, y, z).as_expr() == 1

    assert Poly(x**2 + 3, x).as_expr() == x**2 + 3
    assert Poly(x**2 + 3, x, y, z).as_expr() == x**2 + 3

    assert Poly(
        3*x**2*y*z**3 + 4*x*y + 5*x*z).as_expr() == 3*x**2*y*z**3 + 4*x*y + 5*x*z

    f = Poly(x**2 + 2*x*y**2 - y, x, y)

    assert f.as_expr() == -y + x**2 + 2*x*y**2

    # Partial and full substitution of gen values.
    assert f.as_expr({x: 5}) == 25 - y + 10*y**2
    assert f.as_expr({y: 6}) == -6 + 72*x + x**2

    assert f.as_expr({x: 5, y: 6}) == 379
    assert f.as_expr(5, 6) == 379

    raises(GeneratorsError, lambda: f.as_expr({z: 7}))
def test_Poly_lift():
    """Poly.lift: converts a poly over an algebraic extension (here
    QQ(I)) into one with rational coefficients whose roots include
    the original's."""
    assert Poly(x**4 - I*x + 17*I, x, gaussian=True).lift() == \
        Poly(x**16 + 2*x**10 + 578*x**8 + x**4 - 578*x**2 + 83521,
             x, domain='QQ')
def test_Poly_deflate():
    """Poly.deflate: returns (exponent gcds per gen, deflated poly)."""
    assert Poly(0, x).deflate() == ((1,), Poly(0, x))
    assert Poly(1, x).deflate() == ((1,), Poly(1, x))
    assert Poly(x, x).deflate() == ((1,), Poly(x, x))

    assert Poly(x**2, x).deflate() == ((2,), Poly(x, x))
    assert Poly(x**17, x).deflate() == ((17,), Poly(x, x))

    assert Poly(
        x**2*y*z**11 + x**4*z**11).deflate() == ((2, 1, 11), Poly(x*y*z + x**2*z))
def test_Poly_inject():
    """Poly.inject moves domain symbols into the gens (front=True puts
    them before the existing gens)."""
    f = Poly(x**2*y + x*y**3 + x*y + 1, x)

    assert f.inject() == Poly(x**2*y + x*y**3 + x*y + 1, x, y)
    assert f.inject(front=True) == Poly(y**3*x + y*x**2 + y*x + 1, y, x)
def test_Poly_eject():
    """Poly.eject moves selected gens into the coefficient domain; only
    contiguous front/back gen runs over a plain ring are supported."""
    f = Poly(x**2*y + x*y**3 + x*y + 1, x, y)

    assert f.eject(x) == Poly(x*y**3 + (x**2 + x)*y + 1, y, domain='ZZ[x]')
    assert f.eject(y) == Poly(y*x**2 + (y**3 + y)*x + 1, x, domain='ZZ[y]')

    ex = x + y + z + t + w
    g = Poly(ex, x, y, z, t, w)

    # Ejecting a prefix of the gens ...
    assert g.eject(x) == Poly(ex, y, z, t, w, domain='ZZ[x]')
    assert g.eject(x, y) == Poly(ex, z, t, w, domain='ZZ[x, y]')
    assert g.eject(x, y, z) == Poly(ex, t, w, domain='ZZ[x, y, z]')
    # ... or a suffix of the gens.
    assert g.eject(w) == Poly(ex, x, y, z, t, domain='ZZ[w]')
    assert g.eject(t, w) == Poly(ex, x, y, z, domain='ZZ[w, t]')
    assert g.eject(z, t, w) == Poly(ex, x, y, domain='ZZ[w, t, z]')

    # Composite domains and mid-sequence ejection are not supported.
    raises(DomainError, lambda: Poly(x*y, x, y, domain=ZZ[z]).eject(y))
    raises(NotImplementedError, lambda: Poly(x*y, x, y, z).eject(y))
def test_Poly_exclude():
    """Poly.exclude drops gens the poly doesn't use (but never all gens)."""
    assert Poly(x, x, y).exclude() == Poly(x, x)
    assert Poly(x*y, x, y).exclude() == Poly(x*y, x, y)
    assert Poly(1, x, y).exclude() == Poly(1, x, y)
def test_Poly__gen_to_level():
    """Poly._gen_to_level resolves a gen given as a (possibly negative)
    index, a Symbol, or a name string to its 0-based level."""
    assert Poly(1, x, y)._gen_to_level(-2) == 0
    assert Poly(1, x, y)._gen_to_level(-1) == 1
    assert Poly(1, x, y)._gen_to_level( 0) == 0
    assert Poly(1, x, y)._gen_to_level( 1) == 1

    raises(PolynomialError, lambda: Poly(1, x, y)._gen_to_level(-3))
    raises(PolynomialError, lambda: Poly(1, x, y)._gen_to_level( 2))

    assert Poly(1, x, y)._gen_to_level(x) == 0
    assert Poly(1, x, y)._gen_to_level(y) == 1

    assert Poly(1, x, y)._gen_to_level('x') == 0
    assert Poly(1, x, y)._gen_to_level('y') == 1

    raises(PolynomialError, lambda: Poly(1, x, y)._gen_to_level(z))
    raises(PolynomialError, lambda: Poly(1, x, y)._gen_to_level('z'))
def test_Poly_degree():
    """Poly.degree and the degree() function; zero poly has degree -oo,
    and gen= selects which generator's degree is reported."""
    assert Poly(0, x).degree() == -oo
    assert Poly(1, x).degree() == 0
    assert Poly(x, x).degree() == 1

    assert Poly(0, x).degree(gen=0) == -oo
    assert Poly(1, x).degree(gen=0) == 0
    assert Poly(x, x).degree(gen=0) == 1

    assert Poly(0, x).degree(gen=x) == -oo
    assert Poly(1, x).degree(gen=x) == 0
    assert Poly(x, x).degree(gen=x) == 1

    assert Poly(0, x).degree(gen='x') == -oo
    assert Poly(1, x).degree(gen='x') == 0
    assert Poly(x, x).degree(gen='x') == 1

    raises(PolynomialError, lambda: Poly(1, x).degree(gen=1))
    raises(PolynomialError, lambda: Poly(1, x).degree(gen=y))
    raises(PolynomialError, lambda: Poly(1, x).degree(gen='y'))

    # Default gen is the first one.
    assert Poly(1, x, y).degree() == 0
    assert Poly(2*y, x, y).degree() == 0
    assert Poly(x*y, x, y).degree() == 1

    assert Poly(1, x, y).degree(gen=x) == 0
    assert Poly(2*y, x, y).degree(gen=x) == 0
    assert Poly(x*y, x, y).degree(gen=x) == 1

    assert Poly(1, x, y).degree(gen=y) == 0
    assert Poly(2*y, x, y).degree(gen=y) == 1
    assert Poly(x*y, x, y).degree(gen=y) == 1

    assert degree(1, x) == 0
    assert degree(x, x) == 1

    assert degree(x*y**2, gen=x) == 1
    assert degree(x*y**2, gen=y) == 2

    # Positional gens: the first listed gen is the one measured.
    assert degree(x*y**2, x, y) == 1
    assert degree(x*y**2, y, x) == 2

    raises(ComputationFailed, lambda: degree(1))
def test_Poly_degree_list():
    """Poly.degree_list and degree_list(): per-gen degrees as a tuple."""
    assert Poly(0, x).degree_list() == (-oo,)
    assert Poly(0, x, y).degree_list() == (-oo, -oo)
    assert Poly(0, x, y, z).degree_list() == (-oo, -oo, -oo)

    assert Poly(1, x).degree_list() == (0,)
    assert Poly(1, x, y).degree_list() == (0, 0)
    assert Poly(1, x, y, z).degree_list() == (0, 0, 0)

    assert Poly(x**2*y + x**3*z**2 + 1).degree_list() == (3, 1, 2)

    assert degree_list(1, x) == (0,)
    assert degree_list(x, x) == (1,)

    assert degree_list(x*y**2) == (1, 2)

    raises(ComputationFailed, lambda: degree_list(1))
def test_Poly_total_degree():
    """Poly.total_degree: maximum sum of exponents over all terms."""
    assert Poly(x**2*y + x**3*z**2 + 1).total_degree() == 5
    assert Poly(x**2 + z**3).total_degree() == 3
    assert Poly(x*y*z + z**4).total_degree() == 4
    assert Poly(x**3 + x + 1).total_degree() == 3
def test_Poly_homogenize():
    """Poly.homogenize(s): pads each term with powers of s so all terms
    share the total degree; s may be a new or an existing gen."""
    assert Poly(x**2+y).homogenize(z) == Poly(x**2+y*z)
    assert Poly(x+y).homogenize(z) == Poly(x+y, x, y, z)
    assert Poly(x+y**2).homogenize(y) == Poly(x*y+y**2)
def test_Poly_homogeneous_order():
    """Poly.homogeneous_order: the common total degree, or None when
    the polynomial is not homogeneous; zero poly gives -oo."""
    assert Poly(0, x, y).homogeneous_order() == -oo
    assert Poly(1, x, y).homogeneous_order() == 0
    assert Poly(x, x, y).homogeneous_order() == 1
    assert Poly(x*y, x, y).homogeneous_order() == 2

    assert Poly(x + 1, x, y).homogeneous_order() is None
    assert Poly(x*y + x, x, y).homogeneous_order() is None

    assert Poly(x**5 + 2*x**3*y**2 + 9*x*y**4).homogeneous_order() == 5
    assert Poly(x**5 + 2*x**3*y**3 + 9*x*y**4).homogeneous_order() is None
def test_Poly_LC():
    """Leading coefficient: Poly.LC and the LC() function, per order."""
    assert Poly(0, x).LC() == 0
    assert Poly(1, x).LC() == 1
    assert Poly(2*x**2 + x, x).LC() == 2

    # Leading term depends on the monomial order.
    assert Poly(x*y**7 + 2*x**2*y**3).LC('lex') == 2
    assert Poly(x*y**7 + 2*x**2*y**3).LC('grlex') == 1

    assert LC(x*y**7 + 2*x**2*y**3, order='lex') == 2
    assert LC(x*y**7 + 2*x**2*y**3, order='grlex') == 1
def test_Poly_TC():
    """Trailing coefficient (constant term): Poly.TC."""
    assert Poly(0, x).TC() == 0
    assert Poly(1, x).TC() == 1
    assert Poly(2*x**2 + x, x).TC() == 0
def test_Poly_EC():
    """Last nonzero ("ending") coefficient: Poly.EC, per order."""
    assert Poly(0, x).EC() == 0
    assert Poly(1, x).EC() == 1
    assert Poly(2*x**2 + x, x).EC() == 1

    assert Poly(x*y**7 + 2*x**2*y**3).EC('lex') == 1
    assert Poly(x*y**7 + 2*x**2*y**3).EC('grlex') == 2
def test_Poly_coeff():
    """Poly.coeff_monomial: exact coefficient of a single monomial;
    non-monomial arguments raise ValueError, Expr.coeff differs."""
    assert Poly(0, x).coeff_monomial(1) == 0
    assert Poly(0, x).coeff_monomial(x) == 0

    assert Poly(1, x).coeff_monomial(1) == 1
    assert Poly(1, x).coeff_monomial(x) == 0

    assert Poly(x**8, x).coeff_monomial(1) == 0
    assert Poly(x**8, x).coeff_monomial(x**7) == 0
    assert Poly(x**8, x).coeff_monomial(x**8) == 1
    assert Poly(x**8, x).coeff_monomial(x**9) == 0

    assert Poly(3*x*y**2 + 1, x, y).coeff_monomial(1) == 1
    assert Poly(3*x*y**2 + 1, x, y).coeff_monomial(x*y**2) == 3

    # exp(8) is a domain element here, not a gen.
    p = Poly(24*x*y*exp(8) + 23*x, x, y)

    assert p.coeff_monomial(x) == 23
    assert p.coeff_monomial(y) == 0
    assert p.coeff_monomial(x*y) == 24*exp(8)

    # Expr.coeff collects across monomials; Poly.coeff is deliberately
    # not implemented to avoid the ambiguity.
    assert p.as_expr().coeff(x) == 24*y*exp(8) + 23
    raises(NotImplementedError, lambda: p.coeff(x))

    raises(ValueError, lambda: Poly(x + 1).coeff_monomial(0))
    raises(ValueError, lambda: Poly(x + 1).coeff_monomial(3*x))
    raises(ValueError, lambda: Poly(x + 1).coeff_monomial(3*x*y))
def test_Poly_nth():
    """Poly.nth(*exponents): coefficient at the given exponent vector;
    must supply one exponent per gen."""
    assert Poly(0, x).nth(0) == 0
    assert Poly(0, x).nth(1) == 0

    assert Poly(1, x).nth(0) == 1
    assert Poly(1, x).nth(1) == 0

    assert Poly(x**8, x).nth(0) == 0
    assert Poly(x**8, x).nth(7) == 0
    assert Poly(x**8, x).nth(8) == 1
    assert Poly(x**8, x).nth(9) == 0

    assert Poly(3*x*y**2 + 1, x, y).nth(0, 0) == 1
    assert Poly(3*x*y**2 + 1, x, y).nth(1, 2) == 3

    raises(ValueError, lambda: Poly(x*y + 1, x, y).nth(1))
def test_Poly_LM():
    """Leading monomial: Poly.LM() yields exponent tuples, LM() yields exprs."""
    assert Poly(0, x).LM() == (0,)
    assert Poly(1, x).LM() == (0,)
    assert Poly(2*x**2 + x, x).LM() == (2,)

    # The leading monomial depends on the monomial order.
    assert Poly(x*y**7 + 2*x**2*y**3).LM('lex') == (2, 3)
    assert Poly(x*y**7 + 2*x**2*y**3).LM('grlex') == (1, 7)

    # The functional form returns expressions, not exponent tuples.
    assert LM(x*y**7 + 2*x**2*y**3, order='lex') == x**2*y**3
    assert LM(x*y**7 + 2*x**2*y**3, order='grlex') == x*y**7
def test_Poly_LM_custom_order():
    """LM() also accepts a callable defining a custom monomial order."""
    f = Poly(x**2*y**3*z + x**2*y*z**3 + x*y*z + 1)

    def rev_lex(monom):
        # Compare exponent tuples right-to-left instead of left-to-right.
        return tuple(reversed(monom))

    assert f.LM(order='lex') == (2, 3, 1)
    assert f.LM(order=rev_lex) == (2, 1, 3)
def test_Poly_EM():
    """Trailing ("ending") monomial under the given monomial order."""
    assert Poly(0, x).EM() == (0,)
    assert Poly(1, x).EM() == (0,)
    assert Poly(2*x**2 + x, x).EM() == (1,)

    assert Poly(x*y**7 + 2*x**2*y**3).EM('lex') == (1, 7)
    assert Poly(x*y**7 + 2*x**2*y**3).EM('grlex') == (2, 3)
def test_Poly_LT():
    """Leading term as a (monomial, coefficient) pair; LT() gives the expr."""
    assert Poly(0, x).LT() == ((0,), 0)
    assert Poly(1, x).LT() == ((0,), 1)
    assert Poly(2*x**2 + x, x).LT() == ((2,), 2)

    assert Poly(x*y**7 + 2*x**2*y**3).LT('lex') == ((2, 3), 2)
    assert Poly(x*y**7 + 2*x**2*y**3).LT('grlex') == ((1, 7), 1)

    # The functional form returns the leading term as an expression.
    assert LT(x*y**7 + 2*x**2*y**3, order='lex') == 2*x**2*y**3
    assert LT(x*y**7 + 2*x**2*y**3, order='grlex') == x*y**7
def test_Poly_ET():
    """Trailing ("ending") term as a (monomial, coefficient) pair."""
    assert Poly(0, x).ET() == ((0,), 0)
    assert Poly(1, x).ET() == ((0,), 1)
    assert Poly(2*x**2 + x, x).ET() == ((1,), 1)

    assert Poly(x*y**7 + 2*x**2*y**3).ET('lex') == ((1, 7), 1)
    assert Poly(x*y**7 + 2*x**2*y**3).ET('grlex') == ((2, 3), 2)
def test_Poly_max_norm():
    """max_norm() is the largest absolute value among the coefficients."""
    for const, norm in [(-1, 1), (0, 0), (1, 1)]:
        assert Poly(const, x).max_norm() == norm
def test_Poly_l1_norm():
    """l1_norm() is the sum of absolute values of the coefficients."""
    for const, norm in [(-1, 1), (0, 0), (1, 1)]:
        assert Poly(const, x).l1_norm() == norm
def test_Poly_clear_denoms():
coeff, poly = Poly(x + 2, x).clear_denoms()
assert coeff == 1 and poly == Poly(
x + 2, x, domain='ZZ') and poly.get_domain() == ZZ
coeff, poly = Poly(x/2 + 1, x).clear_denoms()
assert coeff == 2 and poly == Poly(
x + 2, x, domain='QQ') and poly.get_domain() == QQ
coeff, poly = Poly(x/2 + 1, x).clear_denoms(convert=True)
assert coeff == 2 and poly == Poly(
x + 2, x, domain='ZZ') and poly.get_domain() == ZZ
coeff, poly = Poly(x/y + 1, x).clear_denoms(convert=True)
assert coeff == y and poly == Poly(
x + y, x, domain='ZZ[y]') and poly.get_domain() == ZZ[y]
coeff, poly = Poly(x/3 + sqrt(2), x, domain='EX').clear_denoms()
assert coeff == 3 and poly == Poly(
x + 3*sqrt(2), x, domain='EX') and poly.get_domain() == EX
coeff, poly = Poly(
x/3 + sqrt(2), x, domain='EX').clear_denoms(convert=True)
assert coeff == 3 and poly == Poly(
x + 3*sqrt(2), x, domain='EX') and poly.get_domain() == EX
def test_Poly_rat_clear_denoms():
    """rat_clear_denoms(g) clears denominators of f/g as a pair."""
    f = Poly(x**2/y + 1, x)
    g = Poly(x**3 + y, x)

    assert f.rat_clear_denoms(g) == \
        (Poly(x**2 + y, x), Poly(y*x**3 + y**2, x))

    # Over EX there is nothing to clear: the pair is returned unchanged.
    f = f.set_domain(EX)
    g = g.set_domain(EX)

    assert f.rat_clear_denoms(g) == (f, g)
def test_Poly_integrate():
    """Poly.integrate(): default generator, explicit generators, and
    (gen, order) tuples; repeated and mixed integration."""
    assert Poly(x + 1).integrate() == Poly(x**2/2 + x)
    assert Poly(x + 1).integrate(x) == Poly(x**2/2 + x)
    assert Poly(x + 1).integrate((x, 1)) == Poly(x**2/2 + x)

    assert Poly(x*y + 1).integrate(x) == Poly(x**2*y/2 + x)
    assert Poly(x*y + 1).integrate(y) == Poly(x*y**2/2 + y)

    # Repeating a generator integrates twice in that variable.
    assert Poly(x*y + 1).integrate(x, x) == Poly(x**3*y/6 + x**2/2)
    assert Poly(x*y + 1).integrate(y, y) == Poly(x*y**3/6 + y**2/2)

    assert Poly(x*y + 1).integrate((x, 2)) == Poly(x**3*y/6 + x**2/2)
    assert Poly(x*y + 1).integrate((y, 2)) == Poly(x*y**3/6 + y**2/2)

    # Mixed partial integration commutes.
    assert Poly(x*y + 1).integrate(x, y) == Poly(x**2*y**2/4 + x*y)
    assert Poly(x*y + 1).integrate(y, x) == Poly(x**2*y**2/4 + x*y)
def test_Poly_diff():
    """Poly.diff(): default generator, explicit generators, and
    (gen, order) tuples; repeated and mixed differentiation."""
    assert Poly(x**2 + x).diff() == Poly(2*x + 1)
    assert Poly(x**2 + x).diff(x) == Poly(2*x + 1)
    assert Poly(x**2 + x).diff((x, 1)) == Poly(2*x + 1)

    assert Poly(x**2*y**2 + x*y).diff(x) == Poly(2*x*y**2 + y)
    assert Poly(x**2*y**2 + x*y).diff(y) == Poly(2*x**2*y + x)

    # Repeating a generator differentiates twice in that variable.
    assert Poly(x**2*y**2 + x*y).diff(x, x) == Poly(2*y**2, x, y)
    assert Poly(x**2*y**2 + x*y).diff(y, y) == Poly(2*x**2, x, y)

    assert Poly(x**2*y**2 + x*y).diff((x, 2)) == Poly(2*y**2, x, y)
    assert Poly(x**2*y**2 + x*y).diff((y, 2)) == Poly(2*x**2, x, y)

    # Mixed partial derivatives commute.
    assert Poly(x**2*y**2 + x*y).diff(x, y) == Poly(4*x*y + 1)
    assert Poly(x**2*y**2 + x*y).diff(y, x) == Poly(4*x*y + 1)
def test_issue_9585():
    """Regression for sympy issue 9585: diff()/Derivative must work on Poly."""
    assert diff(Poly(x**2 + x)) == Poly(2*x + 1)
    # evaluate=False must yield an unevaluated Derivative ...
    assert diff(Poly(x**2 + x), x, evaluate=False) == \
        Derivative(Poly(x**2 + x), x)
    # ... whose doit() produces the differentiated Poly.
    assert Derivative(Poly(x**2 + x), x).doit() == Poly(2*x + 1)
def test_Poly_eval():
    """Poly.eval(): by value, by generator index, by generator, by name,
    partial evaluation of multivariate polys, dict/sequence arguments,
    and automatic domain extension."""
    assert Poly(0, x).eval(7) == 0
    assert Poly(1, x).eval(7) == 1
    assert Poly(x, x).eval(7) == 7

    # Generator may be given as an index ...
    assert Poly(0, x).eval(0, 7) == 0
    assert Poly(1, x).eval(0, 7) == 1
    assert Poly(x, x).eval(0, 7) == 7

    # ... as a symbol ...
    assert Poly(0, x).eval(x, 7) == 0
    assert Poly(1, x).eval(x, 7) == 1
    assert Poly(x, x).eval(x, 7) == 7

    # ... or by name.
    assert Poly(0, x).eval('x', 7) == 0
    assert Poly(1, x).eval('x', 7) == 1
    assert Poly(x, x).eval('x', 7) == 7

    # Unknown generators (index, symbol, or name) are rejected.
    raises(PolynomialError, lambda: Poly(1, x).eval(1, 7))
    raises(PolynomialError, lambda: Poly(1, x).eval(y, 7))
    raises(PolynomialError, lambda: Poly(1, x).eval('y', 7))

    # Partial evaluation of a multivariate poly yields a poly in the
    # remaining generators.
    assert Poly(123, x, y).eval(7) == Poly(123, y)
    assert Poly(2*y, x, y).eval(7) == Poly(2*y, y)
    assert Poly(x*y, x, y).eval(7) == Poly(7*y, y)

    assert Poly(123, x, y).eval(x, 7) == Poly(123, y)
    assert Poly(2*y, x, y).eval(x, 7) == Poly(2*y, y)
    assert Poly(x*y, x, y).eval(x, 7) == Poly(7*y, y)

    assert Poly(123, x, y).eval(y, 7) == Poly(123, x)
    assert Poly(2*y, x, y).eval(y, 7) == Poly(14, x)
    assert Poly(x*y, x, y).eval(y, 7) == Poly(7*x, x)

    # Dict arguments substitute the named generators only.
    assert Poly(x*y + y, x, y).eval({x: 7}) == Poly(8*y, y)
    assert Poly(x*y + y, x, y).eval({y: 7}) == Poly(7*x + 7, x)

    assert Poly(x*y + y, x, y).eval({x: 6, y: 7}) == 49
    assert Poly(x*y + y, x, y).eval({x: 7, y: 6}) == 48

    # Sequences assign values to the generators positionally.
    assert Poly(x*y + y, x, y).eval((6, 7)) == 49
    assert Poly(x*y + y, x, y).eval([6, 7]) == 49

    # By default the domain is extended to fit the value.
    assert Poly(x + 1, domain='ZZ').eval(S(1)/2) == S(3)/2
    assert Poly(x + 1, domain='ZZ').eval(sqrt(2)) == sqrt(2) + 1

    raises(ValueError, lambda: Poly(x*y + y, x, y).eval((6, 7, 8)))
    # auto=False forbids leaving the declared domain.
    raises(DomainError, lambda: Poly(x + 1, domain='ZZ').eval(S(1)/2, auto=False))

    # issue 6344: evaluation at a rational function of another symbol.
    alpha = Symbol('alpha')
    result = (2*alpha*z - 2*alpha + z**2 + 3)/(z**2 - 2*z + 1)

    f = Poly(x**2 + (alpha - 1)*x - alpha + 1, x, domain='ZZ[alpha]')
    assert f.eval((z + 1)/(z - 1)) == result

    g = Poly(x**2 + (alpha - 1)*x - alpha + 1, x, y, domain='ZZ[alpha]')
    assert g.eval((z + 1)/(z - 1)) == Poly(result, y, domain='ZZ(alpha,z)')
def test_Poly___call__():
    """Calling a Poly substitutes values for its generators left to right."""
    poly = Poly(2*x*y + 3*x + y + 2*z)

    # One, two, and all three generators substituted in turn.
    assert poly(2) == Poly(5*y + 2*z + 6)
    assert poly(2, 5) == Poly(2*z + 31)
    assert poly(2, 5, 7) == 45
def test_parallel_poly_from_expr():
    """parallel_poly_from_expr() unifies a list of expressions/polys into
    polynomials over a common set of generators and a common domain.

    Fix: four assertion pairs were exact byte-for-byte duplicates of the
    preceding pair and have been removed (no coverage lost).
    """
    # Mixed Expr/Poly inputs with explicit generators.
    assert parallel_poly_from_expr(
        [x - 1, x**2 - 1], x)[0] == [Poly(x - 1, x), Poly(x**2 - 1, x)]
    assert parallel_poly_from_expr(
        [Poly(x - 1, x), x**2 - 1], x)[0] == [Poly(x - 1, x), Poly(x**2 - 1, x)]
    assert parallel_poly_from_expr(
        [x - 1, Poly(x**2 - 1, x)], x)[0] == [Poly(x - 1, x), Poly(x**2 - 1, x)]
    assert parallel_poly_from_expr([Poly(
        x - 1, x), Poly(x**2 - 1, x)], x)[0] == [Poly(x - 1, x), Poly(x**2 - 1, x)]

    # Extra generators are attached to every resulting polynomial.
    assert parallel_poly_from_expr(
        [x - 1, x**2 - 1], x, y)[0] == [Poly(x - 1, x, y), Poly(x**2 - 1, x, y)]
    assert parallel_poly_from_expr([Poly(
        x - 1, x), x**2 - 1], x, y)[0] == [Poly(x - 1, x, y), Poly(x**2 - 1, x, y)]
    assert parallel_poly_from_expr([x - 1, Poly(
        x**2 - 1, x)], x, y)[0] == [Poly(x - 1, x, y), Poly(x**2 - 1, x, y)]
    assert parallel_poly_from_expr([Poly(x - 1, x), Poly(
        x**2 - 1, x)], x, y)[0] == [Poly(x - 1, x, y), Poly(x**2 - 1, x, y)]

    # Generators may also be inferred from the inputs.
    assert parallel_poly_from_expr(
        [x - 1, x**2 - 1])[0] == [Poly(x - 1, x), Poly(x**2 - 1, x)]
    assert parallel_poly_from_expr(
        [Poly(x - 1, x), x**2 - 1])[0] == [Poly(x - 1, x), Poly(x**2 - 1, x)]
    assert parallel_poly_from_expr(
        [x - 1, Poly(x**2 - 1, x)])[0] == [Poly(x - 1, x), Poly(x**2 - 1, x)]
    assert parallel_poly_from_expr(
        [Poly(x - 1, x), Poly(x**2 - 1, x)])[0] == [Poly(x - 1, x), Poly(x**2 - 1, x)]

    # Constants adopt the generators inferred from their companions.
    assert parallel_poly_from_expr(
        [1, x**2 - 1])[0] == [Poly(1, x), Poly(x**2 - 1, x)]
    assert parallel_poly_from_expr(
        [1, Poly(x**2 - 1, x)])[0] == [Poly(1, x), Poly(x**2 - 1, x)]
    assert parallel_poly_from_expr(
        [x**2 - 1, 1])[0] == [Poly(x**2 - 1, x), Poly(1, x)]
    assert parallel_poly_from_expr(
        [Poly(x**2 - 1, x), 1])[0] == [Poly(x**2 - 1, x), Poly(1, x)]

    assert parallel_poly_from_expr([Poly(x, x, y), Poly(y, x, y)], x, y, order='lex')[0] == \
        [Poly(x, x, y, domain='ZZ'), Poly(y, x, y, domain='ZZ')]

    # All-constant input has no generators to infer.
    raises(PolificationFailed, lambda: parallel_poly_from_expr([0, 1]))
def test_pdiv():
    """Pseudo-division: pdiv/prem/pquo/pexquo as methods and functions,
    with explicit generators and polys=True/False round-trips."""
    f, g = x**2 - y**2, x - y
    q, r = x + y, 0

    F, G, Q, R = [ Poly(h, x, y) for h in (f, g, q, r) ]

    # Method forms on Poly.
    assert F.pdiv(G) == (Q, R)
    assert F.prem(G) == R
    assert F.pquo(G) == Q
    assert F.pexquo(G) == Q

    # Function forms on expressions.
    assert pdiv(f, g) == (q, r)
    assert prem(f, g) == r
    assert pquo(f, g) == q
    assert pexquo(f, g) == q

    # Generators given positionally ...
    assert pdiv(f, g, x, y) == (q, r)
    assert prem(f, g, x, y) == r
    assert pquo(f, g, x, y) == q
    assert pexquo(f, g, x, y) == q

    # ... or as a tuple.
    assert pdiv(f, g, (x, y)) == (q, r)
    assert prem(f, g, (x, y)) == r
    assert pquo(f, g, (x, y)) == q
    assert pexquo(f, g, (x, y)) == q

    # Poly inputs keep Poly outputs.
    assert pdiv(F, G) == (Q, R)
    assert prem(F, G) == R
    assert pquo(F, G) == Q
    assert pexquo(F, G) == Q

    # polys flag overrides the input type.
    assert pdiv(f, g, polys=True) == (Q, R)
    assert prem(f, g, polys=True) == R
    assert pquo(f, g, polys=True) == Q
    assert pexquo(f, g, polys=True) == Q

    assert pdiv(F, G, polys=False) == (q, r)
    assert prem(F, G, polys=False) == r
    assert pquo(F, G, polys=False) == q
    assert pexquo(F, G, polys=False) == q

    # Plain numbers cannot be polynomially divided.
    raises(ComputationFailed, lambda: pdiv(4, 2))
    raises(ComputationFailed, lambda: prem(4, 2))
    raises(ComputationFailed, lambda: pquo(4, 2))
    raises(ComputationFailed, lambda: pexquo(4, 2))
def test_div():
    """Polynomial division: div/rem/quo/exquo in all calling conventions,
    plus domain/auto interaction and domain preservation by the methods."""
    f, g = x**2 - y**2, x - y
    q, r = x + y, 0

    F, G, Q, R = [ Poly(h, x, y) for h in (f, g, q, r) ]

    # Method forms on Poly.
    assert F.div(G) == (Q, R)
    assert F.rem(G) == R
    assert F.quo(G) == Q
    assert F.exquo(G) == Q

    # Function forms on expressions.
    assert div(f, g) == (q, r)
    assert rem(f, g) == r
    assert quo(f, g) == q
    assert exquo(f, g) == q

    # Generators given positionally ...
    assert div(f, g, x, y) == (q, r)
    assert rem(f, g, x, y) == r
    assert quo(f, g, x, y) == q
    assert exquo(f, g, x, y) == q

    # ... or as a tuple.
    assert div(f, g, (x, y)) == (q, r)
    assert rem(f, g, (x, y)) == r
    assert quo(f, g, (x, y)) == q
    assert exquo(f, g, (x, y)) == q

    # Poly inputs keep Poly outputs.
    assert div(F, G) == (Q, R)
    assert rem(F, G) == R
    assert quo(F, G) == Q
    assert exquo(F, G) == Q

    # polys flag overrides the input type.
    assert div(f, g, polys=True) == (Q, R)
    assert rem(f, g, polys=True) == R
    assert quo(f, g, polys=True) == Q
    assert exquo(f, g, polys=True) == Q

    assert div(F, G, polys=False) == (q, r)
    assert rem(F, G, polys=False) == r
    assert quo(F, G, polys=False) == q
    assert exquo(F, G, polys=False) == q

    raises(ComputationFailed, lambda: div(4, 2))
    raises(ComputationFailed, lambda: rem(4, 2))
    raises(ComputationFailed, lambda: quo(4, 2))
    raises(ComputationFailed, lambda: exquo(4, 2))

    # Over ZZ the division stops early (qz, rz); over QQ it proceeds
    # (qq, rq). auto=True (the default) upgrades ZZ to QQ.
    f, g = x**2 + 1, 2*x - 4

    qz, rz = 0, x**2 + 1
    qq, rq = x/2 + 1, 5

    assert div(f, g) == (qq, rq)
    assert div(f, g, auto=True) == (qq, rq)
    assert div(f, g, auto=False) == (qz, rz)
    assert div(f, g, domain=ZZ) == (qz, rz)
    assert div(f, g, domain=QQ) == (qq, rq)
    assert div(f, g, domain=ZZ, auto=True) == (qq, rq)
    assert div(f, g, domain=ZZ, auto=False) == (qz, rz)
    assert div(f, g, domain=QQ, auto=True) == (qq, rq)
    assert div(f, g, domain=QQ, auto=False) == (qq, rq)

    assert rem(f, g) == rq
    assert rem(f, g, auto=True) == rq
    assert rem(f, g, auto=False) == rz
    assert rem(f, g, domain=ZZ) == rz
    assert rem(f, g, domain=QQ) == rq
    assert rem(f, g, domain=ZZ, auto=True) == rq
    assert rem(f, g, domain=ZZ, auto=False) == rz
    assert rem(f, g, domain=QQ, auto=True) == rq
    assert rem(f, g, domain=QQ, auto=False) == rq

    assert quo(f, g) == qq
    assert quo(f, g, auto=True) == qq
    assert quo(f, g, auto=False) == qz
    assert quo(f, g, domain=ZZ) == qz
    assert quo(f, g, domain=QQ) == qq
    assert quo(f, g, domain=ZZ, auto=True) == qq
    assert quo(f, g, domain=ZZ, auto=False) == qz
    assert quo(f, g, domain=QQ, auto=True) == qq
    assert quo(f, g, domain=QQ, auto=False) == qq

    # exquo demands an exact quotient; over ZZ x**2 / (2*x) is not exact.
    f, g, q = x**2, 2*x, x/2

    assert exquo(f, g) == q
    assert exquo(f, g, auto=True) == q
    raises(ExactQuotientFailed, lambda: exquo(f, g, auto=False))
    raises(ExactQuotientFailed, lambda: exquo(f, g, domain=ZZ))
    assert exquo(f, g, domain=QQ) == q
    assert exquo(f, g, domain=ZZ, auto=True) == q
    raises(ExactQuotientFailed, lambda: exquo(f, g, domain=ZZ, auto=False))
    assert exquo(f, g, domain=QQ, auto=True) == q
    assert exquo(f, g, domain=QQ, auto=False) == q

    # When division is exact the methods keep the original ZZ domain.
    f, g = Poly(x**2), Poly(x)

    q, r = f.div(g)
    assert q.get_domain().is_ZZ and r.get_domain().is_ZZ
    r = f.rem(g)
    assert r.get_domain().is_ZZ
    q = f.quo(g)
    assert q.get_domain().is_ZZ
    q = f.exquo(g)
    assert q.get_domain().is_ZZ
def test_gcdex():
    """Extended Euclidean algorithm: half_gcdex, gcdex, and invert, for
    polynomials and for integers; auto=False requires a field domain."""
    f, g = 2*x, x**2 - 16
    s, t, h = x/32, -Rational(1, 16), 1

    F, G, S, T, H = [ Poly(u, x, domain='QQ') for u in (f, g, s, t, h) ]

    # Method forms: s*f == h (mod g), s*f + t*g == h, and inversion.
    assert F.half_gcdex(G) == (S, H)
    assert F.gcdex(G) == (S, T, H)
    assert F.invert(G) == S

    assert half_gcdex(f, g) == (s, h)
    assert gcdex(f, g) == (s, t, h)
    assert invert(f, g) == s

    assert half_gcdex(f, g, x) == (s, h)
    assert gcdex(f, g, x) == (s, t, h)
    assert invert(f, g, x) == s

    assert half_gcdex(f, g, (x,)) == (s, h)
    assert gcdex(f, g, (x,)) == (s, t, h)
    assert invert(f, g, (x,)) == s

    assert half_gcdex(F, G) == (S, H)
    assert gcdex(F, G) == (S, T, H)
    assert invert(F, G) == S

    assert half_gcdex(f, g, polys=True) == (S, H)
    assert gcdex(f, g, polys=True) == (S, T, H)
    assert invert(f, g, polys=True) == S

    assert half_gcdex(F, G, polys=False) == (s, h)
    assert gcdex(F, G, polys=False) == (s, t, h)
    assert invert(F, G, polys=False) == s

    # Integer inputs use integer arithmetic.
    assert half_gcdex(100, 2004) == (-20, 4)
    assert gcdex(100, 2004) == (-20, 1, 4)
    assert invert(3, 7) == 5

    # auto=False keeps ZZ, where the extended GCD is not defined.
    raises(DomainError, lambda: half_gcdex(x + 1, 2*x + 1, auto=False))
    raises(DomainError, lambda: gcdex(x + 1, 2*x + 1, auto=False))
    raises(DomainError, lambda: invert(x + 1, 2*x + 1, auto=False))
def test_revert():
    """revert(n) computes the multiplicative inverse series modulo x**n."""
    series = Poly(1 - x**2/2 + x**4/24 - x**6/720)
    inverse = Poly(61*x**6/720 + 5*x**4/24 + x**2/2 + 1)

    assert series.revert(8) == inverse
def test_subresultants():
    """subresultants() returns the subresultant PRS of two polynomials."""
    p, q, r = x**2 - 2*x + 1, x**2 - 1, 2*x - 2
    P, Q, R = Poly(p), Poly(q), Poly(r)

    # Method and function forms, generator variants, and polys flag.
    assert P.subresultants(Q) == [P, Q, R]
    assert subresultants(p, q) == [p, q, r]
    assert subresultants(p, q, x) == [p, q, r]
    assert subresultants(p, q, (x,)) == [p, q, r]
    assert subresultants(P, Q) == [P, Q, R]
    assert subresultants(p, q, polys=True) == [P, Q, R]
    assert subresultants(P, Q, polys=False) == [p, q, r]

    raises(ComputationFailed, lambda: subresultants(4, 2))
def test_resultant():
    """resultant(): zero for polys with a common root, symbolic case,
    and the includePRS flag returning the remainder sequence."""
    # f and g share the root x = 1, so the resultant vanishes.
    f, g, h = x**2 - 2*x + 1, x**2 - 1, 0
    F, G = Poly(f), Poly(g)

    assert F.resultant(G) == h
    assert resultant(f, g) == h
    assert resultant(f, g, x) == h
    assert resultant(f, g, (x,)) == h
    assert resultant(F, G) == h
    assert resultant(f, g, polys=True) == h
    assert resultant(F, G, polys=False) == h
    # includePRS also returns the polynomial remainder sequence.
    assert resultant(
        f, g, includePRS=True) == (h, [f, g, 2*x - 2])

    # Symbolic coefficients: res(x - a, x - b) = a - b.
    f, g, h = x - a, x - b, a - b
    F, G, H = Poly(f), Poly(g), Poly(h)

    assert F.resultant(G) == H
    assert resultant(f, g) == h
    assert resultant(f, g, x) == h
    assert resultant(f, g, (x,)) == h
    assert resultant(F, G) == H
    assert resultant(f, g, polys=True) == H
    assert resultant(F, G, polys=False) == h

    raises(ComputationFailed, lambda: resultant(4, 2))
def test_discriminant():
    """discriminant(): numeric cubic case and the classic quadratic
    b**2 - 4*a*c with symbolic coefficients."""
    f, g = x**3 + 3*x**2 + 9*x - 13, -11664
    F = Poly(f)

    assert F.discriminant() == g
    assert discriminant(f) == g
    assert discriminant(f, x) == g
    assert discriminant(f, (x,)) == g
    assert discriminant(F) == g
    assert discriminant(f, polys=True) == g
    assert discriminant(F, polys=False) == g

    # Quadratic in x with symbolic a, b, c.
    f, g = a*x**2 + b*x + c, b**2 - 4*a*c
    F, G = Poly(f), Poly(g)

    assert F.discriminant() == G
    assert discriminant(f) == g
    assert discriminant(f, x, a, b, c) == g
    assert discriminant(f, (x, a, b, c)) == g
    assert discriminant(F) == G
    assert discriminant(f, polys=True) == G
    assert discriminant(F, polys=False) == g

    raises(ComputationFailed, lambda: discriminant(4))
def test_dispersion():
    """API smoke test for dispersionset()/dispersion() (one- and two-poly
    forms); the mathematical tests live in a dedicated test file."""
    # We test only the API here. For more mathematical
    # tests see the dedicated test file.

    fp = poly((x + 1)*(x + 2), x)
    assert sorted(fp.dispersionset()) == [0, 1]
    assert fp.dispersion() == 1

    fp = poly(x**4 - 3*x**2 + 1, x)
    gp = fp.shift(-3)
    assert sorted(fp.dispersionset(gp)) == [2, 3, 4]
    assert fp.dispersion(gp) == 4
def test_gcd_list():
    """gcd_list(): GCD of a sequence of expressions, numeric fallbacks,
    and behavior on empty input with/without an explicit generator."""
    F = [x**3 - 1, x**2 - 1, x**2 - 3*x + 2]

    assert gcd_list(F) == x - 1
    assert gcd_list(F, polys=True) == Poly(x - 1)

    assert gcd_list([]) == 0
    assert gcd_list([1, 2]) == 1
    assert gcd_list([4, 6, 8]) == 2

    # The lone element simplifies to 0, so the GCD is 0.
    assert gcd_list([x*(y + 42) - x*y - x*42]) == 0

    # Empty input with a generator yields the zero of that context.
    gcd = gcd_list([], x)
    assert gcd.is_Number and gcd is S.Zero

    gcd = gcd_list([], x, polys=True)
    assert gcd.is_Poly and gcd.is_zero

    # polys=True without a generator has nothing to build a Poly from.
    raises(ComputationFailed, lambda: gcd_list([], polys=True))
def test_lcm_list():
    """lcm_list(): LCM of a sequence of expressions, numeric fallbacks,
    and behavior on empty input with/without an explicit generator."""
    F = [x**3 - 1, x**2 - 1, x**2 - 3*x + 2]

    assert lcm_list(F) == x**5 - x**4 - 2*x**3 - x**2 + x + 2
    assert lcm_list(F, polys=True) == Poly(x**5 - x**4 - 2*x**3 - x**2 + x + 2)

    assert lcm_list([]) == 1
    assert lcm_list([1, 2]) == 2
    assert lcm_list([4, 6, 8]) == 24

    # The lone element simplifies to 0, so the LCM is 0.
    assert lcm_list([x*(y + 42) - x*y - x*42]) == 0

    # Empty input with a generator yields the one of that context.
    lcm = lcm_list([], x)
    assert lcm.is_Number and lcm is S.One

    lcm = lcm_list([], x, polys=True)
    assert lcm.is_Poly and lcm.is_one

    # polys=True without a generator has nothing to build a Poly from.
    raises(ComputationFailed, lambda: lcm_list([], polys=True))
def test_gcd():
    """cofactors/gcd/lcm in all calling conventions, over floats,
    integers, and finite fields (modulus/symmetric flags).

    Fix: the float block (f, g = 1.0*x**2 - 1.0, ...) appeared twice,
    byte-for-byte identical; the duplicate has been removed.
    """
    f, g = x**3 - 1, x**2 - 1
    s, t = x**2 + x + 1, x + 1
    h, r = x - 1, x**4 + x**3 - x - 1

    F, G, S, T, H, R = [ Poly(u) for u in (f, g, s, t, h, r) ]

    # Method forms on Poly.
    assert F.cofactors(G) == (H, S, T)
    assert F.gcd(G) == H
    assert F.lcm(G) == R

    # Function forms; generators positional or as a tuple.
    assert cofactors(f, g) == (h, s, t)
    assert gcd(f, g) == h
    assert lcm(f, g) == r

    assert cofactors(f, g, x) == (h, s, t)
    assert gcd(f, g, x) == h
    assert lcm(f, g, x) == r

    assert cofactors(f, g, (x,)) == (h, s, t)
    assert gcd(f, g, (x,)) == h
    assert lcm(f, g, (x,)) == r

    assert cofactors(F, G) == (H, S, T)
    assert gcd(F, G) == H
    assert lcm(F, G) == R

    # polys flag overrides the input type.
    assert cofactors(f, g, polys=True) == (H, S, T)
    assert gcd(f, g, polys=True) == H
    assert lcm(f, g, polys=True) == R

    assert cofactors(F, G, polys=False) == (h, s, t)
    assert gcd(F, G, polys=False) == h
    assert lcm(F, G, polys=False) == r

    # Floating-point coefficients: gcd of the monic parts.
    f, g = 1.0*x**2 - 1.0, 1.0*x - 1.0
    h, s, t = g, 1.0*x + 1.0, 1.0

    assert cofactors(f, g) == (h, s, t)
    assert gcd(f, g) == h
    assert lcm(f, g) == f

    # Plain integers.
    assert cofactors(8, 6) == (2, 4, 3)
    assert gcd(8, 6) == 2
    assert lcm(8, 6) == 24

    # GF(11) with symmetric (balanced) representation.
    f, g = x**2 - 3*x - 4, x**3 - 4*x**2 + x - 4
    l = x**4 - 3*x**3 - 3*x**2 - 3*x - 4
    h, s, t = x - 4, x + 1, x**2 + 1

    assert cofactors(f, g, modulus=11) == (h, s, t)
    assert gcd(f, g, modulus=11) == h
    assert lcm(f, g, modulus=11) == l

    # GF(11) with non-negative representation.
    f, g = x**2 + 8*x + 7, x**3 + 7*x**2 + x + 7
    l = x**4 + 8*x**3 + 8*x**2 + 8*x + 7
    h, s, t = x + 7, x + 1, x**2 + 1

    assert cofactors(f, g, modulus=11, symmetric=False) == (h, s, t)
    assert gcd(f, g, modulus=11, symmetric=False) == h
    assert lcm(f, g, modulus=11, symmetric=False) == l

    # A single argument is not a valid call.
    raises(TypeError, lambda: gcd(x))
    raises(TypeError, lambda: lcm(x))
def test_gcd_numbers_vs_polys():
    """gcd() on numbers vs polynomials: the result type follows the
    numeric domain (Integer over ZZ, Rational over QQ, Float over RR)."""
    # Over ZZ the GCD is the usual integer GCD.
    assert isinstance(gcd(3, 9), Integer)
    assert isinstance(gcd(3*x, 9), Integer)

    assert gcd(3, 9) == 3
    assert gcd(3*x, 9) == 3

    # Over QQ: gcd of the contents for numbers, 1 once a symbol appears.
    assert isinstance(gcd(S(3)/2, S(9)/4), Rational)
    assert isinstance(gcd(S(3)/2*x, S(9)/4), Rational)

    assert gcd(S(3)/2, S(9)/4) == S(3)/4
    assert gcd(S(3)/2*x, S(9)/4) == 1

    # Over RR (a field) every nonzero element is a unit, so the GCD is 1.0.
    assert isinstance(gcd(3.0, 9.0), Float)
    assert isinstance(gcd(3.0*x, 9.0), Float)

    assert gcd(3.0, 9.0) == 1.0
    assert gcd(3.0*x, 9.0) == 1.0
def test_terms_gcd():
    """terms_gcd(): factoring out the GCD of all terms, including
    rational/float coefficients, expand=False, deep=True, and Eq inputs."""
    assert terms_gcd(1) == 1
    assert terms_gcd(1, x) == 1

    assert terms_gcd(x - 1) == x - 1
    assert terms_gcd(-x - 1) == -x - 1

    assert terms_gcd(2*x + 3) == 2*x + 3
    # Numeric content is factored out unevaluated.
    assert terms_gcd(6*x + 4) == Mul(2, 3*x + 2, evaluate=False)

    assert terms_gcd(x**3*y + x*y**3) == x*y*(x**2 + y**2)
    assert terms_gcd(2*x**3*y + 2*x*y**3) == 2*x*y*(x**2 + y**2)
    assert terms_gcd(x**3*y/2 + x*y**3/2) == x*y/2*(x**2 + y**2)

    assert terms_gcd(x**3*y + 2*x*y**3) == x*y*(x**2 + 2*y**2)
    assert terms_gcd(2*x**3*y + 4*x*y**3) == 2*x*y*(x**2 + 2*y**2)
    assert terms_gcd(2*x**3*y/3 + 4*x*y**3/5) == 2*x*y/15*(5*x**2 + 6*y**2)

    # Float coefficients: only the symbolic part is factored.
    assert terms_gcd(2.0*x**3*y + 4.1*x*y**3) == x*y*(2.0*x**2 + 4.1*y**2)
    assert _aresame(terms_gcd(2.0*x + 3), 2.0*x + 3)

    # expand=False keeps the product structure of the input.
    assert terms_gcd((3 + 3*x)*(x + x*y), expand=False) == \
        (3*x + 3)*(x*y + x)
    # deep=True recurses into function arguments.
    assert terms_gcd((3 + 3*x)*(x + x*sin(3 + 3*y)), expand=False, deep=True) == \
        3*x*(x + 1)*(sin(Mul(3, y + 1, evaluate=False)) + 1)
    assert terms_gcd(sin(x + x*y), deep=True) == \
        sin(x*(y + 1))

    # Relational inputs: only deep=True rewrites the sides.
    eq = Eq(2*x, 2*y + 2*z*y)
    assert terms_gcd(eq) == eq
    assert terms_gcd(eq, deep=True) == Eq(2*x, 2*y*(z + 1))
def test_trunc():
    """trunc(f, p): reduce coefficients modulo p using the symmetric
    (balanced) representation; also for polys already over GF(p)."""
    f, g = x**5 + 2*x**4 + 3*x**3 + 4*x**2 + 5*x + 6, x**5 - x**4 + x**2 - x
    F, G = Poly(f), Poly(g)

    assert F.trunc(3) == G
    assert trunc(f, 3) == g
    assert trunc(f, 3, x) == g
    assert trunc(f, 3, (x,)) == g
    assert trunc(F, 3) == G
    assert trunc(f, 3, polys=True) == G
    assert trunc(F, 3, polys=False) == g

    f, g = 6*x**5 + 5*x**4 + 4*x**3 + 3*x**2 + 2*x + 1, -x**4 + x**3 - x + 1
    F, G = Poly(f), Poly(g)

    assert F.trunc(3) == G
    assert trunc(f, 3) == g
    assert trunc(f, 3, x) == g
    assert trunc(f, 3, (x,)) == g
    assert trunc(F, 3) == G
    assert trunc(f, 3, polys=True) == G
    assert trunc(F, 3, polys=False) == g

    # Truncation of a poly that already lives over GF(5).
    f = Poly(x**2 + 2*x + 3, modulus=5)

    assert f.trunc(2) == Poly(x**2 + 1, modulus=5)
def test_monic():
    """monic(): divide by the leading coefficient; auto=False requires
    the division to be exact in the original (ring) domain."""
    f, g = 2*x - 1, x - S(1)/2
    F, G = Poly(f, domain='QQ'), Poly(g)

    assert F.monic() == G
    assert monic(f) == g
    assert monic(f, x) == g
    assert monic(f, (x,)) == g
    assert monic(F) == G
    assert monic(f, polys=True) == G
    assert monic(F, polys=False) == g

    raises(ComputationFailed, lambda: monic(4))

    # Over ZZ with auto=False, only exact divisions succeed.
    assert monic(2*x**2 + 6*x + 4, auto=False) == x**2 + 3*x + 2
    raises(ExactQuotientFailed, lambda: monic(2*x + 6*x + 1, auto=False))

    assert monic(2.0*x**2 + 6.0*x + 4.0) == 1.0*x**2 + 3.0*x + 2.0
    # GF(5): division by the leading coefficient 2 (i.e. times 3 mod 5).
    assert monic(2*x**2 + 3*x + 4, modulus=5) == x**2 - x + 2
def test_content():
    """content() is the GCD of a polynomial's coefficients."""
    assert Poly(4*x + 2).content() == 2
    assert content(4*x + 2) == 2

    # A bare number carries no generators.
    raises(ComputationFailed, lambda: content(4))

    # Over GF(3) the content is always normalized to 1.
    assert Poly(2*x, modulus=3).content() == 1
def test_primitive():
    """primitive(): split into (content, primitive part), including
    finite-field and floating-point domains and Rational content."""
    f, g = 4*x + 2, 2*x + 1
    F, G = Poly(f), Poly(g)

    assert F.primitive() == (2, G)
    assert primitive(f) == (2, g)
    assert primitive(f, x) == (2, g)
    assert primitive(f, (x,)) == (2, g)
    assert primitive(F) == (2, G)
    assert primitive(f, polys=True) == (2, G)
    assert primitive(F, polys=False) == (2, g)

    raises(ComputationFailed, lambda: primitive(4))

    # Over GF(3) and RR the content is the domain's unit.
    f = Poly(2*x, modulus=3)
    g = Poly(2.0*x, domain=RR)

    assert f.primitive() == (1, f)
    assert g.primitive() == (1.0, g)

    # Rational coefficients: the content is a Rational.
    assert primitive(S('-3*x/4 + y + 11/8')) == \
        S('(1/8, -6*x + 8*y + 11)')
def test_compose():
    """compose()/decompose(): functional composition g(h) == f and its
    inverse decomposition, in all calling conventions."""
    # f == g(h) for the polynomials below.
    f = x**12 + 20*x**10 + 150*x**8 + 500*x**6 + 625*x**4 - 2*x**3 - 10*x + 9

    g = x**4 - 2*x + 9
    h = x**3 + 5*x

    F, G, H = map(Poly, (f, g, h))

    assert G.compose(H) == F
    assert compose(g, h) == f
    assert compose(g, h, x) == f
    assert compose(g, h, (x,)) == f
    assert compose(G, H) == F
    assert compose(g, h, polys=True) == F
    assert compose(G, H, polys=False) == f

    assert F.decompose() == [G, H]
    assert decompose(f) == [g, h]
    assert decompose(f, x) == [g, h]
    assert decompose(f, (x,)) == [g, h]
    assert decompose(F) == [G, H]
    assert decompose(f, polys=True) == [G, H]
    assert decompose(F, polys=False) == [g, h]

    raises(ComputationFailed, lambda: compose(4, 2))
    raises(ComputationFailed, lambda: decompose(4))

    # Multivariate composition substitutes for the first generator.
    assert compose(x**2 - y**2, x - y, x, y) == x**2 - 2*x*y
    assert compose(x**2 - y**2, x - y, y, x) == -y**2 + 2*x*y
def test_shift():
    """shift(a) composes the polynomial with x -> x + a."""
    shifted = Poly(x**2 - 2*x + 1, x).shift(2)
    assert shifted == Poly(x**2 + 2*x + 1, x)
def test_sturm():
    """sturm(): Sturm sequences; requires a field (auto upgrades ZZ),
    including a case over the rational function field ZZ(pi)."""
    f, F = x, Poly(x, domain='QQ')
    g, G = 1, Poly(1, x, domain='QQ')

    assert F.sturm() == [F, G]
    assert sturm(f) == [f, g]
    assert sturm(f, x) == [f, g]
    assert sturm(f, (x,)) == [f, g]
    assert sturm(F) == [F, G]
    assert sturm(f, polys=True) == [F, G]
    assert sturm(F, polys=False) == [f, g]

    raises(ComputationFailed, lambda: sturm(4))
    # Without auto-upgrade ZZ is not a field, so the sequence fails.
    raises(DomainError, lambda: sturm(f, auto=False))

    f = Poly(S(1024)/(15625*pi**8)*x**5
        - S(4096)/(625*pi**8)*x**4
        + S(32)/(15625*pi**4)*x**3
        - S(128)/(625*pi**4)*x**2
        + S(1)/62500*x
        - S(1)/625, x, domain='ZZ(pi)')

    assert sturm(f) == \
        [Poly(x**3 - 100*x**2 + pi**4/64*x - 25*pi**4/16, x, domain='ZZ(pi)'),
         Poly(3*x**2 - 200*x + pi**4/64, x, domain='ZZ(pi)'),
         Poly((S(20000)/9 - pi**4/96)*x + 25*pi**4/18, x, domain='ZZ(pi)'),
         Poly((-3686400000000*pi**4 - 11520000*pi**8 - 9*pi**12)/(26214400000000 - 245760000*pi**4 + 576*pi**8), x, domain='ZZ(pi)')]
def test_gff():
    """gff_list(): greatest factorial factorization; gff() itself is
    not implemented and must raise."""
    f = x**5 + 2*x**4 - x**3 - 2*x**2

    assert Poly(f).gff_list() == [(Poly(x), 1), (Poly(x + 2), 4)]
    assert gff_list(f) == [(x, 1), (x + 2, 4)]

    raises(NotImplementedError, lambda: gff(f))

    f = x*(x - 1)**3*(x - 2)**2*(x - 4)**2*(x - 5)

    assert Poly(f).gff_list() == [(
        Poly(x**2 - 5*x + 4), 1), (Poly(x**2 - 5*x + 4), 2), (Poly(x), 3)]
    assert gff_list(f) == [(x**2 - 5*x + 4, 1), (x**2 - 5*x + 4, 2), (x, 3)]

    raises(NotImplementedError, lambda: gff(f))
def test_sqf_norm():
    """sqf_norm(): (shift count s, shifted poly, square-free norm) over
    an algebraic extension; both functional and Poly-method forms."""
    assert sqf_norm(x**2 - 2, extension=sqrt(3)) == \
        (1, x**2 - 2*sqrt(3)*x + 1, x**4 - 10*x**2 + 1)
    assert sqf_norm(x**2 - 3, extension=sqrt(2)) == \
        (1, x**2 - 2*sqrt(2)*x - 1, x**4 - 10*x**2 + 1)

    assert Poly(x**2 - 2, extension=sqrt(3)).sqf_norm() == \
        (1, Poly(x**2 - 2*sqrt(3)*x + 1, x, extension=sqrt(3)),
            Poly(x**4 - 10*x**2 + 1, x, domain='QQ'))

    assert Poly(x**2 - 3, extension=sqrt(2)).sqf_norm() == \
        (1, Poly(x**2 - 2*sqrt(2)*x - 1, x, extension=sqrt(2)),
            Poly(x**4 - 10*x**2 + 1, x, domain='QQ'))
def test_sqf():
    """Square-free decomposition: sqf_part, sqf_list, sqf_list_include
    and sqf, including rational inputs and huge exponents.

    Fix: the assertion ``sqf(x - 1) == x - 1`` appeared twice in a row;
    the duplicate has been removed.
    """
    f = x**5 - x**3 - x**2 + 1
    g = x**3 + 2*x**2 + 2*x + 1
    h = x - 1

    p = x**4 + x**3 - x - 1

    F, G, H, P = map(Poly, (f, g, h, p))

    # sqf_part strips multiplicities: f = g * h**2, sqf part is g*h == p.
    assert F.sqf_part() == P
    assert sqf_part(f) == p
    assert sqf_part(f, x) == p
    assert sqf_part(f, (x,)) == p
    assert sqf_part(F) == P
    assert sqf_part(f, polys=True) == P
    assert sqf_part(F, polys=False) == p

    assert F.sqf_list() == (1, [(G, 1), (H, 2)])
    assert sqf_list(f) == (1, [(g, 1), (h, 2)])
    assert sqf_list(f, x) == (1, [(g, 1), (h, 2)])
    assert sqf_list(f, (x,)) == (1, [(g, 1), (h, 2)])
    assert sqf_list(F) == (1, [(G, 1), (H, 2)])
    assert sqf_list(f, polys=True) == (1, [(G, 1), (H, 2)])
    assert sqf_list(F, polys=False) == (1, [(g, 1), (h, 2)])

    assert F.sqf_list_include() == [(G, 1), (H, 2)]

    raises(ComputationFailed, lambda: sqf_part(4))

    assert sqf(1) == 1
    assert sqf_list(1) == (1, [])

    assert sqf((2*x**2 + 2)**7) == 128*(x**2 + 1)**7

    assert sqf(f) == g*h**2
    assert sqf(f, x) == g*h**2
    assert sqf(f, (x,)) == g*h**2

    # Rational functions: numerator and denominator handled separately.
    d = x**2 + y**2

    assert sqf(f/d) == (g*h**2)/d
    assert sqf(f/d, x) == (g*h**2)/d
    assert sqf(f/d, (x,)) == (g*h**2)/d

    assert sqf(x - 1) == x - 1
    assert sqf(-x - 1) == -x - 1

    # Numeric content stays as an unevaluated factor.
    assert sqf(6*x - 10) == Mul(2, 3*x - 5, evaluate=False)
    assert sqf((6*x - 10)/(3*x - 6)) == S(2)/3*((3*x - 5)/(x - 2))
    assert sqf(Poly(x**2 - 2*x + 1)) == (x - 1)**2

    # The input simplifies to a constant before decomposition.
    f = 3 + x - x*(1 + x) + x**2

    assert sqf(f) == 3

    # Huge exponents must not be expanded.
    f = (x**2 + 2*x + 1)**20000000000

    assert sqf(f) == (x + 1)**40000000000
    assert sqf_list(f) == (1, [(x + 1, 40000000000)])
def test_factor():
    """factor()/factor_list(): univariate and multivariate factorization,
    symbolic exponents, algebraic extensions, finite fields, rational
    functions, relational and list inputs, and assorted regressions.

    Fixes: the assertion ``factor(x - 1) == x - 1`` appeared twice in a
    row (duplicate removed) and two stray ``x** 2`` spacings were
    normalized to ``x**2`` (no semantic change).
    """
    f = x**5 - x**3 - x**2 + 1

    u = x + 1
    v = x - 1
    w = x**2 + x + 1

    F, U, V, W = map(Poly, (f, u, v, w))

    assert F.factor_list() == (1, [(U, 1), (V, 2), (W, 1)])
    assert factor_list(f) == (1, [(u, 1), (v, 2), (w, 1)])
    assert factor_list(f, x) == (1, [(u, 1), (v, 2), (w, 1)])
    assert factor_list(f, (x,)) == (1, [(u, 1), (v, 2), (w, 1)])
    assert factor_list(F) == (1, [(U, 1), (V, 2), (W, 1)])
    assert factor_list(f, polys=True) == (1, [(U, 1), (V, 2), (W, 1)])
    assert factor_list(F, polys=False) == (1, [(u, 1), (v, 2), (w, 1)])

    assert F.factor_list_include() == [(U, 1), (V, 2), (W, 1)]

    assert factor_list(1) == (1, [])
    assert factor_list(6) == (6, [])
    # Non-polynomial powers are returned as (base, exponent) factors.
    assert factor_list(sqrt(3), x) == (1, [(3, S.Half)])
    assert factor_list((-1)**x, x) == (1, [(-1, x)])
    assert factor_list((2*x)**y, x) == (1, [(2, y), (x, y)])
    assert factor_list(sqrt(x*y), x) == (1, [(x*y, S.Half)])

    assert factor(6) == 6 and factor(6).is_Integer

    assert factor_list(3*x) == (3, [(x, 1)])
    assert factor_list(3*x**2) == (3, [(x, 2)])

    assert factor(3*x) == 3*x
    assert factor(3*x**2) == 3*x**2

    assert factor((2*x**2 + 2)**7) == 128*(x**2 + 1)**7

    assert factor(f) == u*v**2*w
    assert factor(f, x) == u*v**2*w
    assert factor(f, (x,)) == u*v**2*w

    # Rational functions factor numerator and denominator separately.
    g, p, q, r = x**2 - y**2, x - y, x + y, x**2 + 1

    assert factor(f/g) == (u*v**2*w)/(p*q)
    assert factor(f/g, x) == (u*v**2*w)/(p*q)
    assert factor(f/g, (x,)) == (u*v**2*w)/(p*q)

    # Assumption-carrying symbols drive the radical/power cases below.
    p = Symbol('p', positive=True)
    i = Symbol('i', integer=True)
    r = Symbol('r', real=True)

    assert factor(sqrt(x*y)).is_Pow is True

    assert factor(sqrt(3*x**2 - 3)) == sqrt(3)*sqrt((x - 1)*(x + 1))
    assert factor(sqrt(3*x**2 + 3)) == sqrt(3)*sqrt(x**2 + 1)

    # Integer exponents distribute over the factors ...
    assert factor((y*x**2 - y)**i) == y**i*(x - 1)**i*(x + 1)**i
    assert factor((y*x**2 + y)**i) == y**i*(x**2 + 1)**i

    # ... arbitrary symbolic exponents do not.
    assert factor((y*x**2 - y)**t) == (y*(x - 1)*(x + 1))**t
    assert factor((y*x**2 + y)**t) == (y*(x**2 + 1))**t

    f = sqrt(expand((r**2 + 1)*(p + 1)*(p - 1)*(p - 2)**3))
    g = sqrt((p - 2)**3*(p - 1))*sqrt(p + 1)*sqrt(r**2 + 1)

    assert factor(f) == g
    assert factor(g) == g

    g = (x - 1)**5*(r**2 + 1)
    f = sqrt(expand(g))

    assert factor(f) == sqrt(g)

    # EX domain: the poly is its own irreducible factor.
    f = Poly(sin(1)*x + 1, x, domain=EX)

    assert f.factor_list() == (1, [(f, 1)])

    # x**4 + 1 is irreducible over QQ but splits over extensions.
    f = x**4 + 1

    assert factor(f) == f
    assert factor(f, extension=I) == (x**2 - I)*(x**2 + I)
    assert factor(f, gaussian=True) == (x**2 - I)*(x**2 + I)
    assert factor(
        f, extension=sqrt(2)) == (x**2 + sqrt(2)*x + 1)*(x**2 - sqrt(2)*x + 1)

    f = x**2 + 2*sqrt(2)*x + 2

    assert factor(f, extension=sqrt(2)) == (x + sqrt(2))**2
    assert factor(f**3, extension=sqrt(2)) == (x + sqrt(2))**6

    assert factor(x**2 - 2*y**2, extension=sqrt(2)) == \
        (x + sqrt(2)*y)*(x - sqrt(2)*y)
    assert factor(2*x**2 - 4*y**2, extension=sqrt(2)) == \
        2*((x + sqrt(2)*y)*(x - sqrt(2)*y))

    assert factor(x - 1) == x - 1
    assert factor(-x - 1) == -x - 1

    # Numeric content stays as an unevaluated factor.
    assert factor(6*x - 10) == Mul(2, 3*x - 5, evaluate=False)

    # GF(65537): symmetric vs non-negative coefficient representation.
    assert factor(x**11 + x + 1, modulus=65537, symmetric=True) == \
        (x**2 + x + 1)*(x**9 - x**8 + x**6 - x**5 + x**3 - x**2 + 1)
    assert factor(x**11 + x + 1, modulus=65537, symmetric=False) == \
        (x**2 + x + 1)*(x**9 + 65536*x**8 + x**6 + 65536*x**5 +
        x**3 + 65536*x**2 + 1)

    f = x/pi + x*sin(x)/pi
    g = y/(pi**2 + 2*pi + 1) + y*sin(x)/(pi**2 + 2*pi + 1)

    assert factor(f) == x*(sin(x) + 1)/pi
    assert factor(g) == y*(sin(x) + 1)/(pi + 1)**2

    # Equations are factored on both sides.
    assert factor(Eq(
        x**2 + 2*x + 1, x**3 + 1)) == Eq((x + 1)**2, (x + 1)*(x**2 - x + 1))

    f = (x**2 - 1)/(x**2 + 4*x + 4)

    assert factor(f) == (x + 1)*(x - 1)/(x + 2)**2
    assert factor(f, x) == (x + 1)*(x - 1)/(x + 2)**2

    # The input simplifies to a constant before factoring.
    f = 3 + x - x*(1 + x) + x**2

    assert factor(f) == 3
    assert factor(f, x) == 3

    assert factor(1/(x**2 + 2*x + 1/x) - 1) == -((1 - x + 2*x**2 +
                  x**3)/(1 + 2*x**2 + x**3))

    # expand=False requires an already-expanded polynomial form.
    assert factor(f, expand=False) == f
    raises(PolynomialError, lambda: factor(f, x, expand=False))

    raises(FlagError, lambda: factor(x**2 - 1, polys=True))

    # Containers are factored element-wise.
    assert factor([x, Eq(x**2 - y**2, Tuple(x**2 - z**2, 1/x + 1/y))]) == \
        [x, Eq((x - y)*(x + y), Tuple((x - z)*(x + z), (x + y)/x/y))]

    # Poly factors stay Poly; PurePoly factors stay PurePoly.
    assert not isinstance(
        Poly(x**3 + x + 1).factor_list()[1][0][0], PurePoly) is True
    assert isinstance(
        PurePoly(x**3 + x + 1).factor_list()[1][0][0], PurePoly) is True

    assert factor(sqrt(-x)) == sqrt(-x)

    # issue 5917: a large expression that collapses to zero.
    e = (-2*x*(-x + 1)*(x - 1)*(-x*(-x + 1)*(x - 1) - x*(x - 1)**2)*(x**2*(x -
        1) - x*(x - 1) - x) - (-2*x**2*(x - 1)**2 - x*(-x + 1)*(-x*(-x + 1) +
        x*(x - 1)))*(x**2*(x - 1)**4 - x*(-x*(-x + 1)*(x - 1) - x*(x - 1)**2)))
    assert factor(e) == 0

    # deep option recurses into function arguments.
    assert factor(sin(x**2 + x) + x, deep=True) == sin(x*(x + 1)) + x

    assert factor(sqrt(x**2)) == sqrt(x**2)
def test_factor_large():
    """Stress-test factor()/factor_list() on polynomials with huge exponents.

    These would be infeasible if the implementation expanded the products,
    so passing also guards against accidental expansion in the factor path.
    """
    f = (x**2 + 4*x + 4)**10000000*(x**2 + 1)*(x**2 + 2*x + 1)**1234567
    g = ((x**2 + 2*x + 1)**3000*y**2 + (x**2 + 2*x + 1)**3000*2*y +
         (x**2 + 2*x + 1)**3000)
    assert factor(f) == (x + 2)**20000000*(x**2 + 1)*(x + 1)**2469134
    assert factor(g) == (x + 1)**6000*(y + 1)**2
    assert factor_list(f) == \
        (1, [(x + 1, 2469134), (x + 2, 20000000), (x**2 + 1, 1)])
    assert factor_list(g) == (1, [(y + 1, 2), (x + 1, 6000)])
    f = (x**2 - y**2)**200000*(x**7 + 1)
    g = (x**2 + y**2)**200000*(x**7 + 1)
    assert factor(f) == \
        (x + 1)*(x - y)**200000*(x + y)**200000*(x**6 - x**5 +
            x**4 - x**3 + x**2 - x + 1)
    # gaussian=True factors over ZZ[I], splitting x**2 + y**2
    assert factor(g, gaussian=True) == \
        (x + 1)*(x - I*y)**200000*(x + I*y)**200000*(x**6 - x**5 +
            x**4 - x**3 + x**2 - x + 1)
    assert factor_list(f) == \
        (1, [(x + 1, 1), (x - y, 200000), (x + y, 200000),
             (x**6 - x**5 + x**4 - x**3 + x**2 - x + 1, 1)])
    assert factor_list(g, gaussian=True) == \
        (1, [(x + 1, 1), (x - I*y, 200000), (x + I*y, 200000),
             (x**6 - x**5 + x**4 - x**3 + x**2 - x + 1, 1)])
@XFAIL
def test_factor_noeval():
    """Known failure: factor() should keep these content factors unevaluated."""
    assert factor(6*x - 10) == 2*(3*x - 5)
    assert factor((6*x - 10)/(3*x - 6)) == S(2)/3*((3*x - 5)/(x - 2))
def test_intervals():
    """Isolating intervals for real (and complex) roots.

    Covers the ``intervals()`` function and ``Poly.intervals`` method for
    single polynomials and lists, the ``sqf``/``fast``/``eps``/``inf``/
    ``sup``/``strict``/``all`` flags, and error handling for absurd ``eps``.
    """
    assert intervals(0) == []
    assert intervals(1) == []
    assert intervals(x, sqf=True) == [(0, 0)]
    assert intervals(x) == [((0, 0), 1)]
    assert intervals(x**128) == [((0, 0), 128)]
    assert intervals([x**2, x**4]) == [((0, 0), {0: 2, 1: 4})]
    f = Poly((2*x/5 - S(17)/3)*(4*x + S(1)/257))
    assert f.intervals(sqf=True) == [(-1, 0), (14, 15)]
    assert f.intervals() == [((-1, 0), 1), ((14, 15), 1)]
    assert f.intervals(fast=True, sqf=True) == [(-1, 0), (14, 15)]
    assert f.intervals(fast=True) == [((-1, 0), 1), ((14, 15), 1)]
    # shrinking eps tightens the intervals; Rational and float eps agree
    assert f.intervals(eps=S(1)/10) == f.intervals(eps=0.1) == \
        [((-S(1)/258, 0), 1), ((S(85)/6, S(85)/6), 1)]
    assert f.intervals(eps=S(1)/100) == f.intervals(eps=0.01) == \
        [((-S(1)/258, 0), 1), ((S(85)/6, S(85)/6), 1)]
    assert f.intervals(eps=S(1)/1000) == f.intervals(eps=0.001) == \
        [((-S(1)/1002, 0), 1), ((S(85)/6, S(85)/6), 1)]
    assert f.intervals(eps=S(1)/10000) == f.intervals(eps=0.0001) == \
        [((-S(1)/1028, -S(1)/1028), 1), ((S(85)/6, S(85)/6), 1)]
    # same checks through the expression (non-Poly) interface
    f = (2*x/5 - S(17)/3)*(4*x + S(1)/257)
    assert intervals(f, sqf=True) == [(-1, 0), (14, 15)]
    assert intervals(f) == [((-1, 0), 1), ((14, 15), 1)]
    assert intervals(f, eps=S(1)/10) == intervals(f, eps=0.1) == \
        [((-S(1)/258, 0), 1), ((S(85)/6, S(85)/6), 1)]
    assert intervals(f, eps=S(1)/100) == intervals(f, eps=0.01) == \
        [((-S(1)/258, 0), 1), ((S(85)/6, S(85)/6), 1)]
    assert intervals(f, eps=S(1)/1000) == intervals(f, eps=0.001) == \
        [((-S(1)/1002, 0), 1), ((S(85)/6, S(85)/6), 1)]
    assert intervals(f, eps=S(1)/10000) == intervals(f, eps=0.0001) == \
        [((-S(1)/1028, -S(1)/1028), 1), ((S(85)/6, S(85)/6), 1)]
    f = Poly((x**2 - 2)*(x**2 - 3)**7*(x + 1)*(7*x + 3)**3)
    assert f.intervals() == \
        [((-2, -S(3)/2), 7), ((-S(3)/2, -1), 1),
         ((-1, -1), 1), ((-1, 0), 3),
         ((1, S(3)/2), 1), ((S(3)/2, 2), 7)]
    # for lists, the dict maps polynomial index -> multiplicity
    assert intervals([x**5 - 200, x**5 - 201]) == \
        [((S(75)/26, S(101)/35), {0: 1}), ((S(309)/107, S(26)/9), {1: 1})]
    assert intervals([x**5 - 200, x**5 - 201], fast=True) == \
        [((S(75)/26, S(101)/35), {0: 1}), ((S(309)/107, S(26)/9), {1: 1})]
    assert intervals([x**2 - 200, x**2 - 201]) == \
        [((-S(71)/5, -S(85)/6), {1: 1}), ((-S(85)/6, -14), {0: 1}),
         ((14, S(85)/6), {0: 1}), ((S(85)/6, S(71)/5), {1: 1})]
    assert intervals([x + 1, x + 2, x - 1, x + 1, 1, x - 1, x - 1, (x - 2)**2]) == \
        [((-2, -2), {1: 1}), ((-1, -1), {0: 1, 3: 1}),
         ((1, 1), {2: 1, 5: 1, 6: 1}), ((2, 2), {7: 2})]
    f, g, h = x**2 - 2, x**4 - 4*x**2 + 4, x - 1
    # inf/sup restrict the search range
    assert intervals(f, inf=S(7)/4, sqf=True) == []
    assert intervals(f, inf=S(7)/5, sqf=True) == [(S(7)/5, S(3)/2)]
    assert intervals(f, sup=S(7)/4, sqf=True) == [(-2, -1), (1, S(3)/2)]
    assert intervals(f, sup=S(7)/5, sqf=True) == [(-2, -1)]
    assert intervals(g, inf=S(7)/4) == []
    assert intervals(g, inf=S(7)/5) == [((S(7)/5, S(3)/2), 2)]
    assert intervals(g, sup=S(7)/4) == [((-2, -1), 2), ((1, S(3)/2), 2)]
    assert intervals(g, sup=S(7)/5) == [((-2, -1), 2)]
    assert intervals([g, h], inf=S(7)/4) == []
    assert intervals([g, h], inf=S(7)/5) == [((S(7)/5, S(3)/2), {0: 2})]
    assert intervals([g, h], sup=S(7)/4) == \
        [((-2, -1), {0: 2}), ((1, 1), {1: 1}), ((1, S(3)/2), {0: 2})]
    assert intervals([g, h], sup=S(7)/5) == \
        [((-2, -1), {0: 2}), ((1, 1), {1: 1})]
    assert intervals([x + 2, x**2 - 2]) == \
        [((-2, -2), {0: 1}), ((-2, -1), {1: 1}), ((1, 2), {1: 1})]
    # strict=True disallows a rational endpoint coinciding with a root
    assert intervals([x + 2, x**2 - 2], strict=True) == \
        [((-2, -2), {0: 1}), ((-S(3)/2, -1), {1: 1}), ((1, 2), {1: 1})]
    f = 7*z**4 - 19*z**3 + 20*z**2 + 17*z + 20
    assert intervals(f) == []
    # all=True also returns rectangles isolating the complex roots
    real_part, complex_part = intervals(f, all=True, sqf=True)
    assert real_part == []
    assert all(re(a) < re(r) < re(b) and im(a) < im(r) < im(b)
               for (a, b), r in zip(complex_part, nroots(f)))
    assert complex_part == [(-S(40)/7 - 40*I/7, 0), (-S(40)/7, 40*I/7),
                            (-40*I/7, S(40)/7), (0, S(40)/7 + 40*I/7)]
    real_part, complex_part = intervals(f, all=True, sqf=True, eps=S(1)/10)
    assert real_part == []
    assert all(re(a) < re(r) < re(b) and im(a) < im(r) < im(b)
               for (a, b), r in zip(complex_part, nroots(f)))
    # an eps too small to represent must be rejected
    raises(ValueError, lambda: intervals(x**2 - 2, eps=10**-100000))
    raises(ValueError, lambda: Poly(x**2 - 2).intervals(eps=10**-100000))
    raises(ValueError,
           lambda: intervals([x**2 - 2, x**2 - 3], eps=10**-100000))
def test_refine_root():
    """Refinement of root-isolating intervals via steps= or eps= targets.

    Exercises both ``Poly.refine_root`` and the ``refine_root`` function,
    plus failure modes: non-square-free input, intervals without a root,
    non-polynomial input and unrepresentably small ``eps``.
    """
    f = Poly(x**2 - 2)
    # steps=0 leaves the interval untouched; steps=None refines once
    assert f.refine_root(1, 2, steps=0) == (1, 2)
    assert f.refine_root(-2, -1, steps=0) == (-2, -1)
    assert f.refine_root(1, 2, steps=None) == (1, S(3)/2)
    assert f.refine_root(-2, -1, steps=None) == (-S(3)/2, -1)
    assert f.refine_root(1, 2, steps=1) == (1, S(3)/2)
    assert f.refine_root(-2, -1, steps=1) == (-S(3)/2, -1)
    assert f.refine_root(1, 2, steps=1, fast=True) == (1, S(3)/2)
    assert f.refine_root(-2, -1, steps=1, fast=True) == (-S(3)/2, -1)
    assert f.refine_root(1, 2, eps=S(1)/100) == (S(24)/17, S(17)/12)
    assert f.refine_root(1, 2, eps=1e-2) == (S(24)/17, S(17)/12)
    raises(PolynomialError, lambda: (f**2).refine_root(1, 2, check_sqf=True))
    raises(RefinementFailed, lambda: (f**2).refine_root(1, 2))
    raises(RefinementFailed, lambda: (f**2).refine_root(2, 3))
    f = x**2 - 2
    assert refine_root(f, 1, 2, steps=1) == (1, S(3)/2)
    assert refine_root(f, -2, -1, steps=1) == (-S(3)/2, -1)
    assert refine_root(f, 1, 2, steps=1, fast=True) == (1, S(3)/2)
    assert refine_root(f, -2, -1, steps=1, fast=True) == (-S(3)/2, -1)
    assert refine_root(f, 1, 2, eps=S(1)/100) == (S(24)/17, S(17)/12)
    assert refine_root(f, 1, 2, eps=1e-2) == (S(24)/17, S(17)/12)
    raises(PolynomialError, lambda: refine_root(1, 7, 8, eps=S(1)/100))
    raises(ValueError, lambda: Poly(f).refine_root(1, 2, eps=10**-100000))
    raises(ValueError, lambda: refine_root(f, 1, 2, eps=10**-100000))
def test_count_roots():
    """Count roots of a univariate polynomial inside (complex) bounds.

    Real bounds count real roots in [inf, sup]; imaginary/complex bounds
    select roots inside the corresponding box of the complex plane.
    (Two assertions that were exact duplicates of the inf=-1/sup=1 and
    inf=-2/sup=2 checks have been removed.)
    """
    assert count_roots(x**2 - 2) == 2
    assert count_roots(x**2 - 2, inf=-oo) == 2
    assert count_roots(x**2 - 2, sup=+oo) == 2
    assert count_roots(x**2 - 2, inf=-oo, sup=+oo) == 2
    assert count_roots(x**2 - 2, inf=-2) == 2
    assert count_roots(x**2 - 2, inf=-1) == 1
    assert count_roots(x**2 - 2, sup=1) == 1
    assert count_roots(x**2 - 2, sup=2) == 2
    assert count_roots(x**2 - 2, inf=-1, sup=1) == 0
    assert count_roots(x**2 - 2, inf=-2, sup=2) == 2
    # x**2 + 2 has no real roots, only +/- sqrt(2)*I
    assert count_roots(x**2 + 2) == 0
    assert count_roots(x**2 + 2, inf=-2*I) == 2
    assert count_roots(x**2 + 2, sup=+2*I) == 2
    assert count_roots(x**2 + 2, inf=-2*I, sup=+2*I) == 2
    assert count_roots(x**2 + 2, inf=0) == 0
    assert count_roots(x**2 + 2, sup=0) == 0
    assert count_roots(x**2 + 2, inf=-I) == 1
    assert count_roots(x**2 + 2, sup=+I) == 1
    assert count_roots(x**2 + 2, inf=+I/2, sup=+I) == 0
    assert count_roots(x**2 + 2, inf=-I, sup=-I/2) == 0
    # non-polynomial input is rejected
    raises(PolynomialError, lambda: count_roots(1))
def test_Poly_root():
    """Poly.root(i): i-th real root in sorted order, with multiplicity."""
    f = Poly(2*x**3 - 7*x**2 + 4*x + 4)
    assert f.root(0) == -S(1)/2
    # the double root 2 occupies indices 1 and 2
    assert f.root(1) == 2
    assert f.root(2) == 2
    raises(IndexError, lambda: f.root(3))
    # irrational roots are returned as CRootOf instances
    assert Poly(x**5 + x + 1).root(0) == rootof(x**3 - x**2 + 1, 0)
def test_real_roots():
    """real_roots(): multiplicities flattened or as (root, mult) pairs."""
    assert real_roots(x) == [0]
    assert real_roots(x, multiple=False) == [(0, 1)]
    assert real_roots(x**3) == [0, 0, 0]
    assert real_roots(x**3, multiple=False) == [(0, 3)]
    assert real_roots(x*(x**3 + x + 3)) == [rootof(x**3 + x + 3, 0), 0]
    assert real_roots(x*(x**3 + x + 3), multiple=False) == \
        [(rootof(x**3 + x + 3, 0), 1), (0, 1)]
    assert real_roots(x**3*(x**3 + x + 3)) == \
        [rootof(x**3 + x + 3, 0), 0, 0, 0]
    assert real_roots(x**3*(x**3 + x + 3), multiple=False) == \
        [(rootof(x**3 + x + 3, 0), 1), (0, 3)]
    f = 2*x**3 - 7*x**2 + 4*x + 4
    g = x**3 + x + 1
    assert Poly(f).real_roots() == [-S(1)/2, 2, 2]
    assert Poly(g).real_roots() == [rootof(g, 0)]
def test_all_roots():
    """Poly.all_roots(): every root (real and complex), with multiplicity."""
    rational_cubic = 2*x**3 - 7*x**2 + 4*x + 4
    irreducible_cubic = x**3 + x + 1
    assert Poly(rational_cubic).all_roots() == [-S(1)/2, 2, 2]
    expected = [rootof(irreducible_cubic, i) for i in range(3)]
    assert Poly(irreducible_cubic).all_roots() == expected
def test_nroots():
    """Numerical root finding (nroots): ordering, precision and errors."""
    assert Poly(0, x).nroots() == []
    assert Poly(1, x).nroots() == []
    assert Poly(x**2 - 1, x).nroots() == [-1.0, 1.0]
    assert Poly(x**2 + 1, x).nroots() == [-1.0*I, 1.0*I]
    roots = Poly(x**2 - 1, x).nroots()
    assert roots == [-1.0, 1.0]
    roots = Poly(x**2 + 1, x).nroots()
    assert roots == [-1.0*I, 1.0*I]
    # rational coefficients behave like the integer cases above
    roots = Poly(x**2/3 - S(1)/3, x).nroots()
    assert roots == [-1.0, 1.0]
    roots = Poly(x**2/3 + S(1)/3, x).nroots()
    assert roots == [-1.0*I, 1.0*I]
    assert Poly(x**2 + 2*I, x).nroots() == [-1.0 + 1.0*I, 1.0 - 1.0*I]
    assert Poly(x**2 + 2*I, x, extension=I).nroots() == \
        [-1.0 + 1.0*I, 1.0 - 1.0*I]
    assert Poly(0.2*x + 0.1).nroots() == [-0.5]
    roots = nroots(x**5 + x + 1, n=5)
    eps = Float("1e-5")
    # at eps=1e-5 the 5-digit reference values match the computed roots...
    assert re(roots[0]).epsilon_eq(-0.75487, eps) is S.true
    assert im(roots[0]) == 0.0
    assert re(roots[1]) == -0.5
    assert im(roots[1]).epsilon_eq(-0.86602, eps) is S.true
    assert re(roots[2]) == -0.5
    assert im(roots[2]).epsilon_eq(+0.86602, eps) is S.true
    assert re(roots[3]).epsilon_eq(+0.87743, eps) is S.true
    assert im(roots[3]).epsilon_eq(-0.74486, eps) is S.true
    assert re(roots[4]).epsilon_eq(+0.87743, eps) is S.true
    assert im(roots[4]).epsilon_eq(+0.74486, eps) is S.true
    eps = Float("1e-6")
    # ...but not at the tighter tolerance eps=1e-6
    assert re(roots[0]).epsilon_eq(-0.75487, eps) is S.false
    assert im(roots[0]) == 0.0
    assert re(roots[1]) == -0.5
    assert im(roots[1]).epsilon_eq(-0.86602, eps) is S.false
    assert re(roots[2]) == -0.5
    assert im(roots[2]).epsilon_eq(+0.86602, eps) is S.false
    assert re(roots[3]).epsilon_eq(+0.87743, eps) is S.false
    assert im(roots[3]).epsilon_eq(-0.74486, eps) is S.false
    assert re(roots[4]).epsilon_eq(+0.87743, eps) is S.false
    assert im(roots[4]).epsilon_eq(+0.74486, eps) is S.false
    raises(DomainError, lambda: Poly(x + y, x).nroots())
    raises(MultivariatePolynomialError, lambda: Poly(x + y).nroots())
    assert nroots(x**2 - 1) == [-1.0, 1.0]
    roots = nroots(x**2 - 1)
    assert roots == [-1.0, 1.0]
    assert nroots(x + I) == [-1.0*I]
    assert nroots(x + 2*I) == [-2.0*I]
    raises(PolynomialError, lambda: nroots(0))
    # issue 8296
    f = Poly(x**4 - 1)
    assert f.nroots(2) == [w.n(2) for w in f.all_roots()]
def test_ground_roots():
    """Rational roots with multiplicities, via both the method and function."""
    poly_expr = x**6 - 4*x**4 + 4*x**3 - x**2
    expected = {S(1): 2, S(0): 2}
    assert Poly(poly_expr).ground_roots() == expected
    assert ground_roots(poly_expr) == expected
def test_nth_power_roots_poly():
    """nth_power_roots_poly(f, n): polynomial whose roots are n-th powers
    of the roots of f; n must be a positive integer."""
    f = x**4 - x**2 + 1
    # expected (factored) results for n = 2, 3, 4 and 12
    f_2 = (x**2 - x + 1)**2
    f_3 = (x**2 + 1)**2
    f_4 = (x**2 + x + 1)**2
    f_12 = (x - 1)**4
    assert nth_power_roots_poly(f, 1) == f
    raises(ValueError, lambda: nth_power_roots_poly(f, 0))
    raises(ValueError, lambda: nth_power_roots_poly(f, x))
    assert factor(nth_power_roots_poly(f, 2)) == f_2
    assert factor(nth_power_roots_poly(f, 3)) == f_3
    assert factor(nth_power_roots_poly(f, 4)) == f_4
    assert factor(nth_power_roots_poly(f, 12)) == f_12
    raises(MultivariatePolynomialError,
           lambda: nth_power_roots_poly(x + y, 2, x, y))
def test_torational_factor_list():
    """_torational_factor_list: factor after a rationalizing substitution."""
    p = expand(((x**2-1)*(x-2)).subs({x: x*(1 + sqrt(2))}))
    assert _torational_factor_list(p, x) == (-2, [
        (-x*(1 + sqrt(2))/2 + 1, 1),
        (-x*(1 + sqrt(2)) - 1, 1),
        (-x*(1 + sqrt(2)) + 1, 1)])
    # no rationalizing transformation exists for the quartic surd case
    p = expand(((x**2-1)*(x-2)).subs({x: x*(1 + 2**Rational(1, 4))}))
    assert _torational_factor_list(p, x) is None
def test_cancel():
    """cancel(): remove common factors from rational functions.

    Covers scalars, (num, den) tuples, Poly inputs, algebraic extensions,
    noncommutative Piecewise contents and MatrixSymbol elements.
    """
    assert cancel(0) == 0
    assert cancel(7) == 7
    assert cancel(x) == x
    assert cancel(oo) == oo
    # tuple input returns (common coeff, num, den)
    assert cancel((2, 3)) == (1, 2, 3)
    assert cancel((1, 0), x) == (1, 1, 0)
    assert cancel((0, 1), x) == (1, 0, 1)
    f, g, p, q = 4*x**2 - 4, 2*x - 2, 2*x + 2, 1
    F, G, P, Q = [Poly(u, x) for u in (f, g, p, q)]
    assert F.cancel(G) == (1, P, Q)
    assert cancel((f, g)) == (1, p, q)
    assert cancel((f, g), x) == (1, p, q)
    assert cancel((f, g), (x,)) == (1, p, q)
    assert cancel((F, G)) == (1, P, Q)
    assert cancel((f, g), polys=True) == (1, P, Q)
    assert cancel((F, G), polys=False) == (1, p, q)
    # by default cancel() is greedy and skips algebraic denominators
    f = (x**2 - 2)/(x + sqrt(2))
    assert cancel(f) == f
    assert cancel(f, greedy=False) == x - sqrt(2)
    f = (x**2 - 2)/(x - sqrt(2))
    assert cancel(f) == f
    assert cancel(f, greedy=False) == x + sqrt(2)
    assert cancel((x**2/4 - 1, x/2 - 1)) == (S(1)/2, x + 2, 1)
    assert cancel((x**2 - y)/(x - y)) == 1/(x - y)*(x**2 - y)
    assert cancel((x**2 - y**2)/(x - y), x) == x + y
    assert cancel((x**2 - y**2)/(x - y), y) == x + y
    assert cancel((x**2 - y**2)/(x - y)) == x + y
    assert cancel((x**3 - 1)/(x**2 - 1)) == (x**2 + x + 1)/(x + 1)
    assert cancel((x**3/2 - S(1)/2)/(x**2 - 1)) == (x**2 + x + 1)/(2*x + 2)
    assert cancel((exp(2*x) + 2*exp(x) + 1)/(exp(x) + 1)) == exp(x) + 1
    f = Poly(x**2 - a**2, x)
    g = Poly(x - a, x)
    F = Poly(x + a, x)
    G = Poly(1, x)
    assert cancel((f, g)) == (1, F, G)
    f = x**3 + (sqrt(2) - 2)*x**2 - (2*sqrt(2) + 3)*x - 3*sqrt(2)
    g = x**2 - 2
    assert cancel((f, g), extension=True) == (1, x**2 - 2*x - 3, x - sqrt(2))
    f = Poly(-2*x + 3, x)
    g = Poly(-x**9 + x**8 + x**6 - x**5 + 2*x**2 - 3*x + 1, x)
    assert cancel((f, g)) == (1, -f, -g)
    f = Poly(y, y, domain='ZZ(x)')
    g = Poly(1, y, domain='ZZ[x]')
    # mixed domains are unified (here to ZZ(x))
    assert f.cancel(g) == \
        (1, Poly(y, y, domain='ZZ(x)'), Poly(1, y, domain='ZZ(x)'))
    assert f.cancel(g, include=True) == \
        (Poly(y, y, domain='ZZ(x)'), Poly(1, y, domain='ZZ(x)'))
    f = Poly(5*x*y + x, y, domain='ZZ(x)')
    g = Poly(2*x**2*y, y, domain='ZZ(x)')
    assert f.cancel(g, include=True) == \
        (Poly(5*y + 1, y, domain='ZZ(x)'), Poly(2*x*y, y, domain='ZZ(x)'))
    f = -(-2*x - 4*y + 0.005*(z - y)**2)/((z - y)*(-z + y + 2))
    assert cancel(f).is_Mul == True
    P = tanh(x - 3.0)
    Q = tanh(x + 3.0)
    f = ((-2*P**2 + 2)*(-P**2 + 1)*Q**2/2 + (-2*P**2 + 2)*(-2*Q**2 + 2)*P*Q - (-2*P**2 + 2)*P**2*Q**2 + (-2*Q**2 + 2)*(-Q**2 + 1)*P**2/2 - (-2*Q**2 + 2)*P**2*Q**2)/(2*sqrt(P**2*Q**2 + 0.0001)) \
        + (-(-2*P**2 + 2)*P*Q**2/2 - (-2*Q**2 + 2)*P**2*Q/2)*((-2*P**2 + 2)*P*Q**2/2 + (-2*Q**2 + 2)*P**2*Q/2)/(2*(P**2*Q**2 + 0.0001)**(S(3)/2))
    assert cancel(f).is_Mul == True
    # issue 7022: cancel inside Piecewise with a noncommutative factor
    A = Symbol('A', commutative=False)
    p1 = Piecewise((A*(x**2 - 1)/(x + 1), x > 1), ((x + 2)/(x**2 + 2*x), True))
    p2 = Piecewise((A*(x - 1), x > 1), (1/x, True))
    assert cancel(p1) == p2
    assert cancel(2*p1) == 2*p2
    assert cancel(1 + p1) == 1 + p2
    assert cancel((x**2 - 1)/(x + 1)*p1) == (x - 1)*p2
    assert cancel((x**2 - 1)/(x + 1) + p1) == (x - 1) + p2
    p3 = Piecewise(((x**2 - 1)/(x + 1), x > 1), ((x + 2)/(x**2 + 2*x), True))
    p4 = Piecewise(((x - 1), x > 1), (1/x, True))
    assert cancel(p3) == p4
    assert cancel(2*p3) == 2*p4
    assert cancel(1 + p3) == 1 + p4
    assert cancel((x**2 - 1)/(x + 1)*p3) == (x - 1)*p4
    assert cancel((x**2 - 1)/(x + 1) + p3) == (x - 1) + p4
    # issue 9363: MatrixSymbol elements survive cancel()
    M = MatrixSymbol('M', 5, 5)
    assert cancel(M[0,0] + 7) == M[0,0] + 7
    expr = sin(M[1, 4] + M[2, 1] * 5 * M[4, 0]) - 5 * M[1, 2] / z
    assert cancel(expr) == (z*sin(M[1, 4] + M[2, 1] * 5 * M[4, 0]) - 5 * M[1, 2]) / z
def test_reduced():
    """reduced(): multivariate division, returning quotients and remainder."""
    f = 2*x**4 + y**2 - x**2 + y**3
    G = [x**3 - x, y**3 - y]
    Q = [2*x, 1]
    r = x**2 + y**2 + y
    assert reduced(f, G) == (Q, r)
    assert reduced(f, G, x, y) == (Q, r)
    H = groebner(G)
    assert H.reduce(f) == (Q, r)
    Q = [Poly(2*x, x, y), Poly(1, x, y)]
    r = Poly(x**2 + y**2 + y, x, y)
    assert _strict_eq(reduced(f, G, polys=True), (Q, r))
    assert _strict_eq(reduced(f, G, x, y, polys=True), (Q, r))
    H = groebner(G, polys=True)
    assert _strict_eq(H.reduce(f), (Q, r))
    f = 2*x**3 + y**3 + 3*y
    G = groebner([x**2 + y**2 - 1, x*y - 2])
    Q = [x**2 - x*y**3/2 + x*y/2 + y**6/4 - y**4/2 + y**2/4,
         -y**5/4 + y**3/2 + 3*y/4]
    r = 0
    assert reduced(f, G) == (Q, r)
    assert G.reduce(f) == (Q, r)
    # without auto-normalization of the domain the remainder is nonzero
    assert reduced(f, G, auto=False)[1] != 0
    assert G.reduce(f, auto=False)[1] != 0
    # zero remainder means the ideal membership test succeeds
    assert G.contains(f) is True
    assert G.contains(f + 1) is False
    assert reduced(1, [1], x) == ([1], 0)
    raises(ComputationFailed, lambda: reduced(1, [1]))
def test_groebner():
    """groebner(): monomial orders, modular arithmetic, polys flag, methods."""
    assert groebner([], x, y, z) == []
    assert groebner([x**2 + 1, y**4*x + x**3], x, y, order='lex') == \
        [1 + x**2, -1 + y**4]
    assert groebner([x**2 + 1, y**4*x + x**3, x*y*z**3], x, y, z,
                    order='grevlex') == [-1 + y**4, z**3, 1 + x**2]
    assert groebner([x**2 + 1, y**4*x + x**3], x, y, order='lex',
                    polys=True) == \
        [Poly(1 + x**2, x, y), Poly(-1 + y**4, x, y)]
    assert groebner([x**2 + 1, y**4*x + x**3, x*y*z**3], x, y, z,
                    order='grevlex', polys=True) == \
        [Poly(-1 + y**4, x, y, z), Poly(z**3, x, y, z), Poly(1 + x**2, x, y, z)]
    assert groebner([x**3 - 1, x**2 - 1]) == [x - 1]
    # Eq() inputs are interpreted as lhs - rhs
    assert groebner([Eq(x**3, 1), Eq(x**2, 1)]) == [x - 1]
    F = [3*x**2 + y*z - 5*x - 1, 2*x + 3*x*y + y**2, x - 3*y + x*z - 2*z**2]
    f = z**9 - x**2*y**3 - 3*x*y**2*z + 11*y*z**2 + x**2*z**2 - 5
    G = groebner(F, x, y, z, modulus=7, symmetric=False)
    assert G == [1 + x + y + 3*z + 2*z**2 + 2*z**3 + 6*z**4 + z**5,
                 1 + 3*y + y**2 + 6*z**2 + 3*z**3 + 3*z**4 + 3*z**5 + 4*z**6,
                 1 + 4*y + 4*z + y*z + 4*z**3 + z**4 + z**6,
                 6 + 6*z + z**2 + 4*z**3 + 3*z**4 + 6*z**5 + 3*z**6 + z**7]
    # division identity: f == sum(q*g) + r over GF(7)
    Q, r = reduced(f, G, x, y, z, modulus=7, symmetric=False, polys=True)
    assert sum([q*g for q, g in zip(Q, G.polys)], r) == Poly(f, modulus=7)
    F = [x*y - 2*y, 2*y**2 - x**2]
    assert groebner(F, x, y, order='grevlex') == \
        [y**3 - 2*y, x**2 - 2*y**2, x*y - 2*y]
    assert groebner(F, y, x, order='grevlex') == \
        [x**3 - 2*x**2, -x**2 + 2*y**2, x*y - 2*y]
    assert groebner(F, order='grevlex', field=True) == \
        [y**3 - 2*y, x**2 - 2*y**2, x*y - 2*y]
    assert groebner([1], x) == [1]
    assert groebner([x**2 + 2.0*y], x, y) == [1.0*x**2 + 2.0*y]
    raises(ComputationFailed, lambda: groebner([1]))
    # both backend algorithms must agree
    assert groebner([x**2 - 1, x**3 + 1], method='buchberger') == [x + 1]
    assert groebner([x**2 - 1, x**3 + 1], method='f5b') == [x + 1]
    raises(ValueError, lambda: groebner([x, y], method='unknown'))
def test_fglm():
    """FGLM conversion of a Groebner basis between monomial orders.

    For each system, computing the basis directly in the target order and
    converting via ``fglm`` must produce the same result.
    """
    F = [a + b + c + d, a*b + a*d + b*c + b*d, a*b*c + a*b*d + a*c*d + b*c*d,
         a*b*c*d - 1]
    G = groebner(F, a, b, c, d, order=grlex)
    B = [
        4*a + 3*d**9 - 4*d**5 - 3*d,
        4*b + 4*c - 3*d**9 + 4*d**5 + 7*d,
        4*c**2 + 3*d**10 - 4*d**6 - 3*d**2,
        4*c*d**4 + 4*c - d**9 + 4*d**5 + 5*d,
        d**12 - d**8 - d**4 + 1,
    ]
    assert groebner(F, a, b, c, d, order=lex) == B
    assert G.fglm(lex) == B
    F = [9*x**8 + 36*x**7 - 32*x**6 - 252*x**5 - 78*x**4 + 468*x**3 +
         288*x**2 - 108*x + 9,
         -72*t*x**7 - 252*t*x**6 + 192*t*x**5 + 1260*t*x**4 + 312*t*x**3 -
         404*t*x**2 - 576*t*x +
         108*t - 72*x**7 - 256*x**6 + 192*x**5 + 1280*x**4 + 312*x**3 -
         576*x + 96]
    G = groebner(F, t, x, order=grlex)
    B = [
        203577793572507451707*t + 627982239411707112*x**7 -
        666924143779443762*x**6 -
        10874593056632447619*x**5 + 5119998792707079562*x**4 +
        72917161949456066376*x**3 +
        20362663855832380362*x**2 - 142079311455258371571*x +
        183756699868981873194,
        9*x**8 + 36*x**7 - 32*x**6 - 252*x**5 - 78*x**4 + 468*x**3 +
        288*x**2 - 108*x + 9,
    ]
    assert groebner(F, t, x, order=lex) == B
    assert G.fglm(lex) == B
    F = [x**2 - x - 3*y + 1, -2*x + y**2 + y - 1]
    G = groebner(F, x, y, order=lex)
    B = [
        x**2 - x - 3*y + 1,
        y**2 - 2*x + y - 1,
    ]
    assert groebner(F, x, y, order=grlex) == B
    assert G.fglm(grlex) == B
def test_is_zero_dimensional():
    """is_zero_dimensional: does the system have finitely many solutions?"""
    assert is_zero_dimensional([x, y], x, y) is True
    assert is_zero_dimensional([x**3 + y**2], x, y) is False
    assert is_zero_dimensional([x, y, z], x, y, z) is True
    # extra free generator t makes the solution set infinite
    assert is_zero_dimensional([x, y, z], x, y, z, t) is False
    F = [x*y - z, y*z - x, x*y - y]
    assert is_zero_dimensional(F, x, y, z) is True
    F = [x**2 - 2*x*z + 5, x*y**2 + y*z**3, 3*y**2 - 8*z**2]
    assert is_zero_dimensional(F, x, y, z) is True
def test_GroebnerBasis():
    """GroebnerBasis container: indexing, slicing, attributes and equality.

    Fix: the ``G[:2]`` checks previously iterated ``G[1:]`` (a copy-paste
    slip from the preceding line); they now iterate the slice they assert on.
    """
    F = [x*y - 2*y, 2*y**2 - x**2]
    G = groebner(F, x, y, order='grevlex')
    H = [y**3 - 2*y, x**2 - 2*y**2, x*y - 2*y]
    P = [Poly(h, x, y) for h in H]
    assert isinstance(G, GroebnerBasis) is True
    assert len(G) == 3
    # without polys=True, elements are plain expressions
    assert G[0] == H[0] and not G[0].is_Poly
    assert G[1] == H[1] and not G[1].is_Poly
    assert G[2] == H[2] and not G[2].is_Poly
    assert G[1:] == H[1:] and not any(g.is_Poly for g in G[1:])
    assert G[:2] == H[:2] and not any(g.is_Poly for g in G[:2])
    assert G.exprs == H
    assert G.polys == P
    assert G.gens == (x, y)
    assert G.domain == ZZ
    assert G.order == grevlex
    # equality holds against expression and Poly forms, lists and tuples
    assert G == H
    assert G == tuple(H)
    assert G == P
    assert G == tuple(P)
    assert G != []
    G = groebner(F, x, y, order='grevlex', polys=True)
    # with polys=True, elements are Poly instances
    assert G[0] == P[0] and G[0].is_Poly
    assert G[1] == P[1] and G[1].is_Poly
    assert G[2] == P[2] and G[2].is_Poly
    assert G[1:] == P[1:] and all(g.is_Poly for g in G[1:])
    assert G[:2] == P[:2] and all(g.is_Poly for g in G[:2])
def test_poly():
    """poly(): build a Poly with automatically detected generators."""
    assert poly(x) == Poly(x, x)
    assert poly(y) == Poly(y, y)
    assert poly(x + y) == Poly(x + y, x, y)
    # non-symbol atoms such as sin(x) can serve as generators
    assert poly(x + sin(x)) == Poly(x + sin(x), x, sin(x))
    # wrt= moves the requested generator to the front
    assert poly(x + y, wrt=y) == Poly(x + y, y, x)
    assert poly(x + sin(x), wrt=sin(x)) == Poly(x + sin(x), sin(x), x)
    assert poly(x*y + 2*x*z**2 + 17) == Poly(x*y + 2*x*z**2 + 17, x, y, z)
    assert poly(2*(y + z)**2 - 1) == Poly(2*y**2 + 4*y*z + 2*z**2 - 1, y, z)
    assert poly(x*(y + z)**2 - 1) == \
        Poly(x*y**2 + 2*x*y*z + x*z**2 - 1, x, y, z)
    assert poly(2*x*(y + z)**2 - 1) == \
        Poly(2*x*y**2 + 4*x*y*z + 2*x*z**2 - 1, x, y, z)
    assert poly(2*(y + z)**2 - x - 1) == \
        Poly(2*y**2 + 4*y*z + 2*z**2 - x - 1, x, y, z)
    assert poly(x*(y + z)**2 - x - 1) == \
        Poly(x*y**2 + 2*x*y*z + x*z**2 - x - 1, x, y, z)
    assert poly(2*x*(y + z)**2 - x - 1) == \
        Poly(2*x*y**2 + 4*x*y*z + 2*x*z**2 - x - 1, x, y, z)
    assert poly(x*y + (x + y)**2 + (x + z)**2) == \
        Poly(2*x*z + 3*x*y + y**2 + z**2 + 2*x**2, x, y, z)
    assert poly(x*y*(x + y)*(x + z)**2) == \
        Poly(x**3*y**2 + x*y**2*z**2 + y*x**2*z**2 + 2*z*x**2*y**2 +
             2*y*z*x**3 + y*x**4, x, y, z)
    # an existing Poly passes through unchanged
    assert poly(Poly(x + y + z, y, x, z)) == Poly(x + y + z, y, x, z)
    assert poly((x + y)**2, x) == Poly(x**2 + 2*x*y + y**2, x, domain=ZZ[y])
    assert poly((x + y)**2, y) == Poly(x**2 + 2*x*y + y**2, y, domain=ZZ[x])
    assert poly(1, x) == Poly(1, x)
    raises(GeneratorsNeeded, lambda: poly(1))
    # issue 6184: explicit generator order is honored
    assert poly(x + y, x, y) == Poly(x + y, x, y)
    assert poly(x + y, y, x) == Poly(x + y, y, x)
def test_keep_coeff():
    """_keep_coeff: keep a coefficient unevaluated in front of a sum."""
    u = Mul(2, x + 1, evaluate=False)
    assert _keep_coeff(S(1), x) == x
    assert _keep_coeff(S(-1), x) == -x
    assert _keep_coeff(S(1.0), x) == 1.0*x
    assert _keep_coeff(S(-1.0), x) == -1.0*x
    assert _keep_coeff(S(1), 2*x) == 2*x
    assert _keep_coeff(S(2), x/2) == x
    assert _keep_coeff(S(2), sin(x)) == 2*sin(x)
    # only coeff*Add stays unevaluated
    assert _keep_coeff(S(2), x + 1) == u
    assert _keep_coeff(x, 1/x) == 1
    assert _keep_coeff(x + 1, S(2)) == u
@XFAIL
def test_poly_matching_consistency():
    """Known failure: scalar*Poly should equal Poly of the scaled expr.

    Test for this issue:
    https://github.com/sympy/sympy/issues/5514
    """
    assert I * Poly(x, x) == Poly(I*x, x)
    assert Poly(x, x) * I == Poly(I*x, x)
@XFAIL
def test_issue_5786():
    """Known failure: factor/expand round-trip over the Gaussian extension."""
    assert expand(factor(expand(
        (x - I*y)*(z - I*t)), extension=[I])) == -I*t*x - t*y + x*z - I*y*z
def test_noncommutative():
    """cancel() must simplify inside noncommutative wrappers, not drop them."""
    class foo(Expr):
        # minimal noncommutative Expr subclass acting as an opaque wrapper
        is_commutative=False
    e = x/(x + x*y)
    c = 1/( 1 + y)
    assert cancel(foo(e)) == foo(c)
    assert cancel(e + foo(e)) == c + foo(c)
    assert cancel(e*foo(c)) == c*foo(c)
def test_to_rational_coeffs():
    """to_rational_coeffs returns None when no rationalizing transform exists.

    Fix: compare against the None sentinel with ``is`` (PEP 8), not ``==``.
    """
    assert to_rational_coeffs(
        Poly(x**3 + y*x**2 + sqrt(y), x, domain='EX')) is None
def test_factor_terms():
# issue 7067
assert factor_list(x*(x + y)) == (1, [(x, 1), (x + y, 1)])
assert sqf_list(x*(x + y)) == (1, [(x, 1), (x + y, 1)])
| ChristinaZografou/sympy | sympy/polys/tests/test_polytools.py | Python | bsd-3-clause | 106,569 |
# -*- coding: utf-8 -*-
"""WAV file IO functions
"""
import numpy as np
from scipy.io import wavfile
from os import path as op
import warnings
from .._utils import verbose_dec, logger, _has_scipy_version
@verbose_dec
def read_wav(fname, verbose=None):
    """Read in a WAV file.

    Parameters
    ----------
    fname : str
        Filename to load.
    verbose : bool, str, int, or None
        If not None, override default verbose level.

    Returns
    -------
    data : array
        The WAV file data as np.float64. Integer-stored data (typical)
        is automatically rescaled to the range -1 to +1. The result has
        dimension n_channels x n_samples.
    fs : int
        The wav sample rate.
    """
    sample_rate, raw = wavfile.read(fname)
    # transpose so channels come first, and promote mono to 2D
    samples = np.atleast_2d(raw.T)
    stored_dtype = samples.dtype
    scale = _get_dtype_norm(stored_dtype)
    samples = np.ascontiguousarray(samples.astype(np.float64) / scale)
    _print_wav_info('Read', samples, stored_dtype)
    return samples, sample_rate
@verbose_dec
def write_wav(fname, data, fs, dtype=np.int16, overwrite=False, verbose=None):
    """Write a WAV file.

    Parameters
    ----------
    fname : str
        Filename to save as.
    data : array
        The data to save.
    fs : int
        The sample rate of the data.
    dtype : numpy dtype
        The output format to use. np.int16 is standard for many wav files,
        but np.float32 or np.float64 has higher dynamic range.
    overwrite : bool
        If True, overwrite the file if necessary.
    verbose : bool, str, int, or None
        If not None, override default verbose level.

    Raises
    ------
    IOError
        If the file exists and ``overwrite`` is False.
    TypeError
        If ``dtype`` is neither integer nor float.
    RuntimeError
        If a float dtype is requested on scipy < 0.13, or int64 is requested.
    ValueError
        If float data exceeds the -1..+1 range when saving as integers.
    """
    if not overwrite and op.isfile(fname):
        raise IOError('File {} exists, overwrite=True must be '
                      'used'.format(op.basename(fname)))
    # scipy expects an integer sample rate; warn if we have to truncate
    if not np.dtype(type(fs)).kind == 'i':
        fs = int(fs)
        warnings.warn('Warning: sampling rate is being cast to integer and '
                      'may be truncated.')
    data = np.atleast_2d(data)
    if np.dtype(dtype).kind not in ['i', 'f']:
        raise TypeError('dtype must be integer or float')
    if np.dtype(dtype).kind == 'f':
        if not _has_scipy_version('0.13'):
            raise RuntimeError('cannot write float datatype unless '
                               'scipy >= 0.13 is installed')
    elif np.dtype(dtype).itemsize == 8:
        # 8-byte integers (int64) are not a supported WAV format
        raise RuntimeError('Writing 64-bit integers is not supported')
    if np.dtype(data.dtype).kind == 'f':
        if np.dtype(dtype).kind == 'i' and np.max(np.abs(data)) > 1.:
            raise ValueError('Data must be between -1 and +1 when saving '
                             'with an integer dtype')
    _print_wav_info('Writing', data, dtype)
    # rescale from the -1..+1 convention to the target dtype's full scale
    max_val = _get_dtype_norm(dtype)
    data = (data * max_val).astype(dtype)
    wavfile.write(fname, fs, data.T)
def _print_wav_info(pre, data, dtype):
    """Log a one-line summary (action, channel/sample counts, dtype)."""
    n_channels, n_samples = data.shape[0], data.shape[1]
    plural = 's' if n_channels != 1 else ''
    logger.info('{0} WAV file with {1} channel{3} and {2} samples '
                '(format {4})'.format(pre, n_channels, n_samples, plural,
                                      dtype))
def _get_dtype_norm(dtype):
"""Helper to get normalization factor for a given datatype"""
if np.dtype(dtype).kind == 'i':
info = np.iinfo(dtype)
maxval = min(-info.min, info.max)
else: # == 'f'
maxval = 1.0
return maxval
| LABSN/expyfun | expyfun/io/_wav.py | Python | bsd-3-clause | 3,494 |
# -*- coding: utf-8 -*-
# Copyright 2019 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Implementation of HMAC key management command for GCS.
NOTE: Any modification to this file or corresponding HMAC logic
should be submitted in its own PR and release to avoid
concurrency issues in testing.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from gslib.command import Command
from gslib.command_argument import CommandArgument
from gslib.cs_api_map import ApiSelector
from gslib.exception import CommandException
from gslib.help_provider import CreateHelpText
from gslib.metrics import LogCommandParams
from gslib.project_id import PopulateProjectId
from gslib.utils.cloud_api_helper import GetCloudApiInstance
from gslib.utils.text_util import InsistAscii
# Per-subcommand synopsis strings, rendered verbatim in help output.
_CREATE_SYNOPSIS = """
gsutil hmac create [-p <project>] <service_account_email>
"""
_DELETE_SYNOPSIS = """
gsutil hmac delete [-p <project>] <access_id>
"""
_GET_SYNOPSIS = """
gsutil hmac get [-p <project>] <access_id>
"""
_LIST_SYNOPSIS = """
gsutil hmac list [-a] [-l] [-p <project>] [-u <service_account_email>]
"""
_UPDATE_SYNOPSIS = """
gsutil hmac update -s (ACTIVE|INACTIVE) [-e <etag>] [-p <project>] <access_id>
"""
# Long-form help text for each sub-command (shown by ``gsutil help hmac``).
_CREATE_DESCRIPTION = """
<B>CREATE</B>
The ``hmac create`` command creates an HMAC key for the specified service
account:
gsutil hmac create test.service.account@test_project.iam.gserviceaccount.com
The secret key material is only available upon creation, so be sure to store
the returned secret along with the access_id.
<B>CREATE OPTIONS</B>
The ``create`` sub-command has the following option
-p <project_id> Specify a project in which to create a key.
"""
_DELETE_DESCRIPTION = """
<B>DELETE</B>
The ``hmac delete`` command permanently deletes the specified HMAC key:
gsutil hmac delete GOOG56JBMFZX6PMPTQ62VD2
Note that keys must be updated to be in the ``INACTIVE`` state before they can be
deleted.
<B>DELETE OPTIONS</B>
The "delete" sub-command has the following option
-p <project_id> Specify a project from which to delete a key.
"""
_GET_DESCRIPTION = """
<B>GET</B>
The ``hmac get`` command retrieves the specified HMAC key's metadata:
gsutil hmac get GOOG56JBMFZX6PMPTQ62VD2
Note that there is no option to retrieve a key's secret material after it has
been created.
<B>GET OPTIONS</B>
The ``get`` sub-command has the following option
-p <project_id> Specify a project from which to get a key.
"""
_LIST_DESCRIPTION = """
<B>LIST</B>
The ``hmac list`` command lists the HMAC key metadata for keys in the
specified project. If no project is specified in the command, the default
project is used.
<B>LIST OPTIONS</B>
The ``list`` sub-command has the following options
-a Show all keys, including recently deleted
keys.
-l Use long listing format. Shows each key's full
metadata excluding the secret.
-p <project_id> Specify a project from which to list keys.
-u <service_account_email> Filter keys for a single service account.
"""
_UPDATE_DESCRIPTION = """
<B>UPDATE</B>
The ``hmac update`` command sets the state of the specified key:
gsutil hmac update -s INACTIVE -e M42da= GOOG56JBMFZX6PMPTQ62VD2
Valid state arguments are ``ACTIVE`` and ``INACTIVE``. To set a key to state
``DELETED``, use the ``hmac delete`` command on an ``INACTIVE`` key. If an etag
is set in the command, it will only succeed if the provided etag matches the etag
of the stored key.
<B>UPDATE OPTIONS</B>
The ``update`` sub-command has the following options
-s <ACTIVE|INACTIVE> Sets the state of the specified key to either
``ACTIVE`` or ``INACTIVE``.
-e <etag> If provided, the update will only be performed
if the specified etag matches the etag of the
stored key.
-p <project_id> Specify a project in which to update a key.
"""
# Combined synopsis: strip leading newlines from all but the first part.
_SYNOPSIS = (_CREATE_SYNOPSIS + _DELETE_SYNOPSIS.lstrip('\n') +
             _GET_SYNOPSIS.lstrip('\n') + _LIST_SYNOPSIS.lstrip('\n') +
             _UPDATE_SYNOPSIS.lstrip('\n') + '\n\n')
_DESCRIPTION = """
You can use the ``hmac`` command to interact with service account `HMAC keys
<https://cloud.google.com/storage/docs/authentication/hmackeys>`_.
The ``hmac`` command has five sub-commands:
""" + '\n'.join([
    _CREATE_DESCRIPTION,
    _DELETE_DESCRIPTION,
    _GET_DESCRIPTION,
    _LIST_DESCRIPTION,
    _UPDATE_DESCRIPTION,
])
_DETAILED_HELP_TEXT = CreateHelpText(_SYNOPSIS, _DESCRIPTION)
# States accepted by ``hmac update -s`` (DELETED is reached via delete).
_VALID_UPDATE_STATES = ['INACTIVE', 'ACTIVE']
# RFC-1123-style timestamp format used when printing key metadata.
_TIME_FORMAT = '%a, %d %b %Y %H:%M:%S GMT'
# Per-subcommand help blobs used by the help_spec below.
_create_help_text = CreateHelpText(_CREATE_SYNOPSIS, _CREATE_DESCRIPTION)
_delete_help_text = CreateHelpText(_DELETE_SYNOPSIS, _DELETE_DESCRIPTION)
_get_help_text = CreateHelpText(_GET_SYNOPSIS, _GET_DESCRIPTION)
_list_help_text = CreateHelpText(_LIST_SYNOPSIS, _LIST_DESCRIPTION)
_update_help_text = CreateHelpText(_UPDATE_SYNOPSIS, _UPDATE_DESCRIPTION)
def _AccessIdException(command_name, subcommand, synopsis):
  """Build the CommandException raised when a sub-command lacks an Access ID."""
  message = ('%s %s requires an Access ID to be specified as the last '
             'argument.\n%s' % (command_name, subcommand, synopsis))
  return CommandException(message)
def _KeyMetadataOutput(metadata):
  """Return a human-readable, multi-line rendering of an HMAC key's metadata."""
  label_width = 22

  def _Row(label, value, trailing_newline=True):
    """Render one tab-indented row with the label padded to a fixed width."""
    row = '\t%-*s %s' % (label_width, label + ':', value)
    return row + '\n' if trailing_newline else row

  parts = ['Access ID %s:\n' % metadata.accessId]
  parts.append(_Row('State', metadata.state))
  parts.append(_Row('Service Account', metadata.serviceAccountEmail))
  parts.append(_Row('Project', metadata.projectId))
  parts.append(_Row('Time Created',
                    metadata.timeCreated.strftime(_TIME_FORMAT)))
  parts.append(_Row('Time Last Updated',
                    metadata.updated.strftime(_TIME_FORMAT)))
  # The final row deliberately omits the trailing newline.
  parts.append(_Row('Etag', metadata.etag, trailing_newline=False))
  return ''.join(parts)
class HmacCommand(Command):
  """Implementation of gsutil hmac command."""

  # Command registration: accepted getopt-style sub-args ('e', 'p', 's', 'u'
  # take values), argument counts, and the JSON-API-only support matrix.
  command_spec = Command.CreateCommandSpec(
      'hmac',
      min_args=1,
      max_args=8,
      supported_sub_args='ae:lp:s:u:',
      file_url_ok=True,
      urls_start_arg=1,
      gs_api_support=[ApiSelector.JSON],
      gs_default_api=ApiSelector.JSON,
      usage_synopsis=_SYNOPSIS,
      argparse_arguments={
          'create': [CommandArgument.MakeZeroOrMoreCloudOrFileURLsArgument()],
          'delete': [CommandArgument.MakeZeroOrMoreCloudOrFileURLsArgument()],
          'get': [CommandArgument.MakeZeroOrMoreCloudOrFileURLsArgument()],
          'list': [CommandArgument.MakeZeroOrMoreCloudOrFileURLsArgument()],
          'update': [CommandArgument.MakeZeroOrMoreCloudOrFileURLsArgument()],
      },
  )

  # Help metadata, including the per-sub-command help bodies built above.
  help_spec = Command.HelpSpec(
      help_name='hmac',
      help_name_aliases=[],
      help_type='command_help',
      help_one_line_summary=('CRUD operations on service account HMAC keys.'),
      help_text=_DETAILED_HELP_TEXT,
      subcommand_help_text={
          'create': _create_help_text,
          'delete': _delete_help_text,
          'get': _get_help_text,
          'list': _list_help_text,
          'update': _update_help_text,
      })

  def _CreateHmacKey(self, thread_state=None):
    """Creates HMAC key for a service account.

    Raises:
      CommandException: if no service account email was supplied as the
        last positional argument.
    """
    if self.args:
      self.service_account_email = self.args[0]
    else:
      err_msg = ('%s %s requires a service account to be specified as the '
                 'last argument.\n%s')
      raise CommandException(
          err_msg %
          (self.command_name, self.action_subcommand, _CREATE_SYNOPSIS))
    gsutil_api = GetCloudApiInstance(self, thread_state=thread_state)
    response = gsutil_api.CreateHmacKey(self.project_id,
                                        self.service_account_email,
                                        provider='gs')
    # Print both halves of the credential; this response is the only place
    # the secret is surfaced.
    print('%-12s %s' % ('Access ID:', response.metadata.accessId))
    print('%-12s %s' % ('Secret:', response.secret))

  def _DeleteHmacKey(self, thread_state=None):
    """Deletes an HMAC key.

    Raises:
      CommandException: if no Access ID argument was supplied.
    """
    if self.args:
      access_id = self.args[0]
    else:
      raise _AccessIdException(self.command_name, self.action_subcommand,
                               _DELETE_SYNOPSIS)
    gsutil_api = GetCloudApiInstance(self, thread_state=thread_state)
    gsutil_api.DeleteHmacKey(self.project_id, access_id, provider='gs')

  def _GetHmacKey(self, thread_state=None):
    """Gets HMAC key from its Access Id and prints its metadata.

    Raises:
      CommandException: if no Access ID argument was supplied.
    """
    if self.args:
      access_id = self.args[0]
    else:
      raise _AccessIdException(self.command_name, self.action_subcommand,
                               _GET_SYNOPSIS)
    gsutil_api = GetCloudApiInstance(self, thread_state=thread_state)
    response = gsutil_api.GetHmacKey(self.project_id, access_id, provider='gs')
    print(_KeyMetadataOutput(response))

  def _ListHmacKeys(self, thread_state=None):
    """Lists HMAC keys for a project or service account.

    Raises:
      CommandException: if unexpected positional arguments were supplied.
    """
    if self.args:
      raise CommandException(
          '%s %s received unexpected arguments.\n%s' %
          (self.command_name, self.action_subcommand, _LIST_SYNOPSIS))
    gsutil_api = GetCloudApiInstance(self, thread_state=thread_state)
    response = gsutil_api.ListHmacKeys(self.project_id,
                                       self.service_account_email,
                                       self.show_all,
                                       provider='gs')
    # Short format: access ID, state, and service account on one line.
    short_list_format = '%s\t%-12s %s'
    if self.long_list:
      for item in response:
        print(_KeyMetadataOutput(item))
        print()
    else:
      for item in response:
        print(short_list_format %
              (item.accessId, item.state, item.serviceAccountEmail))

  def _UpdateHmacKey(self, thread_state=None):
    """Update an HMAC key's state.

    Raises:
      CommandException: if the '-s' state flag is missing/invalid or no
        Access ID argument was supplied.
    """
    if not self.state:
      raise CommandException(
          'A state flag must be supplied for %s %s\n%s' %
          (self.command_name, self.action_subcommand, _UPDATE_SYNOPSIS))
    elif self.state not in _VALID_UPDATE_STATES:
      raise CommandException('The state flag value must be one of %s' %
                             ', '.join(_VALID_UPDATE_STATES))
    if self.args:
      access_id = self.args[0]
    else:
      raise _AccessIdException(self.command_name, self.action_subcommand,
                               _UPDATE_SYNOPSIS)
    gsutil_api = GetCloudApiInstance(self, thread_state=thread_state)
    response = gsutil_api.UpdateHmacKey(self.project_id,
                                        access_id,
                                        self.state,
                                        self.etag,
                                        provider='gs')
    print(_KeyMetadataOutput(response))

  def RunCommand(self):
    """Command entry point for the hmac command."""
    if self.gsutil_api.GetApiSelector(provider='gs') != ApiSelector.JSON:
      raise CommandException(
          'The "hmac" command can only be used with the GCS JSON API')
    self.action_subcommand = self.args.pop(0)
    self.ParseSubOpts(check_args=True)
    # Commands with both suboptions and subcommands need to reparse for
    # suboptions, so we log again.
    LogCommandParams(sub_opts=self.sub_opts)
    self.service_account_email = None
    self.state = None
    self.show_all = False
    self.long_list = False
    self.etag = None
    if self.sub_opts:
      for o, a in self.sub_opts:
        if o == '-u':
          self.service_account_email = a
        elif o == '-p':
          # Project IDs are sent as header values when using gs and s3 XML APIs.
          InsistAscii(a, 'Invalid non-ASCII character found in project ID')
          self.project_id = a
        elif o == '-s':
          self.state = a
        elif o == '-a':
          self.show_all = True
        elif o == '-l':
          self.long_list = True
        elif o == '-e':
          self.etag = a
    # NOTE(review): self.project_id is only assigned above when '-p' is given;
    # presumably the Command base class initializes it, otherwise this check
    # raises AttributeError — confirm against the base class.
    if not self.project_id:
      self.project_id = PopulateProjectId(None)
    # Dispatch table mapping sub-command name to its handler method.
    method_for_arg = {
        'create': self._CreateHmacKey,
        'delete': self._DeleteHmacKey,
        'get': self._GetHmacKey,
        'list': self._ListHmacKeys,
        'update': self._UpdateHmacKey,
    }
    if self.action_subcommand not in method_for_arg:
      raise CommandException('Invalid subcommand "%s" for the %s command.\n'
                             'See "gsutil help hmac".' %
                             (self.action_subcommand, self.command_name))
    LogCommandParams(subcommands=[self.action_subcommand])
    method_for_arg[self.action_subcommand]()
    return 0
| catapult-project/catapult | third_party/gsutil/gslib/commands/hmac.py | Python | bsd-3-clause | 13,530 |
#
# cocos2d
# http://python.cocos2d.org
#
from __future__ import division, print_function, unicode_literals
# This code is so you can run the samples without installing the package
import sys
import os
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))
#
import cocos
from cocos.director import director
from cocos.actions import *
from cocos.layer import *
from cocos.sprite import Sprite
class TestLayer(cocos.layer.Layer):
    """Layer with three grossini sprites spinning at the window mid-height."""

    def __init__(self):
        super(TestLayer, self).__init__()
        width, height = director.get_window_size()
        mid_y = height // 2

        grossini = Sprite('grossini.png')
        sister1 = Sprite('grossinis_sister1.png')
        sister2 = Sprite('grossinis_sister2.png')
        grossini.position = (width // 2, mid_y)
        sister1.position = (width // 4, mid_y)
        sister2.position = (3 * width / 4.0, mid_y)

        # Add order sets the draw order: sister1 is added before grossini.
        for sprite in (sister1, grossini, sister2):
            self.add(sprite)

        # One full turn per second, repeated 16 times; the center sprite
        # spins clockwise, the sisters counter-clockwise.
        grossini.do(RotateBy(360, 1) * 16)
        sister1.do(RotateBy(-360, 1) * 16)
        sister2.do(RotateBy(-360, 1) * 16)
if __name__ == "__main__":
    director.init(resizable=True)

    # Parent scene; its transform anchor is the window center (640x480 default).
    main_scene = cocos.scene.Scene()
    main_scene.transform_anchor = (320, 240)

    child1_scene = cocos.scene.Scene()
    child2_scene = cocos.scene.Scene()
    child3_scene = cocos.scene.Scene()
    child4_scene = cocos.scene.Scene()

    # NOTE(review): the very same TestLayer instance is shared by all four
    # child scenes — presumably intentional for this demo; confirm cocos
    # supports multi-parenting a layer like this.
    sprites = TestLayer()
    sprites.transform_anchor = 320, 240

    # Each child scene: a solid color background plus the shared sprite layer,
    # scaled up and offset toward one quadrant of the window.
    child1_scene.add(ColorLayer(0, 0, 255, 255))
    child1_scene.add(sprites)
    child1_scene.scale = 1.5
    child1_scene.position = (-160, -120)
    child1_scene.transform_anchor = (320, 240)
    child2_scene.add(ColorLayer(0, 255, 0, 255))
    child2_scene.add(sprites)
    child2_scene.scale = 1.5
    child2_scene.position = (160, 120)
    child2_scene.transform_anchor = (320, 240)
    child3_scene.add(ColorLayer(255, 0, 0, 255))
    child3_scene.add(sprites)
    child3_scene.scale = 1.5
    child3_scene.position = (-160, 120)
    child3_scene.transform_anchor = (320, 240)
    child4_scene.add(ColorLayer(255, 255, 255, 255))
    child4_scene.add(sprites)
    child4_scene.scale = 1.5
    child4_scene.position = (160, -120)
    child4_scene.transform_anchor = (320, 240)
    main_scene.add(child1_scene)
    main_scene.add(child2_scene)
    main_scene.add(child3_scene)
    main_scene.add(child4_scene)

    # Action templates reused across several do() calls below — presumably
    # safe because cocos copies actions when scheduling; confirm.
    rot = RotateBy(-360, 2)
    rot2 = RotateBy(360, 4)
    sleep = Delay(2)
    sleep2 = Delay(2)  # NOTE(review): never scheduled — apparently unused.
    # Staggered shrink sequences so the quadrants scale down one after another.
    sc1 = ScaleTo(0.5, 0.5) + Delay(1.5)
    sc2 = Delay(0.5) + ScaleTo(0.5, 0.5) + Delay(1.0)
    sc3 = Delay(1.0) + ScaleTo(0.5, 0.5) + Delay(0.5)
    sc4 = Delay(1.5) + ScaleTo(0.5, 0.5)
    child1_scene.do(sc4 + sleep + rot + sleep + rot + rot)
    child2_scene.do(sc3 + sleep + rot + sleep + rot + Reverse(rot))
    child3_scene.do(sc2 + sleep + rot + sleep + rot + Reverse(rot))
    child4_scene.do(sc1 + sleep + rot + sleep + rot + rot)
    main_scene.do(sleep + Reverse(rot) * 2 + rot * 2 + sleep)
    sprites.do(Delay(4) + rot2 * 3)
    director.run(main_scene)
| dangillet/cocos | samples/demo_multiple_scenes.py | Python | bsd-3-clause | 3,011 |
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import tempfile
import unittest
import mock
import multiprocessing
from azure.cli.core.cloud import (Cloud,
CloudEndpoints,
CloudSuffixes,
add_cloud,
get_cloud,
get_clouds,
get_custom_clouds,
remove_cloud,
get_active_cloud_name,
init_known_clouds,
AZURE_PUBLIC_CLOUD,
KNOWN_CLOUDS,
CloudEndpointNotSetException)
from azure.cli.core._config import get_config_parser
from azure.cli.core._profile import Profile
from azure.cli.core.util import CLIError
def _helper_get_clouds(unused_arg):
    """Call get_clouds(), discarding the argument supplied by Pool.map."""
    get_clouds()
class TestCloud(unittest.TestCase):
    """Tests for cloud registration, lookup, and persistence in the cloud
    config file (each test patches CLOUD_CONFIG_FILE to a temp file)."""

    @mock.patch('azure.cli.core._profile.CLOUD', Cloud('AzureCloud'))
    def test_endpoint_none(self):
        # A bare Cloud() has no endpoints set, so credential lookup must fail.
        with self.assertRaises(CloudEndpointNotSetException):
            profile = Profile()
            profile.get_login_credentials()

    @mock.patch('azure.cli.core.cloud.get_custom_clouds', lambda: [])
    def test_add_get_delete_custom_cloud(self):
        """Round-trip a custom cloud: add, read back from config, remove."""
        endpoint_rm = 'http://management.contoso.com'
        suffix_storage = 'core.contoso.com'
        endpoints = CloudEndpoints(resource_manager=endpoint_rm)
        suffixes = CloudSuffixes(storage_endpoint=suffix_storage)
        c = Cloud('MyOwnCloud', endpoints=endpoints, suffixes=suffixes)
        with mock.patch('azure.cli.core.cloud.CLOUD_CONFIG_FILE', tempfile.mkstemp()[1]) as\
                config_file:
            with mock.patch('azure.cli.core.cloud.get_custom_clouds', lambda: []):
                add_cloud(c)
                config = get_config_parser()
                config.read(config_file)
                self.assertTrue(c.name in config.sections())
                self.assertEqual(config.get(c.name, 'endpoint_resource_manager'), endpoint_rm)
                self.assertEqual(config.get(c.name, 'suffix_storage_endpoint'), suffix_storage)
            custom_clouds = get_custom_clouds()
            self.assertEqual(len(custom_clouds), 1)
            self.assertEqual(custom_clouds[0].name, c.name)
            self.assertEqual(custom_clouds[0].endpoints.resource_manager,
                             c.endpoints.resource_manager)
            self.assertEqual(custom_clouds[0].suffixes.storage_endpoint,
                             c.suffixes.storage_endpoint)
            with mock.patch('azure.cli.core.cloud._get_cloud', lambda _: c):
                remove_cloud(c.name)
            custom_clouds = get_custom_clouds()
            self.assertEqual(len(custom_clouds), 0)

    def test_add_get_cloud_with_profile(self):
        """A cloud added with a valid API profile keeps that profile."""
        endpoint_rm = 'http://management.contoso.com'
        endpoints = CloudEndpoints(resource_manager=endpoint_rm)
        profile = '2017-03-09-profile'
        c = Cloud('MyOwnCloud', endpoints=endpoints, profile=profile)
        with mock.patch('azure.cli.core.cloud.CLOUD_CONFIG_FILE', tempfile.mkstemp()[1]) as\
                config_file:
            add_cloud(c)
            config = get_config_parser()
            config.read(config_file)
            self.assertTrue(c.name in config.sections())
            self.assertEqual(config.get(c.name, 'endpoint_resource_manager'), endpoint_rm)
            self.assertEqual(config.get(c.name, 'profile'), profile)
            custom_clouds = get_custom_clouds()
            self.assertEqual(len(custom_clouds), 1)
            self.assertEqual(custom_clouds[0].name, c.name)
            self.assertEqual(custom_clouds[0].endpoints.resource_manager,
                             c.endpoints.resource_manager)
            self.assertEqual(custom_clouds[0].profile,
                             c.profile)

    def test_add_get_cloud_with_invalid_profile(self):
        # Cloud has profile that doesn't exist so an exception should be raised
        profile = 'none-existent-profile'
        c = Cloud('MyOwnCloud', profile=profile)
        with mock.patch('azure.cli.core.cloud.CLOUD_CONFIG_FILE', tempfile.mkstemp()[1]) as\
                config_file:
            add_cloud(c)
            config = get_config_parser()
            config.read(config_file)
            self.assertTrue(c.name in config.sections())
            self.assertEqual(config.get(c.name, 'profile'), profile)
            with self.assertRaises(CLIError):
                get_custom_clouds()

    def test_get_default_latest_profile(self):
        """Every known cloud defaults to the 'latest' API profile."""
        with mock.patch('azure.cli.core.cloud.CLOUD_CONFIG_FILE', tempfile.mkstemp()[1]):
            clouds = get_clouds()
            for c in clouds:
                self.assertEqual(c.profile, 'latest')

    def test_custom_cloud_management_endpoint_set(self):
        # We have set management endpoint so don't override it
        endpoint_rm = 'http://management.contoso.com'
        endpoint_mgmt = 'http://management.core.contoso.com'
        endpoints = CloudEndpoints(resource_manager=endpoint_rm, management=endpoint_mgmt)
        profile = '2017-03-09-profile'
        c = Cloud('MyOwnCloud', endpoints=endpoints, profile=profile)
        with mock.patch('azure.cli.core.cloud.CLOUD_CONFIG_FILE', tempfile.mkstemp()[1]):
            add_cloud(c)
            custom_clouds = get_custom_clouds()
            self.assertEqual(len(custom_clouds), 1)
            self.assertEqual(custom_clouds[0].endpoints.resource_manager,
                             c.endpoints.resource_manager)
            # CLI logic should keep our set management endpoint
            self.assertEqual(custom_clouds[0].endpoints.management,
                             c.endpoints.management)

    def test_custom_cloud_no_management_endpoint_set(self):
        # Use ARM 'resource manager' endpoint as 'management' (old ASM) endpoint if only ARM endpoint is set
        endpoint_rm = 'http://management.contoso.com'
        endpoints = CloudEndpoints(resource_manager=endpoint_rm)
        profile = '2017-03-09-profile'
        c = Cloud('MyOwnCloud', endpoints=endpoints, profile=profile)
        with mock.patch('azure.cli.core.cloud.CLOUD_CONFIG_FILE', tempfile.mkstemp()[1]):
            add_cloud(c)
            custom_clouds = get_custom_clouds()
            self.assertEqual(len(custom_clouds), 1)
            self.assertEqual(custom_clouds[0].endpoints.resource_manager,
                             c.endpoints.resource_manager)
            # CLI logic should add management endpoint to equal resource_manager as we didn't set it
            self.assertEqual(custom_clouds[0].endpoints.management,
                             c.endpoints.resource_manager)

    def test_get_active_cloud_name_default(self):
        """With no configuration, the active cloud is the public cloud."""
        expected = AZURE_PUBLIC_CLOUD.name
        actual = get_active_cloud_name()
        self.assertEqual(expected, actual)

    def test_known_cloud_missing_endpoint(self):
        # New endpoints in cloud config should be saved in config for the known clouds
        with mock.patch('azure.cli.core.cloud.CLOUD_CONFIG_FILE', tempfile.mkstemp()[1]) as\
                config_file:
            # Save the clouds to config to get started
            init_known_clouds()
            cloud = get_cloud(AZURE_PUBLIC_CLOUD.name)
            self.assertEqual(cloud.endpoints.batch_resource_id,
                             AZURE_PUBLIC_CLOUD.endpoints.batch_resource_id)
            # Remove an endpoint from the cloud config (leaving other config values as is)
            config = get_config_parser()
            config.read(config_file)
            config.remove_option(AZURE_PUBLIC_CLOUD.name, 'endpoint_batch_resource_id')
            with open(config_file, 'w') as cf:
                config.write(cf)
            # Verify that it was removed
            config.read(config_file)
            self.assertFalse(config.has_option(AZURE_PUBLIC_CLOUD.name,
                                               'endpoint_batch_resource_id'))
            # Init the known clouds again (this should add the missing endpoint)
            init_known_clouds(force=True)
            config.read(config_file)
            # The missing endpoint should have been added by init_known_clouds as 'force' was used.
            self.assertTrue(config.has_option(AZURE_PUBLIC_CLOUD.name,
                                              'endpoint_batch_resource_id'),
                            'Expected the missing endpoint to be added but it was not.')
            actual_val = config.get(AZURE_PUBLIC_CLOUD.name, 'endpoint_batch_resource_id')
            expected_val = AZURE_PUBLIC_CLOUD.endpoints.batch_resource_id
            self.assertEqual(actual_val, expected_val)

    def test_init_known_clouds_force_concurrent(self):
        # Support multiple concurrent calls to clouds init method
        with mock.patch('azure.cli.core.cloud.CLOUD_CONFIG_FILE', tempfile.mkstemp()[1]) as config_file:
            pool_size = 100
            p = multiprocessing.Pool(pool_size)
            p.map(init_known_clouds, [True] * pool_size)
            p.close()
            p.join()
            # Check we can read the file with no exceptions
            config = get_config_parser()
            config.read(config_file)
            # Check that we can get all the known clouds without any exceptions
            for kc in KNOWN_CLOUDS:
                get_cloud(kc.name)

    def test_get_clouds_concurrent(self):
        """get_clouds must be safe to call from many processes at once."""
        with mock.patch('azure.cli.core.cloud.CLOUD_CONFIG_FILE', tempfile.mkstemp()[1]) as config_file:
            init_known_clouds()
            pool_size = 100
            p = multiprocessing.Pool(pool_size)
            p.map(_helper_get_clouds, range(pool_size))
            p.close()
            p.join()
            # Check we can read the file with no exceptions
            config = get_config_parser()
            config.read(config_file)
            for kc in KNOWN_CLOUDS:
                get_cloud(kc.name)
# Allow running this test module directly (python test_cloud.py).
if __name__ == '__main__':
    unittest.main()
| samedder/azure-cli | src/azure-cli-core/azure/cli/core/tests/test_cloud.py | Python | mit | 10,518 |
# -*- coding: utf-8 -*-
import sys
from inspect import getmembers
from ._hoedown import lib
def _set_constants():
    """Mirror hoedown's integer HOEDOWN_* constants onto this module.

    Every integer attribute of the C library whose name starts with
    ``HOEDOWN_`` is re-exported here with the prefix stripped (e.g.
    ``HOEDOWN_EXT_TABLES`` becomes ``EXT_TABLES``).
    """
    module = sys.modules[__name__]
    prefix = 'HOEDOWN_'
    # Derive the slice length from the prefix instead of the magic number 8,
    # and avoid assigning a lambda to a name (PEP 8 E731).
    for name, value in getmembers(lib, lambda member: isinstance(member, int)):
        if name.startswith(prefix):
            setattr(module, name[len(prefix):], value)
# Populate the constants once, at first import. EXT_TABLES serves as the
# "already populated" sentinel (presumably exported from HOEDOWN_EXT_TABLES
# by _set_constants — confirm against the _hoedown bindings).
if not hasattr(sys.modules[__name__], 'EXT_TABLES'):
    _set_constants()
| Weasyl/misaka | misaka/constants.py | Python | mit | 406 |
import unittest
import numpy
import six
from chainer.backends import cuda
from chainer import functions
from chainer import gradient_check
from chainer import testing
from chainer.testing import backend
@testing.parameterize(*testing.product({
    'dtype': [numpy.float16, numpy.float32, numpy.float64],
}))
@backend.inject_backend_tests(
    ['test_forward', 'test_backward'],
    # CPU tests
    testing.product({
        'use_cuda': [False],
        'use_ideep': ['never', 'always'],
    })
    # GPU tests
    + [{'use_cuda': True}])
class TestLocalResponseNormalization(unittest.TestCase):
    """Checks F.local_response_normalization against a naive re-implementation
    and via numerical gradients, per dtype and per backend."""

    def setUp(self):
        # Fixed input shape (batch=2, channels=7, h=3, w=2); fp16 gets looser
        # tolerances than fp32/fp64.
        x = numpy.random.uniform(-1, 1, (2, 7, 3, 2)).astype(self.dtype)
        gy = numpy.random.uniform(-1, 1, (2, 7, 3, 2)).astype(self.dtype)
        self.inputs = [x]
        self.grad_outputs = [gy]
        if self.dtype == numpy.float16:
            self.check_forward_options = {'atol': 1e-4, 'rtol': 1e-3}
            self.check_backward_options = {'atol': 5e-3, 'rtol': 5e-3}
        else:
            self.check_forward_options = {}
            self.check_backward_options = {'atol': 3e-4, 'rtol': 3e-3}

    def forward_cpu(self, inputs):
        # Naive implementation
        # NOTE(review): constants (window bounds c-2..c+2, k=2, alpha=1e-4,
        # beta=0.75, channel cap 7) presumably mirror the library defaults
        # for the fixed 7-channel input — confirm against the function docs.
        x, = inputs
        y_expect = numpy.zeros_like(x)
        for n, c, h, w in numpy.ndindex(x.shape):
            s = 0
            for i in six.moves.range(max(0, c - 2), min(7, c + 2)):
                s += x[n, i, h, w] ** 2
            denom = (2 + 1e-4 * s) ** .75
            y_expect[n, c, h, w] = x[n, c, h, w] / denom
        return y_expect,

    def check_forward(self, inputs, backend_config):
        # Expected values always come from the CPU reference implementation.
        y_expect, = self.forward_cpu(inputs)
        if backend_config.use_cuda:
            inputs = cuda.to_gpu(inputs)
        with backend_config:
            y = functions.local_response_normalization(*inputs)
        assert y.data.dtype == self.dtype
        testing.assert_allclose(y_expect, y.data, **self.check_forward_options)

    def test_forward(self, backend_config):
        self.check_forward(self.inputs, backend_config)

    def check_backward(self, inputs, grad_outputs, backend_config):
        if backend_config.use_cuda:
            inputs = cuda.to_gpu(inputs)
            grad_outputs = cuda.to_gpu(grad_outputs)
        with backend_config:
            gradient_check.check_backward(
                functions.local_response_normalization, inputs, grad_outputs,
                eps=1, dtype=numpy.float64, **self.check_backward_options)

    def test_backward(self, backend_config):
        self.check_backward(self.inputs, self.grad_outputs, backend_config)
testing.run_module(__name__, __file__)
| rezoo/chainer | tests/chainer_tests/functions_tests/normalization_tests/test_local_response_normalization.py | Python | mit | 2,648 |
class Solution(object):
    def wordsTyping(self, sentence, rows, cols):
        """Return how many times `sentence` fits on a rows x cols screen.

        Words cannot be split across lines and are separated by one space.
        Simulates rows until the sentence start index cycles back to 0,
        then extrapolates the repeating pattern over all rows.

        :type sentence: List[str]
        :type rows: int
        :type cols: int
        :rtype: int
        """
        cnt = 0
        start = 0
        # Length of the sentence plus one trailing space per word.
        row_circ = len(''.join(sentence)) + len(sentence)
        # Whole-sentence repetitions that fit purely by length in one row.
        # Floor division ('//') is required: plain '/' only worked under
        # Python 2 integer semantics and yields floats on Python 3.
        nrc = (cols + 1) // row_circ
        cols = (cols + 1) % row_circ
        # circle[i] = sentences completed after simulating rows 0..i; the
        # simulation stops once a row begins with word 0 again (a cycle).
        circle = []
        for i in range(rows):
            j = len(sentence[start])
            while j < cols:
                tmp = (start + 1) % len(sentence)
                if not tmp:
                    cnt += 1
                start = tmp
                j += 1 + len(sentence[start])
            circle.append(cnt)
            if not start:
                break
        ncircle = len(circle)
        # Combine: per-row whole repetitions, full cycles, and the remainder.
        cnt = nrc * rows
        cnt += rows // ncircle * circle[-1]
        cnt += circle[rows % ncircle - 1] if rows % ncircle > 0 else 0
        return cnt
| Chasego/cod | leetcode/418-Sentence-Screen-Fitting/SentenceScreenFitting_001.py | Python | mit | 921 |
"""
The outputs.py module represents some form of all outputs
from the Automater program to include all variation of
output files. Any addition to the Automater that brings
any other output requirement should be programmed in this module.
Class(es):
SiteDetailOutput -- Wrapper class around all functions that print output
from Automater, to include standard output and file system output.
Function(s):
No global exportable functions are defined.
Exception(s):
No exceptions exported.
"""
import csv
import socket
import re
from datetime import datetime
from operator import attrgetter
class SiteDetailOutput(object):
"""
SiteDetailOutput provides the capability to output information
to the screen, a text file, a comma-seperated value file, or
a file formatted with html markup (readable by web browsers).
Public Method(s):
createOutputInfo
Instance variable(s):
_listofsites - list storing the list of site results stored.
"""
def __init__(self,sitelist):
"""
Class constructor. Stores the incoming list of sites in the _listofsites list.
Argument(s):
sitelist -- list containing site result information to be printed.
Return value(s):
Nothing is returned from this Method.
"""
self._listofsites = []
self._listofsites = sitelist
@property
def ListOfSites(self):
"""
Checks instance variable _listofsites for content.
Returns _listofsites if it has content or None if it does not.
Argument(s):
No arguments are required.
Return value(s):
_listofsites -- list containing list of site results if variable contains data.
None -- if _listofsites is empty or not assigned.
Restriction(s):
This Method is tagged as a Property.
"""
if self._listofsites is None or len(self._listofsites) == 0:
return None
return self._listofsites
    def createOutputInfo(self, parser):
        """
        Checks parser information calls correct print methods based on parser requirements.
        Returns nothing.
        Argument(s):
        parser -- Parser object storing program input parameters used when program was run.
        Return value(s):
        Nothing is returned from this Method.
        Restriction(s):
        The Method has no restrictions.
        """
        # Screen output always happens; file outputs are opt-in per flag.
        self.PrintToScreen(parser.hasBotOut())
        if parser.hasCEFOutFile():
            self.PrintToCEFFile(parser.CEFOutFile)
        if parser.hasTextOutFile():
            self.PrintToTextFile(parser.TextOutFile)
        # NOTE(review): PrintToHTMLFile/PrintToCSVFile are defined elsewhere
        # in this class (outside this view) — confirm they exist.
        if parser.hasHTMLOutFile():
            self.PrintToHTMLFile(parser.HTMLOutFile)
        if parser.hasCSVOutSet():
            self.PrintToCSVFile(parser.CSVOutFile)
def PrintToScreen(self, printinbotformat):
"""
Calls correct function to ensure site information is printed to the user's standard output correctly.
Returns nothing.
Argument(s):
printinbotformat -- True or False argument representing minimized output. True if minimized requested.
Return value(s):
Nothing is returned from this Method.
Restriction(s):
The Method has no restrictions.
"""
if printinbotformat:
self.PrintToScreenBot()
else:
self.PrintToScreenNormal()
    def PrintToScreenBot(self):
        """
        Formats site information minimized and prints it to the user's standard output.
        Returns nothing.
        Argument(s):
        No arguments are required.
        Return value(s):
        Nothing is returned from this Method.
        Restriction(s):
        The Method has no restrictions.
        """
        # Results are grouped by target; `target` tracks the last header printed.
        sites = sorted(self.ListOfSites, key=attrgetter('Target'))
        target = ""
        if sites is not None:
            for site in sites:
                if not isinstance(site._regex, basestring):  # this is a multisite
                    for index in range(len(site.RegEx)):  # the regexs will ensure we have the exact number of lookups
                        siteimpprop = site.getImportantProperty(index)
                        if target != site.Target:
                            print "\n**_ Results found for: " + site.Target + " _**"
                            target = site.Target
                        # Check for them ALL to be None or 0 length
                        sourceurlhasnoreturn = True
                        for answer in siteimpprop:
                            if answer is not None:
                                if len(answer) > 0:
                                    sourceurlhasnoreturn = False
                        if sourceurlhasnoreturn:
                            print '[+] ' + site.SourceURL + ' No results found'
                            break
                        else:
                            if siteimpprop is None or len(siteimpprop) == 0:
                                print "No results in the " + site.FriendlyName[index] + " category"
                            else:
                                if siteimpprop[index] is None or len(siteimpprop[index]) == 0:
                                    print site.ReportStringForResult[index] + ' No results found'
                                else:
                                    # laststring suppresses consecutive duplicate lines;
                                    # URLs are defanged (www[.] / hxxp) before printing.
                                    laststring = ""
                                    # if it's just a string we don't want it output like a list
                                    if isinstance(siteimpprop[index], basestring):
                                        if "" + site.ReportStringForResult[index] + " " + str(siteimpprop) != laststring:
                                            print "" + site.ReportStringForResult[index] + " " + str(siteimpprop).replace('www.', 'www[.]').replace('http', 'hxxp')
                                            laststring = "" + site.ReportStringForResult[index] + " " + str(siteimpprop)
                                    # must be a list since it failed the isinstance check on string
                                    else:
                                        laststring = ""
                                        for siteresult in siteimpprop[index]:
                                            if "" + site.ReportStringForResult[index] + " " + str(siteresult) != laststring:
                                                print "" + site.ReportStringForResult[index] + " " + str(siteresult).replace('www.', 'www[.]').replace('http', 'hxxp')
                                                laststring = "" + site.ReportStringForResult[index] + " " + str(siteresult)
                else:  # this is a singlesite
                    siteimpprop = site.getImportantProperty(0)
                    if target != site.Target:
                        print "\n**_ Results found for: " + site.Target + " _**"
                        target = site.Target
                    if siteimpprop is None or len(siteimpprop) == 0:
                        print '[+] ' + site.FriendlyName + ' No results found'
                    else:
                        laststring = ""
                        # if it's just a string we don't want it output like a list
                        if isinstance(siteimpprop, basestring):
                            if "" + site.ReportStringForResult + " " + str(siteimpprop) != laststring:
                                print "" + site.ReportStringForResult + " " + str(siteimpprop).replace('www.', 'www[.]').replace('http', 'hxxp')
                                laststring = "" + site.ReportStringForResult + " " + str(siteimpprop)
                        # must be a list since it failed the isinstance check on string
                        else:
                            laststring = ""
                            for siteresult in siteimpprop:
                                if "" + site.ReportStringForResult + " " + str(siteresult) != laststring:
                                    print "" + site.ReportStringForResult + " " + str(siteresult).replace('www.', 'www[.]').replace('http', 'hxxp')
                                    laststring = "" + site.ReportStringForResult + " " + str(siteresult)
        else:
            pass
    def PrintToScreenNormal(self):
        """
        Formats site information correctly and prints it to the user's standard output.
        Returns nothing.
        Argument(s):
        No arguments are required.
        Return value(s):
        Nothing is returned from this Method.
        Restriction(s):
        The Method has no restrictions.
        """
        # Results are grouped by target; `target` tracks the last header printed.
        sites = sorted(self.ListOfSites, key=attrgetter('Target'))
        target = ""
        if sites is not None:
            for site in sites:
                if not isinstance(site._regex, basestring):  # this is a multisite
                    for index in range(len(site.RegEx)):  # the regexs will ensure we have the exact number of lookups
                        siteimpprop = site.getImportantProperty(index)
                        if target != site.Target:
                            print "\n____________________ Results found for: " + site.Target + " ____________________"
                            target = site.Target
                        if siteimpprop is None or len(siteimpprop) == 0:
                            print "No results in the " + site.FriendlyName[index] + " category"
                        else:
                            if siteimpprop[index] is None or len(siteimpprop[index]) == 0:
                                print site.ReportStringForResult[index] + ' No results found'
                            else:
                                # laststring suppresses consecutive duplicate lines;
                                # URLs are defanged (www[.] / hxxp) before printing.
                                laststring = ""
                                # if it's just a string we don't want it output like a list
                                if isinstance(siteimpprop[index], basestring):
                                    if "" + site.ReportStringForResult[index] + " " + str(siteimpprop) != laststring:
                                        print "" + site.ReportStringForResult[index] + " " + str(siteimpprop).replace('www.', 'www[.]').replace('http', 'hxxp')
                                        laststring = "" + site.ReportStringForResult[index] + " " + str(siteimpprop)
                                # must be a list since it failed the isinstance check on string
                                else:
                                    laststring = ""
                                    for siteresult in siteimpprop[index]:
                                        if "" + site.ReportStringForResult[index] + " " + str(siteresult) != laststring:
                                            print "" + site.ReportStringForResult[index] + " " + str(siteresult).replace('www.', 'www[.]').replace('http', 'hxxp')
                                            laststring = "" + site.ReportStringForResult[index] + " " + str(siteresult)
                else:  # this is a singlesite
                    siteimpprop = site.getImportantProperty(0)
                    if target != site.Target:
                        print "\n____________________ Results found for: " + site.Target + " ____________________"
                        target = site.Target
                    if siteimpprop is None or len(siteimpprop) == 0:
                        print "No results found in the " + site.FriendlyName
                    else:
                        laststring = ""
                        # if it's just a string we don't want it output like a list
                        if isinstance(siteimpprop, basestring):
                            if "" + site.ReportStringForResult + " " + str(siteimpprop) != laststring:
                                print "" + site.ReportStringForResult + " " + str(siteimpprop).replace('www.', 'www[.]').replace('http', 'hxxp')
                                laststring = "" + site.ReportStringForResult + " " + str(siteimpprop)
                        # must be a list since it failed the isinstance check on string
                        else:
                            laststring = ""
                            for siteresult in siteimpprop:
                                if "" + site.ReportStringForResult + " " + str(siteresult) != laststring:
                                    print "" + site.ReportStringForResult + " " + str(siteresult).replace('www.', 'www[.]').replace('http', 'hxxp')
                                    laststring = "" + site.ReportStringForResult + " " + str(siteresult)
        else:
            pass
    def PrintToCEFFile(self, cefoutfile):
        """
        Formats site information correctly and prints it to an output file in CEF format.
        CEF format specification from http://mita-tac.wikispaces.com/file/view/CEF+White+Paper+071709.pdf
        "Jan 18 11:07:53 host message"
        where message:
        "CEF:Version|Device Vendor|Device Product|Device Version|Signature ID|Name|Severity|Extension"
        Returns nothing.
        Argument(s):
        cefoutfile -- A string representation of a file that will store the output.
        Return value(s):
        Nothing is returned from this Method.
        Restriction(s):
        The Method has no restrictions.
        """
        sites = sorted(self.ListOfSites, key=attrgetter('Target'))
        # CEF prefix: "date hostname", prepended to every row.
        curr_date = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
        hostname = socket.gethostname()
        prefix = ' '.join([curr_date,hostname])
        # Fixed CEF header fields for every record emitted by Automater.
        cef_version = "CEF:Version1.1"
        cef_deviceVendor = "TekDefense"
        cef_deviceProduct = "Automater"
        cef_deviceVersion = "2.1"
        cef_SignatureID = "0"
        cef_Severity = "2"
        cef_Extension = " "
        cef_fields = [cef_version,cef_deviceVendor,cef_deviceProduct,cef_deviceVersion, \
                      cef_SignatureID, cef_Severity, cef_Extension]
        # Strips the leading "[+] " marker from report strings before output.
        pattern = "^\[\+\]\s+"
        target = ""
        print '\n[+] Generating CEF output: ' + cefoutfile
        f = open(cefoutfile, "wb")
        # Pipe-delimited dialect with backslash escaping, per the CEF spec.
        csv.register_dialect('escaped', delimiter='|', escapechar='\\', doublequote=False, quoting=csv.QUOTE_NONE)
        cefRW = csv.writer(f, 'escaped')
        # cefRW.writerow(['Target', 'Type', 'Source', 'Result'])
        if sites is not None:
            for site in sites:
                if not isinstance(site._regex,basestring): # this is a multisite:
                    for index in range(len(site.RegEx)): # the regexs will ensure we have the exact number of lookups
                        siteimpprop = site.getImportantProperty(index)
                        if siteimpprop is None or len(siteimpprop)==0:
                            tgt = site.Target
                            typ = site.TargetType
                            source = site.FriendlyName[index]
                            res = "No results found"
                            cefRW.writerow([prefix] + cef_fields[:5] + \
                                           ["["+",".join(["tgt="+tgt,"typ="+typ,"src="+source,"res="+res])+"] "] + \
                                           [1] + [tgt])
                        else:
                            if siteimpprop[index] is None or len(siteimpprop[index])==0:
                                tgt = site.Target
                                typ = site.TargetType
                                source = site.FriendlyName[index]
                                res = "No results found"
                                cefRW.writerow([prefix] + cef_fields[:5] + \
                                               ["["+",".join(["tgt="+tgt,"typ="+typ,"src="+source,"res="+res])+"] "] + \
                                               [1] + [tgt])
                            else:
                                laststring = ""
                                # if it's just a string we don't want it to output like a list
                                # NOTE(review): this checks siteimpprop rather than
                                # siteimpprop[index], unlike the screen printers — confirm
                                # whether that asymmetry is intentional.
                                if isinstance(siteimpprop, basestring):
                                    tgt = site.Target
                                    typ = site.TargetType
                                    source = site.FriendlyName
                                    res = siteimpprop
                                    if "" + tgt + typ + source + res != laststring:
                                        cefRW.writerow([prefix] + cef_fields[:5] + \
                                                       ["["+",".join(["tgt="+tgt,"typ="+typ,"src="+source,"res="+res])+"] " + \
                                                        re.sub(pattern,"",site.ReportStringForResult[index])+ str(siteimpprop)] + \
                                                       [cef_Severity] + [tgt])
                                        laststring = "" + tgt + typ + source + res
                                # must be a list since it failed the isinstance check on string
                                else:
                                    laststring = ""
                                    for siteresult in siteimpprop[index]:
                                        tgt = site.Target
                                        typ = site.TargetType
                                        source = site.FriendlyName[index]
                                        res = siteresult
                                        if "" + tgt + typ + source + str(res) != laststring:
                                            cefRW.writerow([prefix] + cef_fields[:5] + ["["+",".join(["tgt="+tgt,"typ="+typ,"src="+source,"res="+str(res)])+"] " + re.sub(pattern, "", site.ReportStringForResult[index]) + str(siteresult)] + [cef_Severity] + [tgt])
                                            laststring = "" + tgt + typ + source + str(res)
                else: # this is a singlesite
                    siteimpprop = site.getImportantProperty(0)
                    if siteimpprop is None or len(siteimpprop)==0:
                        tgt = site.Target
                        typ = site.TargetType
                        source = site.FriendlyName
                        res = "No results found"
                        cefRW.writerow([prefix] + cef_fields[:5] + \
                                       ["["+",".join(["tgt="+tgt,"typ="+typ,"src="+source,"res="+res])+"] "] + \
                                       [1] + [tgt])
                    else:
                        laststring = ""
                        # if it's just a string we don't want it output like a list
                        if isinstance(siteimpprop, basestring):
                            tgt = site.Target
                            typ = site.TargetType
                            source = site.FriendlyName
                            res = siteimpprop
                            if "" + tgt + typ + source + res != laststring:
                                cefRW.writerow([prefix] + cef_fields[:5] + \
                                               ["["+",".join(["tgt="+tgt,"typ="+typ,"src="+source,"res="+res])+"] " + \
                                                re.sub(pattern,"",site.ReportStringForResult)+ str(siteimpprop)] + \
                                               [cef_Severity] + [tgt])
                                laststring = "" + tgt + typ + source + res
                        else:
                            laststring = ""
                            for siteresult in siteimpprop:
                                tgt = site.Target
                                typ = site.TargetType
                                source = site.FriendlyName
                                res = siteresult
                                if "" + tgt + typ + source + str(res) != laststring:
                                    cefRW.writerow([prefix] + cef_fields[:5] + \
                                                   ["["+",".join(["tgt="+tgt,"typ="+typ,"src="+source,"res="+str(res)])+"] " + \
                                                    re.sub(pattern,"",site.ReportStringForResult)+ str(siteimpprop)] + \
                                                   [cef_Severity] + [tgt])
                                    laststring = "" + tgt + typ + source + str(res)
        f.flush()
        f.close()
        print "" + cefoutfile + " Generated"
    def PrintToTextFile(self,textoutfile):
        """
        Formats site information correctly and prints it to an output file in text format.
        Results are grouped under a banner line per target; consecutive duplicate
        result lines are suppressed via the laststring comparison.
        Returns nothing.
        Argument(s):
        textoutfile -- A string representation of a file that will store the output.
        Return value(s):
        Nothing is returned from this Method.
        Restriction(s):
        The Method has no restrictions.
        """
        # Sort so that all results for the same target are contiguous in the report.
        sites = sorted(self.ListOfSites, key=attrgetter('Target'))
        target = ""
        print "\n[+] Generating text output: " + textoutfile
        f = open(textoutfile, "w")
        if sites is not None:
            for site in sites:
                if not isinstance(site._regex,basestring): #this is a multisite
                    for index in range(len(site.RegEx)): #the regexs will ensure we have the exact number of lookups
                        siteimpprop = site.getImportantProperty(index)
                        # Emit the target banner only when the target changes.
                        if target != site.Target:
                            f.write("\n____________________ Results found for: " + site.Target + " ____________________")
                            target = site.Target
                        if siteimpprop is None or len(siteimpprop)==0:
                            f.write("\nNo results in the " + site.FriendlyName[index] + " category")
                        else:
                            if siteimpprop[index] is None or len(siteimpprop[index]) == 0:
                                f.write('\n' + site.ReportStringForResult[index] + ' No results found')
                            else:
                                laststring = ""
                                #if it's just a string we don't want it to output like a list
                                if isinstance(siteimpprop[index], basestring):
                                    # NOTE(review): writes str(siteimpprop) (the whole property),
                                    # not siteimpprop[index] — confirm this asymmetry is intended.
                                    if "" + site.ReportStringForResult[index] + " " + str(siteimpprop) != laststring:
                                        f.write("\n" + site.ReportStringForResult[index] + " " + str(siteimpprop))
                                        laststring = "" + site.ReportStringForResult[index] + " " + str(siteimpprop)
                                #must be a list since it failed the isinstance check on string
                                else:
                                    laststring = ""
                                    for siteresult in siteimpprop[index]:
                                        # Skip a result identical to the one just written.
                                        if "" + site.ReportStringForResult[index] + " " + str(siteresult) != laststring:
                                            f.write("\n" + site.ReportStringForResult[index] + " " + str(siteresult))
                                            laststring = "" + site.ReportStringForResult[index] + " " + str(siteresult)
                else:#this is a singlesite
                    siteimpprop = site.getImportantProperty(0)
                    if target != site.Target:
                        f.write("\n____________________ Results found for: " + site.Target + " ____________________")
                        target = site.Target
                    if siteimpprop is None or len(siteimpprop)==0:
                        f.write("\nNo results found in the " + site.FriendlyName)
                    else:
                        laststring = ""
                        #if it's just a string we don't want it output like a list
                        if isinstance(siteimpprop, basestring):
                            if "" + site.ReportStringForResult + " " + str(siteimpprop) != laststring:
                                f.write("\n" + site.ReportStringForResult + " " + str(siteimpprop))
                                laststring = "" + site.ReportStringForResult + " " + str(siteimpprop)
                        else:
                            laststring = ""
                            for siteresult in siteimpprop:
                                if "" + site.ReportStringForResult + " " + str(siteresult) != laststring:
                                    f.write("\n" + site.ReportStringForResult + " " + str(siteresult))
                                    laststring = "" + site.ReportStringForResult + " " + str(siteresult)
        f.flush()
        f.close()
        print "" + textoutfile + " Generated"
def PrintToCSVFile(self,csvoutfile):
"""
Formats site information correctly and prints it to an output file with comma-seperators.
Returns nothing.
Argument(s):
csvoutfile -- A string representation of a file that will store the output.
Return value(s):
Nothing is returned from this Method.
Restriction(s):
The Method has no restrictions.
"""
sites = sorted(self.ListOfSites, key=attrgetter('Target'))
target = ""
print '\n[+] Generating CSV output: ' + csvoutfile
f = open(csvoutfile, "wb")
csvRW = csv.writer(f, quoting=csv.QUOTE_ALL)
csvRW.writerow(['Target', 'Type', 'Source', 'Result'])
if sites is not None:
for site in sites:
if not isinstance(site._regex,basestring): #this is a multisite:
for index in range(len(site.RegEx)): #the regexs will ensure we have the exact number of lookups
siteimpprop = site.getImportantProperty(index)
if siteimpprop is None or len(siteimpprop)==0:
tgt = site.Target
typ = site.TargetType
source = site.FriendlyName[index]
res = "No results found"
csvRW.writerow([tgt,typ,source,res])
else:
if siteimpprop[index] is None or len(siteimpprop[index])==0:
tgt = site.Target
typ = site.TargetType
source = site.FriendlyName[index]
res = "No results found"
csvRW.writerow([tgt,typ,source,res])
else:
laststring = ""
#if it's just a string we don't want it to output like a list
if isinstance(siteimpprop, basestring):
tgt = site.Target
typ = site.TargetType
source = site.FriendlyName
res = siteimpprop
if "" + tgt + typ + source + res != laststring:
csvRW.writerow([tgt,typ,source,res])
laststring = "" + tgt + typ + source + res
#must be a list since it failed the isinstance check on string
else:
laststring = ""
for siteresult in siteimpprop[index]:
tgt = site.Target
typ = site.TargetType
source = site.FriendlyName[index]
res = siteresult
if "" + tgt + typ + source + str(res) != laststring:
csvRW.writerow([tgt,typ,source,res])
laststring = "" + tgt + typ + source + str(res)
else:#this is a singlesite
siteimpprop = site.getImportantProperty(0)
if siteimpprop is None or len(siteimpprop)==0:
tgt = site.Target
typ = site.TargetType
source = site.FriendlyName
res = "No results found"
csvRW.writerow([tgt,typ,source,res])
else:
laststring = ""
#if it's just a string we don't want it output like a list
if isinstance(siteimpprop, basestring):
tgt = site.Target
typ = site.TargetType
source = site.FriendlyName
res = siteimpprop
if "" + tgt + typ + source + res != laststring:
csvRW.writerow([tgt,typ,source,res])
laststring = "" + tgt + typ + source + res
else:
laststring = ""
for siteresult in siteimpprop:
tgt = site.Target
typ = site.TargetType
source = site.FriendlyName
res = siteresult
if "" + tgt + typ + source + str(res) != laststring:
csvRW.writerow([tgt,typ,source,res])
laststring = "" + tgt + typ + source + str(res)
f.flush()
f.close()
print "" + csvoutfile + " Generated"
def PrintToHTMLFile(self, htmloutfile):
"""
Formats site information correctly and prints it to an output file using HTML markup.
Returns nothing.
Argument(s):
htmloutfile -- A string representation of a file that will store the output.
Return value(s):
Nothing is returned from this Method.
Restriction(s):
The Method has no restrictions.
"""
sites = sorted(self.ListOfSites, key=attrgetter('Target'))
target = ""
print '\n[+] Generating HTML output: ' + htmloutfile
f = open(htmloutfile, "w")
f.write(self.getHTMLOpening())
if sites is not None:
for site in sites:
if not isinstance(site._regex,basestring): #this is a multisite:
for index in range(len(site.RegEx)): #the regexs will ensure we have the exact number of lookups
siteimpprop = site.getImportantProperty(index)
if siteimpprop is None or len(siteimpprop)==0:
tgt = site.Target
typ = site.TargetType
source = site.FriendlyName[index]
res = "No results found"
tableData = '<tr><td>' + tgt + '</td><td>' + typ + '</td><td>' + source + '</td><td>' + str(res) + '</td></tr>'
f.write(tableData)
else:
if siteimpprop[index] is None or len(siteimpprop[index])==0:
tgt = site.Target
typ = site.TargetType
source = site.FriendlyName[index]
res = "No results found"
tableData = '<tr><td>' + tgt + '</td><td>' + typ + '</td><td>' + source + '</td><td>' + str(res) + '</td></tr>'
f.write(tableData)
else:
# if it's just a string we don't want it to output like a list
if isinstance(siteimpprop, basestring):
tgt = site.Target
typ = site.TargetType
source = site.FriendlyName
res = siteimpprop
tableData = '<tr><td>' + tgt + '</td><td>' + typ + '</td><td>' + source + '</td><td>' + str(res) + '</td></tr>'
f.write(tableData)
else:
for siteresult in siteimpprop[index]:
tgt = site.Target
typ = site.TargetType
source = site.FriendlyName[index]
res = siteresult
tableData = '<tr><td>' + tgt + '</td><td>' + typ + '</td><td>' + source + '</td><td>' + str(res) + '</td></tr>'
f.write(tableData)
else: # this is a singlesite
siteimpprop = site.getImportantProperty(0)
if siteimpprop is None or len(siteimpprop)==0:
tgt = site.Target
typ = site.TargetType
source = site.FriendlyName
res = "No results found"
tableData = '<tr><td>' + tgt + '</td><td>' + typ + '</td><td>' + source + '</td><td>' + str(res) + '</td></tr>'
f.write(tableData)
else:
# if it's just a string we don't want it output like a list
if isinstance(siteimpprop, basestring):
tgt = site.Target
typ = site.TargetType
source = site.FriendlyName
res = siteimpprop
tableData = '<tr><td>' + tgt + '</td><td>' + typ + '</td><td>' + source + '</td><td>' + str(res) + '</td></tr>'
f.write(tableData)
else:
for siteresult in siteimpprop:
tgt = site.Target
typ = site.TargetType
source = site.FriendlyName
res = siteresult
tableData = '<tr><td>' + tgt + '</td><td>' + typ + '</td><td>' + source + '</td><td>' + str(res) + '</td></tr>'
f.write(tableData)
f.write(self.getHTMLClosing())
f.flush()
f.close()
print "" + htmloutfile + " Generated"
@classmethod
def PrintStandardOutput(cls, strout, *args, **kwargs):
if 'verbose' in kwargs.keys():
if kwargs['verbose'] is True:
print strout
else:
return
else:
print strout
def getHTMLOpening(self):
"""
Creates HTML markup to provide correct formatting for initial HTML file requirements.
Returns string that contains opening HTML markup information for HTML output file.
Argument(s):
No arguments required.
Return value(s):
string.
Restriction(s):
The Method has no restrictions.
"""
return '''<style type="text/css">
#table-3 {
border: 1px solid #DFDFDF;
background-color: #F9F9F9;
width: 100%;
-moz-border-radius: 3px;
-webkit-border-radius: 3px;
border-radius: 3px;
font-family: Arial,"Bitstream Vera Sans",Helvetica,Verdana,sans-serif;
color: #333;
}
#table-3 td, #table-3 th {
border-top-color: white;
border-bottom: 1px solid #DFDFDF;
color: #555;
}
#table-3 th {
text-shadow: rgba(255, 255, 255, 0.796875) 0px 1px 0px;
font-family: Georgia,"Times New Roman","Bitstream Charter",Times,serif;
font-weight: normal;
padding: 7px 7px 8px;
text-align: left;
line-height: 1.3em;
font-size: 14px;
}
#table-3 td {
font-size: 12px;
padding: 4px 7px 2px;
vertical-align: top;
}res
h1 {
text-shadow: rgba(255, 255, 255, 0.796875) 0px 1px 0px;
font-family: Georgia,"Times New Roman","Bitstream Charter",Times,serif;
font-weight: normal;
padding: 7px 7px 8px;
text-align: Center;
line-height: 1.3em;
font-size: 40px;
}
h2 {
text-shadow: rgba(255, 255, 255, 0.796875) 0px 1px 0px;
font-family: Georgia,"Times New Roman","Bitstream Charter",Times,serif;
font-weight: normal;
padding: 7px 7px 8px;
text-align: left;
line-height: 1.3em;
font-size: 16px;
}
h4 {
text-shadow: rgba(255, 255, 255, 0.796875) 0px 1px 0px;
font-family: Georgia,"Times New Roman","Bitstream Charter",Times,serif;
font-weight: normal;
padding: 7px 7px 8px;
text-align: left;
line-height: 1.3em;
font-size: 10px;
}
</style>
<html>
<body>
<title> Automater Results </title>
<h1> Automater Results </h1>
<table id="table-3">
<tr>
<th>Target</th>
<th>Type</th>
<th>Source</th>
<th>Result</th>
</tr>
'''
    def getHTMLClosing(self):
        """
        Creates HTML markup to provide correct formatting for closing HTML file requirements.
        Closes the results table opened by getHTMLOpening() and appends the
        attribution footer before </body></html>.
        Returns string that contains closing HTML markup information for HTML output file.
        Argument(s):
        No arguments required.
        Return value(s):
        string.
        Restriction(s):
        The Method has no restrictions.
        """
        return '''
</table>
<br>
<br>
<p>Created using Automater.py by @TekDefense <a href="http://www.tekdefense.com">http://www.tekdefense.com</a>; <a href="https://github.com/1aN0rmus/TekDefense">https://github.com/1aN0rmus/TekDefense</a></p>
</body>
</html>
'''
| CSIRTUK/TekDefense-Automater | outputs.py | Python | mit | 39,375 |
#!/usr/bin/env python
import argparse
import getpass
import sys
import csv
from cassandra.auth import PlainTextAuthProvider
from cassandra.cqlengine import connection
from ddb import configuration
import utils
from coveragestore import SampleCoverage
from collections import defaultdict
def get_target_amplicons(filename):
    """Return the amplicon names (4th BED column) listed in *filename*.

    The file is read as tab-delimited; one name is collected per row.
    """
    sys.stdout.write("Opening file {} to retrieve reporting amplicons\n".format(filename))
    with open(filename, "r") as bedfile:
        # Column 3 of a BED file holds the feature (amplicon) name.
        return [row[3] for row in csv.reader(bedfile, dialect='excel-tab')]
if __name__ == "__main__":
    # Command-line interface: read sample/runtime config, connect to the
    # Cassandra-backed variantstore, and emit one coverage report per sample.
    parser = argparse.ArgumentParser()
    parser.add_argument('-s', '--samples_file', help="Input configuration file for samples")
    parser.add_argument('-c', '--configuration', help="Configuration file for various settings")
    parser.add_argument('-r', '--report', help="Root name for reports (per sample)")
    parser.add_argument('-a', '--address', help="IP Address for Cassandra connection", default='127.0.0.1')
    parser.add_argument('-u', '--username', help='Cassandra username for login', default=None)
    args = parser.parse_args()
    sys.stdout.write("Parsing configuration data\n")
    config = configuration.configure_runtime(args.configuration)
    sys.stdout.write("Parsing sample data\n")
    samples = configuration.configure_samples(args.samples_file, config)
    # Prompt for the password only when a username was supplied.
    if args.username:
        password = getpass.getpass()
        auth_provider = PlainTextAuthProvider(username=args.username, password=password)
        connection.setup([args.address], "variantstore", auth_provider=auth_provider)
    else:
        connection.setup([args.address], "variantstore")
    sys.stdout.write("Processing samples\n")
    for sample in samples:
        sys.stdout.write("Processing coverage for sample {}\n".format(sample))
        # Panel BED file location is fixed by site convention.
        report_panel_path = "/mnt/shared-data/ddb-configs/disease_panels/{}/{}".format(samples[sample]['panel'],
                                                                                      samples[sample]['report'])
        target_amplicons = get_target_amplicons(report_panel_path)
        reportable_amplicons = list()
        for amplicon in target_amplicons:
            # One sambamba coverage record set per (sample, amplicon, run, library).
            coverage_data = SampleCoverage.objects.timeout(None).filter(
                SampleCoverage.sample == samples[sample]['sample_name'],
                SampleCoverage.amplicon == amplicon,
                SampleCoverage.run_id == samples[sample]['run_id'],
                SampleCoverage.library_name == samples[sample]['library_name'],
                SampleCoverage.program_name == "sambamba"
            )
            # limit(count + 1000) forces retrieval of every matching row.
            ordered_variants = coverage_data.order_by('amplicon', 'run_id').limit(coverage_data.count() + 1000)
            for variant in ordered_variants:
                reportable_amplicons.append(variant)
        # Tab-delimited per-sample report.
        with open("{}_{}.txt".format(sample, args.report), "w") as coverage_report:
            coverage_report.write("Sample\tLibrary\tAmplicon\tNum Reads\tCoverage\n")
            for amplicon in reportable_amplicons:
                coverage_report.write("{}\t{}\t{}\t{}\t{}\n".format(amplicon.sample,
                                                                    amplicon.library_name,
                                                                    amplicon.amplicon,
                                                                    amplicon.num_reads,
                                                                    amplicon.mean_coverage))
| dgaston/ddbio-variantstore | Misc_and_Old/create_sample_coverage_reports.py | Python | mit | 3,582 |
# -*- coding: utf-8 -*-
"""Parse Archimate XML Exchange File Format into a MongoDB DB""" | RafaAguilar/archi2mongodb | archimate2mongodb/pkg/utils/__init__.py | Python | mit | 88 |
from django.core.management.base import NoArgsCommand
from django.conf import settings
class Command(NoArgsCommand):
    """Django management command that deletes CompSlide records whose image
    file no longer exists under MEDIA_ROOT."""
    help = "Removes CompSlides from the database that do not have matching files on the drive."
    def handle_noargs(self, **options):
        # Imported here (not at module level) so the command can be discovered
        # without loading app models/os.path at import time.
        from stager.staging.models import CompSlide
        import os.path
        slides = CompSlide.objects.all()
        for slide in slides:
            # slide.image stores a path relative to MEDIA_ROOT.
            if not os.path.exists(settings.MEDIA_ROOT+"/"+str(slide.image)):
                print str(slide.image), "deleted"
                slide.delete()
| broderboy/ai-stager | stager/staging/management/commands/cleanupslides.py | Python | mit | 596 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# file include_bib.py
# This file is part of LyX, the document processor.
# Licence details can be found in the file COPYING.
# authors Richard Heck and [SchAirport]
# Full author contact details are available in file CREDITS
# This script is intended to include a BibTeX-generated biblography
# in a LaTeX file, as publishers often want. It can be run manually
# on an exported LaTeX file, though it needs to be compiled first,
# so the bbl file will exist.
#
# It should also be possible to create a LyX converter to run this
# automatically. To set it up, create a format "ltxbbl"; make sure to
# check it as a document format. Then create a LaTeX-->ltxbbl converter,
# with the command:
# python -tt $$s/scripts/include_bib.py $$i $$o
# and give it the flags:
# needaux,nice
# You'll then have it in the export menu.
#
# We do not activate this converter by default, because there are problems
# when one tries to use multiple bibliographies.
#
# Please report any problems on the devel list.
import sys, os
class secbib:
    """Line range of a sectioned bibliography; -1 marks an unset bound."""

    def __init__(self, start=-1, end=-1):
        """Record the start and end line indices of a bibliography section."""
        self.start = start
        self.end = end
class BibError(Exception):
    """Raised when the document's bibliography cannot be processed."""

    def __init__(self, msg):
        # Keep the message reachable both through str() and the .msg attribute.
        self.msg = msg

    def __str__(self):
        return self.msg
def InsertBib(fil, out):
    ''' Inserts the contents of the .bbl file instead of the bibliography in a new .tex file.

    fil -- path to the compiled .tex file (the matching .bbl must exist beside it).
    out -- path of the new .tex file to create.
    Returns out. Raises BibError when no bibliography, multiple
    bibliographies, or a sectioned (btSect) bibliography is found.
    '''
    # Use context managers so file handles are closed deterministically
    # (the original leaked the input and .bbl handles).
    with open(fil, 'r') as texfile:
        texlist = texfile.readlines()

    biblist = []
    for i, line in enumerate(texlist):
        if "\\bibliographystyle" in line:
            # Contains the "\bibliography" substring but is not the
            # \bibliography command itself, so it must be skipped first.
            continue
        elif "\\bibliography" in line:
            biblist.append(i)
        elif "\\begin{btSect}" in line:
            raise BibError("Cannot export sectioned bibliographies")
    if len(biblist) > 1:
        raise BibError("Cannot export multiple bibliographies.")
    if not biblist:
        raise BibError("No bibliography found!")

    bibpos = biblist[0]
    # The .bbl sits next to the .tex file (same basename).
    bblfile = fil[:-4] + ".bbl"
    with open(bblfile, 'r') as bbl:
        bbllist = bbl.readlines()

    # Splice the .bbl contents in place of the \bibliography line.
    newlist = texlist[0:bibpos] + bbllist + texlist[bibpos + 1:]
    with open(out, 'w') as outfile:
        outfile.write("".join(newlist))
    return out
def usage():
    # Print the command-line help text (Python 2 print statement; the raw
    # string keeps the \bibliography backslash literal).
    print r'''
Usage: python include_bib.py file.tex [outfile.tex]
Includes the contents of file.bbl, which must exist in the
same directory as file.tex, in place of the \bibliography
command, and creates the new file outfile.tex. If no name
for that file is given, we create: file-bbl.tex.
'''
if __name__ == "__main__":
    # Expect 1 or 2 positional arguments: input .tex and optional output name.
    args = len(sys.argv)
    if args <= 1 or args > 3:
        usage()
        sys.exit(0)
    # we might should make sure this is a tex file....
    infile = sys.argv[1]
    if infile[-4:] != ".tex":
        print "Error: " + infile + " is not a TeX file"
        usage()
        sys.exit(1)
    if args == 3:
        outfile = sys.argv[2]
    else:
        # Default output name: file.tex -> file-bbl.tex
        outfile = infile[:-4] + "-bbl.tex"
    newfile = InsertBib(infile, outfile)
    print "Wrote " + outfile
| hashinisenaratne/HSTML | lib/scripts/include_bib.py | Python | gpl-2.0 | 2,972 |
# -*- coding: UTF-8 -*-
"""
Copyright (C) 2015 tknorris
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import re
from lib import jsunpack
from urlresolver import common
from urlresolver.resolver import UrlResolver, ResolverError
class UsersFilesResolver(UrlResolver):
    """URL resolver plugin for videos hosted on usersfiles.com."""
    name = "UsersFiles"
    domains = ["usersfiles.com"]
    pattern = '(?://|\.)(usersfiles\.com)/(?:embed-)?([0-9a-zA-Z/]+)'

    def __init__(self):
        # Fetch pages with an IE user agent; some hosts serve different
        # markup to unknown agents.
        self.net = common.Net()
        self.net.set_user_agent(common.IE_USER_AGENT)
        self.headers = {'User-Agent': common.IE_USER_AGENT}

    def get_media_url(self, host, media_id):
        """Fetch the hosting page and extract the direct stream URL."""
        page = self.net.http_GET(self.get_url(host, media_id)).content
        packed = re.search('<script[^>]*>(eval.*?)</script>', page, re.DOTALL)
        if packed:
            # The stream URL is hidden inside a packed (eval) script.
            unpacked = jsunpack.unpack(packed.group(1))
            candidates = re.findall('<param\s+name="src"\s*value="([^"]+)', unpacked)
            candidates += re.findall('file\s*:\s*[\'|\"](.+?)[\'|\"]', unpacked)
            # Subtitle tracks are not playable streams.
            streams = [url for url in candidates if not url.endswith('.srt')]
            if streams:
                return streams[0]
        raise ResolverError('Unable to find userfiles video')

    def get_url(self, host, media_id):
        return 'http://usersfiles.com/%s' % media_id

    def get_host_and_id(self, url):
        match = re.search(self.pattern, url)
        return match.groups() if match else False

    def valid_url(self, url, host):
        return re.search(self.pattern, url) or self.name in host
| wndias/bc.repository | script.module.urlresolver/lib/urlresolver/plugins/usersfiles.py | Python | gpl-2.0 | 2,201 |
# -*- coding: utf-8 -*-
from openerp import api, fields, models, _
from openerp.osv import expression
from openerp.tools import float_is_zero
from openerp.tools import float_compare, float_round
from openerp.tools.misc import formatLang
from openerp.exceptions import UserError, ValidationError
import time
import math
class AccountCashboxLine(models.Model):
    """ Cash Box Details: one coin/bill denomination and how many were counted. """
    _name = 'account.cashbox.line'
    _description = 'CashBox Line'
    _rec_name = 'coin_value'
    _order = 'coin_value'
    @api.one
    @api.depends('coin_value', 'number')
    def _sub_total(self):
        """ Calculates Sub total (denomination value times count). """
        self.subtotal = self.coin_value * self.number
    # Face value of a single coin or bill.
    coin_value = fields.Float(string='Coin/Bill Value', required=True, digits=0)
    # How many units of this denomination were counted.
    number = fields.Integer(string='Number of Coins/Bills', help='Opening Unit Numbers')
    # coin_value * number, kept in sync by _sub_total.
    subtotal = fields.Float(compute='_sub_total', string='Subtotal', digits=0, readonly=True)
    cashbox_id = fields.Many2one('account.bank.statement.cashbox')
class AccountBankStmtCashWizard(models.Model):
    """
    Account Bank Statement popup that allows entering cash details.
    """
    _name = 'account.bank.statement.cashbox'
    _description = 'Account Bank Statement Cashbox Details'
    cashbox_lines_ids = fields.One2many('account.cashbox.line', 'cashbox_id', string='Cashbox Lines')
    @api.multi
    def validate(self):
        """Write the counted cashbox total onto the active bank statement.

        The 'balance' context key selects whether the total becomes the
        opening balance ('start') or the real closing balance (anything else).
        Returns the action closing the popup.
        """
        bnk_stmt_id = self.env.context.get('active_id', False)
        bnk_stmt = self.env['account.bank.statement'].browse(bnk_stmt_id)
        # Sum the per-denomination subtotals (idiomatic sum() instead of a
        # manual accumulation loop whose variable was misleadingly named
        # 'lines' for a single line).
        total = sum(line.subtotal for line in self.cashbox_lines_ids)
        if self.env.context.get('balance', False) == 'start':
            #starting balance
            bnk_stmt.write({'balance_start': total, 'cashbox_start_id': self.id})
        else:
            #closing balance
            bnk_stmt.write({'balance_end_real': total, 'cashbox_end_id': self.id})
        return {'type': 'ir.actions.act_window_close'}
class AccountBankStmtCloseCheck(models.TransientModel):
    """
    Account Bank Statement wizard that check that closing balance is correct.
    """
    _name = 'account.bank.statement.closebalance'
    _description = 'Account Bank Statement closing balance'
    @api.multi
    def validate(self):
        """Confirm the active bank statement, then close the wizard."""
        statement_id = self.env.context.get('active_id', False)
        if statement_id:
            self.env['account.bank.statement'].browse(statement_id).button_confirm_bank()
        return {'type': 'ir.actions.act_window_close'}
class AccountBankStatement(models.Model):
    @api.one
    @api.depends('line_ids', 'balance_start', 'line_ids.amount', 'balance_end_real')
    def _end_balance(self):
        """Compute the transactions subtotal, the theoretical ending balance
        and its difference with the user-entered real ending balance."""
        # Generator expression instead of building an intermediate list.
        self.total_entry_encoding = sum(line.amount for line in self.line_ids)
        self.balance_end = self.balance_start + self.total_entry_encoding
        self.difference = self.balance_end_real - self.balance_end
    @api.one
    @api.depends('journal_id')
    def _compute_currency(self):
        # Fall back to the company currency when the journal has no specific one.
        self.currency_id = self.journal_id.currency_id or self.env.user.company_id.currency_id
@api.one
@api.depends('line_ids.journal_entry_ids')
def _check_lines_reconciled(self):
self.all_lines_reconciled = all([line.journal_entry_ids.ids or line.account_id.id for line in self.line_ids])
@api.model
def _default_journal(self):
journal_type = self.env.context.get('journal_type', False)
company_id = self.env['res.company']._company_default_get('account.bank.statement').id
if journal_type:
journals = self.env['account.journal'].search([('type', '=', journal_type), ('company_id', '=', company_id)])
if journals:
return journals[0]
return False
@api.multi
def _set_opening_balance(self, journal_id):
last_bnk_stmt = self.search([('journal_id', '=', journal_id), ('state', '=', 'confirm')], order="date desc", limit=1)
for bank_stmt in self:
if last_bnk_stmt:
bank_stmt.balance_start = last_bnk_stmt.balance_end
else:
bank_stmt.balance_start = 0
    @api.model
    def _default_opening_balance(self):
        #Search last bank statement and set current opening balance as closing balance of previous one
        # NOTE(review): returns None (no explicit default value); it relies on
        # _set_opening_balance writing balance_start directly — confirm intended.
        journal_id = self._context.get('default_journal_id', False) or self._context.get('journal_id', False)
        if journal_id:
            self._set_opening_balance(journal_id)
_name = "account.bank.statement"
_description = "Bank Statement"
_order = "date desc, id desc"
_inherit = ['mail.thread']
name = fields.Char(string='Reference', states={'open': [('readonly', False)]}, copy=False, readonly=True)
date = fields.Date(required=True, states={'confirm': [('readonly', True)]}, select=True, copy=False, default=fields.Date.context_today)
date_done = fields.Datetime(string="Closed On")
balance_start = fields.Monetary(string='Starting Balance', states={'confirm': [('readonly', True)]}, default=_default_opening_balance)
balance_end_real = fields.Monetary('Ending Balance', states={'confirm': [('readonly', True)]})
state = fields.Selection([('open', 'New'), ('confirm', 'Validated')], string='Status', required=True, readonly=True, copy=False, default='open')
currency_id = fields.Many2one('res.currency', compute='_compute_currency', oldname='currency')
journal_id = fields.Many2one('account.journal', string='Journal', required=True, states={'confirm': [('readonly', True)]}, default=_default_journal)
journal_type = fields.Selection(related='journal_id.type', help="Technical field used for usability purposes")
company_id = fields.Many2one('res.company', related='journal_id.company_id', string='Company', store=True, readonly=True,
default=lambda self: self.env['res.company']._company_default_get('account.bank.statement'))
total_entry_encoding = fields.Monetary('Transactions Subtotal', compute='_end_balance', store=True, help="Total of transaction lines.")
balance_end = fields.Monetary('Computed Balance', compute='_end_balance', store=True, help='Balance as calculated based on Opening Balance and transaction lines')
difference = fields.Monetary(compute='_end_balance', store=True, help="Difference between the computed ending balance and the specified ending balance.")
line_ids = fields.One2many('account.bank.statement.line', 'statement_id', string='Statement lines', states={'confirm': [('readonly', True)]}, copy=True)
move_line_ids = fields.One2many('account.move.line', 'statement_id', string='Entry lines', states={'confirm': [('readonly', True)]})
all_lines_reconciled = fields.Boolean(compute='_check_lines_reconciled')
user_id = fields.Many2one('res.users', string='Responsible', required=False, default=lambda self: self.env.user)
cashbox_start_id = fields.Many2one('account.bank.statement.cashbox')
cashbox_end_id = fields.Many2one('account.bank.statement.cashbox')
    @api.onchange('journal_id')
    def onchange_journal_id(self):
        # Refresh the opening balance whenever the journal selection changes.
        self._set_opening_balance(self.journal_id.id)
    @api.multi
    def _balance_check(self):
        """Validate the ending balance of each statement.

        For cash journals, a non-zero difference is absorbed by creating a
        profit/loss statement line; for other journals it is an error.
        Returns True when every statement balances (possibly after adjustment).
        """
        for stmt in self:
            if not stmt.currency_id.is_zero(stmt.difference):
                if stmt.journal_type == 'cash':
                    # Pick the loss or profit account configured on the journal.
                    if stmt.difference < 0.0:
                        account = stmt.journal_id.loss_account_id
                        name = _('Loss')
                    else:
                        # statement.difference > 0.0
                        account = stmt.journal_id.profit_account_id
                        name = _('Profit')
                    if not account:
                        raise UserError(_('There is no account defined on the journal %s for %s involved in a cash difference.') % (stmt.journal_id.name, name))
                    # Absorb the difference with an adjustment statement line.
                    values = {
                        'statement_id': stmt.id,
                        'account_id': account.id,
                        'amount': stmt.difference,
                        'name': _("Cash difference observed during the counting (%s)") % name,
                    }
                    self.env['account.bank.statement.line'].create(values)
                else:
                    # Non-cash journals must balance exactly.
                    balance_end_real = formatLang(self.env, stmt.balance_end_real, currency_obj=stmt.currency_id)
                    balance_end = formatLang(self.env, stmt.balance_end, currency_obj=stmt.currency_id)
                    raise UserError(_('The ending balance is incorrect !\nThe expected balance (%s) is different from the computed one. (%s)')
                        % (balance_end_real, balance_end))
        return True
@api.model
def create(self, vals):
if not vals.get('name'):
journal_id = vals.get('journal_id', self._context.get('default_journal_id', False))
journal = self.env['account.journal'].browse(journal_id)
vals['name'] = journal.sequence_id.with_context(ir_sequence_date=vals.get('date')).next_by_id()
return super(AccountBankStatement, self).create(vals)
    @api.multi
    def unlink(self):
        """Delete statements, refusing any that are not in the open state."""
        for statement in self:
            if statement.state != 'open':
                raise UserError(_('In order to delete a bank statement, you must first cancel it to delete related journal items.'))
            # Explicitly unlink bank statement lines so it will check that the related journal entries have been deleted first
            statement.line_ids.unlink()
        return super(AccountBankStatement, self).unlink()
    @api.multi
    def open_cashbox_id(self):
        """Open the cash-control popup for the cashbox given in the context.

        Returns the window action when 'cashbox_id' is in the context;
        otherwise returns None (no action).
        """
        context = dict(self.env.context or {})
        if context.get('cashbox_id'):
            # The popup's validate() reads active_id to find this statement.
            context['active_id'] = self.id
            return {
                'name': _('Cash Control'),
                'view_type': 'form',
                'view_mode': 'form',
                'res_model': 'account.bank.statement.cashbox',
                'view_id': self.env.ref('account.view_account_bnk_stmt_cashbox').id,
                'type': 'ir.actions.act_window',
                'res_id': self.env.context.get('cashbox_id'),
                'context': context,
                'target': 'new'
            }
@api.multi
def button_cancel(self):
for statement in self:
if any(line.journal_entry_ids.ids for line in statement.line_ids):
raise UserError(_('A statement cannot be canceled when its lines are reconciled.'))
self.state = 'open'
    @api.multi
    def check_confirm_bank(self):
        """Confirm the statement, or open the closing-balance check wizard
        first when a cash journal still shows a difference."""
        if self.journal_type == 'cash' and not self.currency_id.is_zero(self.difference):
            action_rec = self.env['ir.model.data'].xmlid_to_object('account.action_view_account_bnk_stmt_check')
            if action_rec:
                action = action_rec.read([])[0]
                return action
        return self.button_confirm_bank()
    @api.multi
    def button_confirm_bank(self):
        """Validate open statements: check balances, create counterparts for
        lines with an account, post all journal entries and mark confirmed."""
        self._balance_check()
        statements = self.filtered(lambda r: r.state == 'open')
        for statement in statements:
            # Collect every journal entry linked to the statement's lines.
            moves = self.env['account.move']
            for st_line in statement.line_ids:
                if st_line.account_id and not st_line.journal_entry_ids.ids:
                    # A counterpart account was set manually: create the entry.
                    st_line.fast_counterpart_creation()
                elif not st_line.journal_entry_ids.ids:
                    raise UserError(_('All the account entries lines must be processed in order to close the statement.'))
                moves = (moves | st_line.journal_entry_ids)
            if moves:
                moves.post()
            statement.message_post(body=_('Statement %s confirmed, journal items were created.') % (statement.name,))
        statements.link_bank_to_partner()
        statements.write({'state': 'confirm', 'date_done': time.strftime("%Y-%m-%d %H:%M:%S")})
    @api.multi
    def button_journal_entries(self):
        """Return a tree-view action over the journal items of these statements."""
        context = dict(self._context or {})
        context['journal_id'] = self.journal_id.id
        return {
            'name': _('Journal Items'),
            'view_type': 'form',
            'view_mode': 'tree',
            'res_model': 'account.move.line',
            'view_id': False,
            'type': 'ir.actions.act_window',
            'domain': [('statement_id', 'in', self.ids)],
            'context': context,
        }
@api.multi
def button_open(self):
""" Changes statement state to Running."""
for statement in self:
if not statement.name:
context = {'ir_sequence_date', statement.date}
if statement.journal_id.sequence_id:
st_number = statement.journal_id.sequence_id.with_context(context).next_by_id()
else:
SequenceObj = self.env['ir.sequence']
st_number = SequenceObj.with_context(context).next_by_code('account.bank.statement')
statement.name = st_number
statement.state = 'open'
    @api.multi
    def reconciliation_widget_preprocess(self):
        """ Get statement lines of the specified statements or all unreconciled statement lines and try to automatically reconcile them / find them a partner.
            Return ids of statement lines left to reconcile and other data for the reconciliation widget.

            :return: dict with the ids of the lines left to reconcile, the user
                notifications about automatic reconciliations, the statement
                name (only when exactly one statement is processed) and the
                number of lines already reconciled.
        """
        statements = self
        bsl_obj = self.env['account.bank.statement.line']
        # NB : The field account_id can be used at the statement line creation/import to avoid the reconciliation process on it later on,
        # this is why we filter out statements lines where account_id is set
        st_lines_filter = [('journal_entry_ids', '=', False), ('account_id', '=', False)]
        if statements:
            # With no statement in self, ALL unreconciled lines are processed.
            st_lines_filter += [('statement_id', 'in', statements.ids)]
        # Try to automatically reconcile statement lines
        automatic_reconciliation_entries = []
        st_lines_left = self.env['account.bank.statement.line']
        for st_line in bsl_obj.search(st_lines_filter):
            res = st_line.auto_reconcile()
            if not res:
                st_lines_left = (st_lines_left | st_line)
            else:
                # res is the account.move recordset created by the reconciliation.
                automatic_reconciliation_entries.append(res.ids)
        # Try to set statement line's partner
        for st_line in st_lines_left:
            if st_line.name and not st_line.partner_id:
                # Guess the partner from a move line whose ref matches the memo.
                additional_domain = [('ref', '=', st_line.name)]
                match_recs = st_line.get_move_lines_for_reconciliation(limit=1, additional_domain=additional_domain, overlook_partner=True)
                if match_recs and match_recs[0].partner_id:
                    st_line.write({'partner_id': match_recs[0].partner_id.id})
        # Collect various informations for the reconciliation widget
        notifications = []
        num_auto_reconciled = len(automatic_reconciliation_entries)
        if num_auto_reconciled > 0:
            auto_reconciled_message = num_auto_reconciled > 1 \
                and _("%d transactions were automatically reconciled.") % num_auto_reconciled \
                or _("1 transaction was automatically reconciled.")
            notifications += [{
                'type': 'info',
                'message': auto_reconciled_message,
                'details': {
                    'name': _("Automatically reconciled items"),
                    'model': 'account.move',
                    'ids': automatic_reconciliation_entries
                }
            }]
        # De-duplicated ids of all lines of the processed statements.
        lines = []
        for el in statements:
            lines.extend(el.line_ids.ids)
        lines = list(set(lines))
        return {
            'st_lines_ids': st_lines_left.ids,
            'notifications': notifications,
            'statement_name': len(statements) == 1 and statements[0].name or False,
            'num_already_reconciled_lines': statements and bsl_obj.search_count([('journal_entry_ids', '!=', False), ('id', 'in', lines)]) or 0,
        }
@api.multi
def link_bank_to_partner(self):
for statement in self:
for st_line in statement.line_ids:
if st_line.bank_account_id and st_line.partner_id and st_line.bank_account_id.partner_id.id != st_line.partner_id.id:
bank_vals = st_line.bank_account_id.onchange_partner_id(st_line.partner_id.id)['value']
bank_vals.update({'partner_id': st_line.partner_id.id})
st_line.bank_account_id.write(bank_vals)
class AccountBankStatementLine(models.Model):
    """A single transaction of a bank statement, to be reconciled with
    existing journal items or counterparts created on the fly."""
    _name = "account.bank.statement.line"
    _description = "Bank Statement Line"
    _order = "statement_id desc, sequence"
    _inherit = ['ir.needaction_mixin']
    name = fields.Char(string='Memo', required=True)
    date = fields.Date(required=True, default=lambda self: self._context.get('date', fields.Date.context_today(self)))
    # Amount expressed in the statement (journal) currency.
    amount = fields.Monetary(digits=0, currency_field='journal_currency_id')
    journal_currency_id = fields.Many2one('res.currency', related='statement_id.currency_id',
        help='Utility field to express amount currency', readonly=True)
    partner_id = fields.Many2one('res.partner', string='Partner')
    bank_account_id = fields.Many2one('res.partner.bank', string='Bank Account')
    account_id = fields.Many2one('account.account', string='Counterpart Account', domain=[('deprecated', '=', False)],
        help="This technical field can be used at the statement line creation/import time in order to avoid the reconciliation"
             " process on it later on. The statement line will simply create a counterpart on this account")
    statement_id = fields.Many2one('account.bank.statement', string='Statement', index=True, required=True, ondelete='cascade')
    journal_id = fields.Many2one('account.journal', related='statement_id.journal_id', string='Journal', store=True, readonly=True)
    partner_name = fields.Char(help="This field is used to record the third party name when importing bank statement in electronic format,"
                                    " when the partner doesn't exist yet in the database (or cannot be found).")
    ref = fields.Char(string='Reference')
    note = fields.Text(string='Notes')
    sequence = fields.Integer(index=True, help="Gives the sequence order when displaying a list of bank statement lines.", default=1)
    company_id = fields.Many2one('res.company', related='statement_id.company_id', string='Company', store=True, readonly=True)
    # Moves created by / matched through the reconciliation; empty means "not reconciled yet".
    journal_entry_ids = fields.One2many('account.move', 'statement_line_id', 'Journal Entries', copy=False, readonly=True)
    amount_currency = fields.Monetary(help="The amount expressed in an optional other currency if it is a multi-currency entry.")
    currency_id = fields.Many2one('res.currency', string='Currency', help="The optional other currency if it is a multi-currency entry.")
@api.one
@api.constrains('amount')
def _check_amount(self):
# This constraint could possibly underline flaws in bank statement import (eg. inability to
# support hacks such as using dummy transactions to give additional informations)
if self.amount == 0:
raise ValidationError(_('A transaction can\'t have a 0 amount.'))
@api.one
@api.constrains('amount', 'amount_currency')
def _check_amount_currency(self):
if self.amount_currency != 0 and self.amount == 0:
raise ValidationError(_('If "Amount Currency" is specified, then "Amount" must be as well.'))
@api.multi
def unlink(self):
for line in self:
if line.journal_entry_ids.ids:
raise UserError(_('In order to delete a bank statement line, you must first cancel it to delete related journal items.'))
return super(AccountBankStatementLine, self).unlink()
@api.model
def _needaction_domain_get(self):
return [('journal_entry_ids', '=', False), ('account_id', '=', False)]
@api.multi
def button_cancel_reconciliation(self):
# TOCKECK : might not behave as expected in case of reconciliations (match statement line with already
# registered payment) or partial reconciliations : it will completely remove the existing payment.
move_recs = self.env['account.move']
for st_line in self:
move_recs = (move_recs | st_line.journal_entry_ids)
if move_recs:
for move in move_recs:
move.line_ids.remove_move_reconcile()
move_recs.write({'statement_line_id': False})
move_recs.button_cancel()
move_recs.unlink()
####################################################
# Reconciliation interface methods
####################################################
@api.multi
def get_data_for_reconciliation_widget(self, excluded_ids=None):
""" Returns the data required to display a reconciliation widget, for each statement line in self """
excluded_ids = excluded_ids or []
ret = []
for st_line in self:
aml_recs = st_line.get_reconciliation_proposition(excluded_ids=excluded_ids)
target_currency = st_line.currency_id or st_line.journal_id.currency_id or st_line.journal_id.company_id.currency_id
rp = aml_recs.prepare_move_lines_for_reconciliation_widget(target_currency=target_currency, target_date=st_line.date)
excluded_ids += [move_line['id'] for move_line in rp]
ret.append({
'st_line': st_line.get_statement_line_for_reconciliation_widget(),
'reconciliation_proposition': rp
})
return ret
    def get_statement_line_for_reconciliation_widget(self):
        """ Returns the data required by the bank statement reconciliation widget to display a statement line """
        statement_currency = self.journal_id.currency_id or self.journal_id.company_id.currency_id
        if self.amount_currency and self.currency_id:
            # Multi-currency line: the widget displays amount_currency (the
            # transaction currency value) and shows the statement-currency
            # amount as a pre-formatted string.
            amount = self.amount_currency
            amount_currency = self.amount
            amount_currency_str = amount_currency > 0 and amount_currency or -amount_currency
            amount_currency_str = formatLang(self.env, amount_currency_str, currency_obj=statement_currency)
        else:
            amount = self.amount
            amount_currency_str = ""
        amount_str = formatLang(self.env, abs(amount), currency_obj=self.currency_id or statement_currency)
        data = {
            'id': self.id,
            'ref': self.ref,
            'note': self.note or "",
            'name': self.name,
            'date': self.date,
            'amount': amount,
            'amount_str': amount_str, # Amount in the statement line currency
            'currency_id': self.currency_id.id or statement_currency.id,
            'partner_id': self.partner_id.id,
            'journal_id': self.journal_id.id,
            'statement_id': self.statement_id.id,
            'account_code': self.journal_id.default_debit_account_id.code,
            'account_name': self.journal_id.default_debit_account_id.name,
            'partner_name': self.partner_id.name,
            'communication_partner_name': self.partner_name,
            'amount_currency_str': amount_currency_str, # Amount in the statement currency
            'has_no_partner': not self.partner_id.id,
        }
        if self.partner_id:
            # The open-balance (write-off) account defaults to the partner's
            # receivable account for inbound amounts, payable otherwise.
            if amount > 0:
                data['open_balance_account_id'] = self.partner_id.property_account_receivable_id.id
            else:
                data['open_balance_account_id'] = self.partner_id.property_account_payable_id.id
        return data
@api.multi
def get_move_lines_for_reconciliation_widget(self, excluded_ids=None, str=False, offset=0, limit=None):
""" Returns move lines for the bank statement reconciliation widget, formatted as a list of dicts
"""
aml_recs = self.get_move_lines_for_reconciliation(excluded_ids=excluded_ids, str=str, offset=offset, limit=limit)
target_currency = self.currency_id or self.journal_id.currency_id or self.journal_id.company_id.currency_id
return aml_recs.prepare_move_lines_for_reconciliation_widget(target_currency=target_currency, target_date=self.date)
####################################################
# Reconciliation methods
####################################################
    def get_move_lines_for_reconciliation(self, excluded_ids=None, str=False, offset=0, limit=None, additional_domain=None, overlook_partner=False):
        """ Return account.move.line records which can be used for bank statement reconciliation.

            :param excluded_ids: move line ids that must not be proposed
            :param str: search string typed by the user in the widget
                (NB: shadows the ``str`` builtin; kept for API compatibility)
            :param offset: search offset, for widget pagination
            :param limit: maximum number of move lines returned
            :param additional_domain: extra domain restricting the candidates
            :param overlook_partner: when True, also consider
                payable/receivable lines regardless of the line's partner
        """
        # Domain to fetch registered payments (use case where you encode the payment before you get the bank statement)
        reconciliation_aml_accounts = [self.journal_id.default_credit_account_id.id, self.journal_id.default_debit_account_id.id]
        domain_reconciliation = ['&', ('statement_id', '=', False), ('account_id', 'in', reconciliation_aml_accounts)]
        # Domain to fetch unreconciled payables/receivables (use case where you close invoices/refunds by reconciling your bank statements)
        domain_matching = [('reconciled', '=', False)]
        if self.partner_id.id or overlook_partner:
            domain_matching = expression.AND([domain_matching, [('account_id.internal_type', 'in', ['payable', 'receivable'])]])
        else:
            # TODO : find out what use case this permits (match a check payment, registered on a journal whose account type is other instead of liquidity)
            domain_matching = expression.AND([domain_matching, [('account_id.reconcile', '=', True)]])
        # Let's add what applies to both
        domain = expression.OR([domain_reconciliation, domain_matching])
        if self.partner_id.id and not overlook_partner:
            domain = expression.AND([domain, [('partner_id', '=', self.partner_id.id)]])
        # Domain factorized for all reconciliation use cases
        ctx = dict(self._context or {})
        ctx['bank_statement_line'] = self
        generic_domain = self.env['account.move.line'].with_context(ctx).domain_move_lines_for_reconciliation(excluded_ids=excluded_ids, str=str)
        domain = expression.AND([domain, generic_domain])
        # Domain from caller
        if additional_domain is None:
            additional_domain = []
        else:
            additional_domain = expression.normalize_domain(additional_domain)
        domain = expression.AND([domain, additional_domain])
        return self.env['account.move.line'].search(domain, offset=offset, limit=limit, order="date_maturity asc, id asc")
    def _get_domain_maker_move_line_amount(self):
        """ Returns a function that can create the appropriate domain to search on move.line amount based on statement.line currency/amount """
        company_currency = self.journal_id.company_id.currency_id
        st_line_currency = self.currency_id or self.journal_id.currency_id
        # When the line uses a foreign currency, compare against
        # amount_residual_currency; otherwise against amount_residual.
        currency = (st_line_currency and st_line_currency != company_currency) and st_line_currency.id or False
        field = currency and 'amount_residual_currency' or 'amount_residual'
        precision = st_line_currency and st_line_currency.decimal_places or company_currency.decimal_places
        # Default arguments capture the values above so the closure stays
        # valid regardless of when it is called.
        def ret(comparator, amount, p=precision, f=field, c=currency):
            if comparator == '<':
                # "Strictly between 0 and amount", on the same side of zero.
                if amount < 0:
                    domain = [(f, '<', 0), (f, '>', amount)]
                else:
                    domain = [(f, '>', 0), (f, '<', amount)]
            elif comparator == '=':
                domain = [(f, '=', float_round(amount, precision_digits=p))]
            else:
                raise UserError(_("Programmation error : domain_maker_move_line_amount requires comparator '=' or '<'"))
            domain += [('currency_id', '=', c)]
            return domain
        return ret
    def get_reconciliation_proposition(self, excluded_ids=None):
        """ Returns move lines that constitute the best guess to reconcile a statement line
            Note: it only looks for move lines in the same currency as the statement line.

            Strategy, in order: (1) unique match on the memo/ref, (2) single
            move line with the exact amount, (3) greedy accumulation of the
            partner's smaller lines until the statement amount is covered.
        """
        # Look for structured communication match
        if self.name:
            overlook_partner = not self.partner_id # If the transaction has no partner, look for match in payable and receivable account anyway
            domain = [('ref', '=', self.name)]
            # limit=2 so we can tell a unique match from an ambiguous one.
            match_recs = self.get_move_lines_for_reconciliation(excluded_ids=excluded_ids, limit=2, additional_domain=domain, overlook_partner=overlook_partner)
            if match_recs and len(match_recs) == 1:
                return match_recs
        # How to compare statement line amount and move lines amount
        amount_domain_maker = self._get_domain_maker_move_line_amount()
        amount = self.amount_currency or self.amount
        # Look for a single move line with the same amount
        match_recs = self.get_move_lines_for_reconciliation(excluded_ids=excluded_ids, limit=1, additional_domain=amount_domain_maker('=', amount))
        if match_recs:
            return match_recs
        if not self.partner_id:
            return self.env['account.move.line']
        # Select move lines until their total amount is greater than the statement line amount
        domain = [('reconciled', '=', False)]
        domain += [('account_id.user_type_id.type', '=', amount > 0 and 'receivable' or 'payable')]  # Make sure we can't mix receivable and payable
        domain += amount_domain_maker('<', amount)  # Will also enforce > 0
        mv_lines = self.get_move_lines_for_reconciliation(excluded_ids=excluded_ids, limit=5, additional_domain=domain)
        st_line_currency = self.currency_id or self.journal_id.currency_id or self.journal_id.company_id.currency_id
        ret = self.env['account.move.line']
        total = 0
        for line in mv_lines:
            total += line.currency_id and line.amount_residual_currency or line.amount_residual
            # Stop as soon as the accumulated residual reaches the line amount.
            if float_compare(total, abs(amount), precision_digits=st_line_currency.rounding) != -1:
                break
            ret = (ret | line)
        return ret
    def _get_move_lines_for_auto_reconcile(self):
        """ Returns the move lines that the method auto_reconcile can use to try to reconcile the statement line """
        # Extension hook: intentionally has no default implementation here;
        # meant to be overridden by inheriting modules.
        pass
    @api.multi
    def auto_reconcile(self):
        """ Try to automatically reconcile the statement.line ; return the counterpart journal entry/ies if the automatic reconciliation succeeded, False otherwise.
            TODO : this method could be greatly improved and made extensible

            Only reconciles on an UNAMBIGUOUS match: a unique ref match or a
            unique same-partner/same-amount match; anything ambiguous returns
            False.
        """
        self.ensure_one()
        match_recs = self.env['account.move.line']
        # How to compare statement line amount and move lines amount
        amount_domain_maker = self._get_domain_maker_move_line_amount()
        equal_amount_domain = amount_domain_maker('=', self.amount_currency or self.amount)
        # Look for structured communication match
        if self.name:
            overlook_partner = not self.partner_id # If the transaction has no partner, look for match in payable and receivable account anyway
            domain = equal_amount_domain + [('ref', '=', self.name)]
            # limit=2 so a second hit reveals ambiguity and aborts the attempt.
            match_recs = self.get_move_lines_for_reconciliation(limit=2, additional_domain=domain, overlook_partner=overlook_partner)
            if match_recs and len(match_recs) != 1:
                return False
        # Look for a single move line with the same partner, the same amount
        if not match_recs:
            if self.partner_id:
                match_recs = self.get_move_lines_for_reconciliation(limit=2, additional_domain=equal_amount_domain)
                if match_recs and len(match_recs) != 1:
                    return False
        if not match_recs:
            return False
        # Now reconcile
        counterpart_aml_dicts = []
        payment_aml_rec = self.env['account.move.line']
        for aml in match_recs:
            if aml.account_id.internal_type == 'liquidity':
                # Liquidity lines are existing payments: link them as-is.
                payment_aml_rec = (payment_aml_rec | aml)
            else:
                amount = aml.currency_id and aml.amount_residual_currency or aml.amount_residual
                counterpart_aml_dicts.append({
                    'name': aml.name if aml.name != '/' else aml.move_id.name,
                    'debit': amount < 0 and -amount or 0,
                    'credit': amount > 0 and amount or 0,
                    'move_line': aml
                })
        try:
            # The savepoint rolls back any partial writes if the
            # reconciliation raises.
            with self._cr.savepoint():
                counterpart = self.process_reconciliation(counterpart_aml_dicts=counterpart_aml_dicts, payment_aml_rec=payment_aml_rec)
            return counterpart
        except UserError:
            # A configuration / business logic error that makes it impossible to auto-reconcile should not be raised
            # since automatic reconciliation is just an amenity and the user will get the same exception when manually
            # reconciling. Other types of exception are (hopefully) programmation errors and should cause a stacktrace.
            self.invalidate_cache()
            self.env['account.move'].invalidate_cache()
            self.env['account.move.line'].invalidate_cache()
            return False
def _prepare_reconciliation_move(self, move_name):
""" Prepare the dict of values to create the move from a statement line. This method may be overridden to adapt domain logic
through model inheritance (make sure to call super() to establish a clean extension chain).
:param char st_line_number: will be used as the name of the generated account move
:return: dict of value to create() the account.move
"""
return {
'statement_line_id': self.id,
'journal_id': self.statement_id.journal_id.id,
'date': self.date,
'name': move_name,
'ref': self.ref,
}
    def _prepare_reconciliation_move_line(self, move, amount):
        """ Prepare the dict of values to create the move line from a statement line.

            :param recordset move: the account.move to link the move line
            :param float amount: the amount of transaction that wasn't already reconciled
            :return: dict of values to create() the account.move.line
        """
        company_currency = self.journal_id.company_id.currency_id
        statement_currency = self.journal_id.currency_id or company_currency
        st_line_currency = self.currency_id or statement_currency
        amount_currency = False
        if statement_currency != company_currency or st_line_currency != company_currency:
            # First get the ratio total mount / amount not already reconciled
            # NOTE(review): assumes amount != 0 for partially-reconciled
            # multi-currency lines — verify callers never pass 0 here.
            if statement_currency == company_currency:
                total_amount = self.amount
            elif st_line_currency == company_currency:
                total_amount = self.amount_currency
            else:
                total_amount = statement_currency.with_context({'date': self.date}).compute(self.amount, company_currency)
            ratio = total_amount / amount
            # Then use it to adjust the statement.line field that correspond to the move.line amount_currency
            if statement_currency != company_currency:
                amount_currency = self.amount * ratio
            elif st_line_currency != company_currency:
                amount_currency = self.amount_currency * ratio
        return {
            'name': self.name,
            'date': self.date,
            'ref': self.ref,
            'move_id': move.id,
            'partner_id': self.partner_id and self.partner_id.id or False,
            # Positive amounts hit the journal's credit account, negative ones
            # the debit account.
            'account_id': amount >= 0 \
                and self.statement_id.journal_id.default_credit_account_id.id \
                or self.statement_id.journal_id.default_debit_account_id.id,
            'credit': amount < 0 and -amount or 0.0,
            'debit': amount > 0 and amount or 0.0,
            'statement_id': self.statement_id.id,
            'journal_id': self.statement_id.journal_id.id,
            'currency_id': statement_currency != company_currency and statement_currency.id or (st_line_currency != company_currency and st_line_currency.id or False),
            'amount_currency': amount_currency,
        }
    @api.v7
    def process_reconciliations(self, cr, uid, ids, data, context=None):
        """ Handles data sent from the bank statement reconciliation widget (and can otherwise serve as an old-API bridge)

            :param list of dicts data: must contains the keys 'counterpart_aml_dicts', 'payment_aml_ids' and 'new_aml_dicts',
                whose value is the same as described in process_reconciliation except that ids are used instead of recordsets.
        """
        aml_obj = self.pool['account.move.line']
        # data[i] carries the reconciliation payload for ids[i].
        for id, datum in zip(ids, data):
            st_line = self.browse(cr, uid, id, context)
            payment_aml_rec = aml_obj.browse(cr, uid, datum.get('payment_aml_ids', []), context)
            # Convert the counterpart ids sent by the widget into recordsets,
            # as expected by process_reconciliation.
            for aml_dict in datum.get('counterpart_aml_dicts', []):
                aml_dict['move_line'] = aml_obj.browse(cr, uid, aml_dict['counterpart_aml_id'], context)
                del aml_dict['counterpart_aml_id']
            st_line.process_reconciliation(datum.get('counterpart_aml_dicts', []), payment_aml_rec, datum.get('new_aml_dicts', []))
def fast_counterpart_creation(self):
for st_line in self:
# Technical functionality to automatically reconcile by creating a new move line
vals = {
'name': st_line.name,
'debit': st_line.amount < 0 and -st_line.amount or 0.0,
'credit': st_line.amount > 0 and st_line.amount or 0.0,
'account_id': st_line.account_id.id,
}
st_line.process_reconciliation(new_aml_dicts=[vals])
    def process_reconciliation(self, counterpart_aml_dicts=None, payment_aml_rec=None, new_aml_dicts=None):
        """ Match statement lines with existing payments (eg. checks) and/or payables/receivables (eg. invoices and refunds) and/or new move lines (eg. write-offs).
            If any new journal item needs to be created (via new_aml_dicts or counterpart_aml_dicts), a new journal entry will be created and will contain those
            items, as well as a journal item for the bank statement line.
            Finally, mark the statement line as reconciled by putting the matched moves ids in the column journal_entry_ids.

            :param (list of dicts) counterpart_aml_dicts: move lines to create to reconcile with existing payables/receivables.
                The expected keys are :
                - 'name'
                - 'debit'
                - 'credit'
                - 'move_line'
                    # The move line to reconcile (partially if specified debit/credit is lower than move line's credit/debit)

            :param (list of recordsets) payment_aml_rec: recordset move lines representing existing payments (which are already fully reconciled)

            :param (list of dicts) new_aml_dicts: move lines to create. The expected keys are :
                - 'name'
                - 'debit'
                - 'credit'
                - 'account_id'
                - (optional) 'tax_ids'
                - (optional) Other account.move.line fields like analytic_account_id or analytics_id

            :returns: The journal entries with which the transaction was matched. If there was at least an entry in counterpart_aml_dicts or new_aml_dicts, this list contains
                the move created by the reconciliation, containing entries for the statement.line (1), the counterpart move lines (0..*) and the new move lines (0..*).

            :raises UserError: if the line or one of the selected move lines is
                already reconciled.
        """
        counterpart_aml_dicts = counterpart_aml_dicts or []
        payment_aml_rec = payment_aml_rec or self.env['account.move.line']
        new_aml_dicts = new_aml_dicts or []
        aml_obj = self.env['account.move.line']
        company_currency = self.journal_id.company_id.currency_id
        statement_currency = self.journal_id.currency_id or company_currency
        st_line_currency = self.currency_id or statement_currency
        counterpart_moves = self.env['account.move']
        # Check and prepare received data
        if self.journal_entry_ids.ids:
            raise UserError(_('The bank statement line was already reconciled.'))
        if any(rec.statement_id for rec in payment_aml_rec):
            raise UserError(_('A selected move line was already reconciled.'))
        for aml_dict in counterpart_aml_dicts:
            if aml_dict['move_line'].reconciled:
                raise UserError(_('A selected move line was already reconciled.'))
            # NOTE(review): this isinstance check runs AFTER the .reconciled
            # access above; an int 'move_line' would already have crashed —
            # verify whether callers can still pass raw ids here.
            if isinstance(aml_dict['move_line'], (int, long)):
                aml_dict['move_line'] = aml_obj.browse(aml_dict['move_line'])
        for aml_dict in (counterpart_aml_dicts + new_aml_dicts):
            if aml_dict.get('tax_ids') and aml_dict['tax_ids'] and isinstance(aml_dict['tax_ids'][0], (int, long)):
                # Transform the value in the format required for One2many and Many2many fields
                aml_dict['tax_ids'] = map(lambda id: (4, id, None), aml_dict['tax_ids'])
        # Fully reconciled moves are just linked to the bank statement
        for aml_rec in payment_aml_rec:
            aml_rec.write({'statement_id': self.statement_id.id})
            aml_rec.move_id.write({'statement_line_id': self.id})
            counterpart_moves = (counterpart_moves | aml_rec.move_id)
        # Create move line(s). Either matching an existing journal entry (eg. invoice), in which
        # case we reconcile the existing and the new move lines together, or being a write-off.
        if counterpart_aml_dicts or new_aml_dicts:
            st_line_currency = self.currency_id or statement_currency
            # NOTE(review): assumes self.amount != 0 when currency_id is set —
            # enforced by the _check_amount constraint on this model.
            st_line_currency_rate = self.currency_id and (self.amount_currency / self.amount) or False
            # Create the move
            move_name = (self.statement_id.name or self.name) + "/" + str(self.sequence)
            move_vals = self._prepare_reconciliation_move(move_name)
            move = self.env['account.move'].create(move_vals)
            move.post()
            counterpart_moves = (counterpart_moves | move)
            # Complete dicts to create both counterpart move lines and write-offs
            to_create = (counterpart_aml_dicts + new_aml_dicts)
            ctx = dict(self._context, date=self.date)
            for aml_dict in to_create:
                aml_dict['move_id'] = move.id
                aml_dict['date'] = self.statement_id.date
                aml_dict['partner_id'] = self.partner_id.id
                aml_dict['journal_id'] = self.journal_id.id
                aml_dict['company_id'] = self.company_id.id
                aml_dict['statement_id'] = self.statement_id.id
                if st_line_currency.id != company_currency.id:
                    aml_dict['amount_currency'] = aml_dict['debit'] - aml_dict['credit']
                    aml_dict['currency_id'] = st_line_currency.id
                    if self.currency_id and statement_currency.id == company_currency.id and st_line_currency_rate:
                        # Statement is in company currency but the transaction is in foreign currency
                        aml_dict['debit'] = company_currency.round(aml_dict['debit'] / st_line_currency_rate)
                        aml_dict['credit'] = company_currency.round(aml_dict['credit'] / st_line_currency_rate)
                    elif self.currency_id and st_line_currency_rate:
                        # Statement is in foreign currency and the transaction is in another one
                        aml_dict['debit'] = statement_currency.with_context(ctx).compute(aml_dict['debit'] / st_line_currency_rate, company_currency)
                        aml_dict['credit'] = statement_currency.with_context(ctx).compute(aml_dict['credit'] / st_line_currency_rate, company_currency)
                    else:
                        # Statement is in foreign currency and no extra currency is given for the transaction
                        aml_dict['debit'] = st_line_currency.with_context(ctx).compute(aml_dict['debit'], company_currency)
                        aml_dict['credit'] = st_line_currency.with_context(ctx).compute(aml_dict['credit'], company_currency)
                elif statement_currency.id != company_currency.id:
                    # Statement is in foreign currency but the transaction is in company currency
                    prorata_factor = (aml_dict['debit'] - aml_dict['credit']) / self.amount_currency
                    aml_dict['amount_currency'] = prorata_factor * self.amount
                    aml_dict['currency_id'] = statement_currency.id
            # Create the move line for the statement line using the total credit/debit of the counterpart
            # This leaves out the amount already reconciled and avoids rounding errors from currency conversion
            st_line_amount = sum(aml_dict['credit'] - aml_dict['debit'] for aml_dict in to_create)
            aml_obj.with_context(check_move_validity=False).create(self._prepare_reconciliation_move_line(move, st_line_amount))
            # Create write-offs
            for aml_dict in new_aml_dicts:
                aml_obj.with_context(check_move_validity=False).create(aml_dict)
            # Create counterpart move lines and reconcile them
            for aml_dict in counterpart_aml_dicts:
                if aml_dict['move_line'].partner_id.id:
                    aml_dict['partner_id'] = aml_dict['move_line'].partner_id.id
                aml_dict['account_id'] = aml_dict['move_line'].account_id.id
                counterpart_move_line = aml_dict.pop('move_line')
                if counterpart_move_line.currency_id and counterpart_move_line.currency_id != company_currency and not aml_dict.get('currency_id'):
                    aml_dict['currency_id'] = counterpart_move_line.currency_id.id
                    aml_dict['amount_currency'] = company_currency.with_context(ctx).compute(aml_dict['debit'] - aml_dict['credit'], counterpart_move_line.currency_id)
                new_aml = aml_obj.with_context(check_move_validity=False).create(aml_dict)
                (new_aml | counterpart_move_line).reconcile()
        counterpart_moves.assert_balanced()
        return counterpart_moves
| zbqf109/goodo | openerp/addons/account/models/account_bank_statement.py | Python | gpl-3.0 | 46,649 |
#-*- coding:utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import time
from datetime import date, datetime, timedelta
from openerp.osv import fields, osv
from openerp.tools import float_compare, float_is_zero
from openerp.tools.translate import _
from openerp.exceptions import UserError
class hr_payslip_line(osv.osv):
    '''
    Payslip Line
    '''
    _inherit = 'hr.payslip.line'

    def _get_partner_id(self, cr, uid, payslip_line, credit_account, context=None):
        """
        Get partner_id of slip line to use in account_move_line

        The partner comes from the salary rule's contribution register,
        falling back on the employee's home address; it is only returned when
        the register has a partner or the relevant side's account is of a
        receivable/payable type.
        """
        rule = payslip_line.salary_rule_id
        partner_id = rule.register_id.partner_id.id or \
            payslip_line.slip_id.employee_id.address_home_id.id
        account = rule.account_credit if credit_account else rule.account_debit
        if rule.register_id.partner_id or account.internal_type in ('receivable', 'payable'):
            return partner_id
        return False
class hr_payslip(osv.osv):
    '''
    Pay Slip

    Extends hr.payslip with the accounting fields needed to generate a
    journal entry when the slip is processed.
    '''
    _inherit = 'hr.payslip'
    _description = 'Pay Slip'
    _columns = {
        # Accounting date of the generated entry; empty means "use the validation date".
        'date': fields.date('Date Account', states={'draft': [('readonly', False)]}, readonly=True, help="Keep empty to use the period of the validation(Payslip) date."),
        'journal_id': fields.many2one('account.journal', 'Salary Journal',states={'draft': [('readonly', False)]}, readonly=True, required=True),
        # Journal entry created by process_sheet, removed again by cancel_sheet.
        'move_id': fields.many2one('account.move', 'Accounting Entry', readonly=True, copy=False),
    }
def _get_default_journal(self, cr, uid, context=None):
journal_obj = self.pool.get('account.journal')
res = journal_obj.search(cr, uid, [('type', '=', 'general')])
if res:
return res[0]
return False
_defaults = {
'journal_id': _get_default_journal,
}
def create(self, cr, uid, vals, context=None):
if context is None:
context = {}
if 'journal_id' in context:
vals.update({'journal_id': context.get('journal_id')})
return super(hr_payslip, self).create(cr, uid, vals, context=context)
def onchange_contract_id(self, cr, uid, ids, date_from, date_to, employee_id=False, contract_id=False, context=None):
contract_obj = self.pool.get('hr.contract')
res = super(hr_payslip, self).onchange_contract_id(cr, uid, ids, date_from=date_from, date_to=date_to, employee_id=employee_id, contract_id=contract_id, context=context)
journal_id = contract_id and contract_obj.browse(cr, uid, contract_id, context=context).journal_id.id or False
res['value'].update({'journal_id': journal_id})
return res
def cancel_sheet(self, cr, uid, ids, context=None):
move_pool = self.pool.get('account.move')
move_ids = []
move_to_cancel = []
for slip in self.browse(cr, uid, ids, context=context):
if slip.move_id:
move_ids.append(slip.move_id.id)
if slip.move_id.state == 'posted':
move_to_cancel.append(slip.move_id.id)
move_pool.button_cancel(cr, uid, move_to_cancel, context=context)
move_pool.unlink(cr, uid, move_ids, context=context)
return super(hr_payslip, self).cancel_sheet(cr, uid, ids, context=context)
def process_sheet(self, cr, uid, ids, context=None):
move_pool = self.pool.get('account.move')
hr_payslip_line_pool = self.pool['hr.payslip.line']
precision = self.pool.get('decimal.precision').precision_get(cr, uid, 'Payroll')
timenow = time.strftime('%Y-%m-%d')
for slip in self.browse(cr, uid, ids, context=context):
line_ids = []
debit_sum = 0.0
credit_sum = 0.0
date = timenow
name = _('Payslip of %s') % (slip.employee_id.name)
move = {
'narration': name,
'ref': slip.number,
'journal_id': slip.journal_id.id,
'date': date,
}
for line in slip.details_by_salary_rule_category:
amt = slip.credit_note and -line.total or line.total
if float_is_zero(amt, precision_digits=precision):
continue
debit_account_id = line.salary_rule_id.account_debit.id
credit_account_id = line.salary_rule_id.account_credit.id
if debit_account_id:
debit_line = (0, 0, {
'name': line.name,
'partner_id': hr_payslip_line_pool._get_partner_id(cr, uid, line, credit_account=False, context=context),
'account_id': debit_account_id,
'journal_id': slip.journal_id.id,
'date': date,
'debit': amt > 0.0 and amt or 0.0,
'credit': amt < 0.0 and -amt or 0.0,
'analytic_account_id': line.salary_rule_id.analytic_account_id and line.salary_rule_id.analytic_account_id.id or False,
'tax_line_id': line.salary_rule_id.account_tax_id and line.salary_rule_id.account_tax_id.id or False,
})
line_ids.append(debit_line)
debit_sum += debit_line[2]['debit'] - debit_line[2]['credit']
if credit_account_id:
credit_line = (0, 0, {
'name': line.name,
'partner_id': hr_payslip_line_pool._get_partner_id(cr, uid, line, credit_account=True, context=context),
'account_id': credit_account_id,
'journal_id': slip.journal_id.id,
'date': date,
'debit': amt < 0.0 and -amt or 0.0,
'credit': amt > 0.0 and amt or 0.0,
'analytic_account_id': line.salary_rule_id.analytic_account_id and line.salary_rule_id.analytic_account_id.id or False,
'tax_line_id': line.salary_rule_id.account_tax_id and line.salary_rule_id.account_tax_id.id or False,
})
line_ids.append(credit_line)
credit_sum += credit_line[2]['credit'] - credit_line[2]['debit']
if float_compare(credit_sum, debit_sum, precision_digits=precision) == -1:
acc_id = slip.journal_id.default_credit_account_id.id
if not acc_id:
raise UserError(_('The Expense Journal "%s" has not properly configured the Credit Account!') % (slip.journal_id.name))
adjust_credit = (0, 0, {
'name': _('Adjustment Entry'),
'date': timenow,
'partner_id': False,
'account_id': acc_id,
'journal_id': slip.journal_id.id,
'date': date,
'debit': 0.0,
'credit': debit_sum - credit_sum,
})
line_ids.append(adjust_credit)
elif float_compare(debit_sum, credit_sum, precision_digits=precision) == -1:
acc_id = slip.journal_id.default_debit_account_id.id
if not acc_id:
raise UserError(_('The Expense Journal "%s" has not properly configured the Debit Account!') % (slip.journal_id.name))
adjust_debit = (0, 0, {
'name': _('Adjustment Entry'),
'partner_id': False,
'account_id': acc_id,
'journal_id': slip.journal_id.id,
'date': date,
'debit': credit_sum - debit_sum,
'credit': 0.0,
})
line_ids.append(adjust_debit)
move.update({'line_ids': line_ids})
move_id = move_pool.create(cr, uid, move, context=context)
self.write(cr, uid, [slip.id], {'move_id': move_id, 'date' : date}, context=context)
move_pool.post(cr, uid, [move_id], context=context)
return super(hr_payslip, self).process_sheet(cr, uid, [slip.id], context=context)
class hr_salary_rule(osv.osv):
    # Accounting configuration for salary rules: which debit/credit accounts
    # (and optionally which analytic account and tax) the move lines
    # generated from this rule should use.  Read by hr_payslip.process_sheet.
    _inherit = 'hr.salary.rule'
    _columns = {
        'analytic_account_id':fields.many2one('account.analytic.account', 'Analytic Account', domain=[('account_type', '=', 'normal')]),
        'account_tax_id':fields.many2one('account.tax', 'Tax'),
        'account_debit': fields.many2one('account.account', 'Debit Account', domain=[('deprecated', '=', False)]),
        'account_credit': fields.many2one('account.account', 'Credit Account', domain=[('deprecated', '=', False)]),
    }
class hr_contract(osv.osv):
    # Adds accounting defaults to employee contracts: the analytic account
    # and the salary journal proposed on new payslips
    # (see hr_payslip.onchange_contract_id).
    _inherit = 'hr.contract'
    _description = 'Employee Contract'
    _columns = {
        'analytic_account_id':fields.many2one('account.analytic.account', 'Analytic Account', domain=[('account_type', '=', 'normal')]),
        'journal_id': fields.many2one('account.journal', 'Salary Journal'),
    }
class hr_payslip_run(osv.osv):
    """Payslip batch: carries the salary journal used for generated slips."""
    _inherit = 'hr.payslip.run'
    _description = 'Payslip Run'
    _columns = {
        'journal_id': fields.many2one('account.journal', 'Salary Journal', states={'draft': [('readonly', False)]}, readonly=True, required=True),
    }

    def _get_default_journal(self, cr, uid, context=None):
        # Propose the first journal of type 'general', if one exists.
        journal_ids = self.pool.get('account.journal').search(cr, uid, [('type', '=', 'general')])
        return journal_ids[0] if journal_ids else False

    _defaults = {
        'journal_id': _get_default_journal,
    }
| QinerTech/QinerApps | openerp/addons/hr_payroll_account/hr_payroll_account.py | Python | gpl-3.0 | 9,968 |
# Orca
#
# Copyright 2006-2009 Sun Microsystems Inc.
# Copyright 2010 Joanmarie Diggs
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., Franklin Street, Fifth Floor,
# Boston MA 02110-1301 USA.
__id__ = "$Id$"
__version__ = "$Revision$"
__date__ = "$Date$"
__copyright__ = "Copyright (c) 2005-2009 Sun Microsystems Inc., " \
"Copyright (c) 2010 Joanmarie Diggs"
__license__ = "LGPL"
import pyatspi
import orca.scripts.default as default
import orca.input_event as input_event
import orca.orca as orca
import orca.orca_state as orca_state
from .script_utilities import Utilities
from .speech_generator import SpeechGenerator
from .formatting import Formatting
########################################################################
# #
# The Java script class. #
# #
########################################################################
class Script(default.Script):
    """Orca script for Java applications (exposed via the Java Atk Wrapper).

    Overrides the default script to work around Java toolkit quirks:
    incomplete keyboard event data, over-eager caret-moved events from spin
    buttons, and broken accessibility events for menus.
    """

    def __init__(self, app):
        """Creates a new script for Java applications.

        Arguments:
        - app: the application to create a script for.
        """
        default.Script.__init__(self, app)

        # Some objects which issue descendant changed events lack
        # STATE_MANAGES_DESCENDANTS. As a result, onSelectionChanged
        # doesn't ignore these objects. That in turn causes Orca to
        # double-speak some items and/or set the locusOfFocus to a
        # parent it shouldn't. See bgo#616582. [[[TODO - JD: remove
        # this hack if and when we get a fix for that bug]]]
        #
        self.lastDescendantChangedSource = None

    def getSpeechGenerator(self):
        """Returns the speech generator for this script."""
        return SpeechGenerator(self)

    def getFormatting(self):
        """Returns the formatting strings for this script."""
        return Formatting(self)

    def getUtilities(self):
        """Returns the utilities for this script."""
        return Utilities(self)

    def checkKeyboardEventData(self, keyboardEvent):
        """Checks the data on the keyboard event.

        Some toolkits don't fill all the key event fields, and/or fills
        them out with unexpected data. This method tries to fill in the
        missing fields and validate/standardize the data we've been given.
        While any script can override this method, it is expected that
        this will only be done at the toolkit script level.

        Arguments:
        - keyboardEvent: an instance of input_event.KeyboardEvent
        """
        default.Script.checkKeyboardEventData(self, keyboardEvent)

        if not keyboardEvent.keyval_name:
            return

        # Recover the hardware keycode from the keyval name, preferring the
        # entry belonging to the default keyboard group (group 0).
        from gi.repository import Gdk

        keymap = Gdk.Keymap.get_default()
        keyval = Gdk.keyval_from_name(keyboardEvent.keyval_name)
        success, entries = keymap.get_entries_for_keyval(keyval)
        for entry in entries:
            if entry.group == 0:
                keyboardEvent.hw_code = entry.keycode
                break

        # Put the event_string back to what it was prior to the Java
        # Atk Wrapper hack which gives us the keyname and not the
        # expected and needed printable character for punctuation
        # marks.
        #
        if keyboardEvent.event_string == keyboardEvent.keyval_name \
           and len(keyboardEvent.event_string) > 1:
            keyval = Gdk.keyval_from_name(keyboardEvent.keyval_name)
            if 0 < keyval < 256:
                keyboardEvent.event_string = chr(keyval)

    def onCaretMoved(self, event):
        """Callback for object:text-caret-moved accessibility events."""
        # Java's SpinButtons are the most caret movement happy thing
        # I've seen to date.  If you Up or Down on the keyboard to
        # change the value, they typically emit three caret movement
        # events, first to the beginning, then to the end, and then
        # back to the beginning.  It's a very excitable little widget.
        # Luckily, it only issues one value changed event.  So, we'll
        # ignore caret movement events caused by value changes and
        # just process the single value changed event.
        #
        isSpinBox = self.utilities.hasMatchingHierarchy(
            event.source, [pyatspi.ROLE_TEXT,
                           pyatspi.ROLE_PANEL,
                           pyatspi.ROLE_SPIN_BUTTON])
        if isSpinBox:
            eventStr, mods = self.utilities.lastKeyAndModifiers()
            if eventStr in ["Up", "Down"] or isinstance(
               orca_state.lastInputEvent, input_event.MouseButtonEvent):
                return

        default.Script.onCaretMoved(self, event)

    def onSelectionChanged(self, event):
        """Called when an object's selection changes.

        Arguments:
        - event: the Event
        """
        # Avoid doing this with objects that manage their descendants
        # because they'll issue a descendant changed event. (Note: This
        # equality check is intentional; utilities.isSameObject() is
        # especially thorough with trees and tables, which is not
        # performant.
        #
        if event.source == self.lastDescendantChangedSource:
            return

        # We treat selected children as the locus of focus. When the
        # selection changes in a list we want to update the locus of
        # focus. If there is no selection, we default the locus of
        # focus to the containing object.
        #
        if (event.source.getRole() in [pyatspi.ROLE_LIST,
                                       pyatspi.ROLE_PAGE_TAB_LIST,
                                       pyatspi.ROLE_TREE]) \
           and event.source.getState().contains(pyatspi.STATE_FOCUSED):
            newFocus = event.source
            if event.source.childCount:
                selection = event.source.querySelection()
                if selection.nSelectedChildren > 0:
                    newFocus = selection.getSelectedChild(0)
            orca.setLocusOfFocus(event, newFocus)
        else:
            default.Script.onSelectionChanged(self, event)

    def onFocusedChanged(self, event):
        """Callback for object:state-changed:focused accessibility events."""
        if not event.detail1:
            return

        obj = event.source
        role = obj.getRole()

        # Accessibility support for menus in Java is badly broken: Missing
        # events, missing states, bogus events from other objects, etc.
        # Therefore if we get an event, however broken, for menus or their
        # their items that suggests they are selected, we'll just cross our
        # fingers and hope that's true.
        menuRoles = [pyatspi.ROLE_MENU,
                     pyatspi.ROLE_MENU_BAR,
                     pyatspi.ROLE_MENU_ITEM,
                     pyatspi.ROLE_CHECK_MENU_ITEM,
                     pyatspi.ROLE_RADIO_MENU_ITEM,
                     pyatspi.ROLE_POPUP_MENU]
        if role in menuRoles or obj.parent.getRole() in menuRoles:
            orca.setLocusOfFocus(event, obj)
            return

        try:
            focusRole = orca_state.locusOfFocus.getRole()
        except:
            focusRole = None

        # A root pane claiming focus while a menu has it is one of the bogus
        # Java events mentioned above; ignore it so focus stays on the menu.
        if focusRole in menuRoles and role == pyatspi.ROLE_ROOT_PANE:
            return

        default.Script.onFocusedChanged(self, event)

    def onValueChanged(self, event):
        """Called whenever an object's value changes.

        Arguments:
        - event: the Event
        """
        # We'll ignore value changed events for Java's toggle buttons since
        # they also send a redundant object:state-changed:checked event.
        #
        ignoreRoles = [pyatspi.ROLE_TOGGLE_BUTTON,
                       pyatspi.ROLE_RADIO_BUTTON,
                       pyatspi.ROLE_CHECK_BOX]
        if event.source.getRole() in ignoreRoles:
            return

        # Java's SpinButtons are the most caret movement happy thing
        # I've seen to date.  If you Up or Down on the keyboard to
        # change the value, they typically emit three caret movement
        # events, first to the beginning, then to the end, and then
        # back to the beginning.  It's a very excitable little widget.
        # Luckily, it only issues one value changed event.  So, we'll
        # ignore caret movement events caused by value changes and
        # just process the single value changed event.
        #
        if event.source.getRole() == pyatspi.ROLE_SPIN_BUTTON:
            try:
                thisBox = orca_state.locusOfFocus.parent.parent == event.source
            except:
                thisBox = False
            if thisBox:
                self._presentTextAtNewCaretPosition(event,
                                                    orca_state.locusOfFocus)
                return

        default.Script.onValueChanged(self, event)

    def skipObjectEvent(self, event):
        """Returns True if the event should be ignored; never for menus."""
        # Accessibility support for menus in Java is badly broken. One problem
        # is bogus focus claims following menu-related focus claims. Therefore
        # in this particular toolkit, we mustn't skip events for menus.
        menuRoles = [pyatspi.ROLE_MENU,
                     pyatspi.ROLE_MENU_BAR,
                     pyatspi.ROLE_MENU_ITEM,
                     pyatspi.ROLE_CHECK_MENU_ITEM,
                     pyatspi.ROLE_RADIO_MENU_ITEM,
                     pyatspi.ROLE_POPUP_MENU]
        if event.source.getRole() in menuRoles:
            return False

        return default.Script.skipObjectEvent(self, event)
| ruibarreira/linuxtrail | usr/lib/python3/dist-packages/orca/scripts/toolkits/J2SE-access-bridge/script.py | Python | gpl-3.0 | 10,245 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
# Copyright: (c) 2017, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'core'}
DOCUMENTATION = r'''
---
module: file
version_added: historical
short_description: Manage files and file properties
extends_documentation_fragment: files
description:
- Set attributes of files, symlinks or directories.
- Alternatively, remove files, symlinks or directories.
- Many other modules support the same options as the C(file) module - including M(copy), M(template), and M(assemble).
- For Windows targets, use the M(win_file) module instead.
options:
path:
description:
- Path to the file being managed.
type: path
required: yes
aliases: [ dest, name ]
state:
description:
- If C(absent), directories will be recursively deleted, and files or symlinks will
be unlinked. In the case of a directory, if C(diff) is declared, you will see the files and folders deleted listed
under C(path_contents). Note that C(absent) will not cause C(file) to fail if the C(path) does
not exist as the state did not change.
- If C(directory), all intermediate subdirectories will be created if they
do not exist. Since Ansible 1.7 they will be created with the supplied permissions.
- If C(file), without any other options this works mostly as a 'stat' and will return the current state of C(path).
Even with other options (i.e C(mode)), the file will be modified but will NOT be created if it does not exist;
see the C(touch) value or the M(copy) or M(template) module if you want that behavior.
- If C(hard), the hard link will be created or changed.
- If C(link), the symbolic link will be created or changed.
- If C(touch) (new in 1.4), an empty file will be created if the C(path) does not
exist, while an existing file or directory will receive updated file access and
modification times (similar to the way C(touch) works from the command line).
type: str
default: file
choices: [ absent, directory, file, hard, link, touch ]
src:
description:
- Path of the file to link to.
- This applies only to C(state=link) and C(state=hard).
- For C(state=link), this will also accept a non-existing path.
- Relative paths are relative to the file being created (C(path)) which is how
the Unix command C(ln -s SRC DEST) treats relative paths.
type: path
recurse:
description:
- Recursively set the specified file attributes on directory contents.
- This applies only when C(state) is set to C(directory).
type: bool
default: no
version_added: '1.1'
force:
description:
- >
Force the creation of the symlinks in two cases: the source file does
not exist (but will appear later); the destination exists and is a file (so, we need to unlink the
C(path) file and create symlink to the C(src) file in place of it).
type: bool
default: no
follow:
description:
- This flag indicates that filesystem links, if they exist, should be followed.
- Previous to Ansible 2.5, this was C(no) by default.
type: bool
default: yes
version_added: '1.8'
modification_time:
description:
- This parameter indicates the time the file's modification time should be set to.
- Should be C(preserve) when no modification is required, C(YYYYMMDDHHMM.SS) when using default time format, or C(now).
- Default is None meaning that C(preserve) is the default for C(state=[file,directory,link,hard]) and C(now) is default for C(state=touch).
type: str
version_added: "2.7"
modification_time_format:
description:
- When used with C(modification_time), indicates the time format that must be used.
- Based on default Python format (see time.strftime doc).
type: str
default: "%Y%m%d%H%M.%S"
version_added: '2.7'
access_time:
description:
- This parameter indicates the time the file's access time should be set to.
- Should be C(preserve) when no modification is required, C(YYYYMMDDHHMM.SS) when using default time format, or C(now).
- Default is C(None) meaning that C(preserve) is the default for C(state=[file,directory,link,hard]) and C(now) is default for C(state=touch).
type: str
version_added: '2.7'
access_time_format:
description:
- When used with C(access_time), indicates the time format that must be used.
- Based on default Python format (see time.strftime doc).
type: str
default: "%Y%m%d%H%M.%S"
version_added: '2.7'
seealso:
- module: assemble
- module: copy
- module: stat
- module: template
- module: win_file
author:
- Ansible Core Team
- Michael DeHaan
'''
EXAMPLES = r'''
- name: Change file ownership, group and permissions
file:
path: /etc/foo.conf
owner: foo
group: foo
mode: '0644'
- name: Give insecure permissions to an existing file
file:
path: /work
owner: root
group: root
mode: '1777'
- name: Create a symbolic link
file:
src: /file/to/link/to
dest: /path/to/symlink
owner: foo
group: foo
state: link
- name: Create two hard links
file:
src: '/tmp/{{ item.src }}'
dest: '{{ item.dest }}'
state: hard
loop:
- { src: x, dest: y }
- { src: z, dest: k }
- name: Touch a file, using symbolic modes to set the permissions (equivalent to 0644)
file:
path: /etc/foo.conf
state: touch
mode: u=rw,g=r,o=r
- name: Touch the same file, but add/remove some permissions
file:
path: /etc/foo.conf
state: touch
mode: u+rw,g-wx,o-rwx
- name: Touch again the same file, but dont change times this makes the task idempotent
file:
path: /etc/foo.conf
state: touch
mode: u+rw,g-wx,o-rwx
modification_time: preserve
access_time: preserve
- name: Create a directory if it does not exist
file:
path: /etc/some_directory
state: directory
mode: '0755'
- name: Update modification and access time of given file
file:
path: /etc/some_file
state: file
modification_time: now
access_time: now
- name: Set access time based on seconds from epoch value
file:
path: /etc/another_file
state: file
access_time: '{{ "%Y%m%d%H%M.%S" | strftime(stat_var.stat.atime) }}'
- name: Recursively change ownership of a directory
file:
path: /etc/foo
state: directory
recurse: yes
owner: foo
group: foo
- name: Remove file (delete file)
file:
path: /etc/foo.txt
state: absent
- name: Recursively remove directory
file:
path: /etc/foo
state: absent
'''
RETURN = r'''
'''
import errno
import os
import shutil
import sys
import time
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_bytes, to_native
# There will only be a single AnsibleModule object per module
module = None
class AnsibleModuleError(Exception):
    """Exception carrying the keyword arguments for AnsibleModule.fail_json().

    Raised throughout this module and converted into a fail_json() call by
    _ansible_excepthook.
    """
    def __init__(self, results):
        self.results = results

    def __repr__(self):
        # BUG FIX: this used to *print* the representation and implicitly
        # return None, which makes repr()/str() raise
        # "TypeError: __repr__ returned non-string".  Return the string.
        return 'AnsibleModuleError(results={0})'.format(self.results)
class ParameterError(AnsibleModuleError):
    # Raised for invalid or conflicting module parameters
    # (see additional_parameter_handling).
    pass
class Sentinel(object):
    """Unique marker: instantiating this class returns the class itself.

    Used via identity checks (``x is Sentinel``) to mean 'set the timestamp
    to now', as opposed to None which means 'preserve'
    (see get_timestamp_for_time / update_timestamp_for_file).
    """
    def __new__(cls, *args, **kwargs):
        return cls
def _ansible_excepthook(exc_type, exc_value, tb):
    """sys.excepthook replacement: turn AnsibleModuleError into fail_json().

    Using an exception allows the calling code to catch it when it knows it
    can recover; anything that escapes to here becomes a module failure.
    All other exceptions fall through to the original excepthook.
    """
    # Using an exception allows us to catch it if the calling code knows it can recover
    if issubclass(exc_type, AnsibleModuleError):
        module.fail_json(**exc_value.results)
    else:
        sys.__excepthook__(exc_type, exc_value, tb)
def additional_parameter_handling(params):
    """Additional parameter validation and reformatting"""
    # When path is an existing directory (and we're neither linking nor
    # removing), rewrite the pathname to be the file inside of the directory.
    # TODO: Why do we exclude link? Why don't we exclude directory? Should we exclude touch?
    # I think this is where we want to be in the future:
    # when isdir(path):
    #     if state == absent: Remove the directory
    #     if state == touch: Touch the directory
    #     if state == directory: Assert the directory is the same as the one specified
    #     if state == file: place inside of the directory (use _original_basename)
    #     if state == link: place inside of the directory (use _original_basename. Fallback to src?)
    #     if state == hard: place inside of the directory (use _original_basename. Fallback to src?)
    if params['state'] not in ("link", "absent") and os.path.isdir(to_bytes(params['path'], errors='surrogate_or_strict')):
        basename = params['_original_basename'] or (os.path.basename(params['src']) if params['src'] else None)
        if basename:
            params['path'] = os.path.join(params['path'], basename)

    # state should default to file, but since that creates many conflicts,
    # default state to 'current' when it exists.
    prev_state = get_state(to_bytes(params['path'], errors='surrogate_or_strict'))
    if params['state'] is None:
        if prev_state != 'absent':
            params['state'] = prev_state
        else:
            params['state'] = 'directory' if params['recurse'] else 'file'

    # A recursive operation only makes sense on a directory target.
    if params['recurse'] and params['state'] != 'directory':
        raise ParameterError(results={"msg": "recurse option requires state to be 'directory'",
                                      "path": params["path"]})

    # 'src' is only meaningful when creating links.
    if params['src'] and params['state'] not in ('link', 'hard'):
        raise ParameterError(results={'msg': "src option requires state to be 'link' or 'hard'",
                                      'path': params['path']})
def get_state(path):
    """Return the current state of *path*: 'absent', 'link', 'directory',
    'hard' or 'file'."""
    b_path = to_bytes(path, errors='surrogate_or_strict')
    try:
        if not os.path.lexists(b_path):
            return 'absent'
        if os.path.islink(b_path):
            return 'link'
        if os.path.isdir(b_path):
            return 'directory'
        if os.stat(b_path).st_nlink > 1:
            return 'hard'
        # could be many other things, but defaulting to file
        return 'file'
    except OSError as e:
        if e.errno == errno.ENOENT:  # It may already have been removed
            return 'absent'
        raise
# This should be moved into the common file utilities
def recursive_set_attributes(b_path, follow, file_args, mtime, atime):
    """Recursively apply file attributes (ownership/mode/etc. via the global
    ``module``) and timestamps to everything below *b_path*.

    Symlinks get their own attributes set; when *follow* is true the link
    target is updated as well (recursing when the target is a directory).
    Returns True if anything was changed.
    """
    changed = False
    try:
        for b_root, b_dirs, b_files in os.walk(b_path):
            for b_fsobj in b_dirs + b_files:
                b_fsname = os.path.join(b_root, b_fsobj)
                if not os.path.islink(b_fsname):
                    tmp_file_args = file_args.copy()
                    tmp_file_args['path'] = to_native(b_fsname, errors='surrogate_or_strict')
                    changed |= module.set_fs_attributes_if_different(tmp_file_args, changed, expand=False)
                    changed |= update_timestamp_for_file(tmp_file_args['path'], mtime, atime)
                else:
                    # Change perms on the link
                    tmp_file_args = file_args.copy()
                    tmp_file_args['path'] = to_native(b_fsname, errors='surrogate_or_strict')
                    changed |= module.set_fs_attributes_if_different(tmp_file_args, changed, expand=False)
                    changed |= update_timestamp_for_file(tmp_file_args['path'], mtime, atime)

                    if follow:
                        b_fsname = os.path.join(b_root, os.readlink(b_fsname))
                        # The link target could be nonexistent
                        if os.path.exists(b_fsname):
                            if os.path.isdir(b_fsname):
                                # Link is a directory so change perms on the directory's contents
                                changed |= recursive_set_attributes(b_fsname, follow, file_args, mtime, atime)

                            # Change perms on the file pointed to by the link
                            tmp_file_args = file_args.copy()
                            tmp_file_args['path'] = to_native(b_fsname, errors='surrogate_or_strict')
                            changed |= module.set_fs_attributes_if_different(tmp_file_args, changed, expand=False)
                            changed |= update_timestamp_for_file(tmp_file_args['path'], mtime, atime)
    except RuntimeError as e:
        # on Python3 "RecursionError" is raised which is derived from "RuntimeError"
        # TODO once this function is moved into the common file utilities, this should probably raise more general exception
        raise AnsibleModuleError(
            results={'msg': "Could not recursively set attributes on %s. Original error was: '%s'" % (to_native(b_path), to_native(e))}
        )

    return changed
def initial_diff(path, state, prev_state):
    """Build the before/after diff skeleton for a state transition on *path*.

    When an entire directory tree is about to be removed, the tree's
    contents are recorded under before/path_content so the caller can show
    what will disappear.
    """
    diff = {
        'before': {'path': path},
        'after': {'path': path},
    }

    if prev_state != state:
        diff['before']['state'] = prev_state
        diff['after']['state'] = state

    if state == 'absent' and prev_state == 'directory':
        walklist = {'directories': [], 'files': []}
        b_path = to_bytes(path, errors='surrogate_or_strict')
        for base_path, sub_folders, files in os.walk(b_path):
            walklist['directories'].extend(os.path.join(base_path, name) for name in sub_folders)
            walklist['files'].extend(os.path.join(base_path, name) for name in files)
        diff['before']['path_content'] = walklist

    return diff
#
# States
#
def get_timestamp_for_time(formatted_time, time_format):
    """Translate a timestamp parameter into an epoch value.

    Returns None for 'preserve', the Sentinel class for 'now', otherwise the
    epoch seconds obtained by parsing *formatted_time* with *time_format*.
    Raises AnsibleModuleError when the value cannot be parsed.
    """
    if formatted_time == 'preserve':
        return None
    if formatted_time == 'now':
        return Sentinel

    try:
        parsed = time.strptime(formatted_time, time_format)
        return time.mktime(parsed)
    except (ValueError, OverflowError) as e:
        raise AnsibleModuleError(results={'msg': 'Error while obtaining timestamp for time %s using format %s: %s'
                                                 % (formatted_time, time_format, to_native(e, nonstring='simplerepr'))})
def update_timestamp_for_file(path, mtime, atime, diff=None):
    """Set modification/access times on *path*.

    *mtime*/*atime* are epoch floats, None (meaning 'preserve') or the
    Sentinel class (meaning 'now').  Returns True if a timestamp was
    changed, False otherwise.  When *diff* is supplied, before/after values
    are recorded into it.  Raises AnsibleModuleError on OS errors.
    """
    b_path = to_bytes(path, errors='surrogate_or_strict')

    try:
        # When mtime and atime are set to 'now', rely on utime(path, None) which does not require ownership of the file
        # https://github.com/ansible/ansible/issues/50943
        if mtime is Sentinel and atime is Sentinel:
            # It's not exact but we can't rely on os.stat(path).st_mtime after setting os.utime(path, None) as it may
            # not be updated. Just use the current time for the diff values
            mtime = atime = time.time()

            previous_mtime = os.stat(b_path).st_mtime
            previous_atime = os.stat(b_path).st_atime

            # None tells os.utime() to use the current time.
            set_time = None
        else:
            # If both parameters are None 'preserve', nothing to do
            if mtime is None and atime is None:
                return False

            previous_mtime = os.stat(b_path).st_mtime
            previous_atime = os.stat(b_path).st_atime

            if mtime is None:
                mtime = previous_mtime
            elif mtime is Sentinel:
                mtime = time.time()

            if atime is None:
                atime = previous_atime
            elif atime is Sentinel:
                atime = time.time()

            # If both timestamps are already ok, nothing to do
            if mtime == previous_mtime and atime == previous_atime:
                return False

            set_time = (atime, mtime)

        os.utime(b_path, set_time)

        # Record the before/after values for the caller's diff output.
        if diff is not None:
            if 'before' not in diff:
                diff['before'] = {}
            if 'after' not in diff:
                diff['after'] = {}
            if mtime != previous_mtime:
                diff['before']['mtime'] = previous_mtime
                diff['after']['mtime'] = mtime
            if atime != previous_atime:
                diff['before']['atime'] = previous_atime
                diff['after']['atime'] = atime
    except OSError as e:
        raise AnsibleModuleError(results={'msg': 'Error while updating modification or access time: %s'
                                                 % to_native(e, nonstring='simplerepr'), 'path': path})
    return True
def keep_backward_compatibility_on_timestamps(parameter, state):
    """Supply the historical default when a timestamp parameter is unset.

    An explicitly-set parameter is returned untouched.  When unset (None):
    'touch' defaults to 'now', the other managed states default to
    'preserve', and anything else stays None.
    """
    if parameter is not None:
        return parameter
    if state == 'touch':
        return 'now'
    if state in ('file', 'hard', 'directory', 'link'):
        return 'preserve'
    return parameter
def execute_diff_peek(path):
    """Take a guess as to whether a file is a binary file.

    Reads the first 8 KiB and treats any NUL byte as a binary marker.
    Unreadable files are assumed to be text (returns False).
    """
    b_path = to_bytes(path, errors='surrogate_or_strict')
    try:
        with open(b_path, 'rb') as f:
            head = f.read(8192)
    except Exception:
        # If we can't read the file, we're okay assuming it's text
        return False
    return b"\x00" in head
def ensure_absent(path):
    """Implement state=absent: remove *path* (file, link or directory tree).

    Honors the global ``module``'s check mode (nothing removed, but the
    result still reports changed=True when removal would occur).  Returns a
    result dict with 'path', 'changed', 'state' and, when something is
    removed, the 'diff'.
    """
    b_path = to_bytes(path, errors='surrogate_or_strict')
    prev_state = get_state(b_path)
    result = {}

    if prev_state != 'absent':
        diff = initial_diff(path, 'absent', prev_state)

        if not module.check_mode:
            if prev_state == 'directory':
                try:
                    shutil.rmtree(b_path, ignore_errors=False)
                except Exception as e:
                    raise AnsibleModuleError(results={'msg': "rmtree failed: %s" % to_native(e)})
            else:
                try:
                    os.unlink(b_path)
                except OSError as e:
                    if e.errno != errno.ENOENT:  # It may already have been removed
                        raise AnsibleModuleError(results={'msg': "unlinking failed: %s " % to_native(e),
                                                          'path': path})

        result.update({'path': path, 'changed': True, 'diff': diff, 'state': 'absent'})
    else:
        result.update({'path': path, 'changed': False, 'state': 'absent'})

    return result
def execute_touch(path, follow, timestamps):
    """Implement state=touch: create an empty file or refresh timestamps.

    *timestamps* holds the modification/access time parameters and their
    formats.  Returns a result dict with 'dest', 'changed' and 'diff'.
    NOTE(review): *follow* is accepted but not referenced in this body —
    confirm whether symlink following is handled by the attribute helpers.
    """
    b_path = to_bytes(path, errors='surrogate_or_strict')
    prev_state = get_state(b_path)
    changed = False
    result = {'dest': path}
    mtime = get_timestamp_for_time(timestamps['modification_time'], timestamps['modification_time_format'])
    atime = get_timestamp_for_time(timestamps['access_time'], timestamps['access_time_format'])

    if not module.check_mode:
        if prev_state == 'absent':
            # Create an empty file if the filename did not already exist
            try:
                open(b_path, 'wb').close()
                changed = True
            except (OSError, IOError) as e:
                raise AnsibleModuleError(results={'msg': 'Error, could not touch target: %s'
                                                         % to_native(e, nonstring='simplerepr'),
                                                  'path': path})

        # Update the attributes on the file
        diff = initial_diff(path, 'touch', prev_state)
        file_args = module.load_file_common_arguments(module.params)
        try:
            changed = module.set_fs_attributes_if_different(file_args, changed, diff, expand=False)
            changed |= update_timestamp_for_file(file_args['path'], mtime, atime, diff)
        except SystemExit as e:
            if e.code:
                # We take this to mean that fail_json() was called from
                # somewhere in basic.py
                if prev_state == 'absent':
                    # If we just created the file we can safely remove it
                    os.remove(b_path)
            raise

        result['changed'] = changed
        result['diff'] = diff
    return result
def ensure_file_attributes(path, follow, timestamps):
    """Implement state=file: update attributes/timestamps of an existing file.

    Does NOT create the file; *path* must already be a file (or a hard link,
    or — with *follow* — a symlink to one), otherwise AnsibleModuleError is
    raised.  Returns a result dict with 'path', 'changed' and 'diff'.
    """
    b_path = to_bytes(path, errors='surrogate_or_strict')
    prev_state = get_state(b_path)
    file_args = module.load_file_common_arguments(module.params)
    mtime = get_timestamp_for_time(timestamps['modification_time'], timestamps['modification_time_format'])
    atime = get_timestamp_for_time(timestamps['access_time'], timestamps['access_time_format'])

    if prev_state != 'file':
        if follow and prev_state == 'link':
            # follow symlink and operate on original
            b_path = os.path.realpath(b_path)
            path = to_native(b_path, errors='strict')
            prev_state = get_state(b_path)
            file_args['path'] = path

    if prev_state not in ('file', 'hard'):
        # file is not absent and any other state is a conflict
        raise AnsibleModuleError(results={'msg': 'file (%s) is %s, cannot continue' % (path, prev_state),
                                          'path': path})

    diff = initial_diff(path, 'file', prev_state)
    changed = module.set_fs_attributes_if_different(file_args, False, diff, expand=False)
    changed |= update_timestamp_for_file(file_args['path'], mtime, atime, diff)
    return {'path': path, 'changed': changed, 'diff': diff}
def ensure_directory(path, follow, recurse, timestamps):
    """Ensure *path* is a directory, creating parents as needed (mkdir -p
    semantics) and applying attributes/timestamps to each newly created
    component.

    :arg path: native-string path from the task parameters
    :arg follow: if True and *path* is a symlink, operate on the link target
    :arg recurse: if True, apply attributes recursively to the existing tree
    :arg timestamps: dict with modification/access time values and formats
    :returns: result dict with ``path``, ``changed`` and ``diff`` keys
    :raises AnsibleModuleError: if the path exists as a non-directory, or if
        directory creation fails
    """
    b_path = to_bytes(path, errors='surrogate_or_strict')
    prev_state = get_state(b_path)
    file_args = module.load_file_common_arguments(module.params)
    mtime = get_timestamp_for_time(timestamps['modification_time'], timestamps['modification_time_format'])
    atime = get_timestamp_for_time(timestamps['access_time'], timestamps['access_time_format'])
    # For followed symlinks, we need to operate on the target of the link
    if follow and prev_state == 'link':
        b_path = os.path.realpath(b_path)
        path = to_native(b_path, errors='strict')
        file_args['path'] = path
        prev_state = get_state(b_path)
    changed = False
    diff = initial_diff(path, 'directory', prev_state)
    if prev_state == 'absent':
        # Create directory and assign permissions to it
        if module.check_mode:
            return {'changed': True, 'diff': diff}
        curpath = ''
        try:
            # Split the path so we can apply filesystem attributes recursively
            # from the root (/) directory for absolute paths or the base path
            # of a relative path.  We can then walk the appropriate directory
            # path to apply attributes.
            # Something like mkdir -p with mode applied to all of the newly created directories
            for dirname in path.strip('/').split('/'):
                curpath = '/'.join([curpath, dirname])
                # Remove leading slash if we're creating a relative path
                if not os.path.isabs(path):
                    curpath = curpath.lstrip('/')
                b_curpath = to_bytes(curpath, errors='surrogate_or_strict')
                if not os.path.exists(b_curpath):
                    try:
                        os.mkdir(b_curpath)
                        changed = True
                    except OSError as ex:
                        # Possibly something else created the dir since the os.path.exists
                        # check above. As long as it's a dir, we don't need to error out.
                        if not (ex.errno == errno.EEXIST and os.path.isdir(b_curpath)):
                            raise
                # Attributes are applied only to components created (or seen)
                # during this walk; a copy is used so file_args stays pristine.
                tmp_file_args = file_args.copy()
                tmp_file_args['path'] = curpath
                changed = module.set_fs_attributes_if_different(tmp_file_args, changed, diff, expand=False)
                changed |= update_timestamp_for_file(file_args['path'], mtime, atime, diff)
        except Exception as e:
            raise AnsibleModuleError(results={'msg': 'There was an issue creating %s as requested:'
                                                     ' %s' % (curpath, to_native(e)),
                                              'path': path})
        return {'path': path, 'changed': changed, 'diff': diff}
    elif prev_state != 'directory':
        # We already know prev_state is not 'absent', therefore it exists in some form.
        raise AnsibleModuleError(results={'msg': '%s already exists as a %s' % (path, prev_state),
                                          'path': path})
    #
    # previous state == directory
    #
    changed = module.set_fs_attributes_if_different(file_args, changed, diff, expand=False)
    changed |= update_timestamp_for_file(file_args['path'], mtime, atime, diff)
    if recurse:
        changed |= recursive_set_attributes(b_path, follow, file_args, mtime, atime)
    return {'path': path, 'changed': changed, 'diff': diff}
def ensure_symlink(path, src, follow, force, timestamps):
    """Ensure *path* is a symlink pointing at *src*.

    Existing files/directories are only replaced when ``force`` is true
    (non-empty directories are never replaced).  Replacement of an existing
    entry is done via a temporary link plus ``os.rename`` so the swap is
    atomic.

    :arg path: native-string link path
    :arg src: link target (may be None; then derived from the existing link
        when *follow* is true)
    :arg follow: whether to follow the link when resolving src/attributes
    :arg force: allow replacing an existing non-link entry
    :arg timestamps: dict with modification/access time values and formats
    :returns: result dict with ``dest``, ``src``, ``changed`` and ``diff``
    """
    b_path = to_bytes(path, errors='surrogate_or_strict')
    b_src = to_bytes(src, errors='surrogate_or_strict')
    prev_state = get_state(b_path)
    mtime = get_timestamp_for_time(timestamps['modification_time'], timestamps['modification_time_format'])
    atime = get_timestamp_for_time(timestamps['access_time'], timestamps['access_time_format'])
    # source is both the source of a symlink or an informational passing of the src for a template module
    # or copy module, even if this module never uses it, it is needed to key off some things
    if src is None:
        if follow:
            # use the current target of the link as the source
            src = to_native(os.path.realpath(b_path), errors='strict')
            b_src = to_bytes(src, errors='surrogate_or_strict')
    # Resolve a relative src against the link's directory (or the directory
    # itself when path is an existing directory rather than a link).
    if not os.path.islink(b_path) and os.path.isdir(b_path):
        relpath = path
    else:
        b_relpath = os.path.dirname(b_path)
        relpath = to_native(b_relpath, errors='strict')
    absrc = os.path.join(relpath, src)
    b_absrc = to_bytes(absrc, errors='surrogate_or_strict')
    if not force and not os.path.exists(b_absrc):
        raise AnsibleModuleError(results={'msg': 'src file does not exist, use "force=yes" if you'
                                                 ' really want to create the link: %s' % absrc,
                                          'path': path, 'src': src})
    if prev_state == 'directory':
        if not force:
            raise AnsibleModuleError(results={'msg': 'refusing to convert from %s to symlink for %s'
                                                     % (prev_state, path),
                                              'path': path})
        elif os.listdir(b_path):
            # refuse to replace a directory that has files in it
            raise AnsibleModuleError(results={'msg': 'the directory %s is not empty, refusing to'
                                                     ' convert it' % path,
                                              'path': path})
    elif prev_state in ('file', 'hard') and not force:
        raise AnsibleModuleError(results={'msg': 'refusing to convert from %s to symlink for %s'
                                                 % (prev_state, path),
                                          'path': path})
    diff = initial_diff(path, 'link', prev_state)
    changed = False
    if prev_state in ('hard', 'file', 'directory', 'absent'):
        # Anything that is not already a link will have to be (re)created
        changed = True
    elif prev_state == 'link':
        # Only changed when the link currently points somewhere else
        b_old_src = os.readlink(b_path)
        if b_old_src != b_src:
            diff['before']['src'] = to_native(b_old_src, errors='strict')
            diff['after']['src'] = src
            changed = True
    else:
        raise AnsibleModuleError(results={'msg': 'unexpected position reached', 'dest': path, 'src': src})
    if changed and not module.check_mode:
        if prev_state != 'absent':
            # try to replace atomically
            b_tmppath = to_bytes(os.path.sep).join(
                [os.path.dirname(b_path), to_bytes(".%s.%s.tmp" % (os.getpid(), time.time()))]
            )
            try:
                if prev_state == 'directory':
                    os.rmdir(b_path)
                os.symlink(b_src, b_tmppath)
                os.rename(b_tmppath, b_path)
            except OSError as e:
                if os.path.exists(b_tmppath):
                    # clean up the temporary link on failure
                    os.unlink(b_tmppath)
                raise AnsibleModuleError(results={'msg': 'Error while replacing: %s'
                                                         % to_native(e, nonstring='simplerepr'),
                                                  'path': path})
        else:
            try:
                os.symlink(b_src, b_path)
            except OSError as e:
                raise AnsibleModuleError(results={'msg': 'Error while linking: %s'
                                                         % to_native(e, nonstring='simplerepr'),
                                                  'path': path})
    if module.check_mode and not os.path.exists(b_path):
        return {'dest': path, 'src': src, 'changed': changed, 'diff': diff}
    # Now that we might have created the symlink, get the arguments.
    # We need to do it now so we can properly follow the symlink if needed
    # because load_file_common_arguments sets 'path' according
    # the value of follow and the symlink existence.
    file_args = module.load_file_common_arguments(module.params)
    # Whenever we create a link to a nonexistent target we know that the nonexistent target
    # cannot have any permissions set on it.  Skip setting those and emit a warning (the user
    # can set follow=False to remove the warning)
    if follow and os.path.islink(b_path) and not os.path.exists(file_args['path']):
        module.warn('Cannot set fs attributes on a non-existent symlink target. follow should be'
                    ' set to False to avoid this.')
    else:
        changed = module.set_fs_attributes_if_different(file_args, changed, diff, expand=False)
        changed |= update_timestamp_for_file(file_args['path'], mtime, atime, diff)
    return {'dest': path, 'src': src, 'changed': changed, 'diff': diff}
def ensure_hardlink(path, src, follow, force, timestamps):
    """Ensure *path* is a hard link to *src*.

    A differing existing entry is only replaced when ``force`` is true;
    replacement uses a temporary link plus ``os.rename`` so the swap is
    atomic.

    :arg path: native-string link path
    :arg src: existing file to link to (required)
    :arg follow: accepted for interface symmetry with ensure_symlink
    :arg force: allow replacing an existing different entry
    :arg timestamps: dict with modification/access time values and formats
    :returns: result dict with ``dest``, ``src``, ``changed`` and ``diff``
    :raises AnsibleModuleError: if src is missing, or a conflicting entry
        exists and ``force`` is not set
    """
    b_path = to_bytes(path, errors='surrogate_or_strict')
    b_src = to_bytes(src, errors='surrogate_or_strict')
    prev_state = get_state(b_path)
    file_args = module.load_file_common_arguments(module.params)
    mtime = get_timestamp_for_time(timestamps['modification_time'], timestamps['modification_time_format'])
    atime = get_timestamp_for_time(timestamps['access_time'], timestamps['access_time_format'])
    # src is the source of a hardlink.  We require it if we are creating a new hardlink.
    # We require path in the argument_spec so we know it is present at this point.
    if src is None:
        raise AnsibleModuleError(results={'msg': 'src is required for creating new hardlinks'})
    if not os.path.exists(b_src):
        raise AnsibleModuleError(results={'msg': 'src does not exist', 'dest': path, 'src': src})
    diff = initial_diff(path, 'hard', prev_state)
    changed = False
    if prev_state == 'absent':
        changed = True
    elif prev_state == 'link':
        b_old_src = os.readlink(b_path)
        if b_old_src != b_src:
            diff['before']['src'] = to_native(b_old_src, errors='strict')
            diff['after']['src'] = src
            changed = True
    elif prev_state == 'hard':
        # Same inode means it is already the requested hard link
        if not os.stat(b_path).st_ino == os.stat(b_src).st_ino:
            changed = True
            if not force:
                raise AnsibleModuleError(results={'msg': 'Cannot link, different hard link exists at destination',
                                                  'dest': path, 'src': src})
    elif prev_state == 'file':
        changed = True
        if not force:
            raise AnsibleModuleError(results={'msg': 'Cannot link, %s exists at destination' % prev_state,
                                              'dest': path, 'src': src})
    elif prev_state == 'directory':
        changed = True
        if os.path.exists(b_path):
            if os.stat(b_path).st_ino == os.stat(b_src).st_ino:
                return {'path': path, 'changed': False}
            elif not force:
                raise AnsibleModuleError(results={'msg': 'Cannot link: different hard link exists at destination',
                                                  'dest': path, 'src': src})
    else:
        raise AnsibleModuleError(results={'msg': 'unexpected position reached', 'dest': path, 'src': src})
    if changed and not module.check_mode:
        if prev_state != 'absent':
            # try to replace atomically
            b_tmppath = to_bytes(os.path.sep).join(
                [os.path.dirname(b_path), to_bytes(".%s.%s.tmp" % (os.getpid(), time.time()))]
            )
            try:
                if prev_state == 'directory':
                    if os.path.exists(b_path):
                        # NOTE(review): os.unlink on an existing directory
                        # raises on most platforms — presumably this branch
                        # expects the directory to already be gone; confirm.
                        try:
                            os.unlink(b_path)
                        except OSError as e:
                            if e.errno != errno.ENOENT:  # It may already have been removed
                                raise
                os.link(b_src, b_tmppath)
                os.rename(b_tmppath, b_path)
            except OSError as e:
                if os.path.exists(b_tmppath):
                    os.unlink(b_tmppath)
                raise AnsibleModuleError(results={'msg': 'Error while replacing: %s'
                                                         % to_native(e, nonstring='simplerepr'),
                                                  'path': path})
        else:
            try:
                os.link(b_src, b_path)
            except OSError as e:
                raise AnsibleModuleError(results={'msg': 'Error while linking: %s'
                                                         % to_native(e, nonstring='simplerepr'),
                                                  'path': path})
    if module.check_mode and not os.path.exists(b_path):
        return {'dest': path, 'src': src, 'changed': changed, 'diff': diff}
    changed = module.set_fs_attributes_if_different(file_args, changed, diff, expand=False)
    changed |= update_timestamp_for_file(file_args['path'], mtime, atime, diff)
    return {'dest': path, 'src': src, 'changed': changed, 'diff': diff}
def main():
    """Module entry point: parse parameters and dispatch to the handler for
    the requested ``state``, then exit with the handler's result."""
    global module

    module = AnsibleModule(
        argument_spec=dict(
            state=dict(type='str', choices=['absent', 'directory', 'file', 'hard', 'link', 'touch']),
            path=dict(type='path', required=True, aliases=['dest', 'name']),
            _original_basename=dict(type='str'),  # Internal use only, for recursive ops
            recurse=dict(type='bool', default=False),
            force=dict(type='bool', default=False),  # Note: Should not be in file_common_args in future
            follow=dict(type='bool', default=True),  # Note: Different default than file_common_args
            _diff_peek=dict(type='bool'),  # Internal use only, for internal checks in the action plugins
            src=dict(type='path'),  # Note: Should not be in file_common_args in future
            modification_time=dict(type='str'),
            modification_time_format=dict(type='str', default='%Y%m%d%H%M.%S'),
            access_time=dict(type='str'),
            access_time_format=dict(type='str', default='%Y%m%d%H%M.%S'),
        ),
        add_file_common_args=True,
        supports_check_mode=True,
    )
    # When we rewrite basic.py, we will do something similar to this on instantiating an AnsibleModule
    sys.excepthook = _ansible_excepthook
    additional_parameter_handling(module.params)
    params = module.params
    state = params['state']
    recurse = params['recurse']
    force = params['force']
    follow = params['follow']
    path = params['path']
    src = params['src']
    # Timestamps are bundled into one dict so the ensure_* helpers share a
    # single signature for them.
    timestamps = {}
    timestamps['modification_time'] = keep_backward_compatibility_on_timestamps(params['modification_time'], state)
    timestamps['modification_time_format'] = params['modification_time_format']
    timestamps['access_time'] = keep_backward_compatibility_on_timestamps(params['access_time'], state)
    timestamps['access_time_format'] = params['access_time_format']
    # short-circuit for diff_peek
    if params['_diff_peek'] is not None:
        appears_binary = execute_diff_peek(to_bytes(path, errors='surrogate_or_strict'))
        module.exit_json(path=path, changed=False, appears_binary=appears_binary)
    if state == 'file':
        result = ensure_file_attributes(path, follow, timestamps)
    elif state == 'directory':
        result = ensure_directory(path, follow, recurse, timestamps)
    elif state == 'link':
        result = ensure_symlink(path, src, follow, force, timestamps)
    elif state == 'hard':
        result = ensure_hardlink(path, src, follow, force, timestamps)
    elif state == 'touch':
        result = execute_touch(path, follow, timestamps)
    elif state == 'absent':
        result = ensure_absent(path)
    module.exit_json(**result)
# Standard Ansible module entry point when executed directly.
if __name__ == '__main__':
    main()
| Shaps/ansible | lib/ansible/modules/files/file.py | Python | gpl-3.0 | 37,706 |
import util, pexpect, time, math
from pymavlink import mavwp
# a list of pexpect objects to read while waiting for
# messages. This keeps the output to stdout flowing
# (drained by the hook/callback helpers below)
expect_list = []
def expect_list_clear():
    '''clear the expect list'''
    global expect_list
    # empty the shared list in place so existing references stay valid
    del expect_list[:]
def expect_list_extend(list):
    '''extend the expect list'''
    global expect_list
    # in-place extension of the shared module-level list
    expect_list += list
def idle_hook(mav):
    '''called when waiting for a mavlink message'''
    global expect_list
    # drain every registered pexpect child so their output keeps flowing
    for child in expect_list:
        util.pexpect_drain(child)
def message_hook(mav, msg):
    '''called as each mavlink msg is received'''
    # same behaviour as idling: keep the pexpect children drained
    idle_hook(mav)
def expect_callback(e):
    '''called when waiting for a expect pattern'''
    global expect_list
    # drain all registered children except the one currently being waited on
    for child in expect_list:
        if child != e:
            util.pexpect_drain(child)
def get_distance(loc1, loc2):
    '''get ground distance between two locations'''
    # flat-earth approximation: scale degrees to metres
    # (1.113195e5 m per degree of latitude)
    delta_lat = loc2.lat - loc1.lat
    delta_lng = loc2.lng - loc1.lng
    return math.sqrt((delta_lat * delta_lat) + (delta_lng * delta_lng)) * 1.113195e5
def get_bearing(loc1, loc2):
    '''get bearing from loc1 to loc2'''
    # atan2 gives the mathematical angle; shift so 0 = north, 90 = east
    east = loc2.lng - loc1.lng
    north = loc2.lat - loc1.lat
    bearing = 90.00 + math.atan2(-north, east) * 57.2957795
    return bearing if bearing >= 0 else bearing + 360.00
def wait_altitude(mav, alt_min, alt_max, timeout=30):
    '''wait for a given altitude range

    Reads VFR_HUD messages until the reported altitude is inside
    [alt_min, alt_max].  The timeout window restarts whenever the altitude
    is still changing, so a slowly climbing/descending vehicle is not
    failed prematurely.  Returns True on success, False on timeout.
    '''
    climb_rate = 0
    previous_alt = 0
    tstart = time.time()
    print("Waiting for altitude between %u and %u" % (alt_min, alt_max))
    while time.time() < tstart + timeout:
        m = mav.recv_match(type='VFR_HUD', blocking=True)
        # climb_rate here is just the per-message altitude delta, not m/s
        climb_rate = m.alt - previous_alt
        previous_alt = m.alt
        print("Wait Altitude: Cur:%u, min_alt:%u, climb_rate: %u" % (m.alt, alt_min , climb_rate))
        if abs(climb_rate) > 0:
            # altitude still moving: restart the timeout window
            tstart = time.time();
        if m.alt >= alt_min and m.alt <= alt_max:
            print("Altitude OK")
            return True
    print("Failed to attain altitude range")
    return False
def wait_groundspeed(mav, gs_min, gs_max, timeout=30):
    '''wait for a given ground speed range'''
    deadline = time.time() + timeout
    print("Waiting for groundspeed between %.1f and %.1f" % (gs_min, gs_max))
    while time.time() < deadline:
        m = mav.recv_match(type='VFR_HUD', blocking=True)
        print("Wait groundspeed %.1f, target:%.1f" % (m.groundspeed, gs_min))
        if gs_min <= m.groundspeed <= gs_max:
            return True
    print("Failed to attain groundspeed range")
    return False
def wait_roll(mav, roll, accuracy, timeout=30):
    '''wait for a given roll in degrees'''
    deadline = time.time() + timeout
    print("Waiting for roll of %u" % roll)
    while time.time() < deadline:
        m = mav.recv_match(type='ATTITUDE', blocking=True)
        current = math.degrees(m.roll)
        print("Roll %u" % current)
        if math.fabs(current - roll) <= accuracy:
            print("Attained roll %u" % roll)
            return True
    print("Failed to attain roll %u" % roll)
    return False
def wait_pitch(mav, pitch, accuracy, timeout=30):
    '''wait for a given pitch in degrees'''
    deadline = time.time() + timeout
    print("Waiting for pitch of %u" % pitch)
    while time.time() < deadline:
        m = mav.recv_match(type='ATTITUDE', blocking=True)
        current = math.degrees(m.pitch)
        print("Pitch %u" % current)
        if math.fabs(current - pitch) <= accuracy:
            print("Attained pitch %u" % pitch)
            return True
    print("Failed to attain pitch %u" % pitch)
    return False
def wait_heading(mav, heading, accuracy=5, timeout=30):
    '''wait for a given heading'''
    deadline = time.time() + timeout
    print("Waiting for heading %u with accuracy %u" % (heading, accuracy))
    while time.time() < deadline:
        m = mav.recv_match(type='VFR_HUD', blocking=True)
        print("Heading %u" % m.heading)
        if math.fabs(m.heading - heading) <= accuracy:
            print("Attained heading %u" % heading)
            return True
    print("Failed to attain heading %u" % heading)
    return False
def wait_distance(mav, distance, accuracy=5, timeout=30):
    '''wait for flight of a given distance'''
    deadline = time.time() + timeout
    start = mav.location()
    while time.time() < deadline:
        delta = get_distance(start, mav.location())
        print("Distance %.2f meters" % delta)
        if math.fabs(delta - distance) <= accuracy:
            print("Attained distance %.2f meters OK" % delta)
            return True
        if delta > (distance + accuracy):
            # flew past the target band: this cannot recover, so fail now
            print("Failed distance - overshoot delta=%f distance=%f" % (delta, distance))
            return False
    print("Failed to attain distance %u" % distance)
    return False
def wait_location(mav, loc, accuracy=5, timeout=30, target_altitude=None, height_accuracy=-1):
    '''wait for arrival at a location

    Polls the vehicle position until it is within *accuracy* metres of
    *loc*.  If height_accuracy is not -1, the altitude must also be within
    height_accuracy of target_altitude (which defaults to loc.alt).
    Returns True on arrival, False on timeout.
    '''
    tstart = time.time()
    if target_altitude is None:
        target_altitude = loc.alt
    print("Waiting for location %.4f,%.4f at altitude %.1f height_accuracy=%.1f" % (
        loc.lat, loc.lng, target_altitude, height_accuracy))
    while time.time() < tstart + timeout:
        pos = mav.location()
        delta = get_distance(loc, pos)
        print("Distance %.2f meters alt %.1f" % (delta, pos.alt))
        if delta <= accuracy:
            # horizontally there: keep waiting if the altitude check is
            # enabled and not yet satisfied
            if height_accuracy != -1 and math.fabs(pos.alt - target_altitude) > height_accuracy:
                continue
            print("Reached location (%.2f meters)" % delta)
            return True
    print("Failed to attain location")
    return False
def wait_waypoint(mav, wpnum_start, wpnum_end, allow_skip=True, max_dist=2, timeout=400, mode=None):
    '''wait for waypoint ranges

    Tracks the current mission item until wpnum_end is reached (within
    max_dist metres), a waypoint is skipped when allow_skip is False, the
    vehicle leaves *mode* (treated as success), or the timeout expires.
    The timeout window restarts each time a new waypoint is started.
    Returns True on success, False on failure/timeout.
    '''
    tstart = time.time()
    # this message arrives after we set the current WP
    start_wp = mav.waypoint_current()
    current_wp = start_wp
    print("\ntest: wait for waypoint ranges start=%u end=%u\n\n" % (wpnum_start, wpnum_end))
    # if start_wp != wpnum_start:
    # print("test: Expected start waypoint %u but got %u" % (wpnum_start, start_wp))
    # return False
    while time.time() < tstart + timeout:
        seq = mav.waypoint_current()
        m = mav.recv_match(type='NAV_CONTROLLER_OUTPUT', blocking=True)
        wp_dist = m.wp_dist
        m = mav.recv_match(type='VFR_HUD', blocking=True)
        # if we exited the required mode, finish
        if mode is not None and mav.flightmode != mode:
            print('Exited %s mode' % mode)
            return True
        print("test: WP %u (wp_dist=%u Alt=%d), current_wp: %u, wpnum_end: %u" % (seq, wp_dist, m.alt, current_wp, wpnum_end))
        if seq == current_wp+1 or (seq > current_wp+1 and allow_skip):
            print("test: Starting new waypoint %u" % seq)
            # restart the timeout for each new waypoint leg
            tstart = time.time()
            current_wp = seq
            # the wp_dist check is a hack until we can sort out the right seqnum
            # for end of mission
        #if current_wp == wpnum_end or (current_wp == wpnum_end-1 and wp_dist < 2):
        if (current_wp == wpnum_end and wp_dist < max_dist):
            print("Reached final waypoint %u" % seq)
            return True
        if (seq >= 255):
            # sequence numbers >= 255 indicate the mission has wrapped/ended
            print("Reached final waypoint %u" % seq)
            return True
        if seq > current_wp+1:
            # only reachable when allow_skip is False
            print("Failed: Skipped waypoint! Got wp %u expected %u" % (seq, current_wp+1))
            return False
    print("Failed: Timed out waiting for waypoint %u of %u" % (wpnum_end, wpnum_end))
    return False
def save_wp(mavproxy, mav):
    '''trigger the save-waypoint behaviour by pulsing RC channel 7 high
    then low, waiting for each value to be reflected in RC_CHANNELS_RAW'''
    mavproxy.send('rc 7 2000\n')
    mav.recv_match(condition='RC_CHANNELS_RAW.chan7_raw==2000', blocking=True)
    mavproxy.send('rc 7 1000\n')
    mav.recv_match(condition='RC_CHANNELS_RAW.chan7_raw==1000', blocking=True)
def wait_mode(mav, mode):
    '''wait for a flight mode to be engaged

    Blocks (no timeout) until the vehicle reports the given mode;
    comparison is case-insensitive via the condition expression.
    '''
    print("Waiting for mode %s" % mode)
    mav.recv_match(condition='MAV.flightmode.upper()=="%s".upper()' % mode, blocking=True)
    print("Got mode %s" % mode)
def mission_count(filename):
    '''load a mission from a file and return number of waypoints'''
    loader = mavwp.MAVWPLoader()
    loader.load(filename)
    return loader.count()
| owenson/ardupilot | Tools/autotest/common.py | Python | gpl-3.0 | 8,336 |
# Timed mute: !tm <player> <seconds> <reason>
# default time 5 minutes, default reason None
# by topologist June 30th 2012
from scheduler import Scheduler
from commands import add, admin, get_player, join_arguments, name
@name('tm')
@admin
def timed_mute(connection, *args):
    # usage: !tm <player> <seconds> <reason>
    # (no docstring on purpose: command help text is taken from elsewhere)
    protocol = connection.protocol
    nick = args[0]
    seconds = int(args[1])
    reason = join_arguments(args[2:])
    player = get_player(protocol, nick)
    if seconds < 0:
        raise ValueError()
    if player.mute:
        return '%s is already muted!' % nick
    TimedMute(player, seconds, reason)
# register the !tm command with the command framework
add(timed_mute)
class TimedMute(object):
    """Mutes a player and schedules the matching unmute.

    A duration of 0 mutes the player indefinitely (no unmute scheduled).
    """
    # the muted player connection (None until __init__ completes normally)
    player = None
    # mute duration in seconds (None for the indefinite case)
    time = None
    def __init__(self, player, time = 300, reason = 'None'):
        if time == 0:
            # 0 seconds means a permanent mute: no scheduler, no unmute
            player.mute = True
            player.protocol.send_chat('%s was muted indefinitely (Reason: %s)' % (
                player.name, reason), irc = True)
            return
        # schedule the unmute and keep the scheduler alive on the player so
        # it can be discarded if the player disconnects first
        schedule = Scheduler(player.protocol)
        schedule.call_later(time, self.end)
        player.mute_schedule = schedule
        player.protocol.send_chat('%s was muted for %s seconds (Reason: %s)' % (
            player.name, time, reason), irc = True)
        player.mute = True
        self.player = player
        self.time = time
    def end(self):
        # scheduler callback: lift the mute and announce it
        self.player.mute = False
        message = '%s was unmuted after %s seconds' % (self.player.name, self.time)
        self.player.protocol.send_chat(message, irc = True)
def apply_script(protocol, connection, config):
    """Script entry point: extend the connection class so a pending unmute
    timer is discarded when the player disconnects."""
    class TimedMuteConnection(connection):
        # Scheduler holding the pending unmute callback, if any
        mute_schedule = None
        def on_disconnect(self):
            if self.mute_schedule:
                # drop the scheduler so its pending unmute is released
                del self.mute_schedule
            connection.on_disconnect(self)
    return protocol, TimedMuteConnection
| Architektor/PySnip | contrib/scripts/timedmute.py | Python | gpl-3.0 | 1,733 |
"""
This file contains the logic for cohorts, as exposed internally to the
forums, and to the cohort admin views.
"""
import logging
import random
import request_cache
from courseware import courses
from django.contrib.auth.models import User
from django.core.exceptions import ValidationError
from django.core.validators import validate_email
from django.db import IntegrityError, transaction
from django.db.models.signals import m2m_changed, post_save
from django.dispatch import receiver
from django.http import Http404
from django.utils.translation import ugettext as _
from eventtracking import tracker
from request_cache.middleware import request_cached
from student.models import get_user_by_username_or_email
from .models import (
CohortMembership,
CourseCohort,
CourseCohortsSettings,
CourseUserGroup,
CourseUserGroupPartitionGroup,
UnregisteredLearnerCohortAssignments
)
log = logging.getLogger(__name__)
@receiver(post_save, sender=CourseUserGroup)
def _cohort_added(sender, **kwargs):
    """Emits a tracking log event each time a cohort is created"""
    cohort = kwargs["instance"]
    # only newly-created groups of the COHORT type are tracked
    if not (kwargs["created"] and cohort.group_type == CourseUserGroup.COHORT):
        return
    tracker.emit(
        "edx.cohort.created",
        {"cohort_id": cohort.id, "cohort_name": cohort.name}
    )
@receiver(m2m_changed, sender=CourseUserGroup.users.through)
def _cohort_membership_changed(sender, **kwargs):
    """Emits a tracking log event each time cohort membership is modified

    Handles both directions of the m2m relation (user -> groups and
    group -> users) and both additions and removals/clears.
    """
    def get_event_iter(user_id_iter, cohort_iter):
        """
        Returns a dictionary containing a mashup of cohort and user information for the given lists
        """
        return (
            {"cohort_id": cohort.id, "cohort_name": cohort.name, "user_id": user_id}
            for user_id in user_id_iter
            for cohort in cohort_iter
        )
    action = kwargs["action"]
    instance = kwargs["instance"]
    pk_set = kwargs["pk_set"]
    reverse = kwargs["reverse"]
    if action == "post_add":
        event_name = "edx.cohort.user_added"
    elif action in ["post_remove", "pre_clear"]:
        event_name = "edx.cohort.user_removed"
    else:
        # other m2m actions (pre_add, post_clear, ...) are not tracked
        return
    if reverse:
        # reverse relation: instance is the user, pk_set are group ids
        user_id_iter = [instance.id]
        if action == "pre_clear":
            cohort_iter = instance.course_groups.filter(group_type=CourseUserGroup.COHORT)
        else:
            cohort_iter = CourseUserGroup.objects.filter(pk__in=pk_set, group_type=CourseUserGroup.COHORT)
    else:
        # forward relation: instance is the group, pk_set are user ids
        cohort_iter = [instance] if instance.group_type == CourseUserGroup.COHORT else []
        if action == "pre_clear":
            user_id_iter = (user.id for user in instance.users.all())
        else:
            user_id_iter = pk_set
    for event in get_event_iter(user_id_iter, cohort_iter):
        tracker.emit(event_name, event)
# A 'default cohort' is an auto-cohort that is automatically created for a course if no cohort with automatic
# assignment have been specified. It is intended to be used in a cohorted course for users who have yet to be assigned
# to a cohort, if the course staff have not explicitly created a cohort of type "RANDOM".
# Note that course staff have the ability to change the name of this cohort after creation via the cohort
# management UI in the instructor dashboard.
DEFAULT_COHORT_NAME = _("Default Group")
# tl;dr: global state is bad. capa reseeds random every time a problem is loaded. Even
# if and when that's fixed, it's a good idea to have a local generator to avoid any other
# code that messes with the global random module.
# Lazily initialized by local_random(); do not use this name directly.
_local_random = None
def local_random():
    """
    Get the local random number generator. In a function so that we don't run
    random.Random() at import time.

    Returns the same ``random.Random`` instance on every call.
    """
    # ironic, isn't it?
    global _local_random
    if _local_random is None:
        _local_random = random.Random()
    return _local_random
def is_course_cohorted(course_key):
    """
    Given a course key, return a boolean for whether or not the course is
    cohorted.

    Raises:
        Http404 if the course doesn't exist.
    """
    # delegates lookup (and the Http404) to the course cohort settings helper
    return _get_course_cohort_settings(course_key).is_cohorted
def get_course_cohort_id(course_key):
    """
    Given a course key, return the int id for the cohort settings.

    Raises:
        Http404 if the course doesn't exist.
    """
    return _get_course_cohort_settings(course_key).id
def set_course_cohorted(course_key, cohorted):
    """
    Given a course course and a boolean, sets whether or not the course is cohorted.

    Raises:
        Value error if `cohorted` is not a boolean
    """
    # strict bool check: truthy non-bools (e.g. "false") must not slip through
    if not isinstance(cohorted, bool):
        raise ValueError("Cohorted must be a boolean")
    course_cohort_settings = _get_course_cohort_settings(course_key)
    course_cohort_settings.is_cohorted = cohorted
    course_cohort_settings.save()
def get_cohort_id(user, course_key, use_cached=False):
    """
    Given a course key and a user, return the id of the cohort that user is
    assigned to in that course. If they don't have a cohort, return None.
    """
    cohort = get_cohort(user, course_key, use_cached=use_cached)
    return cohort.id if cohort is not None else None
COHORT_CACHE_NAMESPACE = u"cohorts.get_cohort"
def _cohort_cache_key(user_id, course_key):
"""
Returns the cache key for the given user_id and course_key.
"""
return u"{}.{}".format(user_id, course_key)
def bulk_cache_cohorts(course_key, users):
    """
    Pre-fetches and caches the cohort assignments for the
    given users, for later fast retrieval by get_cohort.

    Users without a membership (and every user in a non-cohorted course)
    are cached as None.
    """
    # before populating the cache with another bulk set of data,
    # remove previously cached entries to keep memory usage low.
    request_cache.clear_cache(COHORT_CACHE_NAMESPACE)
    cache = request_cache.get_cache(COHORT_CACHE_NAMESPACE)
    if is_course_cohorted(course_key):
        # one query fetches all memberships for the requested users
        cohorts_by_user = {
            membership.user: membership
            for membership in
            CohortMembership.objects.filter(user__in=users, course_id=course_key).select_related('user__id')
        }
        for user, membership in cohorts_by_user.iteritems():
            cache[_cohort_cache_key(user.id, course_key)] = membership.course_user_group
        uncohorted_users = filter(lambda u: u not in cohorts_by_user, users)
    else:
        uncohorted_users = users
    # negative-cache users with no cohort so get_cohort can short-circuit
    for user in uncohorted_users:
        cache[_cohort_cache_key(user.id, course_key)] = None
def get_cohort(user, course_key, assign=True, use_cached=False):
    """
    Returns the user's cohort for the specified course.

    The cohort for the user is cached for the duration of a request. Pass
    use_cached=True to use the cached value instead of fetching from the
    database.

    Arguments:
        user: a Django User object.
        course_key: CourseKey
        assign (bool): if False then we don't assign a group to user
        use_cached (bool): Whether to use the cached value or fetch from database.

    Returns:
        A CourseUserGroup object if the course is cohorted and the User has a
        cohort, else None.

    Raises:
        ValueError if the CourseKey doesn't exist.
    """
    cache = request_cache.get_cache(COHORT_CACHE_NAMESPACE)
    cache_key = _cohort_cache_key(user.id, course_key)
    if use_cached and cache_key in cache:
        return cache[cache_key]
    # not using the cache (or miss): drop any stale entry before refetching
    cache.pop(cache_key, None)
    # First check whether the course is cohorted (users shouldn't be in a cohort
    # in non-cohorted courses, but settings can change after course starts)
    if not is_course_cohorted(course_key):
        return cache.setdefault(cache_key, None)
    # If course is cohorted, check if the user already has a cohort.
    try:
        membership = CohortMembership.objects.get(
            course_id=course_key,
            user_id=user.id,
        )
        return cache.setdefault(cache_key, membership.course_user_group)
    except CohortMembership.DoesNotExist:
        # Didn't find the group. If we do not want to assign, return here.
        if not assign:
            # Do not cache the cohort here, because in the next call assign
            # may be True, and we will have to assign the user a cohort.
            return None
    # Otherwise assign the user a cohort.
    try:
        with transaction.atomic():
            # If learner has been pre-registered in a cohort, get that cohort. Otherwise assign to a random cohort.
            course_user_group = None
            for assignment in UnregisteredLearnerCohortAssignments.objects.filter(email=user.email, course_id=course_key):
                course_user_group = assignment.course_user_group
                unregistered_learner = assignment
            if course_user_group:
                # consume the pre-registration record now that it is applied
                unregistered_learner.delete()
            else:
                course_user_group = get_random_cohort(course_key)
            membership = CohortMembership.objects.create(
                user=user,
                course_user_group=course_user_group,
            )
            return cache.setdefault(cache_key, membership.course_user_group)
    except IntegrityError as integrity_error:
        # An IntegrityError is raised when multiple workers attempt to
        # create the same row in one of the cohort model entries:
        # CourseCohort, CohortMembership.
        log.info(
            "HANDLING_INTEGRITY_ERROR: IntegrityError encountered for course '%s' and user '%s': %s",
            course_key, user.id, unicode(integrity_error)
        )
        # NOTE(review): this retry is unbounded recursion; presumably the
        # second attempt finds the row created by the other worker — confirm.
        return get_cohort(user, course_key, assign, use_cached)
def get_random_cohort(course_key):
    """
    Helper method to get a cohort for random assignment.

    If there are multiple cohorts of type RANDOM in the course, one of them will be randomly selected.
    If there are no existing cohorts of type RANDOM in the course, one will be created.
    """
    course = courses.get_course(course_key)
    cohorts = get_course_cohorts(course, assignment_type=CourseCohort.RANDOM)
    if cohorts:
        # use the module-local RNG to avoid interference with global random
        cohort = local_random().choice(cohorts)
    else:
        cohort = CourseCohort.create(
            cohort_name=DEFAULT_COHORT_NAME,
            course_id=course_key,
            assignment_type=CourseCohort.RANDOM
        ).course_user_group
    return cohort
def migrate_cohort_settings(course):
    """
    Migrate all the cohort settings associated with this course from modulestore to mysql.
    After that we will never touch modulestore for any cohort related settings.

    Returns the (possibly newly created) CourseCohortsSettings row.
    """
    cohort_settings, created = CourseCohortsSettings.objects.get_or_create(
        course_id=course.id,
        defaults=_get_cohort_settings_from_modulestore(course)
    )
    # Add the new and update the existing cohorts
    if created:
        # Update the manual cohorts already present in CourseUserGroup
        manual_cohorts = CourseUserGroup.objects.filter(
            course_id=course.id,
            group_type=CourseUserGroup.COHORT
        ).exclude(name__in=course.auto_cohort_groups)
        for cohort in manual_cohorts:
            CourseCohort.create(course_user_group=cohort)
        # auto cohorts from the course definition become RANDOM cohorts
        for group_name in course.auto_cohort_groups:
            CourseCohort.create(cohort_name=group_name, course_id=course.id, assignment_type=CourseCohort.RANDOM)
    return cohort_settings
def get_course_cohorts(course, assignment_type=None):
    """
    Get a list of all the cohorts in the given course. This will include auto cohorts,
    regardless of whether or not the auto cohorts include any users.

    Arguments:
        course: the course for which cohorts should be returned
        assignment_type: cohort assignment type (optional filter)

    Returns:
        A list of CourseUserGroup objects. Empty if there are no cohorts. Does
        not check whether the course is cohorted.
    """
    # Migrate cohort settings for this course (no-op after first call)
    migrate_cohort_settings(course)
    query_set = CourseUserGroup.objects.filter(
        course_id=course.location.course_key,
        group_type=CourseUserGroup.COHORT
    )
    query_set = query_set.filter(cohort__assignment_type=assignment_type) if assignment_type else query_set
    return list(query_set)
def get_cohort_names(course):
    """Return a dict that maps cohort ids to names for the given course"""
    return dict((cohort.id, cohort.name) for cohort in get_course_cohorts(course))
### Helpers for cohort management views
def get_cohort_by_name(course_key, name):
    """
    Fetch the cohort (a COHORT-typed CourseUserGroup) called *name* in the
    given course.  Propagates CourseUserGroup.DoesNotExist when the course has
    no cohort with that name.
    """
    return CourseUserGroup.objects.get(
        name=name,
        group_type=CourseUserGroup.COHORT,
        course_id=course_key
    )
def get_cohort_by_id(course_key, cohort_id):
    """
    Fetch the cohort with primary key *cohort_id*, scoped to *course_key* as
    extra validation.  Propagates CourseUserGroup.DoesNotExist when no such
    cohort exists in that course.
    """
    return CourseUserGroup.objects.get(
        id=cohort_id,
        group_type=CourseUserGroup.COHORT,
        course_id=course_key
    )
def add_cohort(course_key, name, assignment_type):
    """
    Create a new cohort in the given course and emit a creation event.

    Raises:
        ValueError: when a cohort with the same name already exists, or when
            course_key does not refer to an existing course.
    """
    log.debug("Adding cohort %s to %s", name, course_key)
    # Reject duplicates up front so two cohorts can never share one name.
    if is_cohort_exists(course_key, name):
        raise ValueError(_("You cannot create two cohorts with the same name"))
    try:
        course = courses.get_course_by_id(course_key)
    except Http404:
        raise ValueError("Invalid course_key")
    course_cohort = CourseCohort.create(
        cohort_name=name,
        course_id=course.id,
        assignment_type=assignment_type
    )
    cohort = course_cohort.course_user_group
    tracker.emit(
        "edx.cohort.creation_requested",
        {"cohort_id": cohort.id, "cohort_name": cohort.name}
    )
    return cohort
def is_cohort_exists(course_key, name):
    """Return True when the given course already has a cohort called *name*."""
    matching = CourseUserGroup.objects.filter(
        course_id=course_key,
        group_type=CourseUserGroup.COHORT,
        name=name
    )
    return matching.exists()
def remove_user_from_cohort(cohort, username_or_email):
    """
    Look up the given user and remove them from the specified cohort.

    Arguments:
        cohort: CourseUserGroup
        username_or_email: string. Treated as email if has '@'

    Raises:
        User.DoesNotExist: when the user cannot be found.
        ValueError: when the user is not a member of this cohort.
    """
    user = get_user_by_username_or_email(username_or_email)
    try:
        CohortMembership.objects.get(course_user_group=cohort, user=user).delete()
    except CohortMembership.DoesNotExist:
        raise ValueError("User {} was not present in cohort {}".format(username_or_email, cohort))
def add_user_to_cohort(cohort, username_or_email):
    """
    Look up the given user, and if successful, add them to the specified cohort.
    Arguments:
        cohort: CourseUserGroup
        username_or_email: string. Treated as email if has '@'
    Returns:
        User object (or None if the email address is preassigned),
        string (or None) indicating previous cohort,
        and whether the user is a preassigned user or not
    Raises:
        User.DoesNotExist if can't find user. However, if a valid email is provided for the user, it is stored
        in a database so that the user can be added to the cohort if they eventually enroll in the course.
        ValueError if user already present in this cohort.
        ValidationError if an invalid email address is entered.
        User.DoesNotExist if a user could not be found.
    """
    try:
        user = get_user_by_username_or_email(username_or_email)
        membership = CohortMembership(course_user_group=cohort, user=user)
        membership.save()  # This will handle both cases, creation and updating, of a CohortMembership for this user.
        tracker.emit(
            "edx.cohort.user_add_requested",
            {
                "user_id": user.id,
                "cohort_id": cohort.id,
                "cohort_name": cohort.name,
                "previous_cohort_id": membership.previous_cohort_id,
                "previous_cohort_name": membership.previous_cohort_name,
            }
        )
        return (user, membership.previous_cohort_name, False)
    except User.DoesNotExist as ex:
        # If username_or_email is an email address, store in database.
        try:
            validate_email(username_or_email)
            # The learner is not registered yet: remember the email so they can
            # be placed into this cohort automatically when they enroll.
            try:
                assignment = UnregisteredLearnerCohortAssignments.objects.get(
                    email=username_or_email, course_id=cohort.course_id
                )
                # A preassignment already exists -- repoint it at this cohort.
                assignment.course_user_group = cohort
                assignment.save()
            except UnregisteredLearnerCohortAssignments.DoesNotExist:
                assignment = UnregisteredLearnerCohortAssignments.objects.create(
                    course_user_group=cohort, email=username_or_email, course_id=cohort.course_id
                )
            tracker.emit(
                "edx.cohort.email_address_preassigned",
                {
                    "user_email": assignment.email,
                    "cohort_id": cohort.id,
                    "cohort_name": cohort.name,
                }
            )
            return (None, None, True)
        except ValidationError as invalid:
            # Only surface the email-validation error when the input actually
            # looked like an email; otherwise re-raise the original lookup
            # failure so callers see User.DoesNotExist.
            if "@" in username_or_email:
                raise invalid
            else:
                raise ex
def get_group_info_for_cohort(cohort, use_cached=False):
    """
    Get the ids of the group and partition to which this cohort has been linked
    as a tuple of (int, int).
    If the cohort has not been linked to any group/partition, both values in the
    tuple will be None.
    The partition group info is cached for the duration of a request. Pass
    use_cached=True to use the cached value instead of fetching from the
    database.
    """
    cache = request_cache.get_cache(u"cohorts.get_group_info_for_cohort")
    cache_key = unicode(cohort.id)
    if use_cached and cache_key in cache:
        return cache[cache_key]
    # Not using (or not finding) the cached value: drop any stale entry before
    # re-fetching so setdefault below always stores the fresh result.
    cache.pop(cache_key, None)
    try:
        partition_group = CourseUserGroupPartitionGroup.objects.get(course_user_group=cohort)
        return cache.setdefault(cache_key, (partition_group.group_id, partition_group.partition_id))
    except CourseUserGroupPartitionGroup.DoesNotExist:
        pass
    # No link row exists for this cohort -- cache and return the "unlinked" pair.
    return cache.setdefault(cache_key, (None, None))
def set_assignment_type(user_group, assignment_type):
    """
    Change the assignment type of the given cohort.

    Raises:
        ValueError: when the change would leave the course without any
            random-assignment cohort.
    """
    course_cohort = user_group.cohort
    is_type_changing = (course_cohort.assignment_type != assignment_type)
    # Refuse to change the type of the course's only RANDOM cohort.
    if is_last_random_cohort(user_group) and is_type_changing:
        raise ValueError(_("There must be one cohort to which students can automatically be assigned."))
    course_cohort.assignment_type = assignment_type
    course_cohort.save()
def get_assignment_type(user_group):
    """Return the assignment type of the given cohort's CourseCohort record."""
    return user_group.cohort.assignment_type
def is_last_random_cohort(user_group):
    """
    Return True when this cohort is the only RANDOM-assignment cohort left in
    its course.
    """
    random_cohorts = list(CourseUserGroup.objects.filter(
        course_id=user_group.course_id,
        group_type=CourseUserGroup.COHORT,
        cohort__assignment_type=CourseCohort.RANDOM
    ))
    if len(random_cohorts) != 1:
        return False
    return random_cohorts[0].name == user_group.name
@request_cached
def _get_course_cohort_settings(course_key):
    """
    Return cohort settings for a course, creating them from the modulestore on
    first access.  NOTE that the only non-deprecated fields in
    CourseCohortSettings are `course_id` and `is_cohorted`; other fields should
    only be used for migration purposes.

    Arguments:
        course_key: CourseKey

    Returns:
        A CourseCohortSettings object.

    Raises:
        Http404 if course_key is invalid.
    """
    try:
        return CourseCohortsSettings.objects.get(course_id=course_key)
    except CourseCohortsSettings.DoesNotExist:
        # No mysql row yet: migrate the settings out of the modulestore now.
        course = courses.get_course_by_id(course_key)
        return migrate_cohort_settings(course)
def get_legacy_discussion_settings(course_key):
    """
    Return the legacy discussion-related cohort settings for a course as a
    dict, reading from mysql when a settings row exists and falling back to
    the modulestore otherwise.
    """
    try:
        settings_row = CourseCohortsSettings.objects.get(course_id=course_key)
    except CourseCohortsSettings.DoesNotExist:
        # Settings were never migrated -- read them straight from the course.
        course = courses.get_course_by_id(course_key)
        return _get_cohort_settings_from_modulestore(course)
    return {
        'is_cohorted': settings_row.is_cohorted,
        'cohorted_discussions': settings_row.cohorted_discussions,
        'always_cohort_inline_discussions': settings_row.always_cohort_inline_discussions
    }
def _get_cohort_settings_from_modulestore(course):
return {
'is_cohorted': course.is_cohorted,
'cohorted_discussions': list(course.cohorted_discussions),
'always_cohort_inline_discussions': course.always_cohort_inline_discussions
}
| pepeportela/edx-platform | openedx/core/djangoapps/course_groups/cohorts.py | Python | agpl-3.0 | 21,299 |
import unittest
from os.path import relpath
from coalib.results.SourcePosition import SourcePosition
from coala_utils.ContextManagers import prepare_file
class SourcePositionTest(unittest.TestCase):
    """Unit tests for coalib's SourcePosition (file/line/column triple)."""
    def test_initialization(self):
        # A file is mandatory; a column without a line is rejected.
        with self.assertRaises(TypeError):
            SourcePosition(None, 0)
        with self.assertRaises(ValueError):
            SourcePosition('file', None, 1)
        # However these should work:
        SourcePosition('file', None, None)
        SourcePosition('file', 4, None)
        SourcePosition('file', 4, 5)
    def test_string_conversion(self):
        # repr() includes the (possibly absolutized) file plus line/column;
        # str() is the compact "file[:line[:column]]" form.
        uut = SourcePosition('filename', 1)
        self.assertRegex(
            repr(uut),
            "<SourcePosition object\\(file='.*filename', line=1, "
            'column=None\\) at 0x[0-9a-fA-F]+>')
        self.assertEqual(str(uut), 'filename:1')
        uut = SourcePosition('None', None)
        self.assertRegex(
            repr(uut),
            "<SourcePosition object\\(file='.*None', line=None, column=None\\) "
            'at 0x[0-9a-fA-F]+>')
        self.assertEqual(str(uut), 'None')
        uut = SourcePosition('filename', 3, 2)
        self.assertEqual(str(uut), 'filename:3:2')
    def test_json(self):
        # With use_relpath=True the serialized file path is relative to cwd.
        with prepare_file([''], None) as (_, filename):
            uut = SourcePosition(filename, 1)
            self.assertEqual(uut.__json__(use_relpath=True)
                             ['file'], relpath(filename))
    def assert_equal(self, first, second):
        # Helper: asserts full equality through all three comparison operators.
        self.assertGreaterEqual(first, second)
        self.assertEqual(first, second)
        self.assertLessEqual(first, second)
    def assert_ordering(self, greater, lesser):
        # Helper: asserts a strict ordering is consistent in both directions.
        self.assertGreater(greater, lesser)
        self.assertGreaterEqual(greater, lesser)
        self.assertNotEqual(greater, lesser)
        self.assertLessEqual(lesser, greater)
        self.assertLess(lesser, greater)
| coala/coala | tests/results/SourcePositionTest.py | Python | agpl-3.0 | 1,906 |
from . import ir_ui_view
| Vauxoo/website | website_lazy_load_image/models/__init__.py | Python | agpl-3.0 | 25 |
from __future__ import absolute_import
# Re-export the standard ``dbm`` interface unchanged.
from dbm import *
from ..version_info import PY2
if PY2:
    # On Python 2 also expose the submodule shims (dumb/gnu/ndbm) and pull in
    # the legacy ``whichdb``/``anydbm`` helpers so callers can use the
    # Python 3 style ``dbm`` package layout.
    from . import dumb, gnu, ndbm
    from whichdb import *
    from anydbm import *
| EnTeQuAk/dotfiles | sublime-text-3/Packages/isort/pies/dbm/__init__.py | Python | unlicense | 184 |
"""Remove unused models
Revision ID: 3f289637f530
Revises: 4ba1dd8c3080
Create Date: 2014-04-17 11:08:50.963964
"""
# revision identifiers, used by Alembic.
revision = '3f289637f530'
down_revision = '4ba1dd8c3080'
from alembic import op
def upgrade():
    """Drop the unused aggregate/test-group tables."""
    # Children are dropped before their parents so no foreign key is left
    # dangling mid-migration.
    for table_name in ('aggtestgroup', 'testgroup_test', 'testgroup',
                       'aggtestsuite'):
        op.drop_table(table_name)
def downgrade():
    # Intentionally unsupported: this migration removes dead tables and their
    # contents cannot be reconstructed.
    raise NotImplementedError
| bowlofstew/changes | migrations/versions/3f289637f530_remove_unused_models.py | Python | apache-2.0 | 442 |
#!/usr/bin/env python
#
# Copyright 2013 The Rust Project Developers. See the COPYRIGHT
# file at the top-level directory of this distribution and at
# http://rust-lang.org/COPYRIGHT.
#
# Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
# http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
# <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
# option. This file may not be copied, modified, or distributed
# except according to those terms.
"""
This script creates a pile of compile-fail tests check that all the
derives have spans that point to the fields, rather than the
#[derive(...)] line.
sample usage: src/etc/generate-deriving-span-tests.py
"""
import os, datetime, stat, re
TEST_DIR = os.path.abspath(
os.path.join(os.path.dirname(__file__), '../test/ui/derives/'))
YEAR = datetime.datetime.now().year
TEMPLATE = """// Copyright {year} The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// This file was auto-generated using 'src/etc/generate-deriving-span-tests.py'
{error_deriving}
struct Error;
{code}
fn main() {{}}
"""
ENUM_STRING = """
#[derive({traits})]
enum Enum {{
A(
Error {errors}
)
}}
"""
ENUM_STRUCT_VARIANT_STRING = """
#[derive({traits})]
enum Enum {{
A {{
x: Error {errors}
}}
}}
"""
STRUCT_STRING = """
#[derive({traits})]
struct Struct {{
x: Error {errors}
}}
"""
STRUCT_TUPLE_STRING = """
#[derive({traits})]
struct Struct(
Error {errors}
);
"""
ENUM_TUPLE, ENUM_STRUCT, STRUCT_FIELDS, STRUCT_TUPLE = range(4)
def create_test_case(type, trait, super_traits, error_count):
    """
    Render one compile-fail test file for deriving *trait* (plus its
    *super_traits*) on the shape selected by *type*, expecting *error_count*
    error annotations on the offending field.
    """
    templates = (ENUM_STRING, ENUM_STRUCT_VARIANT_STRING,
                 STRUCT_STRING, STRUCT_TUPLE_STRING)
    body_template = templates[type]
    derived = ','.join([trait] + super_traits)
    supers = ','.join(super_traits)
    if supers:
        error_deriving = '#[derive(%s)]' % supers
    else:
        error_deriving = ''
    # One "//~^ ERROR" marker per expected error, each pointing one line
    # further up than the previous.
    markers = []
    for n in range(error_count):
        markers.append('//~%s ERROR' % ('^' * n))
    errors = '\n'.join(markers)
    code = body_template.format(traits=derived, errors=errors)
    return TEMPLATE.format(year=YEAR, error_deriving=error_deriving, code=code)
def write_file(name, string):
    """
    Write *string* to the derives-span test file for *name*, unless the file
    already holds the same content (ignoring the copyright year).

    Returns 1 when the file was (re)written, 0 when it was left untouched, so
    the caller can count how many tests actually changed.
    """
    test_file = os.path.join(TEST_DIR, 'derives-span-%s.rs' % name)
    # Bug fix: the previous version opened test_file for reading
    # unconditionally and crashed with IOError when generating a brand-new
    # test. Only compare against existing content when the file exists.
    if os.path.exists(test_file):
        with open(test_file) as f:
            old_str = f.read()
        old_str_ignoring_date = re.sub(r'^// Copyright \d+',
            '// Copyright {year}'.format(year = YEAR), old_str)
        if old_str_ignoring_date == string:
            # if all we're doing is updating the copyright year, ignore it
            return 0
        # set write permission on the existing read-only file, so it can be changed
        os.chmod(test_file, stat.S_IWUSR)
    with open(test_file, 'w') as f:
        f.write(string)
    # mark file read-only
    os.chmod(test_file, stat.S_IRUSR|stat.S_IRGRP|stat.S_IROTH)
    return 1
# Bit flags selecting which shapes a trait's tests should cover.
ENUM = 1
STRUCT = 2
ALL = STRUCT | ENUM
# trait name -> (shape flags, required super-traits, expected error count).
# Entries with flags 0 are currently skipped entirely.
traits = {
    'Default': (STRUCT, [], 1),
    'FromPrimitive': (0, [], 0), # only works for C-like enums
    'Decodable': (0, [], 0), # FIXME: quoting gives horrible spans
    'Encodable': (0, [], 0), # FIXME: quoting gives horrible spans
}
# These traits are tested on every shape (ALL).
for (trait, supers, errs) in [('Clone', [], 1),
                              ('PartialEq', [], 2),
                              ('PartialOrd', ['PartialEq'], 1),
                              ('Eq', ['PartialEq'], 1),
                              ('Ord', ['Eq', 'PartialOrd', 'PartialEq'], 1),
                              ('Debug', [], 1),
                              ('Hash', [], 1)]:
    traits[trait] = (ALL, supers, errs)
# Generate the selected test files, counting how many actually changed.
files = 0
for (trait, (types, super_traits, error_count)) in traits.items():
    mk = lambda ty: create_test_case(ty, trait, super_traits, error_count)
    if types & ENUM:
        files += write_file(trait + '-enum', mk(ENUM_TUPLE))
        files += write_file(trait + '-enum-struct-variant', mk(ENUM_STRUCT))
    if types & STRUCT:
        files += write_file(trait + '-struct', mk(STRUCT_FIELDS))
        files += write_file(trait + '-tuple-struct', mk(STRUCT_TUPLE))
print('Generated {files} deriving span test{}.'.format('s' if files != 1 else '', files = files))
| GBGamer/rust | src/etc/generate-deriving-span-tests.py | Python | apache-2.0 | 4,526 |
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
import action_chain_runner as acr
from st2actions.container.service import RunnerContainerService
from st2common.constants.action import LIVEACTION_STATUS_RUNNING
from st2common.constants.action import LIVEACTION_STATUS_SUCCEEDED
from st2common.constants.action import LIVEACTION_STATUS_CANCELED
from st2common.constants.action import LIVEACTION_STATUS_TIMED_OUT
from st2common.constants.action import LIVEACTION_STATUS_FAILED
from st2common.exceptions import actionrunner as runnerexceptions
from st2common.models.api.notification import NotificationsHelper
from st2common.models.db.liveaction import LiveActionDB
from st2common.models.db.keyvalue import KeyValuePairDB
from st2common.models.system.common import ResourceReference
from st2common.persistence.keyvalue import KeyValuePair
from st2common.persistence.runner import RunnerType
from st2common.services import action as action_service
from st2common.util import action_db as action_db_util
from st2common.exceptions.action import ParameterRenderingFailedException
from st2tests import DbTestCase
from st2tests.fixturesloader import FixturesLoader
class DummyActionExecution(object):
    """
    Lightweight stand-in for the execution object returned by
    ``action_service.request`` -- the tests only read status and result.
    """
    def __init__(self, status=LIVEACTION_STATUS_SUCCEEDED, result=''):
        # id of None means "not persisted"; tests never rely on a real id.
        self.id = None
        self.status = status
        self.result = result
# Fixture pack and the model files loaded out of it for these tests.
FIXTURES_PACK = 'generic'
TEST_MODELS = {
    'actions': ['a1.yaml', 'a2.yaml', 'action_4_action_context_param.yaml'],
    'runners': ['testrunner1.yaml']
}
MODELS = FixturesLoader().load_models(fixtures_pack=FIXTURES_PACK,
                                      fixtures_dict=TEST_MODELS)
ACTION_1 = MODELS['actions']['a1.yaml']
ACTION_2 = MODELS['actions']['a2.yaml']
ACTION_3 = MODELS['actions']['action_4_action_context_param.yaml']
RUNNER = MODELS['runners']['testrunner1.yaml']
# Absolute paths to the action-chain fixture files exercised below; each name
# describes the chain scenario (broken links, render failures, publish, etc.).
CHAIN_1_PATH = FixturesLoader().get_fixture_file_path_abs(
    FIXTURES_PACK, 'actionchains', 'chain1.yaml')
CHAIN_2_PATH = FixturesLoader().get_fixture_file_path_abs(
    FIXTURES_PACK, 'actionchains', 'chain2.yaml')
CHAIN_ACTION_CALL_NO_PARAMS_PATH = FixturesLoader().get_fixture_file_path_abs(
    FIXTURES_PACK, 'actionchains', 'chain_action_call_no_params.yaml')
CHAIN_NO_DEFAULT = FixturesLoader().get_fixture_file_path_abs(
    FIXTURES_PACK, 'actionchains', 'no_default_chain.yaml')
CHAIN_NO_DEFAULT_2 = FixturesLoader().get_fixture_file_path_abs(
    FIXTURES_PACK, 'actionchains', 'no_default_chain_2.yaml')
CHAIN_BAD_DEFAULT = FixturesLoader().get_fixture_file_path_abs(
    FIXTURES_PACK, 'actionchains', 'bad_default_chain.yaml')
CHAIN_BROKEN_ON_SUCCESS_PATH_STATIC_TASK_NAME = FixturesLoader().get_fixture_file_path_abs(
    FIXTURES_PACK, 'actionchains', 'chain_broken_on_success_path_static_task_name.yaml')
CHAIN_BROKEN_ON_FAILURE_PATH_STATIC_TASK_NAME = FixturesLoader().get_fixture_file_path_abs(
    FIXTURES_PACK, 'actionchains', 'chain_broken_on_failure_path_static_task_name.yaml')
CHAIN_FIRST_TASK_RENDER_FAIL_PATH = FixturesLoader().get_fixture_file_path_abs(
    FIXTURES_PACK, 'actionchains', 'chain_first_task_parameter_render_fail.yaml')
CHAIN_SECOND_TASK_RENDER_FAIL_PATH = FixturesLoader().get_fixture_file_path_abs(
    FIXTURES_PACK, 'actionchains', 'chain_second_task_parameter_render_fail.yaml')
CHAIN_LIST_TEMP_PATH = FixturesLoader().get_fixture_file_path_abs(
    FIXTURES_PACK, 'actionchains', 'chain_list_template.yaml')
CHAIN_DICT_TEMP_PATH = FixturesLoader().get_fixture_file_path_abs(
    FIXTURES_PACK, 'actionchains', 'chain_dict_template.yaml')
CHAIN_DEP_INPUT = FixturesLoader().get_fixture_file_path_abs(
    FIXTURES_PACK, 'actionchains', 'chain_dependent_input.yaml')
CHAIN_DEP_RESULTS_INPUT = FixturesLoader().get_fixture_file_path_abs(
    FIXTURES_PACK, 'actionchains', 'chain_dep_result_input.yaml')
MALFORMED_CHAIN_PATH = FixturesLoader().get_fixture_file_path_abs(
    FIXTURES_PACK, 'actionchains', 'malformedchain.yaml')
CHAIN_TYPED_PARAMS = FixturesLoader().get_fixture_file_path_abs(
    FIXTURES_PACK, 'actionchains', 'chain_typed_params.yaml')
CHAIN_SYSTEM_PARAMS = FixturesLoader().get_fixture_file_path_abs(
    FIXTURES_PACK, 'actionchains', 'chain_typed_system_params.yaml')
CHAIN_WITH_ACTIONPARAM_VARS = FixturesLoader().get_fixture_file_path_abs(
    FIXTURES_PACK, 'actionchains', 'chain_with_actionparam_vars.yaml')
CHAIN_WITH_SYSTEM_VARS = FixturesLoader().get_fixture_file_path_abs(
    FIXTURES_PACK, 'actionchains', 'chain_with_system_vars.yaml')
CHAIN_WITH_PUBLISH = FixturesLoader().get_fixture_file_path_abs(
    FIXTURES_PACK, 'actionchains', 'chain_with_publish.yaml')
CHAIN_WITH_PUBLISH_PARAM_RENDERING_FAILURE = FixturesLoader().get_fixture_file_path_abs(
    FIXTURES_PACK, 'actionchains', 'chain_publish_params_rendering_failure.yaml')
CHAIN_WITH_INVALID_ACTION = FixturesLoader().get_fixture_file_path_abs(
    FIXTURES_PACK, 'actionchains', 'chain_with_invalid_action.yaml')
CHAIN_ACTION_PARAMS_AND_PARAMETERS_ATTRIBUTE = FixturesLoader().get_fixture_file_path_abs(
    FIXTURES_PACK, 'actionchains', 'chain_action_params_and_parameters.yaml')
CHAIN_ACTION_PARAMS_ATTRIBUTE = FixturesLoader().get_fixture_file_path_abs(
    FIXTURES_PACK, 'actionchains', 'chain_action_params_attribute.yaml')
CHAIN_ACTION_PARAMETERS_ATTRIBUTE = FixturesLoader().get_fixture_file_path_abs(
    FIXTURES_PACK, 'actionchains', 'chain_action_parameters_attribute.yaml')
CHAIN_ACTION_INVALID_PARAMETER_TYPE = FixturesLoader().get_fixture_file_path_abs(
    FIXTURES_PACK, 'actionchains', 'chain_invalid_parameter_type_passed_to_action.yaml')
# Notification structure attached to some live actions, in both API and DB form.
CHAIN_NOTIFY_API = {'notify': {'on-complete': {'message': 'foo happened.'}}}
CHAIN_NOTIFY_DB = NotificationsHelper.to_model(CHAIN_NOTIFY_API)
@mock.patch.object(action_db_util, 'get_runnertype_by_name',
mock.MagicMock(return_value=RUNNER))
class TestActionChainRunner(DbTestCase):
def test_runner_creation(self):
runner = acr.get_runner()
self.assertTrue(runner)
self.assertTrue(runner.runner_id)
def test_malformed_chain(self):
try:
chain_runner = acr.get_runner()
chain_runner.entry_point = MALFORMED_CHAIN_PATH
chain_runner.action = ACTION_1
chain_runner.container_service = RunnerContainerService()
chain_runner.pre_run()
self.assertTrue(False, 'Expected pre_run to fail.')
except runnerexceptions.ActionRunnerPreRunError:
self.assertTrue(True)
    @mock.patch.object(action_db_util, 'get_action_by_ref',
                       mock.MagicMock(return_value=ACTION_1))
    @mock.patch.object(action_service, 'request', return_value=(DummyActionExecution(), None))
    def test_chain_runner_success_path(self, request):
        """
        Happy path: every task in chain1 succeeds, so the runner walks the
        full on-success path and requests one execution per task.
        """
        chain_runner = acr.get_runner()
        chain_runner.entry_point = CHAIN_1_PATH
        chain_runner.action = ACTION_1
        action_ref = ResourceReference.to_string_reference(name=ACTION_1.name,
                                                           pack=ACTION_1.pack)
        chain_runner.liveaction = LiveActionDB(action=action_ref)
        chain_runner.liveaction.notify = CHAIN_NOTIFY_DB
        chain_runner.container_service = RunnerContainerService()
        chain_runner.pre_run()
        chain_runner.run({})
        self.assertNotEqual(chain_runner.chain_holder.actionchain, None)
        # based on the chain the callcount is known to be 3. Not great but works.
        self.assertEqual(request.call_count, 3)
    @mock.patch.object(action_db_util, 'get_action_by_ref',
                       mock.MagicMock(return_value=ACTION_1))
    @mock.patch.object(action_service, 'request', return_value=(DummyActionExecution(), None))
    def test_chain_runner_chain_second_task_times_out(self, request):
        # Second task in the chain times out so the action chain status should be timeout
        chain_runner = acr.get_runner()
        chain_runner.entry_point = CHAIN_2_PATH
        chain_runner.action = ACTION_1
        original_run_action = chain_runner._run_action
        def mock_run_action(*args, **kwargs):
            # Wrap the real _run_action and rewrite the status of the second
            # task (wolfpack.a2) to TIMED_OUT after it has been dispatched.
            original_live_action = args[0]
            liveaction = original_run_action(*args, **kwargs)
            if original_live_action.action == 'wolfpack.a2':
                # Mock a timeout for second task
                liveaction.status = LIVEACTION_STATUS_TIMED_OUT
            return liveaction
        chain_runner._run_action = mock_run_action
        action_ref = ResourceReference.to_string_reference(name=ACTION_1.name,
                                                           pack=ACTION_1.pack)
        chain_runner.liveaction = LiveActionDB(action=action_ref)
        chain_runner.container_service = RunnerContainerService()
        chain_runner.pre_run()
        status, _, _ = chain_runner.run({})
        self.assertEqual(status, LIVEACTION_STATUS_TIMED_OUT)
        self.assertNotEqual(chain_runner.chain_holder.actionchain, None)
        # based on the chain the callcount is known to be 3. Not great but works.
        self.assertEqual(request.call_count, 3)
    @mock.patch.object(action_db_util, 'get_action_by_ref',
                       mock.MagicMock(return_value=ACTION_1))
    @mock.patch.object(action_service, 'request', return_value=(DummyActionExecution(), None))
    def test_chain_runner_task_is_canceled_while_running(self, request):
        # Second task in the action is CANCELED, make sure runner doesn't get stuck in an infinite
        # loop
        chain_runner = acr.get_runner()
        chain_runner.entry_point = CHAIN_2_PATH
        chain_runner.action = ACTION_1
        original_run_action = chain_runner._run_action
        def mock_run_action(*args, **kwargs):
            # Make the mocked action_service.request report CANCELED for the
            # second task (wolfpack.a2) and SUCCEEDED for everything else.
            original_live_action = args[0]
            if original_live_action.action == 'wolfpack.a2':
                status = LIVEACTION_STATUS_CANCELED
            else:
                status = LIVEACTION_STATUS_SUCCEEDED
            request.return_value = (DummyActionExecution(status=status), None)
            liveaction = original_run_action(*args, **kwargs)
            return liveaction
        chain_runner._run_action = mock_run_action
        action_ref = ResourceReference.to_string_reference(name=ACTION_1.name,
                                                           pack=ACTION_1.pack)
        chain_runner.liveaction = LiveActionDB(action=action_ref)
        chain_runner.container_service = RunnerContainerService()
        chain_runner.pre_run()
        status, _, _ = chain_runner.run({})
        self.assertEqual(status, LIVEACTION_STATUS_CANCELED)
        self.assertNotEqual(chain_runner.chain_holder.actionchain, None)
        # Chain count should be 2 since the last task doesn't get called since the second one was
        # canceled
        self.assertEqual(request.call_count, 2)
    @mock.patch.object(action_db_util, 'get_action_by_ref',
                       mock.MagicMock(return_value=ACTION_1))
    @mock.patch.object(action_service, 'request', return_value=(DummyActionExecution(), None))
    def test_chain_runner_success_task_action_call_with_no_params(self, request):
        # Make sure that the runner doesn't explode if task definition contains
        # no "params" section
        chain_runner = acr.get_runner()
        chain_runner.entry_point = CHAIN_ACTION_CALL_NO_PARAMS_PATH
        chain_runner.action = ACTION_1
        action_ref = ResourceReference.to_string_reference(name=ACTION_1.name,
                                                           pack=ACTION_1.pack)
        chain_runner.liveaction = LiveActionDB(action=action_ref)
        chain_runner.liveaction.notify = CHAIN_NOTIFY_DB
        chain_runner.container_service = RunnerContainerService()
        chain_runner.pre_run()
        chain_runner.run({})
        self.assertNotEqual(chain_runner.chain_holder.actionchain, None)
        # based on the chain the callcount is known to be 3. Not great but works.
        self.assertEqual(request.call_count, 3)
    @mock.patch.object(action_db_util, 'get_action_by_ref',
                       mock.MagicMock(return_value=ACTION_1))
    @mock.patch.object(action_service, 'request', return_value=(DummyActionExecution(), None))
    def test_chain_runner_no_default(self, request):
        """
        A chain without an explicit "default" entry should start from the
        first node in the definition.
        """
        chain_runner = acr.get_runner()
        chain_runner.entry_point = CHAIN_NO_DEFAULT
        chain_runner.action = ACTION_1
        chain_runner.container_service = RunnerContainerService()
        chain_runner.pre_run()
        chain_runner.run({})
        self.assertNotEqual(chain_runner.chain_holder.actionchain, None)
        # In case of this chain default_node is the first_node.
        default_node = chain_runner.chain_holder.actionchain.default
        first_node = chain_runner.chain_holder.actionchain.chain[0]
        self.assertEqual(default_node, first_node.name)
        # based on the chain the callcount is known to be 3. Not great but works.
        self.assertEqual(request.call_count, 3)
    @mock.patch.object(action_db_util, 'get_action_by_ref',
                       mock.MagicMock(return_value=ACTION_1))
    @mock.patch.object(action_service, 'request', return_value=(DummyActionExecution(), None))
    def test_chain_runner_no_default_multiple_options(self, request):
        # subtle difference is that when there are multiple possible default nodes
        # the order per chain definition may not be preseved. This is really a
        # poorly formatted chain but we still the best attempt to work.
        chain_runner = acr.get_runner()
        chain_runner.entry_point = CHAIN_NO_DEFAULT_2
        chain_runner.action = ACTION_1
        chain_runner.container_service = RunnerContainerService()
        chain_runner.pre_run()
        chain_runner.run({})
        self.assertNotEqual(chain_runner.chain_holder.actionchain, None)
        # In case of this chain default_node is the first_node.
        default_node = chain_runner.chain_holder.actionchain.default
        first_node = chain_runner.chain_holder.actionchain.chain[0]
        self.assertEqual(default_node, first_node.name)
        # based on the chain the callcount is known to be 2.
        self.assertEqual(request.call_count, 2)
    @mock.patch.object(action_db_util, 'get_action_by_ref',
                       mock.MagicMock(return_value=ACTION_1))
    @mock.patch.object(action_service, 'request', return_value=(DummyActionExecution(), None))
    def test_chain_runner_bad_default(self, request):
        """A "default" pointing at a non-existent node must fail pre_run."""
        chain_runner = acr.get_runner()
        chain_runner.entry_point = CHAIN_BAD_DEFAULT
        chain_runner.action = ACTION_1
        chain_runner.container_service = RunnerContainerService()
        expected_msg = 'Unable to find node with name "bad_default" referenced in "default".'
        self.assertRaisesRegexp(runnerexceptions.ActionRunnerPreRunError,
                                expected_msg, chain_runner.pre_run)
    @mock.patch('eventlet.sleep', mock.MagicMock())
    @mock.patch.object(action_db_util, 'get_liveaction_by_id', mock.MagicMock(
        return_value=DummyActionExecution()))
    @mock.patch.object(action_db_util, 'get_action_by_ref',
                       mock.MagicMock(return_value=ACTION_1))
    @mock.patch.object(action_service, 'request',
                       return_value=(DummyActionExecution(status=LIVEACTION_STATUS_RUNNING), None))
    def test_chain_runner_success_path_with_wait(self, request):
        """
        Tasks first report RUNNING (forcing the runner to poll via
        get_liveaction_by_id, with eventlet.sleep mocked out) before the
        polled execution reports success.
        """
        chain_runner = acr.get_runner()
        chain_runner.entry_point = CHAIN_1_PATH
        chain_runner.action = ACTION_1
        chain_runner.container_service = RunnerContainerService()
        chain_runner.pre_run()
        chain_runner.run({})
        self.assertNotEqual(chain_runner.chain_holder.actionchain, None)
        # based on the chain the callcount is known to be 3. Not great but works.
        self.assertEqual(request.call_count, 3)
    @mock.patch.object(action_db_util, 'get_action_by_ref',
                       mock.MagicMock(return_value=ACTION_1))
    @mock.patch.object(action_service, 'request',
                       return_value=(DummyActionExecution(status=LIVEACTION_STATUS_FAILED), None))
    def test_chain_runner_failure_path(self, request):
        """Every task fails, so the chain follows on-failure and ends FAILED."""
        chain_runner = acr.get_runner()
        chain_runner.entry_point = CHAIN_1_PATH
        chain_runner.action = ACTION_1
        chain_runner.container_service = RunnerContainerService()
        chain_runner.pre_run()
        status, _, _ = chain_runner.run({})
        self.assertEqual(status, LIVEACTION_STATUS_FAILED)
        self.assertNotEqual(chain_runner.chain_holder.actionchain, None)
        # based on the chain the callcount is known to be 2. Not great but works.
        self.assertEqual(request.call_count, 2)
    @mock.patch.object(action_db_util, 'get_action_by_ref',
                       mock.MagicMock(return_value=ACTION_1))
    @mock.patch.object(action_service, 'request',
                       return_value=(DummyActionExecution(), None))
    def test_chain_runner_broken_on_success_path_static_task_name(self, request):
        """An on-success link to a missing node must fail pre_run validation."""
        chain_runner = acr.get_runner()
        chain_runner.entry_point = CHAIN_BROKEN_ON_SUCCESS_PATH_STATIC_TASK_NAME
        chain_runner.action = ACTION_1
        chain_runner.container_service = RunnerContainerService()
        expected_msg = ('Unable to find node with name "c5" referenced in "on-success" '
                        'in task "c2"')
        self.assertRaisesRegexp(runnerexceptions.ActionRunnerPreRunError,
                                expected_msg, chain_runner.pre_run)
    @mock.patch.object(action_db_util, 'get_action_by_ref',
                       mock.MagicMock(return_value=ACTION_1))
    @mock.patch.object(action_service, 'request',
                       return_value=(DummyActionExecution(), None))
    def test_chain_runner_broken_on_failure_path_static_task_name(self, request):
        """An on-failure link to a missing node must fail pre_run validation."""
        chain_runner = acr.get_runner()
        chain_runner.entry_point = CHAIN_BROKEN_ON_FAILURE_PATH_STATIC_TASK_NAME
        chain_runner.action = ACTION_1
        chain_runner.container_service = RunnerContainerService()
        expected_msg = ('Unable to find node with name "c6" referenced in "on-failure" '
                        'in task "c2"')
        self.assertRaisesRegexp(runnerexceptions.ActionRunnerPreRunError,
                                expected_msg, chain_runner.pre_run)
    @mock.patch.object(action_db_util, 'get_action_by_ref',
                       mock.MagicMock(return_value=ACTION_1))
    @mock.patch.object(action_service, 'request', side_effect=RuntimeError('Test Failure.'))
    def test_chain_runner_action_exception(self, request):
        """
        When scheduling each task raises, the chain must end FAILED and every
        attempted task's result must carry an error entry.
        """
        chain_runner = acr.get_runner()
        chain_runner.entry_point = CHAIN_1_PATH
        chain_runner.action = ACTION_1
        chain_runner.container_service = RunnerContainerService()
        chain_runner.pre_run()
        status, results, _ = chain_runner.run({})
        self.assertEqual(status, LIVEACTION_STATUS_FAILED)
        self.assertNotEqual(chain_runner.chain_holder.actionchain, None)
        # based on the chain the callcount is known to be 2. Not great but works.
        self.assertEqual(request.call_count, 2)
        error_count = 0
        for task_result in results['tasks']:
            if task_result['result'].get('error', None):
                error_count += 1
        self.assertEqual(error_count, 2)
    @mock.patch.object(action_db_util, 'get_action_by_ref',
                       mock.MagicMock(return_value=ACTION_1))
    @mock.patch.object(action_service, 'request', return_value=(DummyActionExecution(), None))
    def test_chain_runner_str_param_temp(self, request):
        """A {{...}} template that renders a scalar produces a string param."""
        chain_runner = acr.get_runner()
        chain_runner.entry_point = CHAIN_FIRST_TASK_RENDER_FAIL_PATH
        chain_runner.action = ACTION_1
        chain_runner.container_service = RunnerContainerService()
        chain_runner.pre_run()
        chain_runner.run({'s1': 1, 's2': 2, 's3': 3, 's4': 4})
        self.assertNotEqual(chain_runner.chain_holder.actionchain, None)
        # Inspect the LiveAction handed to action_service.request.
        mock_args, _ = request.call_args
        self.assertEqual(mock_args[0].parameters, {"p1": "1"})
@mock.patch.object(action_db_util, 'get_action_by_ref',
                   mock.MagicMock(return_value=ACTION_1))
@mock.patch.object(action_service, 'request', return_value=(DummyActionExecution(), None))
def test_chain_runner_list_param_temp(self, request):
    # A list-valued template should render to its string representation.
    runner = acr.get_runner()
    runner.action = ACTION_1
    runner.entry_point = CHAIN_LIST_TEMP_PATH
    runner.container_service = RunnerContainerService()
    runner.pre_run()
    runner.run({'s1': 1, 's2': 2, 's3': 3, 's4': 4})
    self.assertNotEqual(runner.chain_holder.actionchain, None)
    positional_args, _ = request.call_args
    self.assertEqual(positional_args[0].parameters, {"p1": "[2, 3, 4]"})
@mock.patch.object(action_db_util, 'get_action_by_ref',
                   mock.MagicMock(return_value=ACTION_1))
@mock.patch.object(action_service, 'request', return_value=(DummyActionExecution(), None))
def test_chain_runner_dict_param_temp(self, request):
    # A dict-valued template should render each nested value from the input.
    runner = acr.get_runner()
    runner.action = ACTION_1
    runner.entry_point = CHAIN_DICT_TEMP_PATH
    runner.container_service = RunnerContainerService()
    runner.pre_run()
    runner.run({'s1': 1, 's2': 2, 's3': 3, 's4': 4})
    self.assertNotEqual(runner.chain_holder.actionchain, None)
    positional_args, _ = request.call_args
    expected_value = {"p1": {"p1.1": "1", "p1.2": "2", "p1.3": "[3, 4]"}}
    self.assertEqual(positional_args[0].parameters, expected_value)
@mock.patch.object(action_db_util, 'get_action_by_ref',
                   mock.MagicMock(return_value=ACTION_1))
@mock.patch.object(action_service, 'request',
                   return_value=(DummyActionExecution(), None))
def test_chain_runner_dependent_param_temp(self, request):
    # Later tasks reference the (mocked) result of earlier tasks; each
    # request must carry one of the expected rendered parameter sets.
    runner = acr.get_runner()
    runner.action = ACTION_1
    runner.entry_point = CHAIN_DEP_INPUT
    runner.container_service = RunnerContainerService()
    runner.pre_run()
    runner.run({'s1': 1, 's2': 2, 's3': 3, 's4': 4})
    self.assertNotEqual(runner.chain_holder.actionchain, None)
    expected_values = [{u'p1': u'1'},
                       {u'p1': u'1'},
                       {u'p2': u'1', u'p3': u'1', u'p1': u'1'}]
    # Each of the call_args must be one of
    for call in request.call_args_list:
        rendered = call[0][0].parameters
        self.assertTrue(rendered in expected_values)
        expected_values.remove(rendered)
    self.assertEqual(len(expected_values), 0, 'Not all expected values received.')
@mock.patch.object(action_db_util, 'get_action_by_ref',
                   mock.MagicMock(return_value=ACTION_1))
@mock.patch.object(action_service, 'request',
                   return_value=(DummyActionExecution(result={'o1': '1'}), None))
def test_chain_runner_dependent_results_param(self, request):
    # The final task consumes the aggregated results of the prior tasks.
    runner = acr.get_runner()
    runner.action = ACTION_1
    runner.entry_point = CHAIN_DEP_RESULTS_INPUT
    runner.container_service = RunnerContainerService()
    runner.pre_run()
    runner.run({'s1': 1})
    self.assertNotEqual(runner.chain_holder.actionchain, None)
    expected_values = [{u'p1': u'1'},
                       {u'p1': u'1'},
                       {u'out': u"{'c2': {'o1': '1'}, 'c1': {'o1': '1'}}"}]
    # Each of the call_args must be one of
    self.assertEqual(request.call_count, 3)
    for call in request.call_args_list:
        rendered = call[0][0].parameters
        self.assertTrue(rendered in expected_values)
        expected_values.remove(rendered)
    self.assertEqual(len(expected_values), 0, 'Not all expected values received.')
@mock.patch.object(action_db_util, 'get_action_by_ref',
                   mock.MagicMock(return_value=ACTION_1))
@mock.patch.object(RunnerType, 'get_by_name',
                   mock.MagicMock(return_value=RUNNER))
@mock.patch.object(action_service, 'request', return_value=(DummyActionExecution(), None))
def test_chain_runner_missing_param_temp(self, request):
    # With no chain input the first task's parameters cannot render, so no
    # action request should ever be issued.
    runner = acr.get_runner()
    runner.action = ACTION_1
    runner.entry_point = CHAIN_FIRST_TASK_RENDER_FAIL_PATH
    runner.container_service = RunnerContainerService()
    runner.pre_run()
    runner.run({})
    self.assertEqual(request.call_count, 0, 'No call expected.')
@mock.patch.object(action_db_util, 'get_action_by_ref',
                   mock.MagicMock(return_value=ACTION_1))
@mock.patch.object(action_service, 'request', return_value=(DummyActionExecution(), None))
def test_chain_runner_failure_during_param_rendering_single_task(self, request):
    # A parameter rendering failure on the very first task must abort the
    # chain before anything runs and surface as a top-level error.
    runner = acr.get_runner()
    runner.action = ACTION_1
    runner.entry_point = CHAIN_FIRST_TASK_RENDER_FAIL_PATH
    runner.container_service = RunnerContainerService()
    runner.pre_run()
    status, result, _ = runner.run({})
    # Nothing executed: rendering for the first task already failed.
    self.assertEqual(status, LIVEACTION_STATUS_FAILED)
    self.assertEqual(result['tasks'], [])
    self.assertTrue('error' in result)
    self.assertTrue('traceback' in result)
    self.assertTrue('Failed to run task "c1". Parameter rendering failed' in result['error'])
    self.assertTrue('Traceback' in result['traceback'])
@mock.patch.object(action_db_util, 'get_action_by_ref',
                   mock.MagicMock(return_value=ACTION_1))
@mock.patch.object(action_service, 'request', return_value=(DummyActionExecution(), None))
def test_chain_runner_failure_during_param_rendering_multiple_tasks(self, request):
    # When the second task's parameters fail to render, the first task has
    # already run; the chain must then abort with a top-level error.
    runner = acr.get_runner()
    runner.action = ACTION_1
    runner.entry_point = CHAIN_SECOND_TASK_RENDER_FAIL_PATH
    runner.container_service = RunnerContainerService()
    runner.pre_run()
    status, result, _ = runner.run({})
    # Only the first task made it into the result list.
    self.assertEqual(status, LIVEACTION_STATUS_FAILED)
    self.assertEqual(len(result['tasks']), 1)
    self.assertEqual(result['tasks'][0]['name'], 'c1')
    expected_error = ('Failed rendering value for action parameter "p1" in '
                      'task "c2" (template string={{s1}}):')
    self.assertTrue('error' in result)
    self.assertTrue('traceback' in result)
    self.assertTrue('Failed to run task "c2". Parameter rendering failed' in result['error'])
    self.assertTrue(expected_error in result['error'])
    self.assertTrue('Traceback' in result['traceback'])
@mock.patch.object(action_db_util, 'get_action_by_ref',
                   mock.MagicMock(return_value=ACTION_2))
@mock.patch.object(action_service, 'request', return_value=(DummyActionExecution(), None))
def test_chain_runner_typed_params(self, request):
    # Rendered parameters must be cast to the types declared on the action.
    runner = acr.get_runner()
    runner.action = ACTION_2
    runner.entry_point = CHAIN_TYPED_PARAMS
    runner.container_service = RunnerContainerService()
    runner.pre_run()
    runner.run({'s1': 1, 's2': 'two', 's3': 3.14})
    self.assertNotEqual(runner.chain_holder.actionchain, None)
    expected_value = {'inttype': 1,
                      'numbertype': 3.14,
                      'strtype': 'two',
                      'booltype': True,
                      'arrtype': ['1', 'two'],
                      'objtype': {'k1': '1',
                                  's2': 'two'}}
    positional_args, _ = request.call_args
    self.assertEqual(positional_args[0].parameters, expected_value)
@mock.patch.object(action_db_util, 'get_action_by_ref',
                   mock.MagicMock(return_value=ACTION_2))
@mock.patch.object(action_service, 'request', return_value=(DummyActionExecution(), None))
def test_chain_runner_typed_system_params(self, request):
    # Values looked up from the datastore must also be cast to the declared
    # parameter types. The key/value pairs are cleaned up afterwards.
    created_kvps = []
    try:
        created_kvps.append(KeyValuePair.add_or_update(KeyValuePairDB(name='a', value='1')))
        created_kvps.append(KeyValuePair.add_or_update(KeyValuePairDB(name='a.b.c', value='two')))
        runner = acr.get_runner()
        runner.action = ACTION_2
        runner.entry_point = CHAIN_SYSTEM_PARAMS
        runner.container_service = RunnerContainerService()
        runner.pre_run()
        runner.run({})
        self.assertNotEqual(runner.chain_holder.actionchain, None)
        expected_value = {'strtype': 'two',
                          'inttype': 1}
        positional_args, _ = request.call_args
        self.assertEqual(positional_args[0].parameters, expected_value)
    finally:
        for created_kvp in created_kvps:
            KeyValuePair.delete(created_kvp)
@mock.patch.object(action_db_util, 'get_action_by_ref',
                   mock.MagicMock(return_value=ACTION_2))
@mock.patch.object(action_service, 'request', return_value=(DummyActionExecution(), None))
def test_chain_runner_vars_system_params(self, request):
    # Chain-level "vars" may reference datastore values; both the current
    # and legacy lookup syntax must resolve to the stored value.
    created_kvps = []
    try:
        created_kvps.append(KeyValuePair.add_or_update(KeyValuePairDB(name='a', value='two')))
        runner = acr.get_runner()
        runner.action = ACTION_2
        runner.entry_point = CHAIN_WITH_SYSTEM_VARS
        runner.container_service = RunnerContainerService()
        runner.pre_run()
        runner.run({})
        self.assertNotEqual(runner.chain_holder.actionchain, None)
        expected_value = {'strtype': 'two',
                          'strtype_legacy': 'two',
                          'inttype': 1,
                          'booltype': True}
        positional_args, _ = request.call_args
        self.assertEqual(positional_args[0].parameters, expected_value)
    finally:
        for created_kvp in created_kvps:
            KeyValuePair.delete(created_kvp)
@mock.patch.object(action_db_util, 'get_action_by_ref',
                   mock.MagicMock(return_value=ACTION_2))
@mock.patch.object(action_service, 'request', return_value=(DummyActionExecution(), None))
def test_chain_runner_vars_action_params(self, request):
    # Chain-level "vars" may reference the action's own input parameters.
    runner = acr.get_runner()
    runner.action = ACTION_2
    runner.entry_point = CHAIN_WITH_ACTIONPARAM_VARS
    runner.container_service = RunnerContainerService()
    runner.pre_run()
    runner.run({'input_a': 'two'})
    self.assertNotEqual(runner.chain_holder.actionchain, None)
    expected_value = {'strtype': 'two',
                      'inttype': 1,
                      'booltype': True}
    positional_args, _ = request.call_args
    self.assertEqual(positional_args[0].parameters, expected_value)
@mock.patch.object(action_db_util, 'get_action_by_ref',
                   mock.MagicMock(return_value=ACTION_2))
@mock.patch.object(action_service, 'request',
                   return_value=(DummyActionExecution(result={'raw_out': 'published'}), None))
def test_chain_runner_publish(self, request):
    # Values published by one task must be visible to later parameter
    # rendering, and the action's own input parameters must be available
    # inside the "publish" scope as well.
    runner = acr.get_runner()
    runner.action = ACTION_2
    runner.entry_point = CHAIN_WITH_PUBLISH
    runner.container_service = RunnerContainerService()
    runner.runner_parameters = {'display_published': True}
    runner.pre_run()
    action_parameters = {'action_param_1': 'test value 1'}
    _, result, _ = runner.run(action_parameters=action_parameters)
    self.assertNotEqual(runner.chain_holder.actionchain, None)
    expected_value = {'strtype': 'published',
                      'inttype': 1,
                      'booltype': True,
                      'published_action_param': action_parameters['action_param_1']}
    positional_args, _ = request.call_args
    self.assertEqual(positional_args[0].parameters, expected_value)
    # The published variables must also be reported in the chain result.
    self.assertEqual(result['published'],
                     {'published_action_param': u'test value 1', 'o1': u'published'})
@mock.patch.object(action_db_util, 'get_action_by_ref',
                   mock.MagicMock(return_value=ACTION_1))
@mock.patch.object(action_service, 'request', return_value=(DummyActionExecution(), None))
def test_chain_runner_publish_param_rendering_failure(self, request):
    """A publish-parameter rendering failure bubbles up out of run().

    Parameter rendering should result in a top level error which aborts
    the whole chain.
    """
    chain_runner = acr.get_runner()
    chain_runner.entry_point = CHAIN_WITH_PUBLISH_PARAM_RENDERING_FAILURE
    chain_runner.action = ACTION_1
    chain_runner.container_service = RunnerContainerService()
    chain_runner.pre_run()
    try:
        chain_runner.run({})
    except ParameterRenderingFailedException as e:
        # TODO: Should we treat this as task error? Right now it bubbles all
        # the way up and it's not really consistent with action param
        # rendering failure
        expected_error = ('Failed rendering value for publish parameter "p1" in '
                          'task "c2" (template string={{ not_defined }}):')
        self.assertTrue(expected_error in str(e))
    else:
        self.fail('Exception was not thrown')
@mock.patch.object(action_db_util, 'get_action_by_ref',
                   mock.MagicMock(return_value=ACTION_2))
@mock.patch.object(action_service, 'request', return_value=(DummyActionExecution(), None))
def test_chain_task_passes_invalid_parameter_type_to_action(self, mock_request):
    """A value that cannot be cast to the declared parameter type raises ValueError."""
    chain_runner = acr.get_runner()
    chain_runner.entry_point = CHAIN_ACTION_INVALID_PARAMETER_TYPE
    chain_runner.action = ACTION_2
    chain_runner.container_service = RunnerContainerService()
    chain_runner.pre_run()
    action_parameters = {}
    # Raw string: the pattern contains regex escapes (\( and \)) which are
    # invalid string escape sequences in a plain literal (DeprecationWarning
    # on Python >= 3.6).
    expected_msg = (r'Failed to cast value "stringnotanarray" \(type: str\) for parameter '
                    r'"arrtype" of type "array"')
    self.assertRaisesRegexp(ValueError, expected_msg, chain_runner.run,
                            action_parameters=action_parameters)
@mock.patch.object(action_db_util, 'get_action_by_ref',
                   mock.MagicMock(return_value=None))
@mock.patch.object(action_service, 'request',
                   return_value=(DummyActionExecution(result={'raw_out': 'published'}), None))
def test_action_chain_runner_referenced_action_doesnt_exist(self, mock_request):
    # A task referencing a non-existent action must fail the chain with a
    # top-level error and traceback.
    runner = acr.get_runner()
    runner.action = ACTION_2
    runner.entry_point = CHAIN_WITH_INVALID_ACTION
    runner.container_service = RunnerContainerService()
    runner.pre_run()
    status, output, _ = runner.run(action_parameters={})
    expected_error = ('Failed to run task "c1". Action with reference "wolfpack.a2" '
                      'doesn\'t exist.')
    self.assertEqual(status, LIVEACTION_STATUS_FAILED)
    self.assertTrue(expected_error in output['error'])
    self.assertTrue('Traceback' in output['traceback'], output['traceback'])
def test_exception_is_thrown_if_both_params_and_parameters_attributes_are_provided(self):
    # A task may declare "params" or "parameters", but never both at once.
    runner = acr.get_runner()
    runner.action = ACTION_2
    runner.entry_point = CHAIN_ACTION_PARAMS_AND_PARAMETERS_ATTRIBUTE
    runner.container_service = RunnerContainerService()
    expected_msg = ('Either "params" or "parameters" attribute needs to be provided, but '
                    'not both')
    self.assertRaisesRegexp(runnerexceptions.ActionRunnerPreRunError, expected_msg,
                            runner.pre_run)
@mock.patch.object(action_db_util, 'get_action_by_ref',
                   mock.MagicMock(return_value=ACTION_1))
@mock.patch.object(action_service, 'request', return_value=(DummyActionExecution(), None))
def test_params_and_parameters_attributes_both_work(self, _):
    """Both the "params" and the "parameters" task attributes resolve correctly."""
    # "params" attribute used
    chain_runner = acr.get_runner()
    chain_runner.entry_point = CHAIN_ACTION_PARAMS_ATTRIBUTE
    chain_runner.action = ACTION_2
    chain_runner.container_service = RunnerContainerService()
    chain_runner.pre_run()
    original_build_liveaction_object = chain_runner._build_liveaction_object

    def mock_build_liveaction_object(action_node, resolved_params, parent_context):
        # Verify parameters are correctly passed to the action
        self.assertEqual(resolved_params, {'pparams': 'v1'})
        # Propagate the original return value so the caller sees a real
        # liveaction object instead of None.
        return original_build_liveaction_object(action_node=action_node,
                                                resolved_params=resolved_params,
                                                parent_context=parent_context)

    chain_runner._build_liveaction_object = mock_build_liveaction_object
    action_parameters = {}
    status, output, _ = chain_runner.run(action_parameters=action_parameters)
    self.assertEqual(status, LIVEACTION_STATUS_SUCCEEDED)

    # "parameters" attribute used
    chain_runner = acr.get_runner()
    chain_runner.entry_point = CHAIN_ACTION_PARAMETERS_ATTRIBUTE
    chain_runner.action = ACTION_2
    chain_runner.container_service = RunnerContainerService()
    chain_runner.pre_run()
    # Re-capture from the NEW runner instance. Previously the second mock
    # delegated to the first runner's bound method, silently coupling the
    # two runner instances.
    original_build_liveaction_object = chain_runner._build_liveaction_object

    def mock_build_liveaction_object(action_node, resolved_params, parent_context):
        # Verify parameters are correctly passed to the action
        self.assertEqual(resolved_params, {'pparameters': 'v1'})
        return original_build_liveaction_object(action_node=action_node,
                                                resolved_params=resolved_params,
                                                parent_context=parent_context)

    chain_runner._build_liveaction_object = mock_build_liveaction_object
    action_parameters = {}
    status, output, _ = chain_runner.run(action_parameters=action_parameters)
    self.assertEqual(status, LIVEACTION_STATUS_SUCCEEDED)
@classmethod
def tearDownClass(cls):
    # Remove the fixture models this test class loaded into the database.
    FixturesLoader().delete_models_from_db(MODELS)
| punalpatel/st2 | contrib/runners/action_chain_runner/tests/unit/test_actionchain.py | Python | apache-2.0 | 40,041 |
input = """
1 2 0 0
1 3 0 0
1 4 0 0
1 5 0 0
1 6 0 0
1 7 0 0
1 8 0 0
1 9 0 0
1 10 0 0
1 11 0 0
1 12 0 0
1 13 0 0
1 14 0 0
1 15 0 0
1 16 0 0
1 17 0 0
1 18 0 0
1 19 0 0
1 20 0 0
1 21 0 0
1 22 0 0
1 23 0 0
1 24 0 0
1 25 0 0
1 26 0 0
1 27 0 0
1 28 0 0
1 29 0 0
1 30 0 0
1 31 0 0
1 32 0 0
1 33 0 0
1 34 0 0
1 35 0 0
1 36 0 0
1 37 0 0
1 38 0 0
1 39 0 0
1 40 0 0
1 41 0 0
1 42 0 0
1 43 0 0
1 44 0 0
1 45 0 0
1 46 0 0
1 47 0 0
1 48 0 0
1 49 0 0
1 50 0 0
1 51 0 0
1 52 0 0
1 53 0 0
1 54 0 0
1 55 0 0
3 132 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 1 0 188
2 189 132 0 1 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187
1 1 2 1 189 188
2 190 132 0 2 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187
1 1 2 0 190 188
1 188 0 0
3 132 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 1 0 323
2 324 132 0 1 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322
1 1 2 1 324 323
2 325 132 0 2 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322
1 1 2 0 325 323
1 323 0 0
1 326 1 0 187
1 327 1 0 186
1 328 1 0 185
1 329 1 0 184
1 330 1 0 183
1 331 1 0 182
1 332 1 0 181
1 333 1 0 180
1 334 1 0 179
1 335 1 0 178
1 336 1 0 177
1 337 1 0 176
1 338 1 0 175
1 339 1 0 174
1 340 1 0 173
1 341 1 0 172
1 342 1 0 171
1 343 1 0 170
1 344 1 0 169
1 345 1 0 168
1 346 1 0 167
1 347 1 0 166
1 348 1 0 165
1 349 1 0 164
1 350 1 0 163
1 351 1 0 162
1 352 1 0 161
1 353 1 0 160
1 354 1 0 159
1 355 1 0 158
1 356 1 0 157
1 357 1 0 156
1 358 1 0 155
1 359 1 0 322
1 360 1 0 321
1 361 1 0 320
1 362 1 0 319
1 363 1 0 318
1 364 1 0 317
1 365 1 0 316
1 366 1 0 315
1 367 1 0 314
1 368 1 0 313
1 369 1 0 312
1 370 1 0 311
1 371 1 0 310
1 372 1 0 309
1 373 1 0 308
1 374 1 0 307
1 375 1 0 306
1 376 1 0 305
1 377 1 0 304
1 378 1 0 303
1 379 1 0 302
1 380 1 0 301
1 381 1 0 300
1 382 1 0 299
1 383 1 0 298
1 384 1 0 297
1 385 1 0 296
1 386 1 0 295
1 387 1 0 294
1 388 1 0 293
1 389 1 0 292
1 390 1 0 291
1 391 1 0 290
1 327 1 0 187
1 328 1 0 186
1 330 1 0 184
1 331 1 0 183
1 333 1 0 181
1 334 1 0 180
1 335 1 0 179
1 336 1 0 178
1 337 1 0 177
1 338 1 0 176
1 340 1 0 174
1 341 1 0 173
1 342 1 0 172
1 343 1 0 171
1 344 1 0 170
1 345 1 0 169
1 347 1 0 167
1 348 1 0 166
1 349 1 0 165
1 350 1 0 164
1 351 1 0 163
1 352 1 0 162
1 354 1 0 160
1 355 1 0 159
1 357 1 0 157
1 358 1 0 156
1 360 1 0 322
1 361 1 0 321
1 363 1 0 319
1 364 1 0 318
1 366 1 0 316
1 367 1 0 315
1 368 1 0 314
1 369 1 0 313
1 370 1 0 312
1 371 1 0 311
1 373 1 0 309
1 374 1 0 308
1 375 1 0 307
1 376 1 0 306
1 377 1 0 305
1 378 1 0 304
1 380 1 0 302
1 381 1 0 301
1 382 1 0 300
1 383 1 0 299
1 384 1 0 298
1 385 1 0 297
1 387 1 0 295
1 388 1 0 294
1 390 1 0 292
1 391 1 0 291
1 392 1 0 187
1 393 1 0 184
1 394 1 0 181
1 395 1 0 180
1 396 1 0 179
1 397 1 0 178
1 398 1 0 177
1 399 1 0 174
1 400 1 0 173
1 401 1 0 172
1 402 1 0 171
1 403 1 0 170
1 404 1 0 167
1 405 1 0 166
1 406 1 0 165
1 407 1 0 164
1 408 1 0 163
1 409 1 0 160
1 410 1 0 157
1 411 1 0 322
1 412 1 0 319
1 413 1 0 316
1 414 1 0 315
1 415 1 0 314
1 416 1 0 313
1 417 1 0 312
1 418 1 0 309
1 419 1 0 308
1 420 1 0 307
1 421 1 0 306
1 422 1 0 305
1 423 1 0 302
1 424 1 0 301
1 425 1 0 300
1 426 1 0 299
1 427 1 0 298
1 428 1 0 295
1 429 1 0 292
1 326 1 0 154
1 327 1 0 153
1 328 1 0 152
1 329 1 0 151
1 330 1 0 150
1 331 1 0 149
1 332 1 0 148
1 333 1 0 147
1 334 1 0 146
1 335 1 0 145
1 336 1 0 144
1 337 1 0 143
1 338 1 0 142
1 339 1 0 141
1 340 1 0 140
1 341 1 0 139
1 342 1 0 138
1 343 1 0 137
1 344 1 0 136
1 345 1 0 135
1 346 1 0 134
1 347 1 0 133
1 348 1 0 132
1 349 1 0 131
1 350 1 0 130
1 351 1 0 129
1 352 1 0 128
1 353 1 0 127
1 354 1 0 126
1 355 1 0 125
1 356 1 0 124
1 357 1 0 123
1 358 1 0 122
1 359 1 0 289
1 360 1 0 288
1 361 1 0 287
1 362 1 0 286
1 363 1 0 285
1 364 1 0 284
1 365 1 0 283
1 366 1 0 282
1 367 1 0 281
1 368 1 0 280
1 369 1 0 279
1 370 1 0 278
1 371 1 0 277
1 372 1 0 276
1 373 1 0 275
1 374 1 0 274
1 375 1 0 273
1 376 1 0 272
1 377 1 0 271
1 378 1 0 270
1 379 1 0 269
1 380 1 0 268
1 381 1 0 267
1 382 1 0 266
1 383 1 0 265
1 384 1 0 264
1 385 1 0 263
1 386 1 0 262
1 387 1 0 261
1 388 1 0 260
1 389 1 0 259
1 390 1 0 258
1 391 1 0 257
1 326 1 0 153
1 327 1 0 152
1 329 1 0 150
1 330 1 0 149
1 332 1 0 147
1 333 1 0 146
1 334 1 0 145
1 335 1 0 144
1 336 1 0 143
1 337 1 0 142
1 339 1 0 140
1 340 1 0 139
1 341 1 0 138
1 342 1 0 137
1 343 1 0 136
1 344 1 0 135
1 346 1 0 133
1 347 1 0 132
1 348 1 0 131
1 349 1 0 130
1 350 1 0 129
1 351 1 0 128
1 353 1 0 126
1 354 1 0 125
1 356 1 0 123
1 357 1 0 122
1 359 1 0 288
1 360 1 0 287
1 362 1 0 285
1 363 1 0 284
1 365 1 0 282
1 366 1 0 281
1 367 1 0 280
1 368 1 0 279
1 369 1 0 278
1 370 1 0 277
1 372 1 0 275
1 373 1 0 274
1 374 1 0 273
1 375 1 0 272
1 376 1 0 271
1 377 1 0 270
1 379 1 0 268
1 380 1 0 267
1 381 1 0 266
1 382 1 0 265
1 383 1 0 264
1 384 1 0 263
1 386 1 0 261
1 387 1 0 260
1 389 1 0 258
1 390 1 0 257
1 430 1 0 152
1 431 1 0 149
1 432 1 0 146
1 433 1 0 145
1 394 1 0 144
1 395 1 0 143
1 396 1 0 142
1 434 1 0 139
1 435 1 0 138
1 399 1 0 137
1 400 1 0 136
1 401 1 0 135
1 436 1 0 132
1 437 1 0 131
1 404 1 0 130
1 405 1 0 129
1 406 1 0 128
1 438 1 0 125
1 439 1 0 122
1 440 1 0 287
1 441 1 0 284
1 442 1 0 281
1 443 1 0 280
1 413 1 0 279
1 414 1 0 278
1 415 1 0 277
1 444 1 0 274
1 445 1 0 273
1 418 1 0 272
1 419 1 0 271
1 420 1 0 270
1 446 1 0 267
1 447 1 0 266
1 423 1 0 265
1 424 1 0 264
1 425 1 0 263
1 448 1 0 260
1 449 1 0 257
1 326 1 0 121
1 327 1 0 120
1 328 1 0 119
1 329 1 0 118
1 330 1 0 117
1 331 1 0 116
1 332 1 0 115
1 333 1 0 114
1 334 1 0 113
1 335 1 0 112
1 336 1 0 111
1 337 1 0 110
1 338 1 0 109
1 339 1 0 108
1 340 1 0 107
1 341 1 0 106
1 342 1 0 105
1 343 1 0 104
1 344 1 0 103
1 345 1 0 102
1 346 1 0 101
1 347 1 0 100
1 348 1 0 99
1 349 1 0 98
1 350 1 0 97
1 351 1 0 96
1 352 1 0 95
1 353 1 0 94
1 354 1 0 93
1 355 1 0 92
1 356 1 0 91
1 357 1 0 90
1 358 1 0 89
1 359 1 0 256
1 360 1 0 255
1 361 1 0 254
1 362 1 0 253
1 363 1 0 252
1 364 1 0 251
1 365 1 0 250
1 366 1 0 249
1 367 1 0 248
1 368 1 0 247
1 369 1 0 246
1 370 1 0 245
1 371 1 0 244
1 372 1 0 243
1 373 1 0 242
1 374 1 0 241
1 375 1 0 240
1 376 1 0 239
1 377 1 0 238
1 378 1 0 237
1 379 1 0 236
1 380 1 0 235
1 381 1 0 234
1 382 1 0 233
1 383 1 0 232
1 384 1 0 231
1 385 1 0 230
1 386 1 0 229
1 387 1 0 228
1 388 1 0 227
1 389 1 0 226
1 390 1 0 225
1 391 1 0 224
1 326 1 0 118
1 327 1 0 117
1 328 1 0 116
1 339 1 0 115
1 340 1 0 114
1 341 1 0 113
1 342 1 0 112
1 343 1 0 111
1 344 1 0 110
1 345 1 0 109
1 346 1 0 108
1 347 1 0 107
1 348 1 0 106
1 349 1 0 105
1 350 1 0 104
1 351 1 0 103
1 352 1 0 102
1 329 1 0 99
1 330 1 0 98
1 331 1 0 97
1 334 1 0 94
1 335 1 0 93
1 336 1 0 92
1 353 1 0 91
1 354 1 0 90
1 355 1 0 89
1 359 1 0 253
1 360 1 0 252
1 361 1 0 251
1 372 1 0 250
1 373 1 0 249
1 374 1 0 248
1 375 1 0 247
1 376 1 0 246
1 377 1 0 245
1 378 1 0 244
1 379 1 0 243
1 380 1 0 242
1 381 1 0 241
1 382 1 0 240
1 383 1 0 239
1 384 1 0 238
1 385 1 0 237
1 362 1 0 234
1 363 1 0 233
1 364 1 0 232
1 367 1 0 229
1 368 1 0 228
1 369 1 0 227
1 386 1 0 226
1 387 1 0 225
1 388 1 0 224
1 436 1 0 115
1 437 1 0 114
1 404 1 0 113
1 405 1 0 112
1 406 1 0 111
1 407 1 0 110
1 408 1 0 109
1 431 1 0 106
1 450 1 0 105
1 393 1 0 104
1 430 1 0 99
1 451 1 0 98
1 392 1 0 97
1 399 1 0 94
1 400 1 0 93
1 401 1 0 92
1 394 1 0 91
1 395 1 0 90
1 396 1 0 89
1 446 1 0 250
1 447 1 0 249
1 423 1 0 248
1 424 1 0 247
1 425 1 0 246
1 426 1 0 245
1 427 1 0 244
1 441 1 0 241
1 452 1 0 240
1 412 1 0 239
1 440 1 0 234
1 453 1 0 233
1 411 1 0 232
1 418 1 0 229
1 419 1 0 228
1 420 1 0 227
1 413 1 0 226
1 414 1 0 225
1 415 1 0 224
1 326 1 0 88
1 327 1 0 87
1 328 1 0 86
1 329 1 0 85
1 330 1 0 84
1 331 1 0 83
1 332 1 0 82
1 333 1 0 81
1 334 1 0 80
1 335 1 0 79
1 336 1 0 78
1 337 1 0 77
1 338 1 0 76
1 339 1 0 75
1 340 1 0 74
1 341 1 0 73
1 342 1 0 72
1 343 1 0 71
1 344 1 0 70
1 345 1 0 69
1 346 1 0 68
1 347 1 0 67
1 348 1 0 66
1 349 1 0 65
1 350 1 0 64
1 351 1 0 63
1 352 1 0 62
1 353 1 0 61
1 354 1 0 60
1 355 1 0 59
1 356 1 0 58
1 357 1 0 57
1 358 1 0 56
1 359 1 0 223
1 360 1 0 222
1 361 1 0 221
1 362 1 0 220
1 363 1 0 219
1 364 1 0 218
1 365 1 0 217
1 366 1 0 216
1 367 1 0 215
1 368 1 0 214
1 369 1 0 213
1 370 1 0 212
1 371 1 0 211
1 372 1 0 210
1 373 1 0 209
1 374 1 0 208
1 375 1 0 207
1 376 1 0 206
1 377 1 0 205
1 378 1 0 204
1 379 1 0 203
1 380 1 0 202
1 381 1 0 201
1 382 1 0 200
1 383 1 0 199
1 384 1 0 198
1 385 1 0 197
1 386 1 0 196
1 387 1 0 195
1 388 1 0 194
1 389 1 0 193
1 390 1 0 192
1 391 1 0 191
1 329 1 0 88
1 330 1 0 87
1 331 1 0 86
1 348 1 0 85
1 349 1 0 84
1 350 1 0 83
1 353 1 0 80
1 354 1 0 79
1 355 1 0 78
1 332 1 0 75
1 333 1 0 74
1 334 1 0 73
1 335 1 0 72
1 336 1 0 71
1 337 1 0 70
1 338 1 0 69
1 339 1 0 68
1 340 1 0 67
1 341 1 0 66
1 342 1 0 65
1 343 1 0 64
1 344 1 0 63
1 345 1 0 62
1 356 1 0 61
1 357 1 0 60
1 358 1 0 59
1 362 1 0 223
1 363 1 0 222
1 364 1 0 221
1 381 1 0 220
1 382 1 0 219
1 383 1 0 218
1 386 1 0 215
1 387 1 0 214
1 388 1 0 213
1 365 1 0 210
1 366 1 0 209
1 367 1 0 208
1 368 1 0 207
1 369 1 0 206
1 370 1 0 205
1 371 1 0 204
1 372 1 0 203
1 373 1 0 202
1 374 1 0 201
1 375 1 0 200
1 376 1 0 199
1 377 1 0 198
1 378 1 0 197
1 389 1 0 196
1 390 1 0 195
1 391 1 0 194
1 404 1 0 88
1 405 1 0 87
1 406 1 0 86
1 399 1 0 85
1 400 1 0 84
1 401 1 0 83
1 439 1 0 80
1 454 1 0 79
1 410 1 0 78
1 438 1 0 73
1 455 1 0 72
1 409 1 0 71
1 432 1 0 68
1 433 1 0 67
1 394 1 0 66
1 395 1 0 65
1 396 1 0 64
1 397 1 0 63
1 398 1 0 62
1 423 1 0 223
1 424 1 0 222
1 425 1 0 221
1 418 1 0 220
1 419 1 0 219
1 420 1 0 218
1 449 1 0 215
1 456 1 0 214
1 429 1 0 213
1 448 1 0 208
1 457 1 0 207
1 428 1 0 206
1 442 1 0 203
1 443 1 0 202
1 413 1 0 201
1 414 1 0 200
1 415 1 0 199
1 416 1 0 198
1 417 1 0 197
1 458 1 0 187
1 459 1 0 186
1 460 1 0 185
1 461 1 0 184
1 462 1 0 183
1 463 1 0 182
1 464 1 0 181
1 465 1 0 180
1 466 1 0 179
1 467 1 0 178
1 468 1 0 177
1 469 1 0 176
1 470 1 0 175
1 471 1 0 174
1 472 1 0 173
1 473 1 0 172
1 474 1 0 171
1 475 1 0 170
1 476 1 0 169
1 477 1 0 168
1 478 1 0 167
1 479 1 0 166
1 480 1 0 165
1 481 1 0 164
1 482 1 0 163
1 483 1 0 162
1 484 1 0 161
1 485 1 0 160
1 486 1 0 159
1 487 1 0 158
1 488 1 0 157
1 489 1 0 156
1 490 1 0 155
1 491 1 0 322
1 492 1 0 321
1 493 1 0 320
1 494 1 0 319
1 495 1 0 318
1 496 1 0 317
1 497 1 0 316
1 498 1 0 315
1 499 1 0 314
1 500 1 0 313
1 501 1 0 312
1 502 1 0 311
1 503 1 0 310
1 504 1 0 309
1 505 1 0 308
1 506 1 0 307
1 507 1 0 306
1 508 1 0 305
1 509 1 0 304
1 510 1 0 303
1 511 1 0 302
1 512 1 0 301
1 513 1 0 300
1 514 1 0 299
1 515 1 0 298
1 516 1 0 297
1 517 1 0 296
1 518 1 0 295
1 519 1 0 294
1 520 1 0 293
1 521 1 0 292
1 522 1 0 291
1 523 1 0 290
1 459 1 0 187
1 460 1 0 186
1 462 1 0 184
1 463 1 0 183
1 465 1 0 181
1 466 1 0 180
1 467 1 0 179
1 468 1 0 178
1 469 1 0 177
1 470 1 0 176
1 472 1 0 174
1 473 1 0 173
1 474 1 0 172
1 475 1 0 171
1 476 1 0 170
1 477 1 0 169
1 479 1 0 167
1 480 1 0 166
1 481 1 0 165
1 482 1 0 164
1 483 1 0 163
1 484 1 0 162
1 486 1 0 160
1 487 1 0 159
1 489 1 0 157
1 490 1 0 156
1 492 1 0 322
1 493 1 0 321
1 495 1 0 319
1 496 1 0 318
1 498 1 0 316
1 499 1 0 315
1 500 1 0 314
1 501 1 0 313
1 502 1 0 312
1 503 1 0 311
1 505 1 0 309
1 506 1 0 308
1 507 1 0 307
1 508 1 0 306
1 509 1 0 305
1 510 1 0 304
1 512 1 0 302
1 513 1 0 301
1 514 1 0 300
1 515 1 0 299
1 516 1 0 298
1 517 1 0 297
1 519 1 0 295
1 520 1 0 294
1 522 1 0 292
1 523 1 0 291
1 460 1 0 187
1 463 1 0 184
1 466 1 0 181
1 467 1 0 180
1 468 1 0 179
1 469 1 0 178
1 470 1 0 177
1 473 1 0 174
1 474 1 0 173
1 475 1 0 172
1 476 1 0 171
1 477 1 0 170
1 480 1 0 167
1 481 1 0 166
1 482 1 0 165
1 483 1 0 164
1 484 1 0 163
1 487 1 0 160
1 490 1 0 157
1 493 1 0 322
1 496 1 0 319
1 499 1 0 316
1 500 1 0 315
1 501 1 0 314
1 502 1 0 313
1 503 1 0 312
1 506 1 0 309
1 507 1 0 308
1 508 1 0 307
1 509 1 0 306
1 510 1 0 305
1 513 1 0 302
1 514 1 0 301
1 515 1 0 300
1 516 1 0 299
1 517 1 0 298
1 520 1 0 295
1 523 1 0 292
1 458 1 0 154
1 459 1 0 153
1 460 1 0 152
1 461 1 0 151
1 462 1 0 150
1 463 1 0 149
1 464 1 0 148
1 465 1 0 147
1 466 1 0 146
1 467 1 0 145
1 468 1 0 144
1 469 1 0 143
1 470 1 0 142
1 471 1 0 141
1 472 1 0 140
1 473 1 0 139
1 474 1 0 138
1 475 1 0 137
1 476 1 0 136
1 477 1 0 135
1 478 1 0 134
1 479 1 0 133
1 480 1 0 132
1 481 1 0 131
1 482 1 0 130
1 483 1 0 129
1 484 1 0 128
1 485 1 0 127
1 486 1 0 126
1 487 1 0 125
1 488 1 0 124
1 489 1 0 123
1 490 1 0 122
1 491 1 0 289
1 492 1 0 288
1 493 1 0 287
1 494 1 0 286
1 495 1 0 285
1 496 1 0 284
1 497 1 0 283
1 498 1 0 282
1 499 1 0 281
1 500 1 0 280
1 501 1 0 279
1 502 1 0 278
1 503 1 0 277
1 504 1 0 276
1 505 1 0 275
1 506 1 0 274
1 507 1 0 273
1 508 1 0 272
1 509 1 0 271
1 510 1 0 270
1 511 1 0 269
1 512 1 0 268
1 513 1 0 267
1 514 1 0 266
1 515 1 0 265
1 516 1 0 264
1 517 1 0 263
1 518 1 0 262
1 519 1 0 261
1 520 1 0 260
1 521 1 0 259
1 522 1 0 258
1 523 1 0 257
1 458 1 0 153
1 459 1 0 152
1 461 1 0 150
1 462 1 0 149
1 464 1 0 147
1 465 1 0 146
1 466 1 0 145
1 467 1 0 144
1 468 1 0 143
1 469 1 0 142
1 471 1 0 140
1 472 1 0 139
1 473 1 0 138
1 474 1 0 137
1 475 1 0 136
1 476 1 0 135
1 478 1 0 133
1 479 1 0 132
1 480 1 0 131
1 481 1 0 130
1 482 1 0 129
1 483 1 0 128
1 485 1 0 126
1 486 1 0 125
1 488 1 0 123
1 489 1 0 122
1 491 1 0 288
1 492 1 0 287
1 494 1 0 285
1 495 1 0 284
1 497 1 0 282
1 498 1 0 281
1 499 1 0 280
1 500 1 0 279
1 501 1 0 278
1 502 1 0 277
1 504 1 0 275
1 505 1 0 274
1 506 1 0 273
1 507 1 0 272
1 508 1 0 271
1 509 1 0 270
1 511 1 0 268
1 512 1 0 267
1 513 1 0 266
1 514 1 0 265
1 515 1 0 264
1 516 1 0 263
1 518 1 0 261
1 519 1 0 260
1 521 1 0 258
1 522 1 0 257
1 458 1 0 152
1 461 1 0 149
1 464 1 0 146
1 465 1 0 145
1 466 1 0 144
1 467 1 0 143
1 468 1 0 142
1 471 1 0 139
1 472 1 0 138
1 473 1 0 137
1 474 1 0 136
1 475 1 0 135
1 478 1 0 132
1 479 1 0 131
1 480 1 0 130
1 481 1 0 129
1 482 1 0 128
1 485 1 0 125
1 488 1 0 122
1 491 1 0 287
1 494 1 0 284
1 497 1 0 281
1 498 1 0 280
1 499 1 0 279
1 500 1 0 278
1 501 1 0 277
1 504 1 0 274
1 505 1 0 273
1 506 1 0 272
1 507 1 0 271
1 508 1 0 270
1 511 1 0 267
1 512 1 0 266
1 513 1 0 265
1 514 1 0 264
1 515 1 0 263
1 518 1 0 260
1 521 1 0 257
1 458 1 0 121
1 459 1 0 120
1 460 1 0 119
1 461 1 0 118
1 462 1 0 117
1 463 1 0 116
1 464 1 0 115
1 465 1 0 114
1 466 1 0 113
1 467 1 0 112
1 468 1 0 111
1 469 1 0 110
1 470 1 0 109
1 471 1 0 108
1 472 1 0 107
1 473 1 0 106
1 474 1 0 105
1 475 1 0 104
1 476 1 0 103
1 477 1 0 102
1 478 1 0 101
1 479 1 0 100
1 480 1 0 99
1 481 1 0 98
1 482 1 0 97
1 483 1 0 96
1 484 1 0 95
1 485 1 0 94
1 486 1 0 93
1 487 1 0 92
1 488 1 0 91
1 489 1 0 90
1 490 1 0 89
1 491 1 0 256
1 492 1 0 255
1 493 1 0 254
1 494 1 0 253
1 495 1 0 252
1 496 1 0 251
1 497 1 0 250
1 498 1 0 249
1 499 1 0 248
1 500 1 0 247
1 501 1 0 246
1 502 1 0 245
1 503 1 0 244
1 504 1 0 243
1 505 1 0 242
1 506 1 0 241
1 507 1 0 240
1 508 1 0 239
1 509 1 0 238
1 510 1 0 237
1 511 1 0 236
1 512 1 0 235
1 513 1 0 234
1 514 1 0 233
1 515 1 0 232
1 516 1 0 231
1 517 1 0 230
1 518 1 0 229
1 519 1 0 228
1 520 1 0 227
1 521 1 0 226
1 522 1 0 225
1 523 1 0 224
1 458 1 0 118
1 459 1 0 117
1 460 1 0 116
1 471 1 0 115
1 472 1 0 114
1 473 1 0 113
1 474 1 0 112
1 475 1 0 111
1 476 1 0 110
1 477 1 0 109
1 478 1 0 108
1 479 1 0 107
1 480 1 0 106
1 481 1 0 105
1 482 1 0 104
1 483 1 0 103
1 484 1 0 102
1 461 1 0 99
1 462 1 0 98
1 463 1 0 97
1 466 1 0 94
1 467 1 0 93
1 468 1 0 92
1 485 1 0 91
1 486 1 0 90
1 487 1 0 89
1 491 1 0 253
1 492 1 0 252
1 493 1 0 251
1 504 1 0 250
1 505 1 0 249
1 506 1 0 248
1 507 1 0 247
1 508 1 0 246
1 509 1 0 245
1 510 1 0 244
1 511 1 0 243
1 512 1 0 242
1 513 1 0 241
1 514 1 0 240
1 515 1 0 239
1 516 1 0 238
1 517 1 0 237
1 494 1 0 234
1 495 1 0 233
1 496 1 0 232
1 499 1 0 229
1 500 1 0 228
1 501 1 0 227
1 518 1 0 226
1 519 1 0 225
1 520 1 0 224
1 478 1 0 115
1 479 1 0 114
1 480 1 0 113
1 481 1 0 112
1 482 1 0 111
1 483 1 0 110
1 484 1 0 109
1 461 1 0 106
1 462 1 0 105
1 463 1 0 104
1 458 1 0 99
1 459 1 0 98
1 460 1 0 97
1 473 1 0 94
1 474 1 0 93
1 475 1 0 92
1 466 1 0 91
1 467 1 0 90
1 468 1 0 89
1 511 1 0 250
1 512 1 0 249
1 513 1 0 248
1 514 1 0 247
1 515 1 0 246
1 516 1 0 245
1 517 1 0 244
1 494 1 0 241
1 495 1 0 240
1 496 1 0 239
1 491 1 0 234
1 492 1 0 233
1 493 1 0 232
1 506 1 0 229
1 507 1 0 228
1 508 1 0 227
1 499 1 0 226
1 500 1 0 225
1 501 1 0 224
1 458 1 0 88
1 459 1 0 87
1 460 1 0 86
1 461 1 0 85
1 462 1 0 84
1 463 1 0 83
1 464 1 0 82
1 465 1 0 81
1 466 1 0 80
1 467 1 0 79
1 468 1 0 78
1 469 1 0 77
1 470 1 0 76
1 471 1 0 75
1 472 1 0 74
1 473 1 0 73
1 474 1 0 72
1 475 1 0 71
1 476 1 0 70
1 477 1 0 69
1 478 1 0 68
1 479 1 0 67
1 480 1 0 66
1 481 1 0 65
1 482 1 0 64
1 483 1 0 63
1 484 1 0 62
1 485 1 0 61
1 486 1 0 60
1 487 1 0 59
1 488 1 0 58
1 489 1 0 57
1 490 1 0 56
1 491 1 0 223
1 492 1 0 222
1 493 1 0 221
1 494 1 0 220
1 495 1 0 219
1 496 1 0 218
1 497 1 0 217
1 498 1 0 216
1 499 1 0 215
1 500 1 0 214
1 501 1 0 213
1 502 1 0 212
1 503 1 0 211
1 504 1 0 210
1 505 1 0 209
1 506 1 0 208
1 507 1 0 207
1 508 1 0 206
1 509 1 0 205
1 510 1 0 204
1 511 1 0 203
1 512 1 0 202
1 513 1 0 201
1 514 1 0 200
1 515 1 0 199
1 516 1 0 198
1 517 1 0 197
1 518 1 0 196
1 519 1 0 195
1 520 1 0 194
1 521 1 0 193
1 522 1 0 192
1 523 1 0 191
1 461 1 0 88
1 462 1 0 87
1 463 1 0 86
1 480 1 0 85
1 481 1 0 84
1 482 1 0 83
1 485 1 0 80
1 486 1 0 79
1 487 1 0 78
1 464 1 0 75
1 465 1 0 74
1 466 1 0 73
1 467 1 0 72
1 468 1 0 71
1 469 1 0 70
1 470 1 0 69
1 471 1 0 68
1 472 1 0 67
1 473 1 0 66
1 474 1 0 65
1 475 1 0 64
1 476 1 0 63
1 477 1 0 62
1 488 1 0 61
1 489 1 0 60
1 490 1 0 59
1 494 1 0 223
1 495 1 0 222
1 496 1 0 221
1 513 1 0 220
1 514 1 0 219
1 515 1 0 218
1 518 1 0 215
1 519 1 0 214
1 520 1 0 213
1 497 1 0 210
1 498 1 0 209
1 499 1 0 208
1 500 1 0 207
1 501 1 0 206
1 502 1 0 205
1 503 1 0 204
1 504 1 0 203
1 505 1 0 202
1 506 1 0 201
1 507 1 0 200
1 508 1 0 199
1 509 1 0 198
1 510 1 0 197
1 521 1 0 196
1 522 1 0 195
1 523 1 0 194
1 480 1 0 88
1 481 1 0 87
1 482 1 0 86
1 473 1 0 85
1 474 1 0 84
1 475 1 0 83
1 488 1 0 80
1 489 1 0 79
1 490 1 0 78
1 485 1 0 73
1 486 1 0 72
1 487 1 0 71
1 464 1 0 68
1 465 1 0 67
1 466 1 0 66
1 467 1 0 65
1 468 1 0 64
1 469 1 0 63
1 470 1 0 62
1 513 1 0 223
1 514 1 0 222
1 515 1 0 221
1 506 1 0 220
1 507 1 0 219
1 508 1 0 218
1 521 1 0 215
1 522 1 0 214
1 523 1 0 213
1 518 1 0 208
1 519 1 0 207
1 520 1 0 206
1 497 1 0 203
1 498 1 0 202
1 499 1 0 201
1 500 1 0 200
1 501 1 0 199
1 502 1 0 198
1 503 1 0 197
1 524 0 0
1 525 0 0
1 526 0 0
1 527 0 0
1 528 0 0
1 529 0 0
1 358 2 1 490 391
1 357 2 1 489 390
1 356 2 1 488 389
1 355 2 1 487 388
1 354 2 1 486 387
1 353 2 1 485 386
1 352 2 1 484 385
1 351 2 1 483 384
1 350 2 1 482 383
1 349 2 1 481 382
1 348 2 1 480 381
1 347 2 1 479 380
1 346 2 1 478 379
1 345 2 1 477 378
1 344 2 1 476 377
1 343 2 1 475 376
1 342 2 1 474 375
1 341 2 1 473 374
1 340 2 1 472 373
1 339 2 1 471 372
1 338 2 1 470 371
1 337 2 1 469 370
1 336 2 1 468 369
1 335 2 1 467 368
1 334 2 1 466 367
1 333 2 1 465 366
1 332 2 1 464 365
1 331 2 1 463 364
1 330 2 1 462 363
1 329 2 1 461 362
1 328 2 1 460 361
1 327 2 1 459 360
1 326 2 1 458 359
1 375 1 1 507
1 455 2 1 486 457
1 454 2 1 489 456
1 451 2 1 459 453
1 450 2 1 462 452
1 439 2 1 488 449
1 438 2 1 485 448
1 437 2 1 479 447
1 436 2 1 478 446
1 435 2 1 472 445
1 434 2 1 471 444
1 433 2 1 465 443
1 432 2 1 464 442
1 431 2 1 461 441
1 430 2 1 458 440
1 410 2 1 490 429
1 409 2 1 487 428
1 408 2 1 484 427
1 407 2 1 483 426
1 406 2 1 482 425
1 405 2 1 481 424
1 404 2 1 480 423
1 403 2 1 477 422
1 402 2 1 476 421
1 401 2 1 475 420
1 400 2 1 474 419
1 399 2 1 473 418
1 398 2 1 470 417
1 397 2 1 469 416
1 396 2 1 468 415
1 395 2 1 467 414
1 394 2 1 466 413
1 393 2 1 463 412
1 392 2 1 460 411
1 421 1 1 509
1 425 1 1 515
1 420 1 1 508
1 452 1 1 495
1 424 1 1 514
1 530 3 0 449 456 391
1 531 3 0 448 457 388
1 532 3 0 425 426 385
1 533 3 0 424 425 384
1 534 3 0 423 424 383
1 535 3 0 447 423 382
1 536 3 0 446 447 381
1 537 3 0 420 421 378
1 538 3 0 419 420 377
1 539 3 0 418 419 376
1 540 3 0 445 418 375
1 541 3 0 444 445 374
1 542 3 0 415 416 371
1 543 3 0 414 415 370
1 544 3 0 413 414 369
1 545 3 0 443 413 368
1 546 3 0 442 443 367
1 547 3 0 441 452 364
1 548 3 0 440 453 361
1 549 3 0 429 456 389
1 550 3 0 428 457 386
1 551 3 0 427 426 383
1 552 3 0 426 425 382
1 553 3 0 425 424 381
1 554 3 0 424 423 380
1 555 3 0 423 447 379
1 556 3 0 422 421 376
1 557 3 0 421 420 375
1 558 3 0 420 419 374
1 559 3 0 419 418 373
1 560 3 0 418 445 372
1 561 3 0 417 416 369
1 562 3 0 416 415 368
1 563 3 0 415 414 367
1 564 3 0 414 413 366
1 565 3 0 413 443 365
1 566 3 0 412 452 362
1 567 3 0 411 453 359
1 568 0 0
1 569 3 0 417 422 385
1 570 3 0 416 421 384
1 571 3 0 415 420 383
1 572 3 0 414 419 382
1 573 3 0 413 418 381
1 574 3 0 443 445 380
1 575 3 0 442 444 379
1 576 3 0 428 415 376
1 577 3 0 457 414 375
1 578 3 0 448 413 374
1 579 3 0 429 428 369
1 580 3 0 456 457 368
1 581 3 0 449 448 367
1 582 3 0 420 425 364
1 583 3 0 419 424 363
1 584 3 0 418 423 362
1 585 3 0 425 412 361
1 586 3 0 424 452 360
1 587 3 0 423 441 359
1 588 3 0 415 428 391
1 589 3 0 414 457 390
1 590 3 0 413 448 389
1 591 3 0 420 415 388
1 592 3 0 419 414 387
1 593 3 0 418 413 386
1 594 3 0 411 412 383
1 595 3 0 453 452 382
1 596 3 0 440 441 381
1 597 3 0 412 425 376
1 598 3 0 452 424 375
1 599 3 0 441 423 374
1 600 3 0 427 422 371
1 601 3 0 426 421 370
1 602 3 0 425 420 369
1 603 3 0 424 419 368
1 604 3 0 423 418 367
1 605 3 0 447 445 366
1 606 3 0 446 444 365
1 607 0 0
1 1 2 1 596 88
1 1 2 1 595 87
1 1 2 1 594 86
1 1 2 1 599 85
1 1 2 1 598 84
1 1 2 1 597 83
1 1 1 0 82
1 1 1 0 81
1 1 2 1 590 80
1 1 2 1 589 79
1 1 2 1 588 78
1 1 1 0 77
1 1 1 0 76
1 1 1 0 75
1 1 1 0 74
1 1 2 1 593 73
1 1 2 1 592 72
1 1 2 1 591 71
1 1 1 0 70
1 1 1 0 69
1 1 2 1 606 68
1 1 2 1 605 67
1 1 2 1 604 66
1 1 2 1 603 65
1 1 2 1 602 64
1 1 2 1 601 63
1 1 2 1 600 62
1 1 1 0 61
1 1 1 0 60
1 1 1 0 59
1 1 1 0 58
1 1 1 0 57
1 1 1 0 56
1 1 1 0 121
1 1 1 0 120
1 1 1 0 119
1 1 1 0 118
1 1 1 0 117
1 1 1 0 116
1 1 2 1 575 115
1 1 2 1 574 114
1 1 2 1 573 113
1 1 2 1 572 112
1 1 2 1 571 111
1 1 2 1 570 110
1 1 2 1 569 109
1 1 1 0 108
1 1 1 0 107
1 1 2 1 584 106
1 1 2 1 583 105
1 1 2 1 582 104
1 1 1 0 103
1 1 1 0 102
1 1 1 0 101
1 1 1 0 100
1 1 2 1 587 99
1 1 2 1 586 98
1 1 2 1 585 97
1 1 1 0 96
1 1 1 0 95
1 1 2 1 578 94
1 1 2 1 577 93
1 1 2 1 576 92
1 1 2 1 581 91
1 1 2 1 580 90
1 1 2 1 579 89
1 1 1 0 154
1 1 1 0 153
1 1 2 1 567 152
1 1 1 0 151
1 1 1 0 150
1 1 2 1 566 149
1 1 1 0 148
1 1 1 0 147
1 1 2 1 565 146
1 1 2 1 564 145
1 1 2 1 563 144
1 1 2 1 562 143
1 1 2 1 561 142
1 1 1 0 141
1 1 1 0 140
1 1 2 1 560 139
1 1 2 1 559 138
1 1 2 1 558 137
1 1 2 1 557 136
1 1 2 1 556 135
1 1 1 0 134
1 1 1 0 133
1 1 2 1 555 132
1 1 2 1 554 131
1 1 2 1 553 130
1 1 2 1 552 129
1 1 2 1 551 128
1 1 1 0 127
1 1 1 0 126
1 1 2 1 550 125
1 1 1 0 124
1 1 1 0 123
1 1 2 1 549 122
1 1 2 1 548 187
1 1 1 0 186
1 1 1 0 185
1 1 2 1 547 184
1 1 1 0 183
1 1 1 0 182
1 1 2 1 546 181
1 1 2 1 545 180
1 1 2 1 544 179
1 1 2 1 543 178
1 1 2 1 542 177
1 1 1 0 176
1 1 1 0 175
1 1 2 1 541 174
1 1 2 1 540 173
1 1 2 1 539 172
1 1 2 1 538 171
1 1 2 1 537 170
1 1 1 0 169
1 1 1 0 168
1 1 2 1 536 167
1 1 2 1 535 166
1 1 2 1 534 165
1 1 2 1 533 164
1 1 2 1 532 163
1 1 1 0 162
1 1 1 0 161
1 1 2 1 531 160
1 1 1 0 159
1 1 1 0 158
1 1 2 1 530 157
1 1 1 0 156
1 1 1 0 155
1 1 1 0 223
1 1 1 0 222
1 1 1 0 221
1 1 1 0 220
1 1 1 0 218
1 1 1 0 217
1 1 1 0 216
1 1 1 0 215
1 1 1 0 214
1 1 1 0 213
1 1 1 0 212
1 1 1 0 211
1 1 1 0 210
1 1 1 0 209
1 1 1 0 208
1 1 1 0 207
1 1 1 0 206
1 1 1 0 205
1 1 1 0 204
1 1 1 0 203
1 1 1 0 202
1 1 1 0 201
1 1 1 0 200
1 1 1 0 199
1 1 1 0 198
1 1 1 0 197
1 1 1 0 196
1 1 1 0 195
1 1 1 0 194
1 1 1 0 193
1 1 1 0 192
1 1 1 0 191
1 1 1 0 256
1 1 1 0 255
1 1 1 0 254
1 1 1 0 253
1 1 1 0 252
1 1 1 0 251
1 1 1 0 250
1 1 1 0 249
1 1 1 0 248
1 1 1 0 247
1 1 1 0 246
1 1 1 0 245
1 1 1 0 244
1 1 1 0 243
1 1 1 0 242
1 1 1 0 241
1 1 1 0 240
1 1 1 0 239
1 1 1 0 238
1 1 1 0 237
1 1 1 0 236
1 1 1 0 235
1 1 1 0 234
1 1 1 0 233
1 1 1 0 232
1 1 1 0 231
1 1 1 0 230
1 1 1 0 229
1 1 1 0 228
1 1 1 0 227
1 1 1 0 226
1 1 1 0 225
1 1 1 0 224
1 1 1 0 289
1 1 1 0 288
1 1 1 0 287
1 1 1 0 286
1 1 1 0 285
1 1 1 0 284
1 1 1 0 283
1 1 1 0 282
1 1 1 0 281
1 1 1 0 280
1 1 1 0 279
1 1 1 0 278
1 1 1 0 277
1 1 1 0 276
1 1 1 0 275
1 1 1 0 274
1 1 1 0 273
1 1 1 0 272
1 1 1 0 270
1 1 1 0 269
1 1 1 0 268
1 1 1 0 267
1 1 1 0 266
1 1 1 0 265
1 1 1 0 264
1 1 1 0 263
1 1 1 0 262
1 1 1 0 261
1 1 1 0 260
1 1 1 0 259
1 1 1 0 258
1 1 1 0 257
1 1 1 0 322
1 1 1 0 321
1 1 1 0 320
1 1 1 0 319
1 1 1 0 318
1 1 1 0 317
1 1 1 0 316
1 1 1 0 315
1 1 1 0 314
1 1 1 0 313
1 1 1 0 312
1 1 1 0 311
1 1 1 0 310
1 1 1 0 309
1 1 1 0 308
1 1 1 0 307
1 1 1 0 306
1 1 1 0 305
1 1 1 0 304
1 1 1 0 303
1 1 1 0 302
1 1 1 0 301
1 1 1 0 300
1 1 1 0 299
1 1 1 0 298
1 1 1 0 297
1 1 1 0 296
1 1 1 0 295
1 1 1 0 294
1 1 1 0 293
1 1 1 0 292
1 1 1 0 291
1 1 1 0 290
0
2 range(1)
15 range(2)
16 range(3)
17 range(4)
18 range(5)
19 range(6)
20 range(7)
56 move(2,right,7,3)
57 move(2,right,7,4)
58 move(2,right,7,5)
59 move(2,right,6,3)
60 move(2,right,6,4)
61 move(2,right,6,5)
62 move(2,right,3,1)
63 move(2,right,3,2)
64 move(2,right,3,3)
65 move(2,right,3,4)
66 move(2,right,3,5)
67 move(2,right,3,6)
68 move(2,right,3,7)
69 move(2,right,4,1)
70 move(2,right,4,2)
71 move(2,right,4,3)
72 move(2,right,4,4)
73 move(2,right,4,5)
74 move(2,right,4,6)
75 move(2,right,4,7)
76 move(2,right,5,1)
77 move(2,right,5,2)
78 move(2,right,5,3)
79 move(2,right,5,4)
80 move(2,right,5,5)
81 move(2,right,5,6)
82 move(2,right,5,7)
83 move(2,right,2,3)
84 move(2,right,2,4)
85 move(2,right,2,5)
86 move(2,right,1,3)
87 move(2,right,1,4)
88 move(2,right,1,5)
89 move(2,left,7,3)
90 move(2,left,7,4)
91 move(2,left,7,5)
92 move(2,left,6,3)
93 move(2,left,6,4)
94 move(2,left,6,5)
95 move(2,left,3,1)
96 move(2,left,3,2)
97 move(2,left,3,3)
98 move(2,left,3,4)
99 move(2,left,3,5)
100 move(2,left,3,6)
101 move(2,left,3,7)
102 move(2,left,4,1)
103 move(2,left,4,2)
104 move(2,left,4,3)
105 move(2,left,4,4)
106 move(2,left,4,5)
107 move(2,left,4,6)
108 move(2,left,4,7)
109 move(2,left,5,1)
110 move(2,left,5,2)
111 move(2,left,5,3)
112 move(2,left,5,4)
113 move(2,left,5,5)
114 move(2,left,5,6)
115 move(2,left,5,7)
116 move(2,left,2,3)
117 move(2,left,2,4)
118 move(2,left,2,5)
119 move(2,left,1,3)
120 move(2,left,1,4)
121 move(2,left,1,5)
122 move(2,down,7,3)
123 move(2,down,7,4)
124 move(2,down,7,5)
125 move(2,down,6,3)
126 move(2,down,6,4)
127 move(2,down,6,5)
128 move(2,down,3,1)
129 move(2,down,3,2)
130 move(2,down,3,3)
131 move(2,down,3,4)
132 move(2,down,3,5)
133 move(2,down,3,6)
134 move(2,down,3,7)
135 move(2,down,4,1)
136 move(2,down,4,2)
137 move(2,down,4,3)
138 move(2,down,4,4)
139 move(2,down,4,5)
140 move(2,down,4,6)
141 move(2,down,4,7)
142 move(2,down,5,1)
143 move(2,down,5,2)
144 move(2,down,5,3)
145 move(2,down,5,4)
146 move(2,down,5,5)
147 move(2,down,5,6)
148 move(2,down,5,7)
149 move(2,down,2,3)
150 move(2,down,2,4)
151 move(2,down,2,5)
152 move(2,down,1,3)
153 move(2,down,1,4)
154 move(2,down,1,5)
155 move(2,up,7,3)
156 move(2,up,7,4)
157 move(2,up,7,5)
158 move(2,up,6,3)
159 move(2,up,6,4)
160 move(2,up,6,5)
161 move(2,up,3,1)
162 move(2,up,3,2)
163 move(2,up,3,3)
164 move(2,up,3,4)
165 move(2,up,3,5)
166 move(2,up,3,6)
167 move(2,up,3,7)
168 move(2,up,4,1)
169 move(2,up,4,2)
170 move(2,up,4,3)
171 move(2,up,4,4)
172 move(2,up,4,5)
173 move(2,up,4,6)
174 move(2,up,4,7)
175 move(2,up,5,1)
176 move(2,up,5,2)
177 move(2,up,5,3)
178 move(2,up,5,4)
179 move(2,up,5,5)
180 move(2,up,5,6)
181 move(2,up,5,7)
182 move(2,up,2,3)
183 move(2,up,2,4)
184 move(2,up,2,5)
185 move(2,up,1,3)
186 move(2,up,1,4)
187 move(2,up,1,5)
191 move(1,right,7,3)
192 move(1,right,7,4)
193 move(1,right,7,5)
194 move(1,right,6,3)
195 move(1,right,6,4)
196 move(1,right,6,5)
197 move(1,right,3,1)
198 move(1,right,3,2)
199 move(1,right,3,3)
200 move(1,right,3,4)
201 move(1,right,3,5)
202 move(1,right,3,6)
203 move(1,right,3,7)
204 move(1,right,4,1)
205 move(1,right,4,2)
206 move(1,right,4,3)
207 move(1,right,4,4)
208 move(1,right,4,5)
209 move(1,right,4,6)
210 move(1,right,4,7)
211 move(1,right,5,1)
212 move(1,right,5,2)
213 move(1,right,5,3)
214 move(1,right,5,4)
215 move(1,right,5,5)
216 move(1,right,5,6)
217 move(1,right,5,7)
218 move(1,right,2,3)
219 move(1,right,2,4)
220 move(1,right,2,5)
221 move(1,right,1,3)
222 move(1,right,1,4)
223 move(1,right,1,5)
224 move(1,left,7,3)
225 move(1,left,7,4)
226 move(1,left,7,5)
227 move(1,left,6,3)
228 move(1,left,6,4)
229 move(1,left,6,5)
230 move(1,left,3,1)
231 move(1,left,3,2)
232 move(1,left,3,3)
233 move(1,left,3,4)
234 move(1,left,3,5)
235 move(1,left,3,6)
236 move(1,left,3,7)
237 move(1,left,4,1)
238 move(1,left,4,2)
239 move(1,left,4,3)
240 move(1,left,4,4)
241 move(1,left,4,5)
242 move(1,left,4,6)
243 move(1,left,4,7)
244 move(1,left,5,1)
245 move(1,left,5,2)
246 move(1,left,5,3)
247 move(1,left,5,4)
248 move(1,left,5,5)
249 move(1,left,5,6)
250 move(1,left,5,7)
251 move(1,left,2,3)
252 move(1,left,2,4)
253 move(1,left,2,5)
254 move(1,left,1,3)
255 move(1,left,1,4)
256 move(1,left,1,5)
257 move(1,down,7,3)
258 move(1,down,7,4)
259 move(1,down,7,5)
260 move(1,down,6,3)
261 move(1,down,6,4)
262 move(1,down,6,5)
263 move(1,down,3,1)
264 move(1,down,3,2)
265 move(1,down,3,3)
266 move(1,down,3,4)
267 move(1,down,3,5)
268 move(1,down,3,6)
269 move(1,down,3,7)
270 move(1,down,4,1)
271 move(1,down,4,2)
272 move(1,down,4,3)
273 move(1,down,4,4)
274 move(1,down,4,5)
275 move(1,down,4,6)
276 move(1,down,4,7)
277 move(1,down,5,1)
278 move(1,down,5,2)
279 move(1,down,5,3)
280 move(1,down,5,4)
281 move(1,down,5,5)
282 move(1,down,5,6)
283 move(1,down,5,7)
284 move(1,down,2,3)
285 move(1,down,2,4)
286 move(1,down,2,5)
287 move(1,down,1,3)
288 move(1,down,1,4)
289 move(1,down,1,5)
290 move(1,up,7,3)
291 move(1,up,7,4)
292 move(1,up,7,5)
293 move(1,up,6,3)
294 move(1,up,6,4)
295 move(1,up,6,5)
296 move(1,up,3,1)
297 move(1,up,3,2)
298 move(1,up,3,3)
299 move(1,up,3,4)
300 move(1,up,3,5)
301 move(1,up,3,6)
302 move(1,up,3,7)
303 move(1,up,4,1)
304 move(1,up,4,2)
305 move(1,up,4,3)
306 move(1,up,4,4)
307 move(1,up,4,5)
308 move(1,up,4,6)
309 move(1,up,4,7)
310 move(1,up,5,1)
311 move(1,up,5,2)
312 move(1,up,5,3)
313 move(1,up,5,4)
314 move(1,up,5,5)
315 move(1,up,5,6)
316 move(1,up,5,7)
317 move(1,up,2,3)
318 move(1,up,2,4)
319 move(1,up,2,5)
320 move(1,up,1,3)
321 move(1,up,1,4)
322 move(1,up,1,5)
3 direction(up)
4 direction(down)
5 direction(left)
6 direction(right)
9 full(4,2)
10 full(3,3)
11 full(4,3)
12 full(2,4)
13 full(3,4)
326 state(3,empty,1,5)
327 state(3,empty,1,4)
328 state(3,empty,1,3)
329 state(3,empty,2,5)
330 state(3,empty,2,4)
331 state(3,empty,2,3)
332 state(3,empty,5,7)
333 state(3,empty,5,6)
334 state(3,empty,5,5)
335 state(3,empty,5,4)
336 state(3,empty,5,3)
337 state(3,empty,5,2)
338 state(3,empty,5,1)
339 state(3,empty,4,7)
340 state(3,empty,4,6)
341 state(3,empty,4,5)
342 state(3,empty,4,4)
343 state(3,empty,4,3)
344 state(3,empty,4,2)
345 state(3,empty,4,1)
346 state(3,empty,3,7)
347 state(3,empty,3,6)
348 state(3,empty,3,5)
349 state(3,empty,3,4)
350 state(3,empty,3,3)
351 state(3,empty,3,2)
352 state(3,empty,3,1)
353 state(3,empty,6,5)
354 state(3,empty,6,4)
355 state(3,empty,6,3)
356 state(3,empty,7,5)
357 state(3,empty,7,4)
358 state(3,empty,7,3)
359 state(2,empty,1,5)
360 state(2,empty,1,4)
361 state(2,empty,1,3)
362 state(2,empty,2,5)
363 state(2,empty,2,4)
364 state(2,empty,2,3)
365 state(2,empty,5,7)
366 state(2,empty,5,6)
367 state(2,empty,5,5)
368 state(2,empty,5,4)
369 state(2,empty,5,3)
370 state(2,empty,5,2)
371 state(2,empty,5,1)
372 state(2,empty,4,7)
373 state(2,empty,4,6)
374 state(2,empty,4,5)
375 state(2,empty,4,4)
376 state(2,empty,4,3)
377 state(2,empty,4,2)
378 state(2,empty,4,1)
379 state(2,empty,3,7)
380 state(2,empty,3,6)
381 state(2,empty,3,5)
382 state(2,empty,3,4)
383 state(2,empty,3,3)
384 state(2,empty,3,2)
385 state(2,empty,3,1)
386 state(2,empty,6,5)
387 state(2,empty,6,4)
388 state(2,empty,6,3)
389 state(2,empty,7,5)
390 state(2,empty,7,4)
391 state(2,empty,7,3)
392 state(3,full,1,3)
393 state(3,full,2,3)
394 state(3,full,5,5)
395 state(3,full,5,4)
396 state(3,full,5,3)
397 state(3,full,5,2)
398 state(3,full,5,1)
399 state(3,full,4,5)
400 state(3,full,4,4)
401 state(3,full,4,3)
402 state(3,full,4,2)
403 state(3,full,4,1)
404 state(3,full,3,5)
405 state(3,full,3,4)
406 state(3,full,3,3)
407 state(3,full,3,2)
408 state(3,full,3,1)
409 state(3,full,6,3)
410 state(3,full,7,3)
411 state(2,full,1,3)
412 state(2,full,2,3)
413 state(2,full,5,5)
414 state(2,full,5,4)
415 state(2,full,5,3)
416 state(2,full,5,2)
417 state(2,full,5,1)
418 state(2,full,4,5)
419 state(2,full,4,4)
420 state(2,full,4,3)
421 state(2,full,4,2)
422 state(2,full,4,1)
423 state(2,full,3,5)
424 state(2,full,3,4)
425 state(2,full,3,3)
426 state(2,full,3,2)
427 state(2,full,3,1)
428 state(2,full,6,3)
429 state(2,full,7,3)
430 state(3,full,1,5)
431 state(3,full,2,5)
432 state(3,full,5,7)
433 state(3,full,5,6)
434 state(3,full,4,7)
435 state(3,full,4,6)
436 state(3,full,3,7)
437 state(3,full,3,6)
438 state(3,full,6,5)
439 state(3,full,7,5)
440 state(2,full,1,5)
441 state(2,full,2,5)
442 state(2,full,5,7)
443 state(2,full,5,6)
444 state(2,full,4,7)
445 state(2,full,4,6)
446 state(2,full,3,7)
447 state(2,full,3,6)
448 state(2,full,6,5)
449 state(2,full,7,5)
450 state(3,full,2,4)
451 state(3,full,1,4)
452 state(2,full,2,4)
453 state(2,full,1,4)
454 state(3,full,7,4)
455 state(3,full,6,4)
456 state(2,full,7,4)
457 state(2,full,6,4)
524 state(1,full,3,4)
525 state(1,full,2,4)
526 state(1,full,4,3)
527 state(1,full,3,3)
528 state(1,full,4,2)
529 state(1,empty,4,4)
7 status(full)
8 status(empty)
458 changed(3,1,5)
459 changed(3,1,4)
460 changed(3,1,3)
461 changed(3,2,5)
462 changed(3,2,4)
463 changed(3,2,3)
464 changed(3,5,7)
465 changed(3,5,6)
466 changed(3,5,5)
467 changed(3,5,4)
468 changed(3,5,3)
469 changed(3,5,2)
470 changed(3,5,1)
471 changed(3,4,7)
472 changed(3,4,6)
473 changed(3,4,5)
474 changed(3,4,4)
475 changed(3,4,3)
476 changed(3,4,2)
477 changed(3,4,1)
478 changed(3,3,7)
479 changed(3,3,6)
480 changed(3,3,5)
481 changed(3,3,4)
482 changed(3,3,3)
483 changed(3,3,2)
484 changed(3,3,1)
485 changed(3,6,5)
486 changed(3,6,4)
487 changed(3,6,3)
488 changed(3,7,5)
489 changed(3,7,4)
490 changed(3,7,3)
491 changed(2,1,5)
492 changed(2,1,4)
493 changed(2,1,3)
494 changed(2,2,5)
495 changed(2,2,4)
496 changed(2,2,3)
497 changed(2,5,7)
498 changed(2,5,6)
499 changed(2,5,5)
500 changed(2,5,4)
501 changed(2,5,3)
502 changed(2,5,2)
503 changed(2,5,1)
504 changed(2,4,7)
505 changed(2,4,6)
506 changed(2,4,5)
507 changed(2,4,4)
508 changed(2,4,3)
509 changed(2,4,2)
510 changed(2,4,1)
511 changed(2,3,7)
512 changed(2,3,6)
513 changed(2,3,5)
514 changed(2,3,4)
515 changed(2,3,3)
516 changed(2,3,2)
517 changed(2,3,1)
518 changed(2,6,5)
519 changed(2,6,4)
520 changed(2,6,3)
521 changed(2,7,5)
522 changed(2,7,4)
523 changed(2,7,3)
54 time(1)
55 time(2)
14 empty(4,4)
530 possibleMove(2,up,7,5)
531 possibleMove(2,up,6,5)
532 possibleMove(2,up,3,3)
533 possibleMove(2,up,3,4)
534 possibleMove(2,up,3,5)
535 possibleMove(2,up,3,6)
536 possibleMove(2,up,3,7)
537 possibleMove(2,up,4,3)
538 possibleMove(2,up,4,4)
539 possibleMove(2,up,4,5)
540 possibleMove(2,up,4,6)
541 possibleMove(2,up,4,7)
542 possibleMove(2,up,5,3)
543 possibleMove(2,up,5,4)
544 possibleMove(2,up,5,5)
545 possibleMove(2,up,5,6)
546 possibleMove(2,up,5,7)
547 possibleMove(2,up,2,5)
548 possibleMove(2,up,1,5)
549 possibleMove(2,down,7,3)
550 possibleMove(2,down,6,3)
551 possibleMove(2,down,3,1)
552 possibleMove(2,down,3,2)
553 possibleMove(2,down,3,3)
554 possibleMove(2,down,3,4)
555 possibleMove(2,down,3,5)
556 possibleMove(2,down,4,1)
557 possibleMove(2,down,4,2)
558 possibleMove(2,down,4,3)
559 possibleMove(2,down,4,4)
560 possibleMove(2,down,4,5)
561 possibleMove(2,down,5,1)
562 possibleMove(2,down,5,2)
563 possibleMove(2,down,5,3)
564 possibleMove(2,down,5,4)
565 possibleMove(2,down,5,5)
566 possibleMove(2,down,2,3)
567 possibleMove(2,down,1,3)
568 possibleMove(1,down,4,2)
569 possibleMove(2,left,5,1)
570 possibleMove(2,left,5,2)
571 possibleMove(2,left,5,3)
572 possibleMove(2,left,5,4)
573 possibleMove(2,left,5,5)
574 possibleMove(2,left,5,6)
575 possibleMove(2,left,5,7)
576 possibleMove(2,left,6,3)
577 possibleMove(2,left,6,4)
578 possibleMove(2,left,6,5)
579 possibleMove(2,left,7,3)
580 possibleMove(2,left,7,4)
581 possibleMove(2,left,7,5)
582 possibleMove(2,left,4,3)
583 possibleMove(2,left,4,4)
584 possibleMove(2,left,4,5)
585 possibleMove(2,left,3,3)
586 possibleMove(2,left,3,4)
587 possibleMove(2,left,3,5)
588 possibleMove(2,right,5,3)
589 possibleMove(2,right,5,4)
590 possibleMove(2,right,5,5)
591 possibleMove(2,right,4,3)
592 possibleMove(2,right,4,4)
593 possibleMove(2,right,4,5)
594 possibleMove(2,right,1,3)
595 possibleMove(2,right,1,4)
596 possibleMove(2,right,1,5)
597 possibleMove(2,right,2,3)
598 possibleMove(2,right,2,4)
599 possibleMove(2,right,2,5)
600 possibleMove(2,right,3,1)
601 possibleMove(2,right,3,2)
602 possibleMove(2,right,3,3)
603 possibleMove(2,right,3,4)
604 possibleMove(2,right,3,5)
605 possibleMove(2,right,3,6)
606 possibleMove(2,right,3,7)
607 possibleMove(1,right,2,4)
21 location(1,5)
22 location(1,4)
23 location(1,3)
24 location(2,5)
25 location(2,4)
26 location(2,3)
27 location(5,7)
28 location(5,6)
29 location(5,5)
30 location(5,4)
31 location(5,3)
32 location(5,2)
33 location(5,1)
34 location(4,7)
35 location(4,6)
36 location(4,5)
37 location(4,4)
38 location(4,3)
39 location(4,2)
40 location(4,1)
41 location(3,7)
42 location(3,6)
43 location(3,5)
44 location(3,4)
45 location(3,3)
46 location(3,2)
47 location(3,1)
48 location(6,5)
49 location(6,4)
50 location(6,3)
51 location(7,5)
52 location(7,4)
53 location(7,3)
0
B+
0
B-
1
0
1
"""
output = """
"""
| Yarrick13/hwasp | tests/asp/AllAnswerSets/aggregates/solitaire15.test.py | Python | apache-2.0 | 40,300 |
import sys
import socket
import fcntl
import struct
import random
import os
import shutil
import subprocess
import time
import csv
import ipaddress
# Run `python3 -m unittest discover` in this dir to execute tests
# fstab options for NFS entries: hard mount over TCP with a 30-minute retry
# window so boot-time mounts survive storage that comes up after the VM.
default_mount_options_nfs = "nfs hard,nointr,proto=tcp,mountproto=tcp,retry=30 0 0"
# CIFS/SMB 3.0 options for Azure Files: world-accessible modes, stable inode
# numbers (serverino), nofail so boot continues if the share is unreachable.
# NOTE(review): uid/gid 1001 assumes the first non-system user — confirm.
default_mount_options_cifs = "dir_mode=0777,file_mode=0777,serverino,nofail,uid=1001,gid=1001,vers=3.0"
def get_ip_address():
    """Return this host's outbound IPv4 address, or "127.0.0.1" on failure.

    "Connecting" a UDP socket sends no traffic; it only makes the kernel
    choose the local interface/address that would be used to reach the
    target, which is then read back with getsockname().
    """
    with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as s:
        try:
            # doesn't even have to be reachable
            s.connect(("10.255.255.255", 1))
            return s.getsockname()[0]
        except Exception:
            # BUG FIX: was a bare `except:`, which also swallowed
            # SystemExit/KeyboardInterrupt. `Exception` keeps the
            # loopback fallback without masking interpreter signals.
            return "127.0.0.1"
def ip_as_int(ip):
    """Return the numeric (integer) form of the IP address string *ip*."""
    parsed = ipaddress.ip_address(ip)
    return int(parsed)
def remove_lines_containing(file, contains):
    """Rewrite *file* in place, dropping lines that contain *contains*.

    Lines consisting solely of a newline are also dropped, which keeps
    repeated runs from accumulating blank lines in /etc/fstab.

    Args:
        file: path of the file to filter.
        contains: substring; any line containing it is removed.
    """
    # FIX: the parameter `file` previously shadowed the builtin and was
    # rebound to the open file object mid-function; use a distinct handle
    # name instead. Behavior is unchanged.
    with open(file, "r+") as fh:
        lines = fh.readlines()
        fh.seek(0)
        for line in lines:
            if contains not in line and line != "\n":
                fh.write(line)
        fh.truncate()  # discard any leftover tail from the longer original
def print_help():
    """Print command-line usage examples for both supported mount types."""
    examples = (
        "For example 'sudo python mount.py nfs '10.20.0.1:/folder1/nfsfolder2,10.20.0.1:/folder1/nfsfolder2'",
        "or 'sudo python mount.py azurefiles <storage-account-name>,<share-name>,<storage-account-key>'",
    )
    for line in examples:
        print(line)
def install_apt_package(package):
    """Install *package* with apt; re-raise CalledProcessError on failure."""
    print("Attempt to install {}".format(package))
    try:
        subprocess.check_call(["apt", "install", package, "-y"])
    except subprocess.CalledProcessError as e:
        print("Failed install {} error: {}".format(package, e))
        raise
    else:
        print("Install completed successfully")
# main allows the mount script to be executable from the command line
def main():
    """Validate the CLI arguments and hand off to mount_share."""
    if len(sys.argv) < 3:
        print("Expected arg1: 'mount_type' and arg2 'mount_data'")
        print_help()
        exit(1)
    mount_share(str(sys.argv[1]), str(sys.argv[2]))
# mount_share allows it to be invoked from other python scripts
def mount_share(mount_type, mount_data):
    """Write an fstab entry for the requested share type and mount it.

    Args:
        mount_type: "nfs" or "azurefiles" (case-insensitive).
        mount_data: for NFS, a CSV of `host:/export` entries; for Azure
            Files, `account_name,share_name,account_key`.

    Exits the process with a non-zero code on bad arguments or I/O errors.
    """
    if mount_type.lower() not in ("nfs", "azurefiles"):
        print("Expected first arg to be either 'nfs' or 'azurefiles'")
        print_help()
        exit(1)

    if mount_data == "":
        print(
            """Expected second arg to be the mounting data. For NFS, this should be a CSV of IPs/FQDNS for the NFS servers with NFSExported dirs.
For example, '10.20.0.1:/folder1/nfsfolder2,10.20.0.1:/folder1/nfsfolder2'
For azure files this should be the azure files connection details."""
        )
        print_help()
        exit(2)

    # BUG FIX: previously printed sys.argv[1]/sys.argv[2], which raises
    # IndexError when mount_share is invoked from another python script
    # (the very use case this function exists for). Use the parameters.
    print("Mounting type: {}".format(mount_type))
    print("Mounting data: {}".format(mount_data))

    mount_point_permissions = 0o0777  # Todo: What permissions does this really need?
    primary_mount_folder = "/media/primarynfs"
    secondary_mount_folder_prefix = "/media/secondarynfs"
    fstab_file_path = "/etc/fstab"

    try:
        # Create folder to mount to
        if not os.path.exists(primary_mount_folder):
            os.makedirs(primary_mount_folder)
        os.chmod(primary_mount_folder, mount_point_permissions)

        # Make a backup of the fstab config in case we go wrong
        shutil.copy(fstab_file_path, "/etc/fstab-mountscriptbackup")

        # Clear existing NFS mount data to make script idempotent
        remove_lines_containing(fstab_file_path, primary_mount_folder)
        remove_lines_containing(fstab_file_path, secondary_mount_folder_prefix)

        if mount_type.lower() == "azurefiles":
            mount_azurefiles(fstab_file_path, mount_data, primary_mount_folder)
        if mount_type.lower() == "nfs":
            mount_nfs(fstab_file_path, mount_data, primary_mount_folder, mount_point_permissions)
    except IOError as e:
        print("I/O error({0})".format(e))
        exit(1)
    except Exception:
        # BUG FIX: the original passed `str.format` without calling it,
        # printing a bound-method object instead of the exception info.
        print("Unexpected error:{0}".format(sys.exc_info()))
        raise

    print("Done editing fstab ... attempting mount")

    def mount_all():
        subprocess.check_call(["mount", "-a"])

    retryFunc("mount shares", mount_all, 100)
def retryFunc(desc, funcToRetry, maxRetries):
    """Call *funcToRetry* until it succeeds, with linearly growing back-off.

    Retrying handles the race where the VM exists before its storage, or a
    temporary issue with storage. Only subprocess.CalledProcessError counts
    as a retryable failure; any other exception propagates immediately.

    Args:
        desc: human-readable description used in log output.
        funcToRetry: zero-argument callable to attempt.
        maxRetries: maximum number of attempts before giving up.

    Exits the process with code 3 if every attempt fails.
    """
    print("Attempting, with retries, to: {}".format(desc))
    retryExponentialFactor = 3
    # BUG FIX: the original looped over range(1, maxRetries), so
    # `i == maxRetries` was never true and exhausting all retries fell out
    # of the loop SILENTLY instead of exiting. Now the final failure exits.
    for i in range(1, maxRetries + 1):
        try:
            print("Attempt #{}".format(str(i)))
            funcToRetry()
        except subprocess.CalledProcessError as e:
            print("Failed:{0}".format(e))
            if i == maxRetries:
                print("Failed after max retries")
                exit(3)
            retry_in = i * retryExponentialFactor
            print("retrying in {0}secs".format(retry_in))
            time.sleep(retry_in)
        else:
            print("Succeeded to: {0} after {1} retries".format(desc, i))
            break
def mount_nfs(fstab_file_path, mount_data, primary_mount_folder, mount_point_permissions):
    """Append fstab entries for a cluster of NFS servers.

    One server is picked deterministically (seeded by this VM's IP) as the
    primary mount; the rest become numbered secondary mounts. This spreads
    VMs across storage servers in a cluster like `Avere` or `Gluster` for
    higher throughput, while re-runs on the same VM pick the same primary.
    """
    # Other apt instances on the machine may be doing an install;
    # this means ours will fail, so we retry to ensure success.
    retryFunc("install nfs-common", lambda: install_apt_package("nfs-common"), 20)

    servers = mount_data.split(",")
    print("Found ips:{}".format(",".join(servers)))

    # Seed the RNG with this VM's IP so the "random" primary choice is
    # stable across re-runs of the script on the same machine.
    seed_value = ip_as_int(get_ip_address())
    print("Using ip as int: {0} for random seed".format((seed_value)))
    random.seed(seed_value)

    primary = servers[random.randint(0, len(servers) - 1)]
    servers.remove(primary)
    secondaries = servers
    print("Primary node selected: {}".format(primary))
    print("Secondary nodes selected: {}".format(",".join(secondaries)))

    with open(fstab_file_path, "a") as fstab:
        print("Mounting primary")
        fstab.write(
            "\n{} {} {}".format(
                primary.strip(), primary_mount_folder, default_mount_options_nfs
            )
        )
        print("Mounting secondarys")
        for index, server in enumerate(secondaries, start=1):
            folder = "/media/secondarynfs" + str(index)
            if not os.path.exists(folder):
                os.makedirs(folder)
            os.chmod(folder, mount_point_permissions)
            fstab.write(
                "\n{} {} {}".format(server.strip(), folder, default_mount_options_nfs)
            )
def mount_azurefiles(fstab_file_path, mount_data, primary_mount_folder):
    """Append a CIFS fstab entry for an Azure Files share.

    *mount_data* must be `account_name,share_name,account_key` as CSV;
    the process exits with code 1 otherwise.
    """
    # Other apt instances on the machine may be doing an install;
    # this means ours will fail, so we retry to ensure success.
    retryFunc("install cifs-utils", lambda: install_apt_package("cifs-utils"), 20)

    params = mount_data.split(",")
    if len(params) != 3:
        print("Wrong params for azure files mount, expected 3 as CSV")
        print_help()
        exit(1)

    account_name, share_name, account_key = params

    with open(fstab_file_path, "a") as fstab:
        print("Mounting primary")
        entry = "\n//{0}.file.core.windows.net/{1} {2} cifs username={0},password={3},{4}".format(
            account_name,
            share_name,
            primary_mount_folder,
            account_key,
            default_mount_options_cifs,
        )
        fstab.write(entry)
# Allow this module to be executed directly as a script.
if __name__ == "__main__":
    main()
| j-coll/opencga | opencga-app/app/cloud/azure/arm/scripts/mount.py | Python | apache-2.0 | 7,799 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import sys
import mxnet as mx
# Enable the MKL-DNN-specific quantization tests and disable subgraph
# optimization; set before importing the shared suite so the tests see them.
os.environ['ENABLE_MKLDNN_QUANTIZATION_TEST'] = '1'
os.environ['MXNET_SUBGRAPH_BACKEND'] = 'NONE'
# Make the sibling quantization suite importable and re-export its tests so
# the test runner discovers them in this module.
curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
sys.path.insert(0, os.path.join(curr_path, '../quantization'))
from test_quantization import *
if __name__ == '__main__':
    import nose
    nose.runmodule()
    # NOTE(review): these deletes are skipped if runmodule() raises;
    # a try/finally would guarantee cleanup — left unchanged here.
    del os.environ['ENABLE_MKLDNN_QUANTIZATION_TEST']
    del os.environ['MXNET_SUBGRAPH_BACKEND']
| reminisce/mxnet | tests/python/mkl/test_quantization_mkldnn.py | Python | apache-2.0 | 1,289 |